def migrate_volume_segment(self, req):
        """Migrate a volume segment between two ceph primary storages.

        Size handling before the actual migration:
          * dst > src: only tolerated for ceph -> ceph; xsky destinations
            require equal sizes, and xsky -> ceph is not supported at all.
          * dst < src: the destination volume is resized up to the source
            size first.
        Returns a JSON-dumped AgentResponse.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()
        src_install_path = self._normalize_install_path(cmd.srcInstallPath)
        dst_install_path = self._normalize_install_path(cmd.dstInstallPath)
        src_size = self._get_file_size(src_install_path)
        dst_size = self._get_dst_volume_size(dst_install_path, cmd.dstMonHostname, cmd.dstMonSshUsername, cmd.dstMonSshPassword, cmd.dstMonSshPort)
        if dst_size > src_size:
            if cmd.isXsky:
                # xsky / ceph -> xsky, size must be equal
                rsp.success = False
                rsp.error = "Failed to migrate volume segment because dst size: %s > src size: %s" % (dst_size, src_size)
                return jsonobject.dumps(rsp)
            elif not isXsky():
                # ceph -> ceph, don't check size
                rsp.success = True
            else:
                # xsky -> ceph, not supported
                rsp.success = False
                rsp.error = "Failed to migrate volume segment because xsky migrate to ceph is not supported now"
                return jsonobject.dumps(rsp)
        if dst_size < src_size:
            # grow the destination first so the segment copy fits
            ret = self._resize_dst_volume(dst_install_path, src_size, cmd.dstMonHostname, cmd.dstMonSshUsername, cmd.dstMonSshPassword, cmd.dstMonSshPort)
            if ret != 0:
                rsp.success = False
                rsp.error = "Failed to resize volume before migrate."
                return jsonobject.dumps(rsp)

        ret = self._migrate_volume_segment(cmd.parentUuid, cmd.resourceUuid, cmd.srcInstallPath, cmd.dstInstallPath, cmd.dstMonHostname, cmd.dstMonSshUsername, cmd.dstMonSshPassword, cmd.dstMonSshPort)
        if ret != 0:
            rsp.success = False
            rsp.error = "Failed to migrate volume segment from one ceph primary storage to another."
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
    def _add_zone(zone, zone_duplication):
        """Create a zone from its XML description and attach its backup storages.

        NOTE(review): relies on module-level names (session_uuid, exc_info,
        deploy_logger, ...) and appends any failure to exc_info instead of
        raising.  If CreateZoneAction fails, `zinv` is left unbound and the
        backupStorageRef loop below raises NameError -- presumably the driver
        inspects exc_info afterwards; confirm.
        """
        action = api_actions.CreateZoneAction()
        action.sessionUuid = session_uuid
        if zone_duplication == 0:
            # first copy: use name/description/uuid exactly as in the XML
            action.name = zone.name_
            action.description = zone.description__
            if zone.uuid__:
                action.resourceUuid = zone.uuid__
        else:
            # duplicated deployment: derive unique names from the dup index
            action.name = generate_dup_name(zone.name_, zone_duplication, 'z')
            action.description = generate_dup_name(zone.description__, zone_duplication, 'zone')

        try:
            evt = action.run()
            deploy_logger(jsonobject.dumps(evt))
            zinv = evt.inventory
        except:
            exc_info.append(sys.exc_info())

        if xmlobject.has_element(zone, 'backupStorageRef'):
            for ref in xmlobject.safe_list(zone.backupStorageRef):
                # resolve each referenced backup storage by name, then attach it
                bss = res_ops.get_resource(res_ops.BACKUP_STORAGE, session_uuid, name=ref.text_)
                bs = get_first_item_from_list(bss, 'Backup Storage', ref.text_, 'attach backup storage to zone')

                action = api_actions.AttachBackupStorageToZoneAction()
                action.sessionUuid = session_uuid
                action.backupStorageUuid = bs.uuid
                action.zoneUuid = zinv.uuid
                try:
                    evt = action.run()
                    deploy_logger(jsonobject.dumps(evt))
                except:
                    exc_info.append(sys.exc_info())
    def connect(self, req):
        """Handle the host-connect command from the management server.

        Applies host-level KVM settings (Intel EPT page table extension,
        ignore_msrs), records the host uuid and command URL, installs the
        USB udev rule handler, cleans stale VNC iptables chains and applies
        the iptables rules shipped with the command.
        Returns a JSON-dumped ConnectResponse.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = ConnectResponse()

        # page table extension (EPT) is only relevant on Intel CPUs
        if shell.run('lscpu | grep -q -w GenuineIntel') == 0:
            new_ept = not cmd.pageTableExtensionDisabled
            rsp.error = self._set_intel_ept(new_ept)
            if rsp.error is not None:
                rsp.success = False
                return jsonobject.dumps(rsp)

        self.host_uuid = cmd.hostUuid
        self.config[kvmagent.HOST_UUID] = self.host_uuid
        self.config[kvmagent.SEND_COMMAND_URL] = cmd.sendCommandUrl
        Report.serverUuid = self.host_uuid
        Report.url = cmd.sendCommandUrl
        logger.debug(http.path_msg(self.CONNECT_PATH, 'host[uuid: %s] connected' % cmd.hostUuid))
        rsp.libvirtVersion = self.libvirt_version
        rsp.qemuVersion = self.qemu_version

        # create udev rule
        self.handle_usb_device_events()

        ignore_msrs = 1 if cmd.ignoreMsrs else 0
        shell.run("/bin/echo %s > /sys/module/kvm/parameters/ignore_msrs" % ignore_msrs)

        vm_plugin.cleanup_stale_vnc_iptable_chains()
        apply_iptables_result = self.apply_iptables_rules(cmd.iptablesRules)
        rsp.iptablesSucc = apply_iptables_result
        return jsonobject.dumps(rsp)
    def setup_heartbeat_file(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = SetupMountablePrimaryStorageHeartbeatResponse()

        for hb in cmd.heartbeatFilePaths:
            hb_dir = os.path.dirname(hb)
            mount_path = os.path.dirname(hb_dir)
            if not linux.is_mounted(mount_path):
                rsp.error = '%s is not mounted, setup heartbeat file[%s] failed' % (mount_path, hb)
                rsp.success = False
                return jsonobject.dumps(rsp)

        for hb in cmd.heartbeatFilePaths:
            t = self.heartbeat_timer.get(hb, None)
            if t:
                t.cancel()

            hb_dir = os.path.dirname(hb)
            if not os.path.exists(hb_dir):
                os.makedirs(hb_dir, 0755)

            t = thread.timer(cmd.heartbeatInterval, self._heartbeat_func, args=[hb], stop_on_exception=False)
            t.start()
            self.heartbeat_timer[hb] = t
            logger.debug('create heartbeat file at[%s]' % hb)

        return jsonobject.dumps(rsp)
    def ping(self, req):
        """Ping the ceph cluster: verify the mon address is still in the mon
        map, then prove the cluster is writable by creating (and removing)
        a tiny test rbd image.  Returns a JSON-dumped PingRsp.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = PingRsp()

        mon_facts = jsonobject.loads(bash_o('ceph -s -f json'))
        matched = any(cmd.monAddr in entry.addr for entry in mon_facts.monmap.mons)

        if not matched:
            rsp.success = False
            rsp.failure = "MonAddrChanged"
            rsp.error = 'The mon addr is changed on the mon server[uuid:%s], not %s anymore.' \
                        'Reconnect the ceph primary storage' \
                        ' may solve this issue' % (cmd.monUuid, cmd.monAddr)
            return jsonobject.dumps(rsp)

        creator = shell.ShellCmd('rbd create %s --image-format 2 --size 1' % cmd.testImagePath)
        creator(False)
        if creator.return_code == 0:
            # best effort clean-up of the probe image; result ignored
            remover = shell.ShellCmd('rbd rm %s' % cmd.testImagePath)
            remover(False)
        else:
            rsp.success = False
            rsp.failure = 'UnableToCreateFile'
            rsp.error = "%s %s" % (creator.stderr, creator.stdout)

        return jsonobject.dumps(rsp)
    def scan_host(self, req):
        """Probe an IP with repeated nmap ping scans to classify reachability.

        First round: cmd.times probes, cmd.interval apart.  All hits ->
        SUCCESS, no hits -> FAILURE.  A partial score triggers a second,
        stricter round of cmd.successTimes probes which must all hit,
        otherwise the connection is reported NOT_STABLE.
        Returns a JSON-dumped ScanRsp.
        """
        rsp = ScanRsp()

        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        def count_hits(times, interval):
            # run `times` ping scans, `interval` seconds apart; return hit count
            hits = 0
            for _ in range(0, times):
                if shell.run("nmap -sP -PI %s | grep 'Host is up'" % cmd.ip) == 0:
                    hits += 1
                time.sleep(interval)
            return hits

        success = count_hits(cmd.times, cmd.interval)
        if success == cmd.successTimes:
            rsp.result = self.RET_SUCCESS
            return jsonobject.dumps(rsp)

        if success == 0:
            rsp.result = self.RET_FAILURE
            return jsonobject.dumps(rsp)

        # WE SUCCEED A FEW TIMES, IT SEEMS THE CONNECTION NOT STABLE
        if count_hits(cmd.successTimes, cmd.successInterval) == cmd.successTimes:
            rsp.result = self.RET_SUCCESS
            return jsonobject.dumps(rsp)

        rsp.result = self.RET_NOT_STABLE
        return jsonobject.dumps(rsp)
    def ping(self, req):
        """Ping the ceph cluster: verify the mon address is still in the mon
        map, then prove the pool is writable by putting (and removing) a
        tiny rados object.  Returns a JSON-dumped PingRsp.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = PingRsp()

        mon_facts = jsonobject.loads(bash_o('ceph -s -f json'))
        matched = any(cmd.monAddr in entry.addr for entry in mon_facts.monmap.mons)

        if not matched:
            rsp.success = False
            rsp.failure = "MonAddrChanged"
            rsp.error = 'The mon addr is changed on the mon server[uuid:%s], not %s anymore.' \
                        'Reconnect the ceph primary storage' \
                        ' may solve this issue' % (cmd.monUuid, cmd.monAddr)
            return jsonobject.dumps(rsp)

        # testImagePath is expected to be "<pool>/<object-name>"
        pool, objname = cmd.testImagePath.split('/')

        writer = shell.ShellCmd("echo zstack | rados -p '%s' put '%s' -" % (pool, objname))
        writer(False)
        if writer.return_code == 0:
            # best effort clean-up of the probe object; result ignored
            shell.run("rados -p '%s' rm '%s'" % (pool, objname))
        else:
            rsp.success = False
            rsp.failure = 'UnableToCreateFile'
            rsp.error = "%s %s" % (writer.stderr, writer.stdout)

        return jsonobject.dumps(rsp)
    def set_bootstrap_info(self, req):
        """Send appliance VM bootstrap info over its unix domain socket.

        The socket send buffer is shrunk and the JSON payload is padded to
        the buffer size so the kernel flushes it immediately (no fflush in
        python).  Returns a JSON-dumped PrepareBootstrapInfoRsp.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        info = jsonobject.dumps(cmd.info, True)
        socket_path = cmd.socketPath

        sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF, 1)
        buf_size = sock.getsockopt(socket.SOL_SOCKET, socket.SO_SNDBUF)
        # as there is no fflush() in python, we have to create a message
        # matching to the socket buffer to force it to send the message immediately
        if len(info) < buf_size:
            info = '%s%s' % (info, ' ' * (buf_size - len(info)))

        try:
            logger.debug('send appliance vm bootstrap info to %s\n%s' % (socket_path, info))
            sock.connect(socket_path)
            sock.sendall(info)
        finally:
            sock.close()

        rsp = PrepareBootstrapInfoRsp()
        return jsonobject.dumps(rsp)
 def create_root_volume_from_template(self, req):
     cmd = jsonobject.loads(req[http.REQUEST_BODY])
     rsp = CreateRootVolumeFromTemplateResponse()
     if not os.path.exists(cmd.templatePathInCache):
         rsp.error = self.ERR_UNABLE_TO_FIND_IMAGE_IN_CACHE
         rsp.success = False
         return jsonobject.dumps(rsp)
         
     try:
         dirname = os.path.dirname(cmd.installUrl)
         if not os.path.exists(dirname):
             os.makedirs(dirname, 0775)
             
         linux.qcow2_clone(cmd.templatePathInCache, cmd.installUrl)
         logger.debug('successfully create root volume[%s] from template in cache[%s]' % (cmd.installUrl, cmd.templatePathInCache))
         meta = VolumeMeta()
         meta.account_uuid = cmd.accountUuid
         meta.hypervisor_type = cmd.hypervisorType
         meta.name = cmd.name
         meta.uuid = cmd.volumeUuid
         meta.size = os.path.getsize(cmd.templatePathInCache)
         meta_path = self._json_meta_file_name(cmd.installUrl)
         with open(meta_path, 'w') as fd:
             fd.write(jsonobject.dumps(meta, pretty=True))
         self._set_capacity_to_response(cmd.uuid, rsp)
         logger.debug('successfully create root volume[%s] from template in cache[%s]' % (cmd.installUrl, cmd.templatePathInCache))
     except Exception as e:
         content = traceback.format_exc()
         logger.warn(content)
         err = 'unable to clone qcow2 template[%s] to %s' % (cmd.templatePathInCache, cmd.installUrl)
         rsp.error = err
         rsp.success = False
         
     return jsonobject.dumps(rsp)
    def download_image(self, req):
        #TODO: report percentage to mgmt server
        def percentage_callback(percent, url):
            logger.debug('Downloading %s ... %s%%' % (url, percent))

        def use_wget(url, name, workdir, timeout):
            return linux.wget(url, workdir=workdir, rename=name, timeout=timeout, interval=2, callback=percentage_callback, callback_data=url)

        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = DownloadResponse()
        supported_schemes = [self.URL_HTTP, self.URL_HTTPS, self.URL_FILE]
        if cmd.urlScheme not in supported_schemes:
            rsp.success = False
            rsp.error = 'unsupported url scheme[%s], SimpleSftpBackupStorage only supports %s' % (cmd.urlScheme, supported_schemes)
            return jsonobject.dumps(rsp)

        path = os.path.dirname(cmd.installPath)
        if not os.path.exists(path):
            os.makedirs(path, 0777)
        image_name = os.path.basename(cmd.installPath)
        install_path = cmd.installPath

        timeout = cmd.timeout if cmd.timeout else 7200
        if cmd.urlScheme in [self.URL_HTTP, self.URL_HTTPS]:
            try:
                ret = use_wget(cmd.url, image_name, path, timeout)
                if ret != 0:
                    rsp.success = False
                    rsp.error = 'http/https download failed, [wget -O %s %s] returns value %s' % (image_name, cmd.url, ret)
                    return jsonobject.dumps(rsp)
            except linux.LinuxError as e:
                traceback.format_exc()
                rsp.success = False
                rsp.error = str(e)
                return jsonobject.dumps(rsp)
        elif cmd.urlScheme == self.URL_FILE:
            src_path = cmd.url.lstrip('file:')
            src_path = os.path.normpath(src_path)
            if not os.path.isfile(src_path):
                raise Exception('cannot find the file[%s]' % src_path)
            logger.debug("src_path is: %s" % src_path)
            shell.call('yes | cp %s %s' % (src_path, install_path))



        os.chmod(cmd.installPath, stat.S_IRUSR + stat.S_IRGRP + stat.S_IROTH)


        image_format =  bash_o("qemu-img info %s | grep -w '^file format' | awk '{print $3}'" % install_path).strip('\n')
        size = os.path.getsize(install_path)
        md5sum = 'not calculated'
        logger.debug('successfully downloaded %s to %s' % (cmd.url, install_path))
        (total, avail) = self.get_capacity()
        rsp.md5Sum = md5sum
        rsp.actualSize = size
        rsp.size = linux.qcow2_virtualsize(install_path)
        rsp.totalCapacity = total
        rsp.availableCapacity = avail
        rsp.format = image_format
        return jsonobject.dumps(rsp)
    def create_empty_volume(self, req):
        """Create an empty qcow2 volume and write its JSON meta file.

        Creation failures are reported through the response; on success the
        response also carries the storage capacity.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = CreateEmptyVolumeResponse()
        try:
            parent_dir = os.path.dirname(cmd.installUrl)
            if not os.path.exists(parent_dir):
                os.makedirs(parent_dir)

            linux.qcow2_create(cmd.installUrl, cmd.size)
        except Exception as e:
            logger.warn(linux.get_exception_stacktrace())
            rsp.success = False
            rsp.error = 'unable to create empty volume[uuid:%s, name:%s], %s' % (cmd.uuid, cmd.name, str(e))
            return jsonobject.dumps(rsp)

        # record volume metadata alongside the image
        meta = VolumeMeta()
        meta.account_uuid = cmd.accountUuid
        meta.hypervisor_type = cmd.hypervisorType
        meta.name = cmd.name
        meta.uuid = cmd.volumeUuid
        meta.size = cmd.size
        with open(self._json_meta_file_name(cmd.installUrl), 'w') as meta_fd:
            meta_fd.write(jsonobject.dumps(meta, pretty=True))

        self._set_capacity_to_response(cmd.uuid, rsp)
        logger.debug('successfully create empty volume[uuid:%s, name:%s, size:%s] at %s' % (cmd.uuid, cmd.name, cmd.size, cmd.installUrl))
        return jsonobject.dumps(rsp)
# Exemple #12
 def remove_vip(self, req):
     """Delete every VIP listed in the command, ignoring missing ones."""
     cmd = jsonobject.loads(req[http.REQUEST_BODY])
     for vip in cmd.vips:
         linux.delete_vip_by_ip_if_exists(vip.ip)
         logger.debug('removed vip %s' % jsonobject.dumps(vip))

     return jsonobject.dumps(RemoveVipRsp())
# Exemple #13
 def create_vip(self, req):
     """Create every VIP listed in the command, skipping existing ones."""
     cmd = jsonobject.loads(req[http.REQUEST_BODY])
     for vip in cmd.vips:
         linux.create_vip_if_not_exists(vip.ownerEthernetMac, vip.ip, vip.netmask)
         logger.debug('created vip %s' % jsonobject.dumps(vip))

     return jsonobject.dumps(CreateVipRsp())
 def callback(self, req):
     rsp = jsonobject.loads(req[http.REQUEST_BODY])
     print jsonobject.dumps(rsp)
     
     cmd = vm_plugin.RebootVmCmd()
     cmd.uuid = self.uuid
     cmd.timeout = 30
     url = kvmagent._build_url_for_test([vm_plugin.KVM_REBOOT_VM_PATH])
     rsp = http.json_dump_post(url, cmd, headers={http.TASK_UUID:uuidhelper.uuid(), http.CALLBACK_URI:self.CALLBACK_URL2})
# Exemple #15
    def ping(self, req):
        """Ping the ceph mon and verify the cluster accepts writes.

        Fails fast with failure=MonAddrChanged when the configured mon
        address is no longer in the mon map; otherwise writes a small
        heartbeat object (with retries) to prove the pool is writable.
        Errors are accumulated on the shared rsp closure.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        facts = bash_o('ceph -s -f json')
        mon_facts = jsonobject.loads(facts)
        found = False
        for mon in mon_facts.monmap.mons:
            if cmd.monAddr in mon.addr:
                found = True
                break

        rsp = PingRsp()

        if not found:
            rsp.success = False
            rsp.failure = "MonAddrChanged"
            rsp.error = 'The mon addr is changed on the mon server[uuid:%s], not %s anymore.' \
                        'Reconnect the ceph primary storage' \
                        ' may solve this issue' % (cmd.monUuid, cmd.monAddr)
            return jsonobject.dumps(rsp)

        def retry(times=3, sleep_time=3):
            # Retry decorator: on total failure it records the last error on
            # the enclosing rsp instead of raising.
            # NOTE(review): referencing `e` after the loop relies on Python 2
            # leaking the except-variable out of the handler; this would be a
            # NameError on Python 3 -- confirm the runtime is py2 only.
            def wrap(f):
                @functools.wraps(f)
                def inner(*args, **kwargs):
                    for i in range(0, times):
                        try:
                            return f(*args, **kwargs)
                        except Exception as e:
                            logger.error(e)
                            time.sleep(sleep_time)
                    rsp.error = ("Still failed after retry. Below is detail:\n %s" % e)

                return inner

            return wrap

        @retry()
        def doPing():
            # try to delete test file, ignore the result
            pool, objname = cmd.testImagePath.split('/')
            bash_r("rados -p '%s' rm '%s'" % (pool, objname))
            r, o, e = bash_roe("echo zstack | timeout 60 rados -p '%s' put '%s' -" % (pool, objname))
            if r != 0:
                rsp.success = False
                rsp.failure = "UnableToCreateFile"
                if r == 124:
                    # timeout happened
                    rsp.error = 'failed to create heartbeat object on ceph, timeout after 60s, %s %s' % (e, o)
                    raise Exception(rsp.error)
                else:
                    rsp.error = "%s %s" % (e, o)

        doPing()

        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
    def _add_cluster(action, zone_ref, cluster, cluster_ref):
        """Run a create-cluster action, then attach its primary storages and
        L2 networks according to the cluster's XML description.

        Relies on module-level session_uuid / exc_info / deploy_logger.
        """
        evt = action.run()
        deploy_logger(jsonobject.dumps(evt))
        cinv = evt.inventory

        try:
            # attach every referenced primary storage to the new cluster
            if xmlobject.has_element(cluster, 'primaryStorageRef'):
                for pref in xmlobject.safe_list(cluster.primaryStorageRef):
                    # names are decorated per zone/cluster duplication index
                    ps_name = generate_dup_name(generate_dup_name(pref.text_, zone_ref, 'z'), cluster_ref, 'c')

                    pinvs = res_ops.get_resource(res_ops.PRIMARY_STORAGE, session_uuid, name=ps_name)
                    pinv = get_first_item_from_list(pinvs, 'Primary Storage', ps_name, 'Cluster')

                    action_ps = api_actions.AttachPrimaryStorageToClusterAction()
                    action_ps.sessionUuid = session_uuid
                    action_ps.clusterUuid = cinv.uuid
                    action_ps.primaryStorageUuid = pinv.uuid
                    evt = action_ps.run()
                    deploy_logger(jsonobject.dumps(evt))
        except:
            exc_info.append(sys.exc_info())

        if cluster.allL2NetworkRef__ == 'true':
            # find all L2 network in zone and attach to cluster
            cond = res_ops.gen_query_conditions('zoneUuid', '=',
                                                action.zoneUuid)
            l2_count = res_ops.query_resource_count(res_ops.L2_NETWORK,
                                                    cond, session_uuid)
            l2invs = res_ops.query_resource_fields(res_ops.L2_NETWORK,
                                                   [{'name': 'zoneUuid', 'op': '=', 'value': action.zoneUuid}],
                                                   session_uuid, ['uuid'], 0, l2_count)
        else:
            # only the explicitly referenced L2 networks
            l2invs = []
            if xmlobject.has_element(cluster, 'l2NetworkRef'):
                for l2ref in xmlobject.safe_list(cluster.l2NetworkRef):
                    l2_name = generate_dup_name(generate_dup_name(l2ref.text_, zone_ref, 'z'), cluster_ref, 'c')

                    cond = res_ops.gen_query_conditions('zoneUuid', '=',
                                                        action.zoneUuid)
                    cond = res_ops.gen_query_conditions('name', '=', l2_name,
                                                        cond)

                    l2inv = res_ops.query_resource_fields(res_ops.L2_NETWORK,
                                                          cond, session_uuid, ['uuid'])
                    if not l2inv:
                        raise DeployError("Can't find l2 network [%s] in database." % l2_name)
                    l2invs.extend(l2inv)

        for l2inv in l2invs:
            # NOTE(review): `action` is rebound here, shadowing the parameter,
            # and wait_for_thread_queue() runs before thread.start() --
            # presumably it throttles the number of in-flight threads; confirm.
            action = api_actions.AttachL2NetworkToClusterAction()
            action.sessionUuid = session_uuid
            action.clusterUuid = cinv.uuid
            action.l2NetworkUuid = l2inv.uuid
            thread = threading.Thread(target=_thread_for_action, args=(action,))
            wait_for_thread_queue()
            thread.start()
    def establish_new_proxy(self, req):
        """Start a websockify console proxy for a VM console.

        Allocates a free local port, writes the token file mapping the
        console token to target host:port, launches websockify against it,
        opens the port in iptables and records the proxy info in the DB
        keyed by token.  Returns a JSON-dumped EstablishProxyRsp carrying
        the allocated proxy port.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = EstablishProxyRsp()

        def check_parameters():
            if not cmd.targetHostname:
                raise ConsoleProxyError('targetHostname cannot be null')
            if not cmd.targetPort:
                raise ConsoleProxyError('targetPort cannot be null')
            if not cmd.token:
                raise ConsoleProxyError('token cannot be null')
            if not cmd.proxyHostname:
                raise ConsoleProxyError('proxyHostname cannot be null')

        try:
            check_parameters()
        except ConsoleProxyError as e:
            err = linux.get_exception_stacktrace()
            logger.warn(err)
            rsp.error = str(e)
            rsp.success = False
            return jsonobject.dumps(rsp)

        proxyPort = linux.get_free_port()
        token_file = os.path.join(self.TOKEN_FILE_DIR, self._make_token_file_name(cmd))
        with open(token_file, 'w') as fd:
            fd.write('%s: %s:%s' % (cmd.token, cmd.targetHostname, cmd.targetPort))

        timeout = cmd.idleTimeout
        if not timeout:
            timeout = 600

        log_file = os.path.join(self.PROXY_LOG_DIR, self._make_proxy_log_file_name(cmd))
        proxy_cmd = '''python -c "from zstacklib.utils import log; import websockify; log.configure_log('%s'); websockify.websocketproxy.websockify_init()" %s:%s -D --target-config=%s --idle-timeout=%s''' % (log_file, cmd.proxyHostname, proxyPort, token_file, timeout)
        logger.debug(proxy_cmd)
        shell.call(proxy_cmd)
        shell.call("iptables-save | grep -- '-A INPUT -p tcp -m tcp --dport %s' > /dev/null || iptables -I INPUT -p tcp -m tcp --dport %s -j ACCEPT" % (proxyPort, proxyPort))

        info =  {
                 'proxyHostname': cmd.proxyHostname,
                 # BUG FIX: record the port actually allocated above; the
                 # original stored cmd.proxyPort, which is not the port the
                 # proxy listens on (rsp.proxyPort already uses proxyPort)
                 'proxyPort' : proxyPort,
                 'targetHostname' : cmd.targetHostname,
                 'targetPort': cmd.targetPort,
                 'token': cmd.token,
                 'logFile': log_file,
                 'tokenFile': token_file
                }
        info_str = jsonobject.dumps(info)
        self.db.set(cmd.token, info_str)

        rsp.proxyPort = proxyPort

        logger.debug('successfully establish new proxy%s' % info_str)

        return jsonobject.dumps(rsp)
    def _do_l3_deploy(l3, l2inv_uuid, l3Name, session_uuid):
        """Create an L3 network on the given L2 network and configure it
        (DNS entries, IP ranges, network services) from the XML element l3.

        NOTE(review): failed actions only append to the module-level
        exc_info list; if CreateL3NetworkAction fails, `evt` stays unbound
        and the deploy_logger line below raises NameError (same pattern for
        `reply` in the provider query).  Presumably the driver checks
        exc_info; confirm.
        """
        action = api_actions.CreateL3NetworkAction()
        action.sessionUuid = session_uuid
        action.description = l3.description__
        # XML attribute is a string; anything set and not 'False' means system network
        if l3.system__ and l3.system__ != 'False':
            action.system = 'true'
        action.l2NetworkUuid = l2inv_uuid
        action.name = l3Name
        if l3.uuid__:
            action.resourceUuid = l3.uuid__
        action.type = inventory.L3_BASIC_NETWORK_TYPE
        if l3.domain_name__:
            action.dnsDomain = l3.domain_name__

        try:
            evt = action.run()
        except:
            exc_info.append(sys.exc_info())

        deploy_logger(jsonobject.dumps(evt))
        l3_inv = evt.inventory

        # add dns
        if xmlobject.has_element(l3, 'dns'):
            for dns in xmlobject.safe_list(l3.dns):
                action = api_actions.AddDnsToL3NetworkAction()
                action.sessionUuid = session_uuid
                action.dns = dns.text_
                action.l3NetworkUuid = l3_inv.uuid
                try:
                    evt = action.run()
                except:
                    exc_info.append(sys.exc_info())
                deploy_logger(jsonobject.dumps(evt))

        # add ip range.
        if xmlobject.has_element(l3, 'ipRange'):
            do_add_ip_range(l3.ipRange, l3_inv.uuid, session_uuid)

        # add network service.
        providers = {}
        action = api_actions.QueryNetworkServiceProviderAction()
        action.sessionUuid = session_uuid
        action.conditions = []
        try:
            reply = action.run()
        except:
            exc_info.append(sys.exc_info())
        for pinv in reply:
            providers[pinv.name] = pinv.uuid

        if xmlobject.has_element(l3, 'networkService'):
            do_add_network_service(l3.networkService, l3_inv.uuid,
                                   providers, session_uuid)
    def merge_snapshot_to_volume(self, req):
        """Live-merge a snapshot chain into the volume of a running VM.

        Refuses with an error response when the VM is not running, since
        live merge requires an active qemu process.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = MergeSnapshotRsp()
        vm = get_vm_by_uuid(cmd.vmUuid, exception_if_not_existing=True)

        if vm.state == vm.VM_STATE_RUNNING:
            vm.merge_snapshot(cmd)
            return jsonobject.dumps(rsp)

        rsp.success = False
        rsp.error = 'vm[uuid:%s] is not running, cannot do live snapshot chain merge' % vm.uuid
        return jsonobject.dumps(rsp)
    def check_physical_network_interface(self, req):
        """Verify every requested physical NIC shows up in `ip link`.

        BUG FIX: the response field is plural (failedInterfaceNames) but the
        original returned after the first missing interface; now all missing
        interfaces are collected and reported together.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = CheckPhysicalNetworkInterfaceResponse()
        missing = []
        for i in cmd.interfaceNames:
            shell_cmd = shell.ShellCmd("ip link | grep '%s'" % i)
            shell_cmd(False)
            if shell_cmd.return_code != 0:
                missing.append(i)

        if missing:
            rsp.failedInterfaceNames = missing
            rsp.success = False
            return jsonobject.dumps(rsp)

        logger.debug(http.path_msg(CHECK_PHYSICAL_NETWORK_INTERFACE_PATH, 'checked physical interfaces: %s' % cmd.interfaceNames))
        return jsonobject.dumps(rsp)
# Exemple #21
 def check_bits(self, req):
     """Report whether the rbd image behind installPath exists.

     Any `rbd info` failure other than 'No such file or directory' is
     re-raised; that specific error means the image is absent.
     """
     cmd = jsonobject.loads(req[http.REQUEST_BODY])
     rsp = CheckIsBitsExistingRsp()
     image_path = self._normalize_install_path(cmd.installPath)
     try:
         shell.call('rbd info %s' % image_path)
     except Exception as e:
         if 'No such file or directory' not in str(e):
             raise e
         rsp.existing = False
         return jsonobject.dumps(rsp)
     rsp.existing = True
     return jsonobject.dumps(rsp)
# Exemple #22
    def get_volume_base_image_path(self, req):
        """Resolve the cached base image at the bottom of a volume's qcow2
        backing chain; the response is empty when no base image is found.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = GetVolumeBaseImagePathRsp()

        # sanity check: the install dir must be named after the volume uuid
        if not os.path.basename(cmd.volumeInstallDir).endswith(cmd.volumeUuid):
            raise Exception('maybe you pass a wrong install dir')

        base_path = linux.get_qcow2_base_image_recusively(cmd.volumeInstallDir, cmd.imageCacheDir)
        if base_path:
            rsp.path = base_path
            rsp.size = linux.get_qcow2_file_chain_size(base_path)
        return jsonobject.dumps(rsp)
    def download_from_kvmhost(self, req):
        """Download bits from a KVM host via sftp into primary storage,
        resuming the previous in-process task for the same target if one
        is still tracked.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = AgentRsp()

        abs_path = translate_absolute_path_from_install_path(cmd.primaryStorageInstallPath)

        # todo: assume agent will not restart, maybe need clean
        last_task = self.load_and_save_task(req, rsp, os.path.exists, abs_path)
        if last_task and last_task.agent_pid == os.getpid():
            # same process already ran/runs this task: just wait for it
            return jsonobject.dumps(self.wait_task_complete(last_task))

        self.do_download_from_sftp(cmd, abs_path)
        return jsonobject.dumps(rsp)
    def create_root_volume_from_template(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = AgentResponse()

        if not os.path.exists(cmd.templatePathInCache):
            rsp.error = "UNABLE_TO_FIND_IMAGE_IN_CACHE"
            rsp.success = False
            return jsonobject.dumps(rsp)

        dirname = os.path.dirname(cmd.installUrl)
        if not os.path.exists(dirname):
            os.makedirs(dirname, 0775)

        linux.qcow2_clone(cmd.templatePathInCache, cmd.installUrl)
        return jsonobject.dumps(rsp)
 def create_bootstrap_iso(self, req):
     """Build the bootstrap ISO carrying cmdline.json for a virtual router.

     Writes the iso info to a temp file, copies it into a temp directory as
     cmdline.json, and packs that directory into cmd.isoPath with mkisofs.
     Temporary artifacts are removed afterwards.
     """
     cmd = jsonobject.loads(req[http.REQUEST_BODY])
     isoinfo = jsonobject.dumps(cmd.isoInfo, True)
     tmpfile = linux.write_to_temp_file(isoinfo)
     isodir = tempfile.mkdtemp()
     try:
         dst = os.path.join(isodir, 'cmdline.json')
         shell.ShellCmd('cp %s %s' % (tmpfile, dst))()
         shell.ShellCmd('/usr/bin/mkisofs -quiet -r -o %s %s' % (cmd.isoPath, isodir))()
         return jsonobject.dumps(CreateVritualRouterBootstrapIsoRsp())
     finally:
         # BUG FIX: conditions were inverted ('if not isodir'), so the temp
         # directory and temp file were never cleaned up
         if isodir:
             shutil.rmtree(isodir)
         if tmpfile:
             os.remove(tmpfile)
 def set_bootstrap_info(self, req):
     """Send appliance VM bootstrap info over its unix domain socket and
     return a JSON-dumped PrepareBootstrapInfoRsp.
     """
     cmd = jsonobject.loads(req[http.REQUEST_BODY])
     info = jsonobject.dumps(cmd.info, True)
     socket_path = cmd.socketPath

     sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
     try:
         logger.debug('send appliance vm bootstrap info to %s\n%s' % (socket_path, info))
         sock.connect(socket_path)
         sock.sendall(info)
     finally:
         # always release the socket, even when connect/sendall fails
         sock.close()

     return jsonobject.dumps(PrepareBootstrapInfoRsp())
# Exemple #27
 def add_dhcp_entry(self, req):
     """Install DHCP entries into dnsmasq, rebuilding or merging, and
     restart/refresh dnsmasq depending on whether a new range was added.
     """
     cmd = jsonobject.loads(req[http.REQUEST_BODY])
     entries = [DhcpEntry.from_dhcp_info(info) for info in cmd.dhcpEntries]
     gateways = [entry.gateway for entry in entries]

     if cmd.rebuild:
         self._rebuild_all(entries)
     else:
         self._merge(entries)

     rsp = AddDhcpEntryRsp()
     try:
         # a brand-new dhcp range requires a full dnsmasq restart
         if self._add_dhcp_range_if_need(gateways):
             self._restart_dnsmasq()
         else:
             self._refresh_dnsmasq()
     except virtualrouter.VirtualRouterError as e:
         logger.warn(linux.get_exception_stacktrace())
         rsp.error = str(e)
         rsp.success = False

     return jsonobject.dumps(rsp)
 def downloadfromimagestore(self, req):
     """Pull bits from the image store onto NAS primary storage and report
     the resulting disk capacity.
     """
     cmd = jsonobject.loads(req[http.REQUEST_BODY])
     self.imagestore_client.download_from_imagestore(
         cmd.cacheDir, cmd.hostname, cmd.backupStorageInstallPath, cmd.primaryStorageInstallPath)

     rsp = AliyunNasResponse()
     rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.uuid)
     return jsonobject.dumps(rsp)
Example #29
0
    def remove_dhcp_entry(self, req):
        """Remove DHCP entries from dnsmasq's host/option/dns files.

        For each entry, the matching lines are sed-deleted from the dhcp,
        option and dns config files (plus resulting blank lines), then
        dhcp_release is invoked to drop the live lease from the NIC the
        entry belongs to.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = RemoveDhcpEntryRsp()
        try:
            for e in cmd.dhcpEntries:
                # resolve the VR NIC device name from its MAC address
                net_dev = shell.call("ifconfig|grep -i %s|awk '{print $1}'" % e.vrNicMac)
                net_dev = net_dev.strip('\t\r\n ')
                # the option file keys on the MAC with ':' separators removed
                mac2 = e.mac.replace(':', '')
                # NOTE: the backslash-newlines below are continuations *inside*
                # the shell string — the embedded whitespace is harmless to sh
                shell.call("sed -i '/%s/d' %s; \
                        sed -i '/^$/d' %s; \
                        sed -i '/%s/d' %s; \
                        sed -i '/^$/d' %s; \
                        sed -i '/%s/d' %s; \
                        sed -i '/^$/d' %s; \
                        dhcp_release %s %s %s"\
                        % (e.mac, self.HOST_DHCP_FILE, \
                        self.HOST_DHCP_FILE, \
                        mac2, self.HOST_OPTION_FILE, \
                        self.HOST_OPTION_FILE, \
                        e.ip, self.HOST_DNS_FILE, \
                        self.HOST_DNS_FILE, \
                        net_dev, e.ip, e.mac))

        except virtualrouter.VirtualRouterError as e:
            logger.warn(linux.get_exception_stacktrace())
            rsp.error = str(e)
            rsp.success = False

        return jsonobject.dumps(rsp)
    def deletebits(self, req):
        """Delete bits under the NAS folder, then report remaining capacity."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        self.delNasBits(cmd.folder, cmd.path)

        rsp = AliyunNasResponse()
        rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(cmd.uuid)
        return jsonobject.dumps(rsp)
Example #31
0
    def installtdc(self, req):
        """Install/start the TDC stack and vrbd kernel module, then sync configs.

        Fails fast (returns the error in rsp.error) if the agent's tdc
        version does not match, if `tdc_admin lsi` still fails after a
        yum install + service restart, or if the vrbd module cannot be
        loaded. On success, rewrites the tdc/nuwa config files and
        restarts the tdc service only when any of them actually changed.
        """
        # write config verbatim, replacing whatever was there
        def overwriteConfig(config, cfile):
            c = open(cfile, 'w')
            c.write(config)
            c.close()

        def updateTdcConfig(config, cfile):
            '''
               1. read /opt/tdc/apsara_global_config.json if existed
               2. compare with config
               3. overwrite if it is different
            '''
            if not os.path.exists(cfile):
                d = os.path.dirname(cfile)
                if not os.path.exists(d):
                    os.makedirs(d, 0755)
                overwriteConfig(config, cfile)
                return True

            updated = False
            c = open(cfile)
            if config != c.read().strip():
                overwriteConfig(config, cfile)
                updated = True

            c.close()
            return updated

        # returns an error string on failure, None on success
        def installTdc(cmd):
            logger.debug('install tdc pkg')
            if cmd.version != tdcversion:
                return "no matched tdc version found, agent need version %d" % tdcversion

            startCmd = shell.ShellCmd("/opt/tdc/tdc_admin lsi")
            startCmd(False)
            if startCmd.return_code != 0:
                linux.mkdir("/apsara", 0755)
                # install the rpm only if it is not already present
                e = shell.ShellCmd(
                    'rpm -qi tdc-unified-8.2.0.release.el5.x86_64')
                e(False)
                if e.return_code != 0:
                    shell.call(
                        "export YUM0={}; yum --disablerepo=* --enablerepo=zstack-mn,qemu-kvm-ev-mn clean metadata"
                        .format(kvmagent.get_host_yum_release()))
                    shell.call(
                        "export YUM0={}; yum --disablerepo=* --enablerepo=zstack-mn,qemu-kvm-ev-mn install -y tdc-unified-8.2.0.release.el5.x86_64"
                        .format(kvmagent.get_host_yum_release()))
                shell.call("service tdc restart")

                # re-check after (possible install and) restart
                startCmd = shell.ShellCmd("/opt/tdc/tdc_admin lsi")
                startCmd(False)
                if startCmd.return_code != 0:
                    return "tdc_admin lsi failed: %s" % startCmd.stderr
            return None

        # returns an error string on failure, None when the module is loaded
        def installVrbd():
            logger.debug('modprobe vrbd')
            lsModCmd = shell.ShellCmd("lsmod|grep vrbd")
            lsModCmd(False)
            if lsModCmd.return_code != 0:
                e = shell.ShellCmd(
                    'rpm -qi kernel-3.10.0-693.11.1.el7.x86_64-vrbd-1.0-0.1.release1.alios7.x86_64'
                )
                e(False)
                if e.return_code != 0:
                    shell.call(
                        "export YUM0={}; yum --disablerepo=* --enablerepo=zstack-mn,qemu-kvm-ev-mn clean metadata"
                        .format(kvmagent.get_host_yum_release()))
                    shell.call(
                        "export YUM0={}; yum --disablerepo=* --enablerepo=zstack-mn,qemu-kvm-ev-mn install -y kernel-3.10.0-693.11.1.el7.x86_64-vrbd-1.0-0.1.release1.alios7.x86_64"
                        .format(kvmagent.get_host_yum_release()))
                shell.call("modprobe vrbd")
            else:
                # module already loaded — nothing to do
                return
            # verify the module actually came up after modprobe
            lsModCmd(False)
            if lsModCmd.return_code != 0:
                return "cannot attach vrbd mod"

        rsp = kvmagent.AgentResponse()
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        # short-circuit: installVrbd only runs when installTdc succeeded (None)
        rsp.error = installTdc(cmd) or installVrbd()
        if rsp.error is not None:
            return jsonobject.dumps(rsp)

        if cmd.tdcConfig and cmd.nuwaConfig and cmd.nuwaCfg:
            tdc = updateTdcConfig(cmd.tdcConfig,
                                  '/opt/tdc/apsara_global_flag.json')
            nuwa1 = updateTdcConfig(
                cmd.nuwaConfig,
                '/apsara/conf/conffiles/nuwa/client/nuwa_config.json')
            nuwa2 = updateTdcConfig(cmd.nuwaCfg, '/apsara/nuwa/nuwa.cfg')
            if tdc or nuwa1 or nuwa2:
                logger.debug('config changed, restart tdc service')
                shell.call("service tdc restart")

        return jsonobject.dumps(rsp)
Example #32
0
 def get_local_file_size(self, req):
     """Report the on-disk size of the file named in the request body."""
     request = jsonobject.loads(req[http.REQUEST_BODY])
     response = GetLocalFileSizeRsp()
     response.size = linux.get_local_file_size(request.path)
     return jsonobject.dumps(response)
Example #33
0
 def apply_eips(self, req):
     """Apply every elastic IP carried in the request body."""
     eips = jsonobject.loads(req[http.REQUEST_BODY]).eips
     self._apply_eips(eips)
     return jsonobject.dumps(AgentRsp())
 def download_from_imagestore(self, req):
     """Download an image from the image store onto the mounted primary storage."""
     cmd = jsonobject.loads(req[http.REQUEST_BODY])
     mount_point = self.mount_path.get(cmd.uuid)
     self.imagestore_client.download_from_imagestore(
         mount_point, cmd.hostname, cmd.backupStorageInstallPath,
         cmd.primaryStorageInstallPath)
     rsp = kvmagent.AgentResponse()
     self._set_capacity_to_response(cmd.uuid, rsp)
     return jsonobject.dumps(rsp)
Example #35
0
    def sftp_download(self, req):
        """Import an image from a remote sftp backup storage into a ceph pool.

        Streams the remote file over ssh straight into `rbd import` under a
        temporary image name, converts qcow2 payloads to a flat rbd image,
        then renames/cleans up so the final image lands at pool/image_name.
        The @rollbackable hooks (_0, _1) register cleanup of the temporary
        image should a later step fail.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        hostname = cmd.hostname
        prikey = cmd.sshKey
        port = cmd.sshPort

        pool, image_name = self._parse_install_path(
            cmd.primaryStorageInstallPath)
        tmp_image_name = 'tmp-%s' % image_name

        # ssh needs the key on disk; removed in the finally below
        prikey_file = linux.write_to_temp_file(prikey)

        @rollbackable
        def _0():
            tpath = "%s/%s" % (pool, tmp_image_name)
            shell.call('rbd info %s > /dev/null && rbd rm %s' % (tpath, tpath))

        _0()

        try:
            # stream the remote file directly into rbd without a local copy
            shell.call(
                'set -o pipefail; ssh -p %d -o StrictHostKeyChecking=no -i %s root@%s "cat %s" | rbd import --image-format 2 - %s/%s'
                % (port, prikey_file, hostname, cmd.backupStorageInstallPath,
                   pool, tmp_image_name))
        finally:
            os.remove(prikey_file)

        @rollbackable
        def _1():
            shell.call('rbd rm %s/%s' % (pool, tmp_image_name))

        _1()

        file_format = shell.call(
            "set -o pipefail; qemu-img info rbd:%s/%s | grep 'file format' | cut -d ':' -f 2"
            % (pool, tmp_image_name))
        file_format = file_format.strip()
        if file_format not in ['qcow2', 'raw']:
            raise Exception('unknown image format: %s' % file_format)

        if file_format == 'qcow2':
            conf_path = None
            try:
                # qemu-img needs a ceph.conf forcing rbd format 2 for the target
                with open('/etc/ceph/ceph.conf', 'r') as fd:
                    conf = fd.read()
                    conf = '%s\n%s\n' % (conf, 'rbd default format = 2')
                    conf_path = linux.write_to_temp_file(conf)

                shell.call(
                    'qemu-img convert -f qcow2 -O rbd rbd:%s/%s rbd:%s/%s:conf=%s'
                    % (pool, tmp_image_name, pool, image_name, conf_path))
                shell.call('rbd rm %s/%s' % (pool, tmp_image_name))
            finally:
                if conf_path:
                    os.remove(conf_path)
        else:
            # raw payload: a simple rename puts it in place
            shell.call('rbd mv %s/%s %s/%s' %
                       (pool, tmp_image_name, pool, image_name))

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
Example #36
0
 def get_physical_capacity(self, req):
     """Return total and available disk capacity of the requested storage path."""
     cmd = jsonobject.loads(req[http.REQUEST_BODY])
     total, avail = self._get_disk_capacity(cmd.storagePath)
     rsp = AgentResponse()
     rsp.totalCapacity = total
     rsp.availableCapacity = avail
     return jsonobject.dumps(rsp)
 def ping(self, req):
     """Answer a keepalive ping with this agent's uuid."""
     reply = PingResponse()
     reply.uuid = self.uuid
     return jsonobject.dumps(reply)
Example #38
0
    def do_command(self, line):
        def check_session(apiname):
            if not self.session_uuid and apiname not in [self.LOGIN_MESSAGE_NAME, self.LOGIN_BY_USER_NAME, self.LOGIN_BY_LDAP_MESSAGE_NAME]:
                self.print_error('''Please login before running any API message
example: %sLogInByAccount accountName=admin password=your_super_secure_admin_password''' % prompt)
                return False
            return True

        def is_api_param_a_list(apiname, param):
            optional_list = eval('isinstance(inventory.%s().%s, \
                    inventory.OptionalList)' % (apiname, param))
            not_none_list = eval('isinstance(inventory.%s().%s, \
                    inventory.NotNoneList)' % (apiname, param))
            if optional_list or not_none_list:
                return True

        def build_params():
            def eval_string(key, value_string):
                try:
                    return eval(value_string)
                except Exception as e:
                    err_msg = """
Parse command parameters error:
  eval '%s' error for: '%s'
  the right format is like: "[{'KEY':'VALUE'}, {'KEY':['VALUE1', 'VALUE2']}]"
                          """ % (value_string, key)
                    self.print_error(err_msg)
                    raise e

            pairs = shlex.split(line)
            if pairs[0] in self.cli_cmd:
                cmd = pairs[0]
                if len(pairs) > 1:
                    return cmd, pairs[1:]
                else:
                    return cmd, None

            apiname = 'API%sMsg' % pairs[0]
            if apiname not in inventory.api_names:
                raise CliError('"%s" is not an API message' % apiname)

            #'=' will be used for more meanings than 'equal' in Query API
            if apiname.startswith('APIQuery') and not apiname in NOT_QUERY_MYSQL_APIS:
                return apiname, pairs[1:]

            all_params = {}
            for param_str in pairs[1:]:
                params = param_str.split('=', 1)
                if len(params) != 2:
                    raise CliError('Invalid parameter[%s], the parameter must be split by "="' % param_str)

                if apiname == 'APIAddSecurityGroupRuleMsg' and params[0] == 'rules':
                    all_params[params[0]] = eval(params[1])
                elif apiname in ['APIGetHostMonitoringDataMsg', 'APIGetVmMonitoringDataMsg', 'APIMonitoringPassThroughMsg'] and params[0] == 'query':
                    all_params[params[0]] = eval(params[1])
                elif apiname == 'APIAttachNetworkServiceToL3NetworkMsg' and params[0] == 'networkServices':
                    all_params[params[0]] = eval_string(params[0], params[1])
                elif apiname == 'APIDetachNetworkServiceFromL3NetworkMsg' and params[0] == 'networkServices':
                    all_params[params[0]] = eval_string(params[0], params[1])
                elif apiname == 'APICreatePolicyMsg' and params[0] == 'statements':
                    all_params[params[0]] = eval_string(params[0], params[1])
                elif is_api_param_a_list(apiname, params[0]):
                    all_params[params[0]] = params[1].split(',')
                else:
                    all_params[params[0]] = params[1]

            return (apiname, all_params)

        def generate_query_params(apiname, params):
            '''
            Query params will include conditions expression, which includes ops:
            =, !=, >, <, >=, <=, ?=, !?=, ~=, !~=
            ?= means 'in'
            !?= means 'not in'
            ~= means 'like'
            !~= means 'not like'
            =null means 'is null'
            !=null means 'is not null'
            '''

            null = 'null'
            eq = '='
            gt = '>'
            lt = '<'
            nt = '!'
            lk = '~'
            qs = '?'
            ps = '+'
            ms = '-'
            perc = '%'
            underscore = '_'

            conditions = []
            new_params = {}

            for param in params:
                if eq in param:
                    key,value = param.split(eq, 1)
                    if not key in query_param_keys:
                        if key.endswith(nt):
                            if value != null:
                                conditions.append({'name':key[:-1], \
                                        'op':'!=', 'value': value})
                            else:
                                conditions.append({'name':key[:-1], \
                                        'op':'is not null', 'value': ''})

                        elif key.endswith(gt):
                            conditions.append({'name':key[:-1], \
                                    'op':'>=', 'value': value})

                        elif key.endswith(lt):
                            conditions.append({'name':key[:-1], \
                                    'op':'<=', 'value': value})

                        elif key.endswith('%s%s' % (nt, qs)):
                            conditions.append({'name':key[:-2], \
                                    'op':'not in', 'value': value})

                        elif key.endswith(qs):
                            conditions.append({'name':key[:-1], \
                                    'op':'in', 'value': value})

                        elif key.endswith('%s%s' % (nt, lk)):
                            #will help to add pattern %, if user not input
                            if not perc in value and not underscore in value:
                                value = '%s%s%s' % (perc, value, perc)
                            conditions.append({'name':key[:-2], \
                                    'op':'not like', 'value': value})

                        elif key.endswith(lk):
                            #will help to add pattern %, if user not input
                            if not perc in value and not underscore in value:
                                value = '%s%s%s' % (perc, value, perc)
                            conditions.append({'name':key[:-1], \
                                    'op':'like', 'value': value})

                        else:
                            if value != null:
                                conditions.append({'name':key, \
                                        'op':eq, 'value': value})
                            else:
                                conditions.append({'name':key, \
                                        'op':'is null', 'value': ''})

                    elif key == 'conditions':
                        conditions.extend(eval(value))

                    elif key == 'fields':
                        #remove the last ','
                        if value.endswith(','):
                            value = value[:-1]
                        new_params[key] = value.split(',')

                    else:
                        if is_api_param_a_list(apiname, key):
                            new_params[key] = value.split(',')
                        else:
                            new_params[key] = value

                elif gt in param:
                    key,value = param.split(gt, 1)
                    conditions.append({'name':key, \
                            'op':gt, 'value': value})

                elif lt in param:
                    key,value = param.split(lt, 1)
                    conditions.append({'name':key, \
                            'op':lt, 'value': value})

            new_params['conditions'] = conditions
            return new_params

        def create_msg(apiname, params):
            creator = self.msg_creator.get(apiname)
            if creator:
                return creator(apiname, params)

            if apiname.startswith('APIQuery')  and not apiname in NOT_QUERY_MYSQL_APIS:
                params = generate_query_params(apiname, params)

            msg = eval('inventory.%s()' % apiname)
            for key in params.keys():
                value = params[key]
                setattr(msg, key, value)
            return msg

        def set_session_to_api(msg):
            session = inventory.Session()
            session.uuid = self.session_uuid
            msg.session = session


        (apiname, all_params) = build_params()
        if apiname in self.cli_cmd:
            #self.write_more(apiname, None)
            self.cli_cmd_func[apiname](all_params)
            return

        if not check_session(apiname):
            raise CliError("No session uuid defined")

        msg = create_msg(apiname, all_params)
        set_session_to_api(msg)
        try:
            if apiname in [self.LOGIN_MESSAGE_NAME, self.LOGIN_BY_USER_NAME, self.CREATE_ACCOUNT_NAME, self.CREATE_USER_NAME]:
                if not msg.password:
                    raise CliError('"password" must be specified')
                msg.password = hashlib.sha512(msg.password).hexdigest()

            if apiname in [self.USER_RESET_PASSWORD_NAME, self.ACCOUNT_RESET_PASSWORD_NAME]:
                if msg.password:
                    msg.password = hashlib.sha512(msg.password).hexdigest()

            if apiname == self.LOGOUT_MESSAGE_NAME:
                if not msg.sessionUuid:
                    setattr(msg, 'sessionUuid', self.session_uuid)

            start_time = time.time()
            (name, event) = self.api.async_call_wait_for_complete(msg, fail_soon=True)
            end_time = time.time()

            if apiname in [self.LOGIN_MESSAGE_NAME, self.LOGIN_BY_USER_NAME, self.LOGIN_BY_LDAP_MESSAGE_NAME]:
                self.session_uuid = event.inventory.uuid
                open(SESSION_FILE, 'w').write(self.session_uuid)

            result = jsonobject.dumps(event, True)
            print '%s\n' % result
            #print 'Time costing: %fs' % (end_time - start_time)
            self.write_more(line, result)
        except urllib3.exceptions.MaxRetryError as urlerr:
            self.print_error('Is %s reachable? Please make sure the management node is running.' % self.api.api_url)
            self.print_error(str(urlerr))
            raise ("Server: %s is not reachable" % self.hostname)
        except Exception as e:
            self.print_error(str(e))
            self.write_more(line, str(e), False)
            raise e
Example #39
0
    def get_usb_devices(self, req):
        """Enumerate host USB devices by scraping `lsusb.py -U` and `lsusb -v`.

        Returns a ';'-separated list of
        bus:dev:idVendor:idProduct:manufacturer:product:serial:usbVersion
        records in rsp.usbDevicesInfo, or success=False with an error
        message when either lsusb invocation fails or a device record is
        incomplete.
        """
        class UsbDeviceInfo(object):
            # one record per attached device; all fields are plain strings
            def __init__(self):
                self.busNum = ""
                self.devNum = ""
                self.idVendor = ""
                self.idProduct = ""
                self.iManufacturer = ""
                self.iProduct = ""
                self.iSerial = ""
                self.usbVersion = ""

            def toString(self):
                return self.busNum + ':' + self.devNum + ':' + self.idVendor + ':' + self.idProduct + ':' + self.iManufacturer + ':' + self.iProduct + ':' + self.iSerial + ':' + self.usbVersion + ";"

        # use 'lsusb.py -U' to get device ID, like '0751:9842'
        rsp = GetUsbDevicesRsp()
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        r, o, e = bash_roe("lsusb.py -U")
        if r != 0:
            rsp.success = False
            rsp.error = "%s %s" % (e, o)
            return jsonobject.dumps(rsp)

        # collect the unique vendor:product ids from column 2 of the output
        idSet = set()
        usbDevicesInfo = ''
        for line in o.split('\n'):
            line = line.split()
            if len(line) < 2:
                continue
            idSet.add(line[1])

        for devId in idSet:
            # use 'lsusb -v -d ID' to get device info[s]
            r, o, e = bash_roe("lsusb -v -d %s" % devId)
            if r != 0:
                rsp.success = False
                rsp.error = "%s %s" % (e, o)
                return jsonobject.dumps(rsp)

            # stateful parse: a 'Bus' line opens a record, 'iSerial' closes it.
            # NOTE(review): assumes lsusb always emits 'Bus' before the field
            # lines and 'iSerial' last — a NameError on `info` would occur
            # otherwise; verify against lsusb output format
            for line in o.split('\n'):
                line = line.strip().split()
                if len(line) < 2:
                    continue

                if line[0] == 'Bus':
                    info = UsbDeviceInfo()
                    info.idVendor, info.idProduct = devId.split(':')
                    info.busNum = line[1]
                    info.devNum = line[3].rsplit(':')[0]
                elif line[0] == 'idVendor':
                    # vendor name from the idVendor line; may be refined by iManufacturer below
                    info.iManufacturer = ' '.join(
                        line[2:]) if len(line) > 2 else ""
                elif line[0] == 'idProduct':
                    info.iProduct = ' '.join(line[2:]) if len(line) > 2 else ""
                elif line[0] == 'bcdUSB':
                    info.usbVersion = line[1]
                elif line[0] == 'iManufacturer' and len(line) > 2:
                    info.iManufacturer = ' '.join(line[2:])
                elif line[0] == 'iProduct' and len(line) > 2:
                    info.iProduct = ' '.join(line[2:])
                elif line[0] == 'iSerial':
                    info.iSerial = ' '.join(line[2:]) if len(line) > 2 else ""
                    if info.busNum == '' or info.devNum == '' or info.idVendor == '' or info.idProduct == '':
                        rsp.success = False
                        rsp.error = "cannot get enough info of usb device"
                        return jsonobject.dumps(rsp)
                    else:
                        usbDevicesInfo += info.toString()
        rsp.usbDevicesInfo = usbDevicesInfo
        return jsonobject.dumps(rsp)
    def setup_self_fencer(self, req):
        """Start one async heartbeat fencer per mounted primary storage.

        Each fencer periodically touches a heartbeat file on the mount; after
        cmd.maxAttempts consecutive failures it reports the storage as
        Disconnected, kills the VMs using the mount and (for zstack-managed
        mounts) unmounts and keeps trying to remount until connectivity
        returns or the fencer is cancelled via run_filesystem_fencer.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        @thread.AsyncThread
        def heartbeat_file_fencer(mount_path, ps_uuid, mounted_by_zstack):
            def try_remount_fs():
                if mount_path_is_nfs(mount_path):
                    shell.run("systemctl start nfs-client.target")

                # loop until remount succeeds or this fencer is cancelled
                while self.run_filesystem_fencer(ps_uuid, created_time):
                    if linux.is_mounted(
                            path=mount_path) and touch_heartbeat_file():
                        self.report_storage_status([ps_uuid], 'Connected')
                        logger.debug(
                            "fs[uuid:%s] is reachable again, report to management"
                            % ps_uuid)
                        break
                    try:
                        logger.debug(
                            'fs[uuid:%s] is unreachable, it will be remounted after 180s'
                            % ps_uuid)
                        time.sleep(180)
                        if not self.run_filesystem_fencer(
                                ps_uuid, created_time):
                            break
                        # url/options were captured from `mount` before the outage
                        linux.remount(url, mount_path, options)
                        self.report_storage_status([ps_uuid], 'Connected')
                        logger.debug(
                            "remount fs[uuid:%s] success, report to management"
                            % ps_uuid)
                        break
                    except:
                        logger.warn(
                            'remount fs[uuid:%s] fail, try again soon' %
                            ps_uuid)
                        kill_progresses_using_mount_path(mount_path)

                logger.debug('stop remount fs[uuid:%s]' % ps_uuid)

            def after_kill_vm():
                # killed_vm_pids is bound in the heartbeat loop below
                if not killed_vm_pids or not mounted_by_zstack:
                    return

                try:
                    kill_and_umount(mount_path, mount_path_is_nfs(mount_path))
                except UmountException:
                    if shell.run('ps -p %s' % ' '.join(killed_vm_pids)) == 0:
                        virsh_list = shell.call(
                            "timeout 10 virsh list --all || echo 'cannot obtain virsh list'"
                        )
                        logger.debug("virsh_list:\n" + virsh_list)
                        logger.error(
                            'kill vm[pids:%s] failed because of unavailable fs[mountPath:%s].'
                            ' please retry "umount -f %s"' %
                            (killed_vm_pids, mount_path, mount_path))
                        return

                try_remount_fs()

            def touch_heartbeat_file():
                touch = shell.ShellCmd(
                    'timeout %s touch %s' %
                    (cmd.storageCheckerTimeout, heartbeat_file_path))
                touch(False)
                if touch.return_code != 0:
                    logger.warn(
                        'unable to touch %s, %s %s' %
                        (heartbeat_file_path, touch.stderr, touch.stdout))
                return touch.return_code == 0

            heartbeat_file_path = os.path.join(
                mount_path, 'heartbeat-file-kvm-host-%s.hb' % cmd.hostUuid)
            # created_time identifies this fencer; cancel works by replacing it
            created_time = time.time()
            with self.fencer_lock:
                self.run_filesystem_fencer_timestamp[ps_uuid] = created_time
            try:
                failure = 0
                # capture device and mount options now, for later remount attempts
                url = shell.call("mount | grep -e '%s' | awk '{print $1}'" %
                                 mount_path).strip()
                options = shell.call(
                    "mount | grep -e '%s' | awk -F '[()]' '{print $2}'" %
                    mount_path).strip()

                while self.run_filesystem_fencer(ps_uuid, created_time):
                    time.sleep(cmd.interval)
                    if touch_heartbeat_file():
                        failure = 0
                        continue

                    failure += 1
                    if failure == cmd.maxAttempts:
                        logger.warn(
                            'failed to touch the heartbeat file[%s] %s times, we lost the connection to the storage,'
                            'shutdown ourselves' %
                            (heartbeat_file_path, cmd.maxAttempts))
                        self.report_storage_status([ps_uuid], 'Disconnected')
                        killed_vm_pids = kill_vm(cmd.maxAttempts, [mount_path],
                                                 True)
                        after_kill_vm()

                logger.debug('stop heartbeat[%s] for filesystem self-fencer' %
                             heartbeat_file_path)

            except:
                content = traceback.format_exc()
                logger.warn(content)

        for mount_path, uuid, mounted_by_zstack in zip(cmd.mountPaths,
                                                       cmd.uuids,
                                                       cmd.mountedByZStack):
            if not linux.timeout_isdir(mount_path):
                raise Exception('the mount path[%s] is not a directory' %
                                mount_path)

            heartbeat_file_fencer(mount_path, uuid, mounted_by_zstack)

        return jsonobject.dumps(AgentRsp())
Example #41
0
    def list(self, req):
        """List filesystem entries under the requested path."""
        request = jsonobject.loads(req[http.REQUEST_BODY])
        response = ListResponse()
        response.paths = kvmagent.listPath(request.path)
        return jsonobject.dumps(response)
 def cancel_filesystem_self_fencer(self, req):
     """Stop filesystem self-fencing for the listed primary storages."""
     cmd = jsonobject.loads(req[http.REQUEST_BODY])
     with self.fencer_lock:
         # dropping the timestamp makes the matching fencer thread exit its loop
         for uuid in cmd.psUuids:
             self.run_filesystem_fencer_timestamp.pop(uuid, None)
     return jsonobject.dumps(AgentRsp())
 def cancel_ceph_self_fencer(self, req):
     """Signal the ceph self-fencer loop to stop on its next iteration."""
     self.run_ceph_fencer = False
     return jsonobject.dumps(AgentRsp())
 def write_image_metadata(self, req):
     """Persist image metadata alongside the image's install path."""
     meta = jsonobject.loads(req[http.REQUEST_BODY]).metaData
     self._write_image_metadata(meta.installPath, meta)
     return jsonobject.dumps(WriteImageMetaDataResponse())
Example #45
0
 def callback(self, req):
     rsp = jsonobject.loads(req[http.REQUEST_BODY])
     print jsonobject.dumps(rsp)
    def download_image(self, req):
        #TODO: report percentage to mgmt server
        def percentage_callback(percent, url):
            logger.debug('Downloading %s ... %s%%' % (url, percent))
                
        def use_wget(url, name, workdir, timeout):
            return linux.wget(url, workdir=workdir, rename=name, timeout=timeout, interval=2, callback=percentage_callback, callback_data=url)
        
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = DownloadResponse()
        supported_schemes = [self.URL_HTTP, self.URL_HTTPS, self.URL_FILE]
        if cmd.urlScheme not in supported_schemes:
            rsp.success = False
            rsp.error = 'unsupported url scheme[%s], SimpleSftpBackupStorage only supports %s' % (cmd.urlScheme, supported_schemes)
            return jsonobject.dumps(rsp)
        
        path = os.path.dirname(cmd.installPath)
        if not os.path.exists(path):
            os.makedirs(path, 0777)
        image_name = os.path.basename(cmd.installPath)
        install_path = cmd.installPath
        
        timeout = cmd.timeout if cmd.timeout else 7200
        if cmd.urlScheme in [self.URL_HTTP, self.URL_HTTPS]:
            try:
                ret = use_wget(cmd.url, image_name, path, timeout)
                if ret != 0:
                    rsp.success = False
                    rsp.error = 'http/https download failed, [wget -O %s %s] returns value %s' % (image_name, cmd.url, ret)
                    return jsonobject.dumps(rsp)
            except linux.LinuxError as e:
                traceback.format_exc()
                rsp.success = False
                rsp.error = str(e)
                return jsonobject.dumps(rsp)
        elif cmd.urlScheme == self.URL_FILE:
            src_path = cmd.url.lstrip('file:')
            src_path = os.path.normpath(src_path)
            if not os.path.isfile(src_path):
                raise Exception('cannot find the file[%s]' % src_path)

            shell.call('yes | cp %s %s' % (src_path, install_path))



        os.chmod(cmd.installPath, stat.S_IRUSR + stat.S_IRGRP + stat.S_IROTH)

        size = os.path.getsize(install_path)
        image_format =  bash_o("qemu-img info %s | grep -w '^file format' | awk '{print $3}'" % install_path).strip('\n')
        if "raw" in image_format:
            if "ISO" in bash_o("file %s" % install_path):
                image_format = "iso"
        md5sum = 'not calculated'
        logger.debug('successfully downloaded %s to %s' % (cmd.url, install_path))
        (total, avail) = self.get_capacity()
        rsp.md5Sum = md5sum
        rsp.actualSize = size
        rsp.size = linux.qcow2_virtualsize(install_path)
        rsp.totalCapacity = total
        rsp.availableCapacity = avail
        rsp.format = image_format
        return jsonobject.dumps(rsp)
 def get_capacity(self, req):
     """Report storage capacity for the primary storage named in the request."""
     request = jsonobject.loads(req[http.REQUEST_BODY])
     response = GetCapacityResponse()
     self._set_capacity_to_response(request.uuid, response)
     return jsonobject.dumps(response)
 def get_volume_size(self, req):
     """Return the virtual and actual sizes of a qcow2 volume."""
     cmd = jsonobject.loads(req[http.REQUEST_BODY])
     rsp = GetVolumeSizeRsp()
     virtual_size, actual_size = linux.qcow2_size_and_actual_size(cmd.installPath)
     rsp.size = virtual_size
     rsp.actualSize = actual_size
     return jsonobject.dumps(rsp)
# Exemple #49
# 0
 def delete_eip(self, req):
     """Remove the single EIP described in the request body."""
     eip = jsonobject.loads(req[http.REQUEST_BODY]).eip
     self._delete_eips([eip])
     return jsonobject.dumps(AgentRsp())
 def get_volume_base_image_path(self, req):
     """Resolve the root backing image of the qcow2 chain at cmd.installPath."""
     install_path = jsonobject.loads(req[http.REQUEST_BODY]).installPath
     rsp = GetVolumeBaseImagePathRsp()
     rsp.path = linux.get_qcow2_base_image_path_recusively(install_path)
     return jsonobject.dumps(rsp)
# Exemple #51
# 0
    def download(self, req):
        """Download/import an image into the rbd pool of cmd.installPath.

        Supported cmd.url schemes: http/https/ftp, sftp and file, plus the
        internal upload protocol (UPLOAD_PROTO), which only prepares an
        upload and returns immediately.  The image is streamed into a
        temporary rbd image first; a qcow2 payload is then converted to raw
        under the final name, anything else is simply renamed.  Download
        progress is reported to the management node (0-90%, 100% on finish).

        Returns a JSON-serialized DownloadRsp carrying size, actualSize,
        format and pool capacity.
        """
        rsp = DownloadRsp()

        def _get_origin_format(path):
            # Sniff the image header: qcow2 magic ('QFI\xfb') plus its
            # backing-file field, or an ISO9660 'CD001' signature.
            qcow2_length = 0x9007
            if path.startswith('http://') or path.startswith(
                    'https://') or path.startswith('ftp://'):
                resp = urllib2.urlopen(path)
                qhdr = resp.read(qcow2_length)
                resp.close()
            elif path.startswith('sftp://'):
                # NOTE(review): pipe_path/scp_to_pipe_cmd come from the
                # enclosing scope and are only assigned in the sftp branch
                # below, which runs before this helper is called — confirm
                # before reusing this helper elsewhere.
                fd, tmp_file = tempfile.mkstemp()
                get_header_from_pipe_cmd = "timeout 60 head --bytes=%d %s > %s" % (
                    qcow2_length, pipe_path, tmp_file)
                clean_cmd = "pkill -f %s" % pipe_path
                shell.run(
                    '%s & %s && %s' %
                    (scp_to_pipe_cmd, get_header_from_pipe_cmd, clean_cmd))
                qhdr = os.read(fd, qcow2_length)
                if os.path.exists(tmp_file):
                    os.remove(tmp_file)
            else:
                resp = open(path)
                qhdr = resp.read(qcow2_length)
                resp.close()
            if len(qhdr) < qcow2_length:
                return "raw"
            if qhdr[:4] == 'QFI\xfb':
                # zero backing-file field means a self-contained qcow2
                if qhdr[16:20] == '\x00\x00\x00\00':
                    return "qcow2"
                else:
                    return "derivedQcow2"

            # ISO9660 volume descriptors may sit at 0x8001, 0x8801 or 0x9001
            if qhdr[0x8001:0x8006] == 'CD001':
                return 'iso'

            if qhdr[0x8801:0x8806] == 'CD001':
                return 'iso'

            if qhdr[0x9001:0x9006] == 'CD001':
                return 'iso'
            return "raw"

        def get_origin_format(fpath, fail_if_has_backing_file=True):
            # refuse images with a backing file: they cannot be imported
            # standalone
            image_format = _get_origin_format(fpath)
            if image_format == "derivedQcow2" and fail_if_has_backing_file:
                raise Exception('image has backing file or %s is not exist!' %
                                fpath)
            return image_format

        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool, image_name = self._parse_install_path(cmd.installPath)
        tmp_image_name = 'tmp-%s' % image_name

        @rollbackable
        def _1():
            # rollback action: drop the temporary rbd image on failure
            shell.check_run('rbd rm %s/%s' % (pool, tmp_image_name))

        def _getRealSize(length):
            '''length looks like: 10245K — convert a human-readable size
            to bytes; return the input unchanged when it carries no unit
            suffix or cannot be parsed.'''
            logger.debug(length)
            if not length[-1].isalpha():
                return length
            units = {
                "g": lambda x: x * 1024 * 1024 * 1024,
                "m": lambda x: x * 1024 * 1024,
                "k": lambda x: x * 1024,
            }
            try:
                if not length[-1].isalpha():
                    return length
                return units[length[-1].lower()](int(length[:-1]))
            except:
                logger.warn(linux.get_exception_stacktrace())
                return length

        # whether we have an upload request
        if cmd.url.startswith(self.UPLOAD_PROTO):
            self._prepare_upload(cmd)
            rsp.size = 0
            rsp.uploadPath = self._get_upload_path(req)
            self._set_capacity_to_response(rsp)
            return jsonobject.dumps(rsp)

        if cmd.sendCommandUrl:
            Report.url = cmd.sendCommandUrl

        report = Report(cmd.threadContext, cmd.threadContextStack)
        report.processType = "AddImage"
        report.resourceUuid = cmd.imageUuid
        report.progress_report("0", "start")

        url = urlparse.urlparse(cmd.url)
        if url.scheme in ('http', 'https', 'ftp'):
            image_format = get_origin_format(cmd.url, True)
            cmd.url = linux.shellquote(cmd.url)
            # roll back tmp ceph file after import it
            _1()

            _, PFILE = tempfile.mkstemp()
            content_length = shell.call('curl -sI %s|grep Content-Length' %
                                        cmd.url).strip().split()[1]
            total = _getRealSize(content_length)

            def _getProgress(synced):
                # parse wget's progress file and report up to 90%
                logger.debug(
                    "getProgress in ceph-bs-agent, synced: %s, total: %s" %
                    (synced, total))
                last = shell.call('tail -1 %s' % PFILE).strip()
                if not last or len(last.split()) < 1:
                    return synced
                logger.debug("last synced: %s" % last)
                written = _getRealSize(last.split()[0])
                if total > 0 and synced < written:
                    synced = written
                    if synced < total:
                        percent = int(round(float(synced) / float(total) * 90))
                        report.progress_report(percent, "report")
                return synced

            logger.debug("content-length is: %s" % total)

            _, _, err = bash_progress_1(
                'set -o pipefail;wget --no-check-certificate -O - %s 2>%s| rbd import --image-format 2 - %s/%s'
                % (cmd.url, PFILE, pool, tmp_image_name), _getProgress)
            if err:
                raise err
            actual_size = linux.get_file_size_by_http_head(cmd.url)

            if os.path.exists(PFILE):
                os.remove(PFILE)

        elif url.scheme == 'sftp':
            port = (url.port, 22)[url.port is None]
            _, PFILE = tempfile.mkstemp()
            pipe_path = PFILE + "fifo"
            scp_to_pipe_cmd = "scp -P %d -o StrictHostKeyChecking=no %s@%s:%s %s" % (
                port, url.username, url.hostname, url.path, pipe_path)
            sftp_command = "sftp -o StrictHostKeyChecking=no -o BatchMode=no -P %s -b /dev/stdin %s@%s" % (
                port, url.username, url.hostname) + " <<EOF\n%s\nEOF\n"
            if url.password is not None:
                scp_to_pipe_cmd = 'sshpass -p "%s" %s' % (url.password,
                                                          scp_to_pipe_cmd)
                sftp_command = 'sshpass -p "%s" %s' % (url.password,
                                                       sftp_command)

            # 5th column of `ls -l` output is the remote file size in bytes
            actual_size = shell.call(
                sftp_command %
                ("ls -l " + url.path)).splitlines()[1].strip().split()[4]
            os.mkfifo(pipe_path)
            image_format = get_origin_format(cmd.url, True)
            cmd.url = linux.shellquote(cmd.url)
            # roll back tmp ceph file after import it
            _1()

            def _get_progress(synced):
                # pv -n writes a bare percentage; report up to 90%
                logger.debug("getProgress in add image")
                if not os.path.exists(PFILE):
                    return synced
                last = shell.call('tail -1 %s' % PFILE).strip()
                if not last or not last.isdigit():
                    return synced
                report.progress_report(int(last) * 90 / 100, "report")
                return synced

            get_content_from_pipe_cmd = "pv -s %s -n %s 2>%s" % (
                actual_size, pipe_path, PFILE)
            import_from_pipe_cmd = "rbd import --image-format 2 - %s/%s" % (
                pool, tmp_image_name)
            _, _, err = bash_progress_1(
                'set -o pipefail; %s & %s | %s' %
                (scp_to_pipe_cmd, get_content_from_pipe_cmd,
                 import_from_pipe_cmd), _get_progress)

            if os.path.exists(PFILE):
                os.remove(PFILE)

            if os.path.exists(pipe_path):
                os.remove(pipe_path)

            if err:
                raise err

        elif url.scheme == 'file':
            # take everything after the 'file:' scheme prefix; the previous
            # str.lstrip('file:') treated its argument as a character SET and
            # could also strip leading 'f'/'i'/'l'/'e'/':' characters that
            # belong to the path itself
            src_path = cmd.url[len('file:'):]
            src_path = os.path.normpath(src_path)
            if not os.path.isfile(src_path):
                raise Exception('cannot find the file[%s]' % src_path)
            image_format = get_origin_format(src_path, True)
            # roll back tmp ceph file after import it
            _1()

            shell.check_run("rbd import --image-format 2 %s %s/%s" %
                            (src_path, pool, tmp_image_name))
            actual_size = os.path.getsize(src_path)
        else:
            raise Exception('unknown url[%s]' % cmd.url)

        file_format = shell.call(
            "set -o pipefail; qemu-img info rbd:%s/%s | grep 'file format' | cut -d ':' -f 2"
            % (pool, tmp_image_name))
        file_format = file_format.strip()
        if file_format not in ['qcow2', 'raw']:
            raise Exception('unknown image format: %s' % file_format)

        if file_format == 'qcow2':
            conf_path = None
            try:
                # append 'rbd default format = 2' so the conversion target is
                # created as a format-2 rbd image
                with open('/etc/ceph/ceph.conf', 'r') as fd:
                    conf = fd.read()
                    conf = '%s\n%s\n' % (conf, 'rbd default format = 2')
                    conf_path = linux.write_to_temp_file(conf)

                shell.check_run(
                    'qemu-img convert -f qcow2 -O rbd rbd:%s/%s rbd:%s/%s:conf=%s'
                    % (pool, tmp_image_name, pool, image_name, conf_path))
                shell.check_run('rbd rm %s/%s' % (pool, tmp_image_name))
            finally:
                if conf_path:
                    os.remove(conf_path)
        else:
            shell.check_run('rbd mv %s/%s %s/%s' %
                            (pool, tmp_image_name, pool, image_name))
        report.progress_report("100", "finish")

        @rollbackable
        def _2():
            # rollback action: drop the final rbd image on a later failure
            shell.check_run('rbd rm %s/%s' % (pool, image_name))

        _2()

        o = shell.call('rbd --format json info %s/%s' % (pool, image_name))
        image_stats = jsonobject.loads(o)

        rsp.size = long(image_stats.size_)
        rsp.actualSize = actual_size
        # qcow2 was converted to raw above, so report it as raw
        if image_format == "qcow2":
            rsp.format = "raw"
        else:
            rsp.format = image_format

        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
    def copy_bits_to_remote(self, req):
        """rsync every file in the qcow2 chains of cmd.paths to a remote host.

        Streams rsync's progress output into a temp file and polls it to
        report migration progress (10%..90%).  The {{NAME}} placeholders in
        the shell command strings are filled from same-named local variables
        by the bash_* helpers, so PATH/PASSWORD/USER/IP/PORT/DIR/PFILE must
        keep their names.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        # flatten the backing-file chain of every requested path
        chain = sum([linux.qcow2_get_file_chain(p) for p in cmd.paths], [])
        if cmd.sendCommandUrl:
            Report.url = cmd.sendCommandUrl

        report = Report()
        report.processType = "LocalStorageMigrateVolume"
        report.resourceUuid = cmd.uuid
        PFILE = shell.call('mktemp /tmp/tmp-XXXXXX').strip()

        # total bytes to transfer, deduplicated across chains
        total = 0
        for path in set(chain):
            total = total + os.path.getsize(path)

        written = 0

        def _get_progress(synced):
            # poll rsync's progress file; report 10-90% overall
            logger.debug(
                "getProgress in localstorage-agent, synced: %s, total: %s" %
                (synced, total))
            if not os.path.exists(PFILE):
                return synced
            # 'with' guarantees the handle is closed on every return path
            # (the original leaked it when the last line was not a digit)
            with open(PFILE, 'r') as fpread:
                lines = fpread.readlines()
            if not lines:
                return synced
            last = str(lines[-1]).strip().split('\r')[-1]
            if not last or len(last.split()) < 1:
                return synced
            line = last.split()[0]
            if not line.isdigit():
                return synced
            if total > 0:
                synced = long(line)
                if synced < total:
                    percent = int(
                        round(
                            float(written + synced) / float(total) * 80 + 10))
                    report.progress_report(percent, "report")
                    synced = written
            return synced

        err = None
        for path in set(chain):
            # these locals feed the {{...}} templates below — do not rename
            PATH = path
            PASSWORD = cmd.dstPassword
            USER = cmd.dstUsername
            IP = cmd.dstIp
            PORT = (cmd.dstPort and cmd.dstPort or "22")

            DIR = os.path.dirname(path)
            bash_errorout(
                '/usr/bin/sshpass -p {{PASSWORD}} ssh -o StrictHostKeyChecking=no -t -t -p {{PORT}} {{USER}}@{{IP}} "sudo mkdir -p {{DIR}}"'
            )
            _, _, err = bash_progress_1(
                'rsync -av --progress --relative {{PATH}} --rsh="/usr/bin/sshpass -p {{PASSWORD}} ssh -o StrictHostKeyChecking=no -p {{PORT}} -l {{USER}}" {{IP}}:/ 1>{{PFILE}}',
                _get_progress)
            if err:
                raise err
            written += os.path.getsize(path)
            # force the remote side to flush the copied file to disk
            bash_errorout(
                '/usr/bin/sshpass -p {{PASSWORD}} ssh -o StrictHostKeyChecking=no -p {{PORT}} {{USER}}@{{IP}} "/bin/sync {{PATH}}"'
            )
            percent = int(round(float(written) / float(total) * 80 + 10))
            report.progress_report(percent, "report")

        if os.path.exists(PFILE):
            os.remove(PFILE)
        rsp = AgentResponse()
        rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity()
        return jsonobject.dumps(rsp)
 def check_bits(self, req):
     """Tell the caller whether cmd.path exists on this host."""
     requested_path = jsonobject.loads(req[http.REQUEST_BODY]).path
     rsp = CheckBitsRsp()
     rsp.existing = os.path.exists(requested_path)
     return jsonobject.dumps(rsp)
 def get_physical_capacity(self, req):
     """Report the host's disk capacity; the request body is ignored."""
     rsp = AgentResponse()
     total, avail = self._get_disk_capacity()
     rsp.totalCapacity = total
     rsp.availableCapacity = avail
     return jsonobject.dumps(rsp)
 def delete_bootstrap_iso(self, req):
     """Delete the virtual-router bootstrap ISO named in the request."""
     iso_path = jsonobject.loads(req[http.REQUEST_BODY]).isoPath
     shell.ShellCmd('rm -f %s' % iso_path)()
     return jsonobject.dumps(DeleteVirtualRouterBootstrapIsoRsp())
 def rebase_root_volume_to_backing_file(self, req):
     """Point the root volume's backing file at cmd.backingFilePath."""
     body = jsonobject.loads(req[http.REQUEST_BODY])
     linux.qcow2_rebase_no_check(body.backingFilePath, body.rootVolumePath)
     return jsonobject.dumps(AgentResponse())
class FusionstorAgent(object):
    """HTTP agent serving Fusionstor primary-storage operations.

    Each *_PATH constant below is one HTTP endpoint; handlers are wired up
    in __init__ and delegate the actual storage work to the lichbd helper
    module.  Every handler replies with a JSON-serialized response object.
    """

    # One URI per primary-storage operation served by this agent.
    INIT_PATH = "/fusionstor/primarystorage/init"
    CREATE_VOLUME_PATH = "/fusionstor/primarystorage/volume/createempty"
    DELETE_PATH = "/fusionstor/primarystorage/delete"
    CLONE_PATH = "/fusionstor/primarystorage/volume/clone"
    FLATTEN_PATH = "/fusionstor/primarystorage/volume/flatten"
    SFTP_DOWNLOAD_PATH = "/fusionstor/primarystorage/sftpbackupstorage/download"
    SFTP_UPLOAD_PATH = "/fusionstor/primarystorage/sftpbackupstorage/upload"
    ECHO_PATH = "/fusionstor/primarystorage/echo"
    CREATE_SNAPSHOT_PATH = "/fusionstor/primarystorage/snapshot/create"
    DELETE_SNAPSHOT_PATH = "/fusionstor/primarystorage/snapshot/delete"
    PROTECT_SNAPSHOT_PATH = "/fusionstor/primarystorage/snapshot/protect"
    ROLLBACK_SNAPSHOT_PATH = "/fusionstor/primarystorage/snapshot/rollback"
    UNPROTECT_SNAPSHOT_PATH = "/fusionstor/primarystorage/snapshot/unprotect"
    CP_PATH = "/fusionstor/primarystorage/volume/cp"
    DELETE_POOL_PATH = "/fusionstor/primarystorage/deletepool"
    GET_VOLUME_SIZE_PATH = "/fusionstor/primarystorage/getvolumesize"
    PING_PATH = "/fusionstor/primarystorage/ping"
    GET_FACTS = "/fusionstor/primarystorage/facts"

    # Class-level HTTP server shared by all instances; created at class
    # definition time.
    http_server = http.HttpServer(port=7764)
    http_server.logfile_path = log.get_logfile_path()

    def __init__(self):
        """Register every endpoint; echo is the only synchronous URI."""
        self.http_server.register_async_uri(self.INIT_PATH, self.init)
        self.http_server.register_async_uri(self.DELETE_PATH, self.delete)
        self.http_server.register_async_uri(self.CREATE_VOLUME_PATH, self.create)
        self.http_server.register_async_uri(self.CLONE_PATH, self.clone)
        self.http_server.register_async_uri(self.CREATE_SNAPSHOT_PATH, self.create_snapshot)
        self.http_server.register_async_uri(self.DELETE_SNAPSHOT_PATH, self.delete_snapshot)
        self.http_server.register_async_uri(self.PROTECT_SNAPSHOT_PATH, self.protect_snapshot)
        self.http_server.register_async_uri(self.UNPROTECT_SNAPSHOT_PATH, self.unprotect_snapshot)
        self.http_server.register_async_uri(self.ROLLBACK_SNAPSHOT_PATH, self.rollback_snapshot)
        self.http_server.register_async_uri(self.FLATTEN_PATH, self.flatten)
        self.http_server.register_async_uri(self.SFTP_DOWNLOAD_PATH, self.sftp_download)
        self.http_server.register_async_uri(self.SFTP_UPLOAD_PATH, self.sftp_upload)
        self.http_server.register_async_uri(self.CP_PATH, self.cp)
        self.http_server.register_async_uri(self.DELETE_POOL_PATH, self.delete_pool)
        self.http_server.register_async_uri(self.GET_VOLUME_SIZE_PATH, self.get_volume_size)
        self.http_server.register_async_uri(self.PING_PATH, self.ping)
        self.http_server.register_async_uri(self.GET_FACTS, self.get_facts)
        self.http_server.register_sync_uri(self.ECHO_PATH, self.echo)

    def _set_capacity_to_response(self, rsp):
        # fill total/available capacity from the lichbd cluster stats
        total, used = lichbd.lichbd_get_capacity()

        rsp.totalCapacity = total
        rsp.availableCapacity = total - used

    def _get_file_size(self, path):
        # virtual (provisioned) size of a lichbd file
        return lichbd.lichbd_file_size(path)

    def _get_file_actual_size(self, path):
        # physically used size of a lichbd file
        return lichbd.lichbd_file_actual_size(path)

    @replyerror
    def get_volume_size(self, req):
        """Return virtual and actual size of the volume at cmd.installPath."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        path = self._normalize_install_path(cmd.installPath)
        rsp = GetVolumeSizeRsp()
        rsp.size = self._get_file_size(path)
        rsp.actualSize = self._get_file_actual_size(path)
        return jsonobject.dumps(rsp)

    @replyerror
    def get_facts(self, req):
        """Report the cluster fsid so the management node can identify it."""
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        rsp = GetFactsRsp()
        rsp.fsid = lichbd.lichbd_get_fsid()
        return jsonobject.dumps(rsp)

    @replyerror
    def ping(self, req):
        """Health check: verify a tiny test image exists or can be created.

        Any outcome other than 'already exists' or 'created fine' marks the
        response as an operation failure.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = PingRsp()

        if cmd.testImagePath:
            pool = cmd.testImagePath.split('/')[0]
            testImagePath = '%s/this-is-a-test-image-with-long-name' % pool
            shellcmd = lichbd.lichbd_file_info(testImagePath)
            if shellcmd.return_code == errno.ENOENT:
                # test image absent -- try to create a 1-byte raw image
                try:
                    lichbd.lichbd_create_raw(testImagePath, '1b')
                except Exception, e:
                    rsp.success = False
                    rsp.operationFailure = True
                    rsp.error = str(e)
                    logger.debug("%s" % rsp.error)
            elif shellcmd.return_code == 0:
                # image already there -- storage is reachable
                pass
            else:
                # any other errno is treated as a real storage failure
                rsp.success = False
                rsp.operationFailure = True
                rsp.error = "%s %s" % (shellcmd.cmd, shellcmd.stderr)
                logger.debug("%s: %s" % (shellcmd.cmd, shellcmd.stderr))

        return jsonobject.dumps(rsp)
# Exemple #58
# 0
 def get_image_size(self, req):
     """Return the size of the image at cmd.installPath."""
     body = jsonobject.loads(req[http.REQUEST_BODY])
     normalized = self._normalize_install_path(body.installPath)
     rsp = GetImageSizeRsp()
     rsp.size = self._get_file_size(normalized)
     return jsonobject.dumps(rsp)
    def setup_ceph_self_fencer(self, req):
        """Start an asynchronous self-fencing heartbeat on ceph storage.

        A background thread periodically checks (or re-creates) a heartbeat
        rbd image.  After cmd.maxAttempts consecutive failures it calls
        kill_vm() for the heartbeat image's pool path when ceph reports an
        error state, otherwise it deletes the heartbeat image so it can be
        recreated.  The loop runs until self.run_ceph_fencer is set False.
        """
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        def check_tools():
            # both the `ceph` and `rbd` CLIs must be present on this host;
            # shell.run presumably returns the exit status (0 == found) --
            # verify against the shell helper's contract
            ceph = shell.run('which ceph')
            rbd = shell.run('which rbd')

            if ceph == 0 and rbd == 0:
                return True

            return False

        if not check_tools():
            rsp = AgentRsp()
            rsp.error = "no ceph or rbd on current host, please install the tools first"
            rsp.success = False
            return jsonobject.dumps(rsp)

        # join monitors with an escaped ';' and escape ':' for the
        # qemu-style rbd connection string built below
        mon_url = '\;'.join(cmd.monUrls)
        mon_url = mon_url.replace(':', '\\\:')

        # flag polled by the heartbeat thread; cleared elsewhere to stop it
        self.run_ceph_fencer = True

        def get_ceph_rbd_args():
            # build the qemu rbd target, adding cephx auth when a key is set
            if cmd.userKey is None:
                return 'rbd:%s:mon_host=%s' % (cmd.heartbeatImagePath, mon_url)
            return 'rbd:%s:id=zstack:key=%s:auth_supported=cephx\;none:mon_host=%s' % (
                cmd.heartbeatImagePath, cmd.userKey, mon_url)

        def ceph_in_error_stat():
            # HEALTH_OK,HEALTH_WARN,HEALTH_ERR and others(may be empty)...
            health = shell.ShellCmd('timeout %s ceph health' %
                                    cmd.storageCheckerTimeout)
            health(False)
            # If the command times out, then exit with status 124
            if health.return_code == 124:
                return True

            health_status = health.stdout
            return not (health_status.startswith('HEALTH_OK')
                        or health_status.startswith('HEALTH_WARN'))

        def heartbeat_file_exists():
            # probe the heartbeat image with qemu-img info
            touch = shell.ShellCmd(
                'timeout %s qemu-img info %s' %
                (cmd.storageCheckerTimeout, get_ceph_rbd_args()))
            touch(False)

            if touch.return_code == 0:
                return True

            logger.warn('cannot query heartbeat image: %s: %s' %
                        (cmd.heartbeatImagePath, touch.stderr))
            return False

        def create_heartbeat_file():
            # create a 1-byte raw heartbeat image; a concurrent creator
            # ("File exists") also counts as success
            create = shell.ShellCmd(
                'timeout %s qemu-img create -f raw %s 1' %
                (cmd.storageCheckerTimeout, get_ceph_rbd_args()))
            create(False)

            if create.return_code == 0 or "File exists" in create.stderr:
                return True

            logger.warn('cannot create heartbeat image: %s: %s' %
                        (cmd.heartbeatImagePath, create.stderr))
            return False

        def delete_heartbeat_file():
            # best-effort removal; errors are deliberately ignored
            shell.run(
                "timeout %s rbd rm --id zstack %s -m %s" %
                (cmd.storageCheckerTimeout, cmd.heartbeatImagePath, mon_url))

        @thread.AsyncThread
        def heartbeat_on_ceph():
            try:
                failure = 0

                while self.run_ceph_fencer:
                    time.sleep(cmd.interval)

                    if heartbeat_file_exists() or create_heartbeat_file():
                        failure = 0
                        continue

                    failure += 1
                    if failure == cmd.maxAttempts:
                        # c.f. We discovered that, Ceph could behave the following:
                        #  1. Create heart-beat file, failed with 'File exists'
                        #  2. Query the hb file in step 1, and failed again with 'No such file or directory'
                        if ceph_in_error_stat():
                            path = (os.path.split(cmd.heartbeatImagePath))[0]
                            kill_vm(cmd.maxAttempts, [path], False)
                        else:
                            delete_heartbeat_file()

                        # reset the failure count
                        failure = 0

                logger.debug('stop self-fencer on ceph primary storage')
            except:
                logger.debug(
                    'self-fencer on ceph primary storage stopped abnormally')
                content = traceback.format_exc()
                logger.warn(content)

        heartbeat_on_ceph()

        return jsonobject.dumps(AgentRsp())
# Exemple #60
# 0
 def delete_target(self, req):
     """Remove the iSCSI target identified by cmd.target/cmd.uuid."""
     body = jsonobject.loads(req[http.REQUEST_BODY])
     self._delete_target(body.target, body.uuid)
     logger.debug('deleted iscsi target[%s]' % body.target)
     return jsonobject.dumps(AgentCapacityResponse())