Code Example #1
File: mevoco.py Project: weizai118/zstack-utility
    def batch_apply_userdata(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        if cmd.rebuild:
            # kill all lighttpd processes; they will be restarted later
            shell.call('pkill -9 lighttpd || true')

        namespaces = {}
        for u in cmd.userdata:
            if u.namespaceName not in namespaces:
                namespaces[u.namespaceName] = u
            else:
                if namespaces[u.namespaceName].dhcpServerIp != u.dhcpServerIp:
                    raise Exception('same namespace [%s] but has different dhcpServerIp: %s, %s ' % (
                        u.namespaceName, namespaces[u.namespaceName].dhcpServerIp, u.dhcpServerIp))
                if namespaces[u.namespaceName].bridgeName != u.bridgeName:
                    raise Exception('same namespace [%s] but has different bridgeName: %s, %s ' % (
                        u.namespaceName, namespaces[u.namespaceName].bridgeName, u.bridgeName))
                if namespaces[u.namespaceName].port != u.port:
                    raise Exception('same namespace [%s] but has different port: %s, %s ' % (
                        u.namespaceName, namespaces[u.namespaceName].port, u.port))

        for n in namespaces.values():
            self._apply_userdata_xtables(n)

        for u in cmd.userdata:
            self._apply_userdata_vmdata(u)

        for n in namespaces.values():
            self._apply_userdata_restart_httpd(n)

        return jsonobject.dumps(kvmagent.AgentResponse())
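
The core of this handler is the consistency check: userdata entries that share a namespaceName must agree on dhcpServerIp, bridgeName and port, and only one representative per namespace is used for the xtables and httpd steps. A minimal standalone sketch of that grouping logic, assuming plain objects with the same attribute names (the UserData tuple is a hypothetical stand-in for the jsonobject entries):

from collections import namedtuple

# Hypothetical stand-in for the jsonobject userdata entries above.
UserData = namedtuple('UserData', ['namespaceName', 'dhcpServerIp', 'bridgeName', 'port'])

def group_by_namespace(userdata):
    """Pick one representative per namespace, raising on conflicting settings."""
    namespaces = {}
    for u in userdata:
        seen = namespaces.get(u.namespaceName)
        if seen is None:
            namespaces[u.namespaceName] = u
            continue
        for field in ('dhcpServerIp', 'bridgeName', 'port'):
            if getattr(seen, field) != getattr(u, field):
                raise Exception('same namespace [%s] but has different %s: %s, %s' % (
                    u.namespaceName, field, getattr(seen, field), getattr(u, field)))
    return namespaces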
Code Example #2
    def cleanup_userdata(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        BR_NAME = cmd.bridgeName
        CHAIN_NAME = "USERDATA-%s" % BR_NAME

        o = bash_o("ebtables-save | grep {{CHAIN_NAME}} | grep -- -A")
        o = o.strip(" \t\r\n")
        if o:
            cmds = []
            for l in o.split("\n"):
                # we don't know whether the rule lives in the filter table or the
                # nat table, so we try both; deleting from the wrong table fails silently
                cmds.append(EBTABLES_CMD +
                            " -t filter %s" % l.replace("-A", "-D"))
                cmds.append(EBTABLES_CMD +
                            " -t nat %s" % l.replace("-A", "-D"))

            bash_r("\n".join(cmds))

        bash_errorout(
            "ps aux | grep lighttpd | grep {{BR_NAME}} | grep -w userdata | awk '{print $2}' | xargs -r kill -9"
        )

        return jsonobject.dumps(kvmagent.AgentResponse())
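
The cleanup re-reads the live ruleset with ebtables-save, rewrites every '-A' (append) line of the userdata chain into the matching '-D' (delete) command, and runs it against both the filter and the nat table, because the dump does not say which table a line belongs to. A small sketch of that rewrite, assuming EBTABLES_CMD points at the ebtables binary (the path below is illustrative):

EBTABLES_CMD = '/sbin/ebtables'  # illustrative path to the ebtables binary

def delete_commands(ebtables_save_output, chain_name):
    """Turn '-A <chain> ...' dump lines into delete commands for both tables."""
    cmds = []
    for line in ebtables_save_output.splitlines():
        if chain_name in line and '-A' in line:
            rule = line.replace('-A', '-D')
            cmds.append('%s -t filter %s' % (EBTABLES_CMD, rule))
            cmds.append('%s -t nat %s' % (EBTABLES_CMD, rule))
    return cmds

# delete_commands('-A USERDATA-br_eth0 -p ARP -j DROP', 'USERDATA-br_eth0')
# -> ['/sbin/ebtables -t filter -D USERDATA-br_eth0 -p ARP -j DROP',
#     '/sbin/ebtables -t nat -D USERDATA-br_eth0 -p ARP -j DROP']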
Code Example #3
    def commit_to_imagestore(self, cmd, req):
        fpath = cmd.primaryStorageInstallPath

        # Synchronize cached writes for 'fpath'
        shell.call('/bin/sync ' + fpath)

        # Add the image to registry
        cmdstr = '%s -json  -callbackurl %s -taskid %s -imageUuid %s add -desc \'%s\' -file %s' % (
            self.ZSTORE_CLI_PATH, req[http.REQUEST_HEADER].get(http.CALLBACK_URI),
            req[http.REQUEST_HEADER].get(http.TASK_UUID), cmd.imageUuid, cmd.description, fpath)

        logger.debug('adding %s to local image store' % fpath)
        shell.call(cmdstr.encode(encoding="utf-8"))
        logger.debug('%s added to local image store' % fpath)

        name, imageid = self._get_image_reference(fpath)

        rsp = kvmagent.AgentResponse()
        rsp.backupStorageInstallPath = self._build_install_path(name, imageid)
        rsp.size = linux.qcow2_size_and_actual_size(cmd.primaryStorageInstallPath)[0]

        # we need to sum all the disk size within the chain ...
        chain = linux.qcow2_get_file_chain(cmd.primaryStorageInstallPath)
        rsp.actualSize = sum([ linux.qcow2_size_and_actual_size(f)[1] for f in chain ])

        return jsonobject.dumps(rsp)
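
rsp.size is the virtual size of the top image, while rsp.actualSize sums the on-disk size of every file in the qcow2 backing chain, since a snapshot chain spreads its data across several files. A hedged sketch of how the same numbers could be obtained directly with qemu-img (the linux.qcow2_* helpers presumably wrap something similar; error handling omitted):

import json
import subprocess

def qcow2_chain_sizes(path):
    """Return (virtual size of the top image, summed actual size of the whole chain)."""
    out = subprocess.check_output(
        ['qemu-img', 'info', '--backing-chain', '--output=json', path])
    infos = json.loads(out)  # one entry per file in the backing chain
    return infos[0]['virtual-size'], sum(i['actual-size'] for i in infos)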
Code Example #4
    def upload_to_imagestore(self, cmd, req):
        crsp = self.commit_to_imagestore(cmd, req)

        extpara = ""
        taskid = req[http.REQUEST_HEADER].get(http.TASK_UUID)
        if cmd.threadContext:
            if cmd.threadContext['task-stage']:
                extpara += " -stage %s" % cmd.threadContext['task-stage']
            if cmd.threadContext.api:
                taskid = cmd.threadContext.api

        cmdstr = '%s -url %s:%s -callbackurl %s -taskid %s -imageUuid %s %s push %s' % (
            self.ZSTORE_CLI_PATH, cmd.hostname, self.ZSTORE_DEF_PORT,
            req[http.REQUEST_HEADER].get(http.CALLBACK_URI), taskid,
            cmd.imageUuid, extpara, cmd.primaryStorageInstallPath)
        logger.debug('pushing %s to image store' %
                     cmd.primaryStorageInstallPath)
        shell.call(cmdstr)
        logger.debug('%s pushed to image store' %
                     cmd.primaryStorageInstallPath)

        rsp = kvmagent.AgentResponse()
        rsp.backupStorageInstallPath = jsonobject.loads(
            crsp).backupStorageInstallPath
        return jsonobject.dumps(rsp)
Code Example #5
    def download_from_imagestore(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        self.imagestore_client.download_from_imagestore(
            self.mount_path.get(cmd.uuid), cmd.hostname,
            cmd.backupStorageInstallPath, cmd.primaryStorageInstallPath)
        rsp = kvmagent.AgentResponse()
        self._set_capacity_to_response(cmd.uuid, rsp)
        return jsonobject.dumps(rsp)
Code Example #6
    def cancel_download_from_kvmhost(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = kvmagent.AgentResponse()

        install_abs_path = cmd.primaryStorageInstallPath
        shell.run("pkill -9 -f '%s'" % install_abs_path)

        linux.rm_file_force(cmd.primaryStorageInstallPath)
        return jsonobject.dumps(rsp)
Code Example #7
    def download_from_imagestore(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        mount_path = self.mount_path.get(cmd.uuid)
        self.check_nfs_mounted(mount_path)
        cachedir = None if cmd.isData else mount_path
        self.imagestore_client.download_from_imagestore(cachedir, cmd.hostname, cmd.backupStorageInstallPath, cmd.primaryStorageInstallPath)
        rsp = kvmagent.AgentResponse()
        self._set_capacity_to_response(cmd.uuid, rsp)
        return jsonobject.dumps(rsp)
Code Example #8
File: mevoco.py Project: zenosslk/zstack-utility
    def batch_apply_userdata(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        if cmd.rebuild:
            # kill all lighttpd processes; they will be restarted later
            shell.call('pkill -9 lighttpd || true')

        for u in cmd.userdata:
            self._apply_userdata(u)
        return jsonobject.dumps(kvmagent.AgentResponse())
Code Example #9
    def add_interface_to_bridge(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = kvmagent.AgentResponse()
        oldbr = shell.call("""brctl show | awk '$4 == "%s" {print $1}'""" % cmd.physicalInterfaceName).strip()
        if oldbr == cmd.bridgeName:
            return jsonobject.dumps(rsp)

        if oldbr:
            shell.run("brctl delif %s %s" % (oldbr, cmd.physicalInterfaceName))
        shell.check_run("brctl addif %s %s" % (cmd.bridgeName, cmd.physicalInterfaceName))
        return jsonobject.dumps(rsp)
Code Example #10
    def detachvolume(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        logger.debug('detach volume %s' % cmd.volumeId)
        rsp = kvmagent.AgentResponse()
        s = shell.ShellCmd("/opt/tdc/tdc_admin destroy-vrbd --device_id=%s" %
                           cmd.volumeId)
        s(False)
        if s.return_code != 0:
            rsp.success = False
            rsp.error = "detach volume failed: %s" % s.stderr

        return jsonobject.dumps(rsp)
Code Example #11
    def fusionstor_query(self, req):
        protocol = lichbd.get_protocol()
        if protocol == 'lichbd':
            lichbd.makesure_qemu_img_with_lichbd()
        elif protocol == 'sheepdog' or protocol == 'nbd':
            pass
        else:
            raise shell.ShellError('unsupported protocol; only lichbd, sheepdog and nbd are supported')

        o = shell.call('lich.node --stat 2>/dev/null')
        if 'running' not in o:
            raise shell.ShellError('the lichd process of this node is not running, please check the lichd service')

        return jsonobject.dumps(kvmagent.AgentResponse())
Code Example #12
File: imagestore.py Project: tomzhang/zstack-utility
    def upload_to_imagestore(self, host, primaryStorageInstallPath):
        imf = self._get_image_json_file(primaryStorageInstallPath)
        if not os.path.isfile(imf):
            self.commit_to_imagestore(primaryStorageInstallPath)

        cmdstr = '%s -url %s:%s push %s' % (self.ZSTORE_CLI_PATH, host,
                                            self.ZSTORE_DEF_PORT,
                                            primaryStorageInstallPath)
        logger.debug('pushing %s to image store' % primaryStorageInstallPath)
        shell.call(cmdstr)
        logger.debug('%s pushed to image store' % primaryStorageInstallPath)

        rsp = kvmagent.AgentResponse()
        name, imageid = self._get_image_reference(primaryStorageInstallPath)
        rsp.backupStorageInstallPath = self._build_install_path(name, imageid)
        return jsonobject.dumps(rsp)
Code Example #13
    def upload_to_imagestore(self, cmd, req):
        imf = self._get_image_json_file(cmd.primaryStorageInstallPath)
        if not os.path.isfile(imf):
            self.commit_to_imagestore(cmd, req)

        cmdstr = '%s -url %s:%s -callbackurl %s -taskid %s -imageUuid %s push %s' % (
            self.ZSTORE_CLI_PATH, cmd.hostname, self.ZSTORE_DEF_PORT,
            req[http.REQUEST_HEADER].get(http.CALLBACK_URI),
            req[http.REQUEST_HEADER].get(http.TASK_UUID), cmd.imageUuid,
            cmd.primaryStorageInstallPath)
        logger.debug(cmdstr)
        logger.debug('pushing %s to image store' % cmd.primaryStorageInstallPath)
        shell.call(cmdstr)
        logger.debug('%s pushed to image store' % cmd.primaryStorageInstallPath)

        rsp = kvmagent.AgentResponse()
        name, imageid = self._get_image_reference(cmd.primaryStorageInstallPath)
        rsp.backupStorageInstallPath = self._build_install_path(name, imageid)
        return jsonobject.dumps(rsp)
Code Example #14
File: imagestore.py Project: tomzhang/zstack-utility
    def commit_to_imagestore(self, primaryStorageInstallPath):
        fpath = primaryStorageInstallPath

        # Synchronize cached writes for 'fpath'
        shell.call('/bin/sync ' + fpath)

        # Add the image to registry
        cmdstr = '%s -json add -file %s' % (self.ZSTORE_CLI_PATH, fpath)

        logger.debug('adding %s to local image store' % fpath)
        shell.call(cmdstr)
        logger.debug('%s added to local image store' % fpath)

        name, imageid = self._get_image_reference(fpath)

        rsp = kvmagent.AgentResponse()
        rsp.backupStorageInstallPath = self._build_install_path(name, imageid)
        rsp.size, rsp.actualSize = linux.qcow2_size_and_actual_size(
            primaryStorageInstallPath)
        return jsonobject.dumps(rsp)
Code Example #15
    def commit_to_imagestore(self, cmd, req):
        fpath = cmd.primaryStorageInstallPath

        # Synchronize cached writes for 'fpath'
        linux.sync()

        # Add the image to registry
        cmdstr = '%s -json  -callbackurl %s -taskid %s -imageUuid %s add -desc \'%s\' -file %s' % (
            self.ZSTORE_CLI_PATH, req[http.REQUEST_HEADER].get(http.CALLBACK_URI),
            req[http.REQUEST_HEADER].get(http.TASK_UUID), cmd.imageUuid, cmd.description, fpath)
        logger.debug('adding %s to local image store' % fpath)
        output = shell.call(cmdstr.encode(encoding="utf-8"))
        logger.debug('%s added to local image store' % fpath)

        imf = jsonobject.loads(output.splitlines()[-1])

        rsp = kvmagent.AgentResponse()
        rsp.backupStorageInstallPath = self._build_install_path(imf.name, imf.id)
        rsp.size = imf.virtualsize
        rsp.actualSize = imf.size

        return jsonobject.dumps(rsp)
Code Example #16
    def installtdc(self, req):
        def overwriteConfig(config, cfile):
            c = open(cfile, 'w')
            c.write(config)
            c.close()

        def updateTdcConfig(config, cfile):
            '''
               1. read /opt/tdc/apsara_global_config.json if it exists
               2. compare with config
               3. overwrite if it is different
            '''
            if not os.path.exists(cfile):
                d = os.path.dirname(cfile)
                if not os.path.exists(d):
                    os.makedirs(d, 0755)
                overwriteConfig(config, cfile)
                return True

            updated = False
            c = open(cfile)
            if config != c.read().strip():
                overwriteConfig(config, cfile)
                updated = True

            c.close()
            return updated

        logger.debug('install tdc pkg')
        rsp = kvmagent.AgentResponse()
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        if cmd.version != tdcversion:
            rsp.error = "no matching tdc version found, agent needs version %d" % tdcversion
        else:
            startCmd = shell.ShellCmd("/opt/tdc/tdc_admin lsi")
            startCmd(False)
            if startCmd.return_code != 0:
                linux.mkdir("/apsara", 0755)
                kernel_version = shell.call("uname -r")
                yum_cmd = "yum --enablerepo=zstack-mn,qemu-kvm-ev-mn clean metadata"
                shell.call(yum_cmd)
                e = shell.ShellCmd(
                    'rpm -qi kernel-%s-vrbd-1.0-0.1.release1.alios7.x86_64' %
                    kernel_version.strip())
                e(False)
                if e.return_code != 0:
                    yum_cmd = "yum --disablerepo=* --enablerepo=zstack-mn,qemu-kvm-ev-mn install -y kernel-%s-vrbd-1.0-0.1.release1.alios7.x86_64" % kernel_version.strip(
                    )
                    shell.call(yum_cmd)
                e = shell.ShellCmd(
                    'rpm -qi tdc-unified-8.2.0.release.el5.x86_64')
                e(False)
                if e.return_code != 0:
                    yum_cmd = "yum --disablerepo=* --enablerepo=zstack-mn,qemu-kvm-ev-mn install -y tdc-unified-8.2.0.release.el5.x86_64"
                    shell.call(yum_cmd)
                shell.call("service tdc restart")

                startCmd = shell.ShellCmd("/opt/tdc/tdc_admin lsi")
                startCmd(False)
                if startCmd.return_code != 0:
                    rsp.success = False
                    rsp.error = "tdc_admin lsi failed: %s" % startCmd.stderr
                    return jsonobject.dumps(rsp)

            if cmd.tdcConfig and cmd.nuwaConfig and cmd.nuwaCfg:
                tdc = updateTdcConfig(cmd.tdcConfig,
                                      '/opt/tdc/apsara_global_config.json')
                nuwa1 = updateTdcConfig(
                    cmd.nuwaConfig,
                    '/apsara/conf/conffiles/nuwa/client/nuwa_config.json')
                nuwa2 = updateTdcConfig(cmd.nuwaCfg, '/apsara/nuwa/nuwa.cfg')
                if tdc or nuwa1 or nuwa2:
                    shell.call("service tdc restart")

        return jsonobject.dumps(rsp)
Code Example #17
File: mevoco.py Project: lstfyt/zstack-utility
    def batch_apply_userdata(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        for u in cmd.userdata:
            self._apply_userdata(u)
        return jsonobject.dumps(kvmagent.AgentResponse())
Code Example #18
    def convert_image_raw(self, cmd):
        destPath = cmd.srcPath.replace('.qcow2', '.raw')
        linux.qcow2_convert_to_raw(cmd.srcPath, destPath)
        rsp = kvmagent.AgentResponse()
        rsp.destPath = destPath
        return jsonobject.dumps(rsp)
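
linux.qcow2_convert_to_raw is not shown here; note also that destPath is derived with a plain suffix replace, so it assumes cmd.srcPath really ends in '.qcow2'. A minimal sketch of what such a conversion helper might look like (an assumption, not the actual zstack-utility implementation):

import subprocess

def qcow2_convert_to_raw(src_path, dst_path):
    """Convert a qcow2 image to a raw image with qemu-img."""
    subprocess.check_call(['qemu-img', 'convert', '-f', 'qcow2', '-O', 'raw', src_path, dst_path])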
Code Example #19
File: prometheus.py Project: hctwgl/cmp-vstack1
    def start_collectd_exporter(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = kvmagent.AgentResponse()

        eths = bash_o("ls /sys/class/net").split()
        interfaces = []
        for eth in eths:
            eth = eth.strip(' \t\n\r')
            if eth == 'lo': continue
            elif eth.startswith('vnic'): continue
            elif eth.startswith('outer'): continue
            elif eth.startswith('br_'): continue
            elif not eth: continue
            else:
                interfaces.append(eth)

        conf_path = os.path.join(os.path.dirname(cmd.binaryPath),
                                 'collectd.conf')

        conf = '''Interval {{INTERVAL}}
FQDNLookup false

LoadPlugin syslog
LoadPlugin aggregation
LoadPlugin cpu
LoadPlugin disk
LoadPlugin interface
LoadPlugin memory
LoadPlugin network
LoadPlugin virt

<Plugin aggregation>
	<Aggregation>
		#Host "unspecified"
		Plugin "cpu"
		#PluginInstance "unspecified"
		Type "cpu"
		#TypeInstance "unspecified"

		GroupBy "Host"
		GroupBy "TypeInstance"

		CalculateNum false
		CalculateSum false
		CalculateAverage true
		CalculateMinimum false
		CalculateMaximum false
		CalculateStddev false
	</Aggregation>
</Plugin>

<Plugin cpu>
  ReportByCpu true
  ReportByState true
  ValuesPercentage true
</Plugin>

<Plugin disk>
  Disk "/^sd/"
  Disk "/^hd/"
  Disk "/^vd/"
  IgnoreSelected false
</Plugin>

<Plugin "interface">
{% for i in INTERFACES -%}
  Interface "{{i}}"
{% endfor -%}
  IgnoreSelected false
</Plugin>

<Plugin memory>
	ValuesAbsolute true
	ValuesPercentage false
</Plugin>

<Plugin virt>
	Connection "qemu:///system"
	RefreshInterval {{INTERVAL}}
	HostnameFormat name
</Plugin>

<Plugin network>
	Server "localhost" "25826"
</Plugin>

'''

        tmpt = Template(conf)
        conf = tmpt.render({
            'INTERVAL': cmd.interval,
            'INTERFACES': interfaces,
        })

        need_restart_collectd = False
        if os.path.exists(conf_path):
            with open(conf_path, 'r') as fd:
                old_conf = fd.read()

            if old_conf != conf:
                with open(conf_path, 'w') as fd:
                    fd.write(conf)
                need_restart_collectd = True
        else:
            with open(conf_path, 'w') as fd:
                fd.write(conf)
            need_restart_collectd = True

        pid = linux.find_process_by_cmdline(['collectd', conf_path])
        if not pid:
            bash_errorout('collectd -C %s' % conf_path)
        else:
            if need_restart_collectd:
                bash_errorout('kill -9 %s' % pid)
                bash_errorout('collectd -C %s' % conf_path)

        pid = linux.find_process_by_cmdline(['collectd_exporter'])
        if not pid:
            EXPORTER_PATH = cmd.binaryPath
            LOG_FILE = os.path.join(os.path.dirname(EXPORTER_PATH),
                                    'collectd_exporter.log')
            bash_errorout('chmod +x {{EXPORTER_PATH}}')
            bash_errorout(
                "nohup {{EXPORTER_PATH}} -collectd.listen-address :25826 >{{LOG_FILE}} 2>&1 < /dev/null &\ndisown"
            )

        return jsonobject.dumps(rsp)
Code Example #20
File: prometheus.py Project: tsunli/zstack-utility
    def start_prometheus_exporter(self, req):
        @in_bash
        def start_collectd(cmd):
            conf_path = os.path.join(os.path.dirname(cmd.binaryPath), 'collectd.conf')

            conf = '''Interval {{INTERVAL}}
# version {{VERSION}}
FQDNLookup false

LoadPlugin syslog
LoadPlugin aggregation
LoadPlugin cpu
LoadPlugin disk
LoadPlugin interface
LoadPlugin memory
LoadPlugin network
LoadPlugin virt

<Plugin aggregation>
	<Aggregation>
		#Host "unspecified"
		Plugin "cpu"
		#PluginInstance "unspecified"
		Type "cpu"
		#TypeInstance "unspecified"

		GroupBy "Host"
		GroupBy "TypeInstance"

		CalculateNum false
		CalculateSum false
		CalculateAverage true
		CalculateMinimum false
		CalculateMaximum false
		CalculateStddev false
	</Aggregation>
</Plugin>

<Plugin cpu>
  ReportByCpu true
  ReportByState true
  ValuesPercentage true
</Plugin>

<Plugin disk>
  Disk "/^sd[a-z]$/"
  Disk "/^hd[a-z]$/"
  Disk "/^vd[a-z]$/"
  IgnoreSelected false
</Plugin>

<Plugin "interface">
{% for i in INTERFACES -%}
  Interface "{{i}}"
{% endfor -%}
  IgnoreSelected false
</Plugin>

<Plugin memory>
	ValuesAbsolute true
	ValuesPercentage false
</Plugin>

<Plugin virt>
	Connection "qemu:///system"
	RefreshInterval {{INTERVAL}}
	HostnameFormat name
    PluginInstanceFormat name
    BlockDevice "/:hd[a-z]/"
    IgnoreSelected true
    ExtraStats "vcpu memory"
</Plugin>

<Plugin network>
	Server "localhost" "25826"
</Plugin>

'''

            tmpt = Template(conf)
            conf = tmpt.render({
                'INTERVAL': cmd.interval,
                'INTERFACES': interfaces,
                'VERSION': cmd.version,
            })

            need_restart_collectd = False
            if os.path.exists(conf_path):
                with open(conf_path, 'r') as fd:
                    old_conf = fd.read()

                if old_conf != conf:
                    with open(conf_path, 'w') as fd:
                        fd.write(conf)
                    need_restart_collectd = True
            else:
                with open(conf_path, 'w') as fd:
                    fd.write(conf)
                need_restart_collectd = True

            cpid = linux.find_process_by_command('collectd', [conf_path])
            mpid = linux.find_process_by_command('collectdmon', [conf_path])

            if not cpid:
                bash_errorout('collectdmon -- -C %s' % conf_path)
            else:
                bash_errorout('kill -TERM %s' % cpid)
                if need_restart_collectd:
                    if not mpid:
                        bash_errorout('collectdmon -- -C %s' % conf_path)
                    else:
                        bash_errorout('kill -HUP %s' % mpid)
                else:
                    if not mpid:
                        bash_errorout('collectdmon -- -C %s' % conf_path)

        def run_in_systemd(binPath, args, log):
            def get_systemd_name(path):
                if "collectd_exporter" in path:
                    return "collectd_exporter"
                elif "node_exporter" in path:
                    return "node_exporter"
                elif "pushgateway" in path:
                    return "pushgateway"

            def reload_and_restart_service(service_name):
                bash_errorout("systemctl daemon-reload && systemctl restart %s.service" % service_name)

            service_name = get_systemd_name(binPath)
            service_path = '/etc/systemd/system/%s.service' % service_name

            service_conf = '''
[Unit]
Description=prometheus %s
After=network.target

[Service]
ExecStart=/bin/sh -c '%s %s > %s 2>&1'
ExecStop=/bin/sh -c 'pkill -TERM -f %s'

Restart=always
RestartSec=30s
[Install]
WantedBy=multi-user.target
''' % (service_name, binPath, args, '/dev/null' if log.endswith('/pushgateway.log') else log, binPath)

            if not os.path.exists(service_path):
                linux.write_file(service_path, service_conf, True)
                os.chmod(service_path, 0644)
                reload_and_restart_service(service_name)
                return

            if linux.read_file(service_path) != service_conf:
                linux.write_file(service_path, service_conf, True)
                logger.info("%s.service conf changed" % service_name)

            os.chmod(service_path, 0644)
            # restart service regardless of conf changes, for ZSTAC-23539
            reload_and_restart_service(service_name)

        @lock.file_lock("/run/collectd-conf.lock", locker=lock.Flock())
        def start_collectd_exporter(cmd):
            start_collectd(cmd)
            start_exporter(cmd)

        @in_bash
        def start_exporter(cmd):
            EXPORTER_PATH = cmd.binaryPath
            LOG_FILE = os.path.join(os.path.dirname(EXPORTER_PATH), cmd.binaryPath + '.log')
            ARGUMENTS = cmd.startupArguments
            if not ARGUMENTS:
                ARGUMENTS = ""
            os.chmod(EXPORTER_PATH, 0o755)
            run_in_systemd(EXPORTER_PATH, ARGUMENTS, LOG_FILE)

        para = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = kvmagent.AgentResponse()

        eths = bash_o("ls /sys/class/net").split()
        interfaces = []
        for eth in eths:
            eth = eth.strip(' \t\n\r')
            if eth == 'lo': continue
            if eth == 'bonding_masters': continue
            elif eth.startswith('vnic'): continue
            elif eth.startswith('outer'): continue
            elif eth.startswith('br_'): continue
            elif not eth: continue
            else:
                interfaces.append(eth)

        for cmd in para.cmds:
            if "collectd_exporter" in cmd.binaryPath:
                start_collectd_exporter(cmd)
            else:
                start_exporter(cmd)

        return jsonobject.dumps(rsp)
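
For a concrete picture of what run_in_systemd writes, this is roughly how the rendered unit would look for a collectd_exporter binary (binary path, arguments and log path are illustrative, not taken from the source):

[Unit]
Description=prometheus collectd_exporter
After=network.target

[Service]
ExecStart=/bin/sh -c '/usr/local/zstack/collectd_exporter -collectd.listen-address :25826 > /usr/local/zstack/collectd_exporter.log 2>&1'
ExecStop=/bin/sh -c 'pkill -TERM -f /usr/local/zstack/collectd_exporter'

Restart=always
RestartSec=30s
[Install]
WantedBy=multi-user.target

Because the template embeds the binary path in both ExecStart and ExecStop, any change to cmd.binaryPath yields different unit content, which is why the handler rewrites the file when the content differs and, per the ZSTAC-23539 comment, restarts the service on every invocation regardless.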
Code Example #21
    def installtdc(self, req):
        def overwriteConfig(config, cfile):
            c = open(cfile, 'w')
            c.write(config)
            c.close()

        def updateTdcConfig(config, cfile):
            '''
               1. read /opt/tdc/apsara_global_config.json if it exists
               2. compare with config
               3. overwrite if it is different
            '''
            if not os.path.exists(cfile):
                d = os.path.dirname(cfile)
                if not os.path.exists(d):
                    os.makedirs(d, 0755)
                overwriteConfig(config, cfile)
                return True

            updated = False
            c = open(cfile)
            if config != c.read().strip():
                overwriteConfig(config, cfile)
                updated = True

            c.close()
            return updated

        def installTdc(cmd):
            logger.debug('install tdc pkg')
            if cmd.version != tdcversion:
                return "no matched tdc version found, agent need version %d" % tdcversion

            startCmd = shell.ShellCmd("/opt/tdc/tdc_admin lsi")
            startCmd(False)
            if startCmd.return_code != 0:
                linux.mkdir("/apsara", 0755)
                e = shell.ShellCmd(
                    'rpm -qi tdc-unified-8.2.0.release.el5.x86_64')
                e(False)
                if e.return_code != 0:
                    shell.call(
                        "export YUM0={}; yum --disablerepo=* --enablerepo=zstack-mn,qemu-kvm-ev-mn clean metadata"
                        .format(kvmagent.get_host_yum_release()))
                    shell.call(
                        "export YUM0={}; yum --disablerepo=* --enablerepo=zstack-mn,qemu-kvm-ev-mn install -y tdc-unified-8.2.0.release.el5.x86_64"
                        .format(kvmagent.get_host_yum_release()))
                shell.call("service tdc restart")

                startCmd = shell.ShellCmd("/opt/tdc/tdc_admin lsi")
                startCmd(False)
                if startCmd.return_code != 0:
                    return "tdc_admin lsi failed: %s" % startCmd.stderr
            return None

        def installVrbd():
            logger.debug('modprobe vrbd')
            lsModCmd = shell.ShellCmd("lsmod|grep vrbd")
            lsModCmd(False)
            if lsModCmd.return_code != 0:
                e = shell.ShellCmd(
                    'rpm -qi kernel-3.10.0-693.11.1.el7.x86_64-vrbd-1.0-0.1.release1.alios7.x86_64'
                )
                e(False)
                if e.return_code != 0:
                    shell.call(
                        "export YUM0={}; yum --disablerepo=* --enablerepo=zstack-mn,qemu-kvm-ev-mn clean metadata"
                        .format(kvmagent.get_host_yum_release()))
                    shell.call(
                        "export YUM0={}; yum --disablerepo=* --enablerepo=zstack-mn,qemu-kvm-ev-mn install -y kernel-3.10.0-693.11.1.el7.x86_64-vrbd-1.0-0.1.release1.alios7.x86_64"
                        .format(kvmagent.get_host_yum_release()))
                shell.call("modprobe vrbd")
            else:
                return
            lsModCmd(False)
            if lsModCmd.return_code != 0:
                return "cannot attach vrbd mod"

        rsp = kvmagent.AgentResponse()
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        rsp.error = installTdc(cmd) or installVrbd()
        if rsp.error is not None:
            return jsonobject.dumps(rsp)

        if cmd.tdcConfig and cmd.nuwaConfig and cmd.nuwaCfg:
            tdc = updateTdcConfig(cmd.tdcConfig,
                                  '/opt/tdc/apsara_global_flag.json')
            nuwa1 = updateTdcConfig(
                cmd.nuwaConfig,
                '/apsara/conf/conffiles/nuwa/client/nuwa_config.json')
            nuwa2 = updateTdcConfig(cmd.nuwaCfg, '/apsara/nuwa/nuwa.cfg')
            if tdc or nuwa1 or nuwa2:
                logger.debug('config changed, restart tdc service')
                shell.call("service tdc restart")

        return jsonobject.dumps(rsp)