def apply_iptables_save_doc(content):
    f = linux.write_to_temp_file(content)
    try:
        logger.debug('apply iptables:\n %s' % content)
        shell.call('/sbin/iptables-restore < %s' % f)
    finally:
        os.remove(f)
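# Hedged usage sketch for the helper above: the rule document below is purely
# illustrative, and linux.write_to_temp_file/shell.call are the agent helpers
# already used in this example.
sample_rules = '''*filter
:INPUT ACCEPT [0:0]
:FORWARD ACCEPT [0:0]
:OUTPUT ACCEPT [0:0]
-A INPUT -p tcp --dport 22 -j ACCEPT
COMMIT
'''
apply_iptables_save_doc(sample_rules)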
    def commit_to_imagestore(self, cmd, req):
        fpath = cmd.primaryStorageInstallPath

        # Synchronize cached writes for 'fpath'
        shell.call('/bin/sync ' + fpath)

        # Add the image to registry
        cmdstr = '%s -json  -callbackurl %s -taskid %s -imageUuid %s add -file %s' % (self.ZSTORE_CLI_PATH, req[http.REQUEST_HEADER].get(http.CALLBACK_URI),
                req[http.REQUEST_HEADER].get(http.TASK_UUID), cmd.imageUuid, fpath)

        logger.debug('adding %s to local image store' % fpath)
        shell.call(cmdstr)
        logger.debug('%s added to local image store' % fpath)

        name, imageid = self._get_image_reference(fpath)

        rsp = kvmagent.AgentResponse()
        rsp.backupStorageInstallPath = self._build_install_path(name, imageid)
        rsp.size = linux.qcow2_size_and_actual_size(cmd.primaryStorageInstallPath)[0]

        # sum the actual sizes of every file in the qcow2 chain
        chain = linux.qcow2_get_file_chain(cmd.primaryStorageInstallPath)
        rsp.actualSize = sum([ linux.qcow2_size_and_actual_size(f)[1] for f in chain ])

        return jsonobject.dumps(rsp)
    def _delete_eips(self, eips):
        delete_eip_cmd = '''
PUB_ODEV={{pub_odev}}
NS_NAME="{{ns_name}}"

exit_on_error() {
    if [ $? -ne 0 ]; then
        echo "error on line $1"
        exit 1
    fi
}

ip netns | grep $NS_NAME > /dev/null
if [ $? -eq 0 ]; then
   ip netns delete $NS_NAME
   exit_on_error $LINENO
fi

ip link | grep $PUB_ODEV > /dev/null
if [ $? -eq 0 ]; then
    ip link del $PUB_ODEV
    exit_on_error $LINENO
fi

exit 0
'''
        for eip in eips:
            ctx = {
                "pub_odev": "%s_o" % eip.vip.replace(".", ""),
                "ns_name": "%s_%s" % (eip.publicBridgeName, eip.vip.replace(".", "_"))
            }
            ctx.update(eip.__dict__)
            tmpt = Template(delete_eip_cmd)
            cmd = tmpt.render(ctx)
            shell.call(cmd)
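# Hedged sketch of the render-then-execute pattern used by _delete_eips above.
# Assumption: Template is jinja2.Template; the namespace name is illustrative
# and follows the "<bridge>_<vip with dots replaced>" convention shown above.
script = Template('''
ip netns | grep "{{ns_name}}" > /dev/null
if [ $? -eq 0 ]; then
    ip netns delete {{ns_name}}
fi
exit 0
''').render({'ns_name': 'br_eth0_10_0_0_1'})
shell.call(script)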
    def download_from_imagestore(self, cachedir, host, backupStorageInstallPath, primaryStorageInstallPath):
        name, imageid = self._parse_image_reference(backupStorageInstallPath)
        cmdstr = '%s -url %s:%s -cachedir %s pull -installpath %s %s:%s' % (self.ZSTORE_CLI_PATH, host, self.ZSTORE_DEF_PORT, cachedir, primaryStorageInstallPath, name, imageid)
        logger.debug('pulling %s:%s from image store' % (name, imageid))
        shell.call(cmdstr)
        logger.debug('%s:%s pulled to local cache' % (name, imageid))
        return
Example #5
    def remove_dhcp_entry(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = RemoveDhcpEntryRsp()
        try:
            for e in cmd.dhcpEntries:
                net_dev = shell.call("ifconfig|grep -i %s|awk '{print $1}'" % e.vrNicMac)
                net_dev = net_dev.strip('\t\r\n ')
                mac2 = e.mac.replace(':', '')
                shell.call("sed -i '/%s/d' %s; \
                        sed -i '/^$/d' %s; \
                        sed -i '/%s/d' %s; \
                        sed -i '/^$/d' %s; \
                        sed -i '/%s/d' %s; \
                        sed -i '/^$/d' %s; \
                        dhcp_release %s %s %s"\
                        % (e.mac, self.HOST_DHCP_FILE, \
                        self.HOST_DHCP_FILE, \
                        mac2, self.HOST_OPTION_FILE, \
                        self.HOST_OPTION_FILE, \
                        e.ip, self.HOST_DNS_FILE, \
                        self.HOST_DNS_FILE, \
                        net_dev, e.ip, e.mac))

        except virtualrouter.VirtualRouterError as e:
            logger.warn(linux.get_exception_stacktrace())
            rsp.error = str(e)
            rsp.success = False

        return jsonobject.dumps(rsp)
def shell_cmd_thread(shell_cmd, ignore_exception = False):
    try:
        shell.call(shell_cmd)
    except Exception as e:
        if not ignore_exception:
            node_exception.append(sys.exc_info())
            raise e
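# Usage sketch: run several commands concurrently with shell_cmd_thread, the
# same pattern _start_multi_nodes uses below; the command strings are illustrative.
import threading

cmds = ['echo node-1', 'echo node-2']
threads = [threading.Thread(target=shell_cmd_thread, args=(c, True)) for c in cmds]
for t in threads:
    t.start()
for t in threads:
    t.join()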
    def check_origin_data_exist(self, root_vol=True):
        if root_vol:
            vol_installPath = self.former_root_vol_install_path
            vol_uuid = self.former_root_vol_uuid
            vol_size = self.former_root_vol_size
        else:
            vol_installPath = self.former_data_vol_installPath
            vol_uuid = self.former_data_volume_uuid
            vol_size = self.former_data_volume_size
        if self.origin_ps.type == 'Ceph':
            ceph_mon_ip = self.origin_ps.mons[0].monAddr
            self.chk_cmd = 'sshpass -p password ssh -o LogLevel=quiet -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no root@%s "rbd info %s --format=json"' \
                        % (ceph_mon_ip, vol_installPath.split('ceph://')[-1])
            data_info = shell.call(self.chk_cmd)
            origin_meta = jsonobject.loads(data_info)
#             assert origin_meta.name == vol_uuid
            if root_vol:
                assert origin_meta.size == vol_size
            else:
                assert origin_meta.size >= vol_size
            assert 'rbd_data' in origin_meta.block_name_prefix
            ps_trash = ps_ops.get_trash_on_primary_storage(self.origin_ps.uuid).storageTrashSpecs
            trash_install_path_list = [trsh.installPath for trsh in ps_trash]
            assert vol_installPath in trash_install_path_list
        else:
            nfs_ip, mount_path = self.origin_ps.url.split(':')
            self.chk_cmd = 'sshpass -p password ssh -o LogLevel=quiet -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
                            root@%s "qemu-img info %s"' % (nfs_ip, mount_path + vol_installPath.split(self.origin_ps.uuid)[-1])
            data_info = shell.call(self.chk_cmd)
            assert str(vol_size) in data_info
            ps_trash = ps_ops.get_trash_on_primary_storage(self.origin_ps.uuid).storageTrashSpecs
            trash_install_path_list = [trsh.installPath for trsh in ps_trash]
            assert '/'.join(vol_installPath.split('/')[:8]) in trash_install_path_list
        return self
    def _deploy_db(self, keep_db = False):
        if not keep_db:
            extra_opts = '--drop'
        else:
            extra_opts = '--keep-db'

        if not self.need_deploy_db:
            return
        ssh.make_ssh_no_password(self.db_server, 'root', \
                self.db_server_root_password)

        if not self.db_admin_password:
            cmd = 'zstack-ctl install_db --debug --host=%s --login-password=zstack.mysql.password' % self.db_server
        else:
            cmd = 'zstack-ctl install_db --debug --host=%s \
                    --login-password=%s' \
                    % (self.db_server, \
                    self.db_admin_password)

        print('installing db ...')
        shell.call(cmd)

        cmd = 'zstack-ctl deploydb %s --host=%s' % (extra_opts, self.db_server)
        if self.db_admin_password:
            cmd = '%s --root-password=%s' % (cmd, self.db_admin_password )
        else:
            cmd = '%s --root-password=zstack.mysql.password' % cmd

        if self.db_password:
            cmd = '%s --zstack-password=%s' % (cmd, self.db_password)

        print('deploying db ...')
        shell.call(cmd)
    def purge_snapshots(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        vpath = self._normalize_install_path(cmd.volumePath)
        shell.call('rbd snap purge %s' % vpath)
        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
Example #10
    def _create_subvolume(self, src, dst):
        src_volume = os.path.dirname(src)
        shell.call('mkdir -p %s' % os.path.dirname(dst))
        shell.call('btrfs subvolume snapshot %s %s' % (src_volume, dst))
        src_file_name = os.path.basename(src)
        dst_path = os.path.join(dst, src_file_name)
        return dst_path
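# Usage sketch (paths are illustrative): snapshot the btrfs subvolume that
# holds a source image and get back the image path inside the new snapshot.
new_path = self._create_subvolume('/storage/vol-a/disk.qcow2',
                                  '/storage/snapshots/vol-a')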
Example #11
    def create(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        path = self._normalize_install_path(cmd.installPath)
        rsp = CreateEmptyVolumeRsp()

        call_string = None
        if isXsky():
            # do NOT round to MB
            call_string = 'rbd create --size %dB --image-format 2 %s' % (cmd.size, path)
            rsp.size = cmd.size
        else:
            size_M = sizeunit.Byte.toMegaByte(cmd.size) + 1
            call_string = 'rbd create --size %s --image-format 2 %s' % (size_M, path)
            rsp.size = cmd.size + sizeunit.MegaByte.toByte(1)
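            # Worked example for this branch (assuming sizeunit.Byte.toMegaByte
            # truncates to whole MiB):
            #   cmd.size = 10 GiB + 1 byte = 10737418241
            #   size_M   = 10737418241 // 1048576 + 1 = 10241  (rbd --size is in MiB)
            #   rsp.size = 10737418241 + 1048576 = 10738466817 bytes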

        if cmd.shareable:
            call_string = call_string + " --image-shared"

        skip_cmd = "rbd info %s ||" % path if cmd.skipIfExisting else ""
        shell.call(skip_cmd + call_string)


        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
Example #12
    def init(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        o = shell.call('ceph mon_status')
        mon_status = jsonobject.loads(o)
        fsid = mon_status.monmap.fsid_

        existing_pools = shell.call('ceph osd lspools')
        for pool in cmd.pools:
            if pool.predefined and pool.name not in existing_pools:
                raise Exception('cannot find pool[%s] in the ceph cluster, you must create it manually' % pool.name)
            elif pool.name not in existing_pools:
                shell.call('ceph osd pool create %s 128' % pool.name)

        rsp = InitRsp()

        if cmd.nocephx is False:
            o = shell.call("ceph -f json auth get-or-create client.zstack mon 'allow r' osd 'allow *' 2>/dev/null").strip(
                ' \n\r\t')
            o = jsonobject.loads(o)
            rsp.userKey = o[0].key_

        rsp.fsid = fsid
        self._set_capacity_to_response(rsp)

        return jsonobject.dumps(rsp)
Example #13
    def _make_conf_path(self, bridge_name):
        folder = os.path.join(self.DNSMASQ_CONF_FOLDER, bridge_name)
        if not os.path.exists(folder):
            shell.call('mkdir -p %s' % folder)

        # the conf file is created at initialization time
        conf = os.path.join(folder, 'dnsmasq.conf')

        dhcp = os.path.join(folder, 'hosts.dhcp')
        if not os.path.exists(dhcp):
            shell.call('touch %s' % dhcp)

        dns = os.path.join(folder, 'hosts.dns')
        if not os.path.exists(dns):
            shell.call('touch %s' % dns)

        option = os.path.join(folder, 'hosts.option')
        if not os.path.exists(option):
            shell.call('touch %s' % option)

        log = os.path.join(folder, 'dnsmasq.log')
        if not os.path.exists(log):
            shell.call('touch %s' % log)

        return conf, dhcp, dns, option, log
    def download(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool, image_name = self._parse_install_path(cmd.installPath)
        tmp_image_name = 'tmp-%s' % image_name

        lichbd_file = os.path.join(pool, image_name)
        tmp_lichbd_file = os.path.join(pool, tmp_image_name)

        lichbd.lichbd_mkpool(os.path.dirname(lichbd_file))
        shell.call('set -o pipefail; wget --no-check-certificate -q -O - %s | lichbd import - %s -p lichbd' % (cmd.url, tmp_lichbd_file))

        @rollbackable
        def _1():
            if lichbd.lichbd_file_exist(tmp_lichbd_file):
                lichbd.lichbd_rm(tmp_lichbd_file)
            lichbd.lichbd_rm(lichbd_file)
        _1()

        qemu_img = lichbd.lichbd_get_qemu_img_path()
        file_format = shell.call("set -o pipefail;%s info rbd:%s/%s 2>/dev/null | grep 'file format' | cut -d ':' -f 2" % (qemu_img, pool, tmp_image_name))
        file_format = file_format.strip()
        if file_format not in ['qcow2', 'raw']:
            raise Exception('unknown image format: %s' % file_format)

        lichbd.lichbd_mv(lichbd_file, tmp_lichbd_file)
        size = lichbd.lichbd_file_size(lichbd_file)
        rsp = DownloadRsp()
        rsp.size = size
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
Example #15
    def unprotect_snapshot(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)

        shell.call('rbd snap unprotect %s' % spath)

        return jsonobject.dumps(AgentResponse())
    def _upgrade_local_zstack(self):
        cmd = 'WEBSITE=localhost bash %s -f %s -u -r %s' % \
                (self.zstack_install_script, self.zstack_pkg, \
                self.install_path)

        shell.call(cmd)
        self._extra_deployment()
    def execute_plan_without_deploy_test_agent(self):
        if os.environ.get('ZSTACK_ALREADY_INSTALLED') != "yes":
            try:
                self._stop_nodes()
                shell.call('zstack-ctl kairosdb --stop')
                shell.call('zstack-ctl cassandra --stop')
            except:
                pass
    
            self._install_local_zstack()
            self._deploy_db()
            self._deploy_rabbitmq()
            self._install_management_nodes()
            self._set_extra_node_config()
        else:
            self._change_node_ip()
            self._install_management_nodes()
            self._set_extra_node_config()
        try:
            with open('/root/.bashrc', 'a+') as bashrc:
                bashrc.write('export ZSTACK_ALREADY_INSTALLED=yes\n')
        except:
            pass

        self._start_multi_nodes(restart=True)
        #NOTE: Only one key pair will take effect
        self._copy_sshkey_from_node()
        self._enable_jacoco_dump()
    def download_image(self, req):
        #TODO: report percentage to mgmt server
        def percentage_callback(percent, url):
            logger.debug('Downloading %s ... %s%%' % (url, percent))

        def use_wget(url, name, workdir, timeout):
            return linux.wget(url, workdir=workdir, rename=name, timeout=timeout, interval=2, callback=percentage_callback, callback_data=url)

        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        rsp = DownloadResponse()
        supported_schemes = [self.URL_HTTP, self.URL_HTTPS, self.URL_FILE]
        if cmd.urlScheme not in supported_schemes:
            rsp.success = False
            rsp.error = 'unsupported url scheme[%s], SimpleSftpBackupStorage only supports %s' % (cmd.urlScheme, supported_schemes)
            return jsonobject.dumps(rsp)

        path = os.path.dirname(cmd.installPath)
        if not os.path.exists(path):
            os.makedirs(path, 0777)
        image_name = os.path.basename(cmd.installPath)
        install_path = cmd.installPath

        timeout = cmd.timeout if cmd.timeout else 7200
        if cmd.urlScheme in [self.URL_HTTP, self.URL_HTTPS]:
            try:
                ret = use_wget(cmd.url, image_name, path, timeout)
                if ret != 0:
                    rsp.success = False
                    rsp.error = 'http/https download failed, [wget -O %s %s] returns value %s' % (image_name, cmd.url, ret)
                    return jsonobject.dumps(rsp)
            except linux.LinuxError as e:
                logger.warn(traceback.format_exc())
                rsp.success = False
                rsp.error = str(e)
                return jsonobject.dumps(rsp)
        elif cmd.urlScheme == self.URL_FILE:
            src_path = cmd.url.lstrip('file:')
            src_path = os.path.normpath(src_path)
            if not os.path.isfile(src_path):
                raise Exception('cannot find the file[%s]' % src_path)
            logger.debug("src_path is: %s" % src_path)
            shell.call('yes | cp %s %s' % (src_path, install_path))



        os.chmod(cmd.installPath, stat.S_IRUSR + stat.S_IRGRP + stat.S_IROTH)


        image_format =  bash_o("qemu-img info %s | grep -w '^file format' | awk '{print $3}'" % install_path).strip('\n')
        size = os.path.getsize(install_path)
        md5sum = 'not calculated'
        logger.debug('successfully downloaded %s to %s' % (cmd.url, install_path))
        (total, avail) = self.get_capacity()
        rsp.md5Sum = md5sum
        rsp.actualSize = size
        rsp.size = linux.qcow2_virtualsize(install_path)
        rsp.totalCapacity = total
        rsp.availableCapacity = avail
        rsp.format = image_format
        return jsonobject.dumps(rsp)
Example #19
def kill_vm(maxAttempts, mountPaths=None, isFileSystem=None):
    zstack_uuid_pattern = "'[0-9a-f]{8}[0-9a-f]{4}[1-5][0-9a-f]{3}[89ab][0-9a-f]{3}[0-9a-f]{12}'"

    virsh_list = shell.call("virsh list --all")
    logger.debug("virsh_list:\n" + virsh_list)

    vm_in_process_uuid_list = shell.call("virsh list | egrep -o " + zstack_uuid_pattern + " | sort | uniq")
    logger.debug('vm_in_process_uuid_list:\n' + vm_in_process_uuid_list)

    # kill vm's qemu process
    vm_pids_dict = {}
    for vm_uuid in vm_in_process_uuid_list.split('\n'):
        vm_uuid = vm_uuid.strip(' \t\n\r')
        if not vm_uuid:
            continue

        if mountPaths and isFileSystem is not None \
                and not is_need_kill(vm_uuid, mountPaths, isFileSystem):
            continue

        vm_pid = shell.call("ps aux | grep qemu-kvm | grep -v grep | awk '/%s/{print $2}'" % vm_uuid)
        vm_pid = vm_pid.strip(' \t\n\r')
        vm_pids_dict[vm_uuid] = vm_pid

    for vm_uuid, vm_pid in vm_pids_dict.items():
        kill = shell.ShellCmd('kill -9 %s' % vm_pid)
        kill(False)
        if kill.return_code == 0:
            logger.warn('kill the vm[uuid:%s, pid:%s] because we lost connection to the storage; '
                        'failed to read the heartbeat file %s times' % (vm_uuid, vm_pid, maxAttempts))
        else:
            logger.warn('failed to kill the vm[uuid:%s, pid:%s] %s' % (vm_uuid, vm_pid, kill.stderr))

    return vm_pids_dict
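# Usage sketch: kill every qemu-kvm process whose VM uses the given mount
# paths after the heartbeat file could not be read 5 times (path is illustrative).
killed = kill_vm(5, mountPaths=['/var/lib/zstack/nfsprimarystorage'], isFileSystem=True)
logger.debug('killed vms: %s' % killed)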
Example #20
def listPath(path):
    s = []
    pwd = shell.call('pwd', True, path).strip()
    sub_paths = shell.call('ls %s' % path).split("\n")
    for f in sub_paths:
        if f.strip():
            s.append("%s/%s" % (pwd, f.strip()))
    return s
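# A pure-Python sketch of the same listing that avoids spawning 'pwd' and 'ls'
# (note: unlike 'ls', os.listdir also returns hidden files).
import os

def list_path(path):
    base = os.path.abspath(path)
    return [os.path.join(base, f) for f in os.listdir(base) if f.strip()]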
Example #21
    def rollback_snapshot(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)

        shell.call('rbd snap rollback %s' % spath)
        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
    def _deploy_rabbitmq(self):
        ssh.make_ssh_no_password(self.rabbitmq_server, 'root', \
                self.rabbitmq_server_root_passwd)

        cmd = "zstack-ctl install_rabbitmq --host=%s" % self.rabbitmq_server

        print('deploying rabbitmq ...')
        shell.call(cmd)
Example #23
    def protect_snapshot(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        spath = self._normalize_install_path(cmd.snapshotPath)

        shell.call('rbd snap protect %s' % spath, exception=not cmd.ignoreError)

        rsp = AgentResponse()
        return jsonobject.dumps(rsp)
Example #24
    def delete(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool, image_name = self._parse_install_path(cmd.installPath)
        shell.call('rbd rm %s/%s' % (pool, image_name))

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
    def _start_multi_nodes(self, restart = False):
        nodes = []
        threads = []
        for node in self.nodes:
            #The reserved node is used by test cases. 
            if not restart and node.reserve__:
                continue

            if not node.dockerImage__:
                print 'Deploy node in hosts'
                # some zstack servers run inside VMs and start slowly,
                # so increase the timeout to 180s.
                cmd = 'zstack-ctl stop_node --host=%s ; zstack-ctl start_node --host=%s --timeout=180' % (node.ip_, node.ip_)
                thread = threading.Thread(target=shell_cmd_thread, args=(cmd, True, ))
                threads.append(thread)
            else:
                print 'Deploy node in docker'
                docker_node = DockerNode(self)
                docker_node.set_docker_image(node.dockerImage__)
                docker_node.set_node_ip(node.ip__)
                docker_node.prepare_node()
                nodes.append(docker_node)
                thread = threading.Thread(target=docker_node.start_node)
                threads.append(thread)

        for thread in threads:
            thread.start()

        self._wait_for_thread_completion('start management node', 200)

        if node_exception:
            print 'node start raised an exception:'
            info1 = node_exception[0][1]
            info2 = node_exception[0][2]
            raise info1, None, info2
                
        current_time = time.time()
        # the maximum timeout for multi-node startup is 300s
        timeout_time = current_time + 300
        for node in self.nodes:
            #The reserved node is used by test cases. 
            if node.reserve__:
                continue
            new_time = time.time() 
            if new_time >= timeout_time:
                new_timeout = 1
            else:
                new_timeout = timeout_time - new_time

            if not linux.wait_callback_success(\
                    node_ops.is_management_node_start, \
                    node.ip_, timeout=new_timeout, interval=0.5):
                raise ActionError('management node did not start up on host: %s' \
                        % node.ip_)

        zstack_home = '%s/apache-tomcat/webapps/zstack/' % self.install_path
        cmd = 'zstack-ctl setenv ZSTACK_HOME=%s' % zstack_home
        shell.call(cmd)
Example #26
    def flatten(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        path = self._normalize_install_path(cmd.path)

        shell.call('rbd flatten %s' % path)

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
    def resize(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        install_path = cmd.installPath
        rsp = ResizeVolumeRsp()
        shell.call("qemu-img resize %s %s" % (install_path, cmd.size))
        ret = linux.qcow2_virtualsize(install_path)
        rsp.size = ret
        return jsonobject.dumps(rsp)
Example #28
def get_netmask_of_nic(nic_name):
    netmask = shell.call("ifconfig %s | grep Mask | sed s/^.*Mask://" % nic_name)
    if not netmask:
        netmask = shell.call("ifconfig %s | grep netmask|awk -F'netmask' '{print $2}'|awk '{print $1}'" % nic_name)

    netmask = netmask.strip()
    if netmask == '':
        raise LinuxError('cannot find netmask of %s, it may have no ip assigned' % nic_name)
    return netmask
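# Alternative sketch that asks the kernel directly instead of parsing ifconfig
# output (Linux only; 0x891b is the SIOCGIFNETMASK ioctl).
import fcntl
import socket
import struct

def get_netmask_ioctl(nic_name):
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        packed = fcntl.ioctl(s.fileno(), 0x891b, struct.pack('256s', nic_name[:15]))
        return socket.inet_ntoa(packed[20:24])
    finally:
        s.close()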
Example #29
def get_block_devices():
    # 1. get multi path devices
    # 2. get multi path device information from raw device
    # 3. get information of other devices
    mpath_devices = []
    block_devices = []  # type: List[SharedBlockCandidateStruct]
    slave_devices = []
    cmd = shell.ShellCmd("multipath -l -v1")
    cmd(is_exception=False)
    if cmd.return_code == 0 and cmd.stdout.strip() != "":
        mpath_devices = cmd.stdout.strip().split("\n")

    for mpath_device in mpath_devices:  # type: str
        try:
            cmd = shell.ShellCmd("realpath /dev/mapper/%s | grep -E -o 'dm-.*'" % mpath_device)
            cmd(is_exception=False)
            if cmd.return_code != 0 or cmd.stdout.strip() == "":
                continue

            dm = cmd.stdout.strip()
            slaves = shell.call("ls /sys/class/block/%s/slaves/" % dm).strip().split("\n")
            if slaves is None or len(slaves) == 0:
                struct = SharedBlockCandidateStruct()
                cmd = shell.ShellCmd("udevadm info -n %s | grep dm-uuid-mpath | grep -o 'dm-uuid-mpath-\S*' | head -n 1 | awk -F '-' '{print $NF}'" % dm)
                cmd(is_exception=True)
                struct.wwids = [cmd.stdout.strip().strip("()")]
                struct.type = "mpath"
                block_devices.append(struct)
                continue

            slave_devices.extend(slaves)
            struct = get_device_info(slaves[0])
            cmd = shell.ShellCmd("udevadm info -n %s | grep dm-uuid-mpath | grep -o 'dm-uuid-mpath-\S*' | head -n 1 | awk -F '-' '{print $NF}'" % dm)
            cmd(is_exception=True)
            struct.wwids = [cmd.stdout.strip().strip("()")]
            struct.type = "mpath"
            block_devices.append(struct)
        except Exception as e:
            logger.warn(linux.get_exception_stacktrace())
            continue

    disks = shell.call("lsblk -p -o NAME,TYPE | grep disk | awk '{print $1}'").strip().split()
    for disk in disks:
        try:
            if disk.split("/")[-1] in slave_devices or is_slave_of_multipath(disk):
                continue
            d = get_device_info(disk.strip().split("/")[-1])
            if len(d.wwids) == 0:
                continue
            if get_pv_uuid_by_path("/dev/disk/by-id/%s" % d.wwids[0]) not in ("", None):
                d.type = "lvm-pv"
            block_devices.append(d)
        except Exception as e:
            logger.warn(linux.get_exception_stacktrace())
            continue

    return block_devices
Example #30
    def _delete_target(self, target_name, conf_uuid):
        conf_file = os.path.join('/etc/tgt/conf.d/%s.conf' % conf_uuid)
        shell.call('rm -f %s' % conf_file)

        output = shell.call('tgt-admin --show')
        if target_name not in output:
            return

        update_target(target_name)
Example #31
def _2():
    shell.call('rbd rm %s/%s' % (pool, image_name))
Example #32
def lichbd_get_fsid():
    fsid = None
    fsid = shell.call(
        "cat /opt/fusionstack/etc/lich.conf | grep uuid | awk '{print $2}' | awk -F ';' '{print $1}'"
    ).strip()
    return fsid
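# Pure-Python sketch of the same parsing, assuming lich.conf lines look like
# "uuid <value>;" as the grep/awk pipeline above implies.
def lichbd_get_fsid_py():
    with open('/opt/fusionstack/etc/lich.conf') as conf:
        for line in conf:
            parts = line.split()
            if len(parts) >= 2 and parts[0] == 'uuid':
                return parts[1].split(';')[0]
    return None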
Example #33
def _0():
    shell.call('rm -f %s' % pid_file)
Example #34
def lichbd_add_node(monHostname):
    disks = shell.call('/opt/fusionstack/lich/bin/lich addnode %s' %
                       monHostname)
Example #35
    def sftp_download(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        hostname = cmd.hostname
        prikey = cmd.sshKey
        port = cmd.sshPort

        pool, image_name = self._parse_install_path(
            cmd.primaryStorageInstallPath)
        tmp_image_name = 'tmp-%s' % image_name

        prikey_file = linux.write_to_temp_file(prikey)

        @rollbackable
        def _0():
            tpath = "%s/%s" % (pool, tmp_image_name)
            shell.call('rbd info %s > /dev/null && rbd rm %s' % (tpath, tpath))

        _0()

        try:
            shell.call(
                'set -o pipefail; ssh -p %d -o StrictHostKeyChecking=no -i %s root@%s "cat %s" | rbd import --image-format 2 - %s/%s'
                % (port, prikey_file, hostname, cmd.backupStorageInstallPath,
                   pool, tmp_image_name))
        finally:
            os.remove(prikey_file)

        @rollbackable
        def _1():
            shell.call('rbd rm %s/%s' % (pool, tmp_image_name))

        _1()

        file_format = shell.call(
            "set -o pipefail; qemu-img info rbd:%s/%s | grep 'file format' | cut -d ':' -f 2"
            % (pool, tmp_image_name))
        file_format = file_format.strip()
        if file_format not in ['qcow2', 'raw']:
            raise Exception('unknown image format: %s' % file_format)

        if file_format == 'qcow2':
            conf_path = None
            try:
                with open('/etc/ceph/ceph.conf', 'r') as fd:
                    conf = fd.read()
                    conf = '%s\n%s\n' % (conf, 'rbd default format = 2')
                    conf_path = linux.write_to_temp_file(conf)

                shell.call(
                    'qemu-img convert -f qcow2 -O rbd rbd:%s/%s rbd:%s/%s:conf=%s'
                    % (pool, tmp_image_name, pool, image_name, conf_path))
                shell.call('rbd rm %s/%s' % (pool, tmp_image_name))
            finally:
                if conf_path:
                    os.remove(conf_path)
        else:
            shell.call('rbd mv %s/%s %s/%s' %
                       (pool, tmp_image_name, pool, image_name))

        rsp = AgentResponse()
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
Example #36
def cleanup_addons():
    for chan in self.domain_xmlobject.devices.get_child_node_as_list('channel'):
        if chan.type_ == 'unix':
            path = chan.source.path_
            shell.call('rm -f %s' % path)
Example #37
def _do_import(task, fpath):
    shell.call("cat %s | rbd import --image-format 2 - %s" % (fpath, task.tmpPath))
Example #38
    def release_userdata(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        html_folder = os.path.join(self.USERDATA_ROOT, cmd.namespaceName, 'html', cmd.vmIp)
        shell.call('rm -rf %s' % html_folder)
        return jsonobject.dumps(ReleaseUserdataRsp())
Example #39
    def _apply_userdata_xtables(self, to):
        p = UserDataEnv(to.bridgeName, to.namespaceName)
        INNER_DEV = None
        DHCP_IP = None
        NS_NAME = to.namespaceName

        if not to.hasattr("dhcpServerIp"):
            p.prepare()
            INNER_DEV = p.inner_dev
        else:
            DHCP_IP = to.dhcpServerIp
            INNER_DEV = bash_errorout(
                "ip netns exec {{NS_NAME}} ip addr | grep -w {{DHCP_IP}} | awk '{print $NF}'").strip(' \t\r\n')
        if not INNER_DEV:
            p.prepare()
            INNER_DEV = p.inner_dev
        if not INNER_DEV:
            raise Exception('cannot find device for the DHCP IP[%s]' % DHCP_IP)

        ret = bash_r('ip netns exec {{NS_NAME}} ip addr | grep 169.254.169.254 > /dev/null')
        if (ret != 0 and INNER_DEV != None):
            bash_errorout('ip netns exec {{NS_NAME}} ip addr add 169.254.169.254 dev {{INNER_DEV}}')

        r, o = bash_ro('ip netns exec {{NS_NAME}} ip r | wc -l')
        if not to.hasattr("dhcpServerIp") and int(o) == 0:
            bash_errorout('ip netns exec {{NS_NAME}} ip r add default dev {{INNER_DEV}}')

        # set ebtables
        BR_NAME = to.bridgeName
        # BR_NAME is "br_%s_%s"
        ETH_NAME = BR_NAME.replace('br_', '', 1).replace('_', '.', 1)
        MAC = bash_errorout("ip netns exec {{NS_NAME}} ip link show {{INNER_DEV}} | grep -w ether | awk '{print $2}'").strip(' \t\r\n')
        CHAIN_NAME="USERDATA-%s" % BR_NAME
        # max length of ebtables chain name is 31
        if (len(BR_NAME) <= 12):
            EBCHAIN_NAME = "USERDATA-%s-%s" % (BR_NAME, to.l3NetworkUuid[0:8])
        else:
            EBCHAIN_NAME = "USERDATA-%s-%s" % (BR_NAME[len(BR_NAME) - 12 : len(BR_NAME)], to.l3NetworkUuid[0:8])

        ret = bash_r(EBTABLES_CMD + ' -t nat -L {{EBCHAIN_NAME}} >/dev/null 2>&1')
        if ret != 0:
            bash_errorout(EBTABLES_CMD + ' -t nat -N {{EBCHAIN_NAME}}')

        if bash_r(EBTABLES_CMD + ' -t nat -L PREROUTING | grep -- "--logical-in {{BR_NAME}} -j {{EBCHAIN_NAME}}"') != 0:
            bash_errorout(EBTABLES_CMD + ' -t nat -I PREROUTING --logical-in {{BR_NAME}} -j {{EBCHAIN_NAME}}')

        # ebtables has a bug that strips a leading 0 from MAC octets; for example, aa:bb:0c becomes aa:bb:c
        cidr = ip.IpAddress(to.vmIp).toCidr(to.netmask)
        RULE = "-p IPv4 --ip-dst 169.254.169.254 --ip-source %s -j dnat --to-dst %s --dnat-target ACCEPT" % (cidr, MAC.replace(":0", ":"))
        ret = bash_r(EBTABLES_CMD + ' -t nat -L {{EBCHAIN_NAME}} | grep -- "{{RULE}}" > /dev/null')
        if ret != 0:
            bash_errorout(EBTABLES_CMD + ' -t nat -I {{EBCHAIN_NAME}} {{RULE}}')

        ret = bash_r(EBTABLES_CMD + ' -t nat -L {{EBCHAIN_NAME}} | grep -- "-j RETURN" > /dev/null')
        if ret != 0:
            bash_errorout(EBTABLES_CMD + ' -t nat -A {{EBCHAIN_NAME}} -j RETURN')

        ret = bash_r(EBTABLES_CMD + ' -L {{EBCHAIN_NAME}} >/dev/null 2>&1')
        if ret != 0:
            bash_errorout(EBTABLES_CMD + ' -N {{EBCHAIN_NAME}}')

        ret = bash_r(EBTABLES_CMD + ' -L FORWARD | grep -- "-p ARP --arp-ip-dst 169.254.169.254 -j {{EBCHAIN_NAME}}" > /dev/null')
        if ret != 0:
            bash_errorout(EBTABLES_CMD + ' -I FORWARD -p ARP --arp-ip-dst 169.254.169.254 -j {{EBCHAIN_NAME}}')

        ret = bash_r(EBTABLES_CMD + ' -L {{EBCHAIN_NAME}} | grep -- "-i {{ETH_NAME}} -j DROP" > /dev/null')
        if ret != 0:
            bash_errorout(EBTABLES_CMD + ' -I {{EBCHAIN_NAME}} -i {{ETH_NAME}} -j DROP')

        ret = bash_r(EBTABLES_CMD + ' -L {{EBCHAIN_NAME}} | grep -- "-o {{ETH_NAME}} -j DROP" > /dev/null')
        if ret != 0:
            bash_errorout(EBTABLES_CMD + ' -I {{EBCHAIN_NAME}} -o {{ETH_NAME}} -j DROP')

        ret = bash_r("ebtables-save | grep '\-A {{EBCHAIN_NAME}} -j RETURN'")
        if ret != 0:
            bash_errorout(EBTABLES_CMD + ' -A {{EBCHAIN_NAME}} -j RETURN')

        self.work_userdata_iptables(CHAIN_NAME, to)

        conf_folder = os.path.join(self.USERDATA_ROOT, to.namespaceName)
        if not os.path.exists(conf_folder):
            shell.call('mkdir -p %s' % conf_folder)

        conf_path = os.path.join(conf_folder, 'lighttpd.conf')
        http_root = os.path.join(conf_folder, 'html')

        conf = '''\
server.document-root = "{{http_root}}"

server.port = {{port}}
server.bind = "169.254.169.254"
dir-listing.activate = "enable"
index-file.names = ( "index.html" )

server.modules += ( "mod_rewrite" )

$HTTP["remoteip"] =~ "^(.*)$" {
    url.rewrite-once = (
        "^/.*/meta-data/(.+)$" => "../%1/meta-data/$1",
        "^/.*/meta-data$" => "../%1/meta-data",
        "^/.*/meta-data/$" => "../%1/meta-data/",
        "^/.*/user-data$" => "../%1/user-data",
        "^/.*/user_data$" => "../%1/user_data",
        "^/.*/meta_data.json$" => "../%1/meta_data.json",
        "^/.*/password$" => "../%1/password",
        "^/.*/$" => "../%1/$1"
    )
    dir-listing.activate = "enable"
}

mimetype.assign = (
  ".html" => "text/html",
  ".txt" => "text/plain",
  ".jpg" => "image/jpeg",
  ".png" => "image/png"
)'''

        tmpt = Template(conf)
        conf = tmpt.render({
            'http_root': http_root,
            'port': to.port
        })

        if not os.path.exists(conf_path):
            with open(conf_path, 'w') as fd:
                fd.write(conf)
        else:
            with open(conf_path, 'r') as fd:
                current_conf = fd.read()

            if current_conf != conf:
                with open(conf_path, 'w') as fd:
                    fd.write(conf)
Example #40
    def connect(self, req):
        shell.call(EBTABLES_CMD + ' -F')
        shell.call(EBTABLES_CMD + ' -t nat -F')
        return jsonobject.dumps(ConnectRsp())
    def _clear_mirror_dst_config(self, bridge_name, device_name,
                                 mirror_device_name):
        shell.call("ip link set dev %s master %s" % (device_name, bridge_name))
    def _set_mirror_src_config(self, device_name, mirror_device_name,
                               direction):
        if (direction == "Egress" or direction == "Bidirection"):
            shell_cmd = shell.ShellCmd(
                "tc qdisc show dev %s |grep 'qdisc ingress'" % device_name)
            shell_cmd(False)
            if shell_cmd.return_code != 0:
                shell.call("tc qdisc add dev %s ingress" % device_name)
            shell_cmd = shell.ShellCmd(
                " tc filter list dev %s parent ffff: |grep '%s'" %
                (device_name, mirror_device_name))
            shell_cmd(False)
            if shell_cmd.return_code != 0:
                shell.call(
                    'tc filter add dev %s parent ffff: protocol all u32 match u8 0 0 action mirred egress mirror dev %s'
                    % (device_name, mirror_device_name))
            else:
                shell.call(
                    'tc filter replace dev %s parent ffff: protocol all u32 match u8 0 0 action mirred egress mirror dev %s'
                    % (device_name, mirror_device_name))

        if (direction == "Ingress" or direction == "Bidirection"):
            shell_cmd = shell.ShellCmd(
                "tc qdisc show dev %s |grep 'qdisc prio 1:'" % device_name)
            shell_cmd(False)
            if shell_cmd.return_code != 0:
                shell.call("tc qdisc add dev %s handle 1: root prio" %
                           device_name)
            shell_cmd = shell.ShellCmd(
                " tc filter list dev %s parent 1: |grep '%s'" %
                (device_name, mirror_device_name))
            shell_cmd(False)
            if shell_cmd.return_code != 0:
                shell.call(
                    'tc filter add dev %s parent 1: protocol all u32 match u8 0 0 action mirred egress mirror dev %s'
                    % (device_name, mirror_device_name))
            else:
                shell.call(
                    'tc filter replace dev %s parent 1: protocol all u32 match u8 0 0 action mirred egress mirror dev %s'
                    % (device_name, mirror_device_name))
Example #43
    def copy_bits_to_remote(self, req):
        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        chain = sum([linux.qcow2_get_file_chain(p) for p in cmd.paths], [])
        if cmd.sendCommandUrl:
            Report.url = cmd.sendCommandUrl

        report = Report(cmd.threadContext, cmd.threadContextStack)
        report.processType = "LocalStorageMigrateVolume"
        report.resourceUuid = cmd.uuid
        PFILE = shell.call('mktemp /tmp/tmp-XXXXXX').strip()

        start = 10
        end = 90
        if cmd.stage:
            start, end = get_scale(cmd.stage)

        total = 0
        for path in set(chain):
            total = total + os.path.getsize(path)

        written = 0

        def _get_progress(synced):
            logger.debug(
                "getProgress in localstorage-agent, synced: %s, total: %s" %
                (synced, total))
            if not os.path.exists(PFILE):
                return synced
            fpread = open(PFILE, 'r')
            lines = fpread.readlines()
            if not lines:
                fpread.close()
                return synced
            last = str(lines[-1]).strip().split('\r')[-1]
            if not last or len(last.split()) < 1:
                fpread.close()
                return synced
            line = last.split()[0]
            if not line.isdigit():
                return synced
            if total > 0:
                synced = long(line)
                if synced < total:
                    percent = int(
                        round(
                            float(written + synced) / float(total) *
                            (end - start) + start))
                    report.progress_report(percent, "report")
                    synced = written
            fpread.close()
            return synced

        for path in set(chain):
            PATH = path
            PASSWORD = cmd.dstPassword
            USER = cmd.dstUsername
            IP = cmd.dstIp
            PORT = (cmd.dstPort and cmd.dstPort or "22")
            DIR = os.path.dirname(path)

            if cmd.dstUsername == 'root':
                _, _, err = bash_progress_1(
                    'rsync -av --progress --relative {{PATH}} --rsh="/usr/bin/sshpass -p "{{PASSWORD}}" ssh -o StrictHostKeyChecking=no -p {{PORT}} -l {{USER}}" {{IP}}:/ 1>{{PFILE}}',
                    _get_progress)
                if err:
                    raise err
            else:
                raise Exception("cannot support migrate to non-root user host")
            written += os.path.getsize(path)
            bash_errorout(
                '/usr/bin/sshpass -p "{{PASSWORD}}" ssh -o StrictHostKeyChecking=no -p {{PORT}} {{USER}}@{{IP}} "/bin/sync {{PATH}}"'
            )
            percent = int(
                round(float(written) / float(total) * (end - start) + start))
            report.progress_report(percent, "report")

        if os.path.exists(PFILE):
            os.remove(PFILE)
        rsp = AgentResponse()
        rsp.totalCapacity, rsp.availableCapacity = self._get_disk_capacity(
            cmd.storagePath)
        return jsonobject.dumps(rsp)
Example #44
        def apply(dhcp):
            bridge_name = dhcp[0].bridgeName
            namespace_name = dhcp[0].namespaceName
            conf_file_path, dhcp_path, dns_path, option_path, log_path = self._make_conf_path(namespace_name)

            conf_file = '''\
domain-needed
bogus-priv
no-hosts
addn-hosts={{dns}}
dhcp-option=vendor:MSFT,2,1i
dhcp-lease-max=65535
dhcp-hostsfile={{dhcp}}
dhcp-optsfile={{option}}
log-facility={{log}}
interface={{iface_name}}
except-interface=lo
bind-interfaces
leasefile-ro
{% for g in gateways -%}
dhcp-range={{g}},static
{% endfor -%}
'''

            br_num = shell.call("ip netns list-id | grep -w %s | awk '{print $2}'" % namespace_name)
            br_num = br_num.strip(' \t\r\n')
            if not br_num:
                raise Exception('cannot find the ID for the namespace[%s]' % namespace_name)

            tmpt = Template(conf_file)
            conf_file = tmpt.render({
                'dns': dns_path,
                'dhcp': dhcp_path,
                'option': option_path,
                'log': log_path,
                'iface_name': 'inner%s' % br_num,
                'gateways': [d.gateway for d in dhcp if d.gateway]
            })

            restart_dnsmasq = cmd.rebuild
            if not os.path.exists(conf_file_path) or cmd.rebuild:
                with open(conf_file_path, 'w') as fd:
                    fd.write(conf_file)
            else:
                with open(conf_file_path, 'r') as fd:
                    c = fd.read()

                if c != conf_file:
                    logger.debug('dnsmasq configure file for bridge[%s] changed, restart it' % bridge_name)
                    restart_dnsmasq = True
                    with open(conf_file_path, 'w') as fd:
                        fd.write(conf_file)
                    logger.debug('wrote dnsmasq configure file for bridge[%s]\n%s' % (bridge_name, conf_file))


            info = []
            for d in dhcp:
                dhcp_info = {'tag': d.mac.replace(':', '')}
                dhcp_info.update(d.__dict__)
                dhcp_info['dns'] = ','.join(d.dns)
                routes = []
                for route in d.hostRoutes:
                    routes.append(','.join([route.prefix, route.nexthop]))
                dhcp_info['routes'] = ','.join(routes)
                info.append(dhcp_info)

                if not cmd.rebuild:
                    self._erase_configurations(d.mac, d.ip, dhcp_path, dns_path, option_path)

            dhcp_conf = '''\
{% for d in dhcp -%}
{% if d.isDefaultL3Network -%}
{{d.mac}},set:{{d.tag}},{{d.ip}},{{d.hostname}},infinite
{% else -%}
{{d.mac}},set:{{d.tag}},{{d.ip}},infinite
{% endif -%}
{% endfor -%}
'''

            tmpt = Template(dhcp_conf)
            dhcp_conf = tmpt.render({'dhcp': info})
            mode = 'a+'
            if cmd.rebuild:
                mode = 'w'

            with open(dhcp_path, mode) as fd:
                fd.write(dhcp_conf)

            option_conf = '''\
{% for o in options -%}
{% if o.isDefaultL3Network -%}
{% if o.gateway -%}
tag:{{o.tag}},option:router,{{o.gateway}}
{% endif -%}
{% if o.dns -%}
tag:{{o.tag}},option:dns-server,{{o.dns}}
{% endif -%}
{% if o.dnsDomain -%}
tag:{{o.tag}},option:domain-name,{{o.dnsDomain}}
{% endif -%}
{% if o.routes -%}
tag:{{o.tag}},option:classless-static-route,{{o.routes}}
{% endif -%}
{% else -%}
tag:{{o.tag}},3
tag:{{o.tag}},6
{% endif -%}
tag:{{o.tag}},option:netmask,{{o.netmask}}
{% if o.mtu -%}
tag:{{o.tag}},option:mtu,{{o.mtu}}
{% endif -%}
{% endfor -%}
    '''
            tmpt = Template(option_conf)
            option_conf = tmpt.render({'options': info})

            with open(option_path, mode) as fd:
                fd.write(option_conf)

            hostname_conf = '''\
{% for h in hostnames -%}
{% if h.isDefaultL3Network and h.hostname -%}
{{h.ip}} {{h.hostname}}
{% endif -%}
{% endfor -%}
    '''
            tmpt = Template(hostname_conf)
            hostname_conf = tmpt.render({'hostnames': info})

            with open(dns_path, mode) as fd:
                fd.write(hostname_conf)

            if restart_dnsmasq:
                self._restart_dnsmasq(namespace_name, conf_file_path)
            else:
                self._refresh_dnsmasq(namespace_name, conf_file_path)
Example #45
    def download(self, req):
        rsp = DownloadRsp()

        def isDerivedQcow2Image(path):
            if path.startswith('http://') or path.startswith('https://'):
                resp = urllib2.urlopen(path)
                qhdr = resp.read(72)
                resp.close()
            else:
                resp = open(path)
                qhdr = resp.read(72)
                resp.close()
            if len(qhdr) != 72:
                return False
            # 'QFI\xfb' is the qcow2 magic; bytes 16-19 hold backing_file_size,
            # so a non-zero value means the image has a backing file
            if qhdr[:4] != 'QFI\xfb':
                return False
            return qhdr[16:20] != '\x00\x00\x00\x00'

        def fail_if_has_backing_file(fpath):
            if isDerivedQcow2Image(fpath):
                raise Exception('image has a backing file or %s does not exist!' %
                                fpath)

        cmd = jsonobject.loads(req[http.REQUEST_BODY])

        pool, image_name = self._parse_install_path(cmd.installPath)
        tmp_image_name = 'tmp-%s' % image_name

        @rollbackable
        def _1():
            shell.call('rbd rm %s/%s' % (pool, tmp_image_name))

        def _getRealSize(length):
            '''length looks like: 10245K'''
            logger.debug(length)
            if not length[-1].isalpha():
                return length
            units = {
                "g": lambda x: x * 1024 * 1024 * 1024,
                "m": lambda x: x * 1024 * 1024,
                "k": lambda x: x * 1024,
            }
            try:
                if not length[-1].isalpha():
                    return length
                return units[length[-1].lower()](int(length[:-1]))
            except:
                logger.warn(linux.get_exception_stacktrace())
                return length

        # whether we have an upload request
        if cmd.url.startswith(self.UPLOAD_PROTO):
            self._prepare_upload(cmd)
            rsp.size = 0
            rsp.uploadPath = self._get_upload_path(req)
            self._set_capacity_to_response(rsp)
            return jsonobject.dumps(rsp)

        report = Report(cmd.threadContext, cmd.threadContextStack)
        report.processType = "AddImage"
        report.resourceUuid = cmd.imageUuid
        report.progress_report("0", "start")

        if cmd.url.startswith('http://') or cmd.url.startswith('https://'):
            fail_if_has_backing_file(cmd.url)
            cmd.url = linux.shellquote(cmd.url)
            # roll back the tmp ceph image after importing it
            _1()
            if cmd.sendCommandUrl:
                Report.url = cmd.sendCommandUrl

            PFILE = shell.call('mktemp /tmp/tmp-XXXXXX').strip()
            content_length = shell.call('curl -sI %s|grep Content-Length' %
                                        cmd.url).strip().split()[1]
            total = _getRealSize(content_length)

            def _getProgress(synced):
                logger.debug(
                    "getProgress in ceph-bs-agent, synced: %s, total: %s" %
                    (synced, total))
                last = shell.call('tail -1 %s' % PFILE).strip()
                if not last or len(last.split()) < 1:
                    return synced
                logger.debug("last synced: %s" % last)
                written = _getRealSize(last.split()[0])
                if total > 0 and synced < written:
                    synced = written
                    if synced < total:
                        percent = int(round(float(synced) / float(total) * 90))
                        report.progress_report(percent, "report")
                return synced

            logger.debug("content-length is: %s" % total)

            _, _, err = bash_progress_1(
                'set -o pipefail;wget --no-check-certificate -O - %s 2>%s| rbd import --image-format 2 - %s/%s'
                % (cmd.url, PFILE, pool, tmp_image_name), _getProgress)
            if err:
                raise err
            actual_size = linux.get_file_size_by_http_head(cmd.url)

            if os.path.exists(PFILE):
                os.remove(PFILE)

        elif cmd.url.startswith('file://'):
            src_path = cmd.url.lstrip('file:')
            src_path = os.path.normpath(src_path)
            if not os.path.isfile(src_path):
                raise Exception('cannot find the file[%s]' % src_path)
            fail_if_has_backing_file(src_path)
            # roll back the tmp ceph image after importing it
            _1()
            shell.call("rbd import --image-format 2 %s %s/%s" %
                       (src_path, pool, tmp_image_name))
            actual_size = os.path.getsize(src_path)
        else:
            raise Exception('unknown url[%s]' % cmd.url)

        file_format = shell.call(
            "set -o pipefail; qemu-img info rbd:%s/%s | grep 'file format' | cut -d ':' -f 2"
            % (pool, tmp_image_name))
        file_format = file_format.strip()
        if file_format not in ['qcow2', 'raw']:
            raise Exception('unknown image format: %s' % file_format)

        if file_format == 'qcow2':
            conf_path = None
            try:
                with open('/etc/ceph/ceph.conf', 'r') as fd:
                    conf = fd.read()
                    conf = '%s\n%s\n' % (conf, 'rbd default format = 2')
                    conf_path = linux.write_to_temp_file(conf)

                shell.call(
                    'qemu-img convert -f qcow2 -O rbd rbd:%s/%s rbd:%s/%s:conf=%s'
                    % (pool, tmp_image_name, pool, image_name, conf_path))
                shell.call('rbd rm %s/%s' % (pool, tmp_image_name))
            finally:
                if conf_path:
                    os.remove(conf_path)
        else:
            shell.call('rbd mv %s/%s %s/%s' %
                       (pool, tmp_image_name, pool, image_name))
        report.progress_report("100", "finish")

        @rollbackable
        def _2():
            shell.call('rbd rm %s/%s' % (pool, image_name))

        _2()

        o = shell.call('rbd --format json info %s/%s' % (pool, image_name))
        image_stats = jsonobject.loads(o)

        rsp.size = long(image_stats.size_)
        rsp.actualSize = actual_size
        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
Example #46
    def download(self, req):
        rsp = DownloadRsp()

        def _get_origin_format(path):
            qcow2_length = 0x9007
            if path.startswith('http://') or path.startswith(
                    'https://') or path.startswith('ftp://'):
                resp = urllib2.urlopen(path)
                qhdr = resp.read(qcow2_length)
                resp.close()
            elif path.startswith('sftp://'):
                fd, tmp_file = tempfile.mkstemp()
                get_header_from_pipe_cmd = "timeout 60 head --bytes=%d %s > %s" % (
                    qcow2_length, pipe_path, tmp_file)
                clean_cmd = "pkill -f %s" % pipe_path
                shell.run(
                    '%s & %s && %s' %
                    (scp_to_pipe_cmd, get_header_from_pipe_cmd, clean_cmd))
                qhdr = os.read(fd, qcow2_length)
                if os.path.exists(tmp_file):
                    os.remove(tmp_file)
            else:
                resp = open(path)
                qhdr = resp.read(qcow2_length)
                resp.close()
            if len(qhdr) < qcow2_length:
                return "raw"

            return get_image_format_from_buf(qhdr)

        def get_origin_format(fpath, fail_if_has_backing_file=True):
            image_format = _get_origin_format(fpath)
            if image_format == "derivedQcow2" and fail_if_has_backing_file:
                raise Exception('image has a backing file or %s does not exist!' %
                                fpath)
            return image_format

        cmd = jsonobject.loads(req[http.REQUEST_BODY])
        pool, image_name = self._parse_install_path(cmd.installPath)
        tmp_image_name = 'tmp-%s' % image_name

        @rollbackable
        def _1():
            shell.check_run('rbd rm %s/%s' % (pool, tmp_image_name))

        def _getRealSize(length):
            '''length looks like: 10245K'''
            logger.debug(length)
            if not length[-1].isalpha():
                return length
            units = {
                "g": lambda x: x * 1024 * 1024 * 1024,
                "m": lambda x: x * 1024 * 1024,
                "k": lambda x: x * 1024,
            }
            try:
                if not length[-1].isalpha():
                    return length
                return units[length[-1].lower()](int(length[:-1]))
            except:
                logger.warn(linux.get_exception_stacktrace())
                return length

        # whether we have an upload request
        if cmd.url.startswith(self.UPLOAD_PROTO):
            self._prepare_upload(cmd)
            rsp.size = 0
            rsp.uploadPath = self._get_upload_path(req)
            self._set_capacity_to_response(rsp)
            return jsonobject.dumps(rsp)

        if cmd.sendCommandUrl:
            Report.url = cmd.sendCommandUrl

        report = Report(cmd.threadContext, cmd.threadContextStack)
        report.processType = "AddImage"
        report.resourceUuid = cmd.imageUuid
        report.progress_report("0", "start")

        url = urlparse.urlparse(cmd.url)
        if url.scheme in ('http', 'https', 'ftp'):
            image_format = get_origin_format(cmd.url, True)
            cmd.url = linux.shellquote(cmd.url)
            # roll back the tmp ceph image after importing it
            _1()

            _, PFILE = tempfile.mkstemp()
            content_length = shell.call(
                """curl -sLI %s|awk '/[cC]ontent-[lL]ength/{print $NF}'""" %
                cmd.url).splitlines()[-1]
            total = _getRealSize(content_length)

            def _getProgress(synced):
                logger.debug(
                    "getProgress in ceph-bs-agent, synced: %s, total: %s" %
                    (synced, total))
                last = linux.tail_1(PFILE).strip()
                if not last or len(last.split(
                )) < 1 or 'HTTP request sent, awaiting response' in last:
                    return synced
                logger.debug("last synced: %s" % last)
                written = _getRealSize(last.split()[0])
                if total > 0 and synced < written:
                    synced = written
                    if synced < total:
                        percent = int(round(float(synced) / float(total) * 90))
                        report.progress_report(percent, "report")
                return synced

            logger.debug("content-length is: %s" % total)

            _, _, err = bash_progress_1(
                'set -o pipefail;wget --no-check-certificate -O - %s 2>%s| rbd import --image-format 2 - %s/%s'
                % (cmd.url, PFILE, pool, tmp_image_name), _getProgress)
            if err:
                raise err
            actual_size = linux.get_file_size_by_http_head(cmd.url)

            if os.path.exists(PFILE):
                os.remove(PFILE)

        elif url.scheme == 'sftp':
            port = (url.port, 22)[url.port is None]
            _, PFILE = tempfile.mkstemp()
            pipe_path = PFILE + "fifo"
            scp_to_pipe_cmd = "scp -P %d -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null %s@%s:%s %s" % (
                port, url.username, url.hostname, url.path, pipe_path)
            sftp_command = "sftp -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o BatchMode=no -P %s -b /dev/stdin %s@%s" % (
                port, url.username, url.hostname) + " <<EOF\n%s\nEOF\n"
            if url.password is not None:
                scp_to_pipe_cmd = 'sshpass -p %s %s' % (linux.shellquote(
                    url.password), scp_to_pipe_cmd)
                sftp_command = 'sshpass -p %s %s' % (linux.shellquote(
                    url.password), sftp_command)

            actual_size = shell.call(
                sftp_command %
                ("ls -l " + url.path)).splitlines()[1].strip().split()[4]
            os.mkfifo(pipe_path)
            image_format = get_origin_format(cmd.url, True)
            cmd.url = linux.shellquote(cmd.url)
            # register a rollback that removes the temporary ceph image if a later step fails
            _1()

            def _get_progress(synced):
                logger.debug("getProgress in add image")
                if not os.path.exists(PFILE):
                    return synced
                last = linux.tail_1(PFILE).strip()
                if not last or not last.isdigit():
                    return synced
                report.progress_report(int(last) * 90 / 100, "report")
                return synced

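            # scp writes into a named pipe while pv measures throughput (logged to PFILE
            # for _get_progress) and feeds the stream into 'rbd import', so no full local
            # copy of the image is needed.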
            get_content_from_pipe_cmd = "pv -s %s -n %s 2>%s" % (
                actual_size, pipe_path, PFILE)
            import_from_pipe_cmd = "rbd import --image-format 2 - %s/%s" % (
                pool, tmp_image_name)
            _, _, err = bash_progress_1(
                'set -o pipefail; %s & %s | %s' %
                (scp_to_pipe_cmd, get_content_from_pipe_cmd,
                 import_from_pipe_cmd), _get_progress)

            if os.path.exists(PFILE):
                os.remove(PFILE)

            if os.path.exists(pipe_path):
                os.remove(pipe_path)

            if err:
                raise err

        elif url.scheme == 'file':
            # strip the 'file:' scheme prefix (str.lstrip strips characters, not a prefix)
            src_path = cmd.url[len('file:'):]
            src_path = os.path.normpath(src_path)
            if not os.path.isfile(src_path):
                raise Exception('cannot find the file[%s]' % src_path)
            image_format = get_origin_format(src_path, True)
            # register a rollback that removes the temporary ceph image if a later step fails
            _1()

            shell.check_run("rbd import --image-format 2 %s %s/%s" %
                            (src_path, pool, tmp_image_name))
            actual_size = os.path.getsize(src_path)
        else:
            raise Exception('unknown url[%s]' % cmd.url)

        file_format = shell.call(
            "set -o pipefail; qemu-img info rbd:%s/%s | grep 'file format' | cut -d ':' -f 2"
            % (pool, tmp_image_name))
        file_format = file_format.strip()
        if file_format not in ['qcow2', 'raw']:
            raise Exception('unknown image format: %s' % file_format)

        if file_format == 'qcow2':
            conf_path = None
            try:
                with open('/etc/ceph/ceph.conf', 'r') as fd:
                    conf = fd.read()
                    conf = '%s\n%s\n' % (conf, 'rbd default format = 2')
                    conf_path = linux.write_to_temp_file(conf)

                shell.check_run(
                    'qemu-img convert -f qcow2 -O rbd rbd:%s/%s rbd:%s/%s:conf=%s'
                    % (pool, tmp_image_name, pool, image_name, conf_path))
                shell.check_run('rbd rm %s/%s' % (pool, tmp_image_name))
            finally:
                if conf_path:
                    os.remove(conf_path)
        else:
            shell.check_run('rbd mv %s/%s %s/%s' %
                            (pool, tmp_image_name, pool, image_name))
        report.progress_report("100", "finish")

        @rollbackable
        def _2():
            shell.check_run('rbd rm %s/%s' % (pool, image_name))

        _2()

        o = shell.call('rbd --format json info %s/%s' % (pool, image_name))
        image_stats = jsonobject.loads(o)

        rsp.size = long(image_stats.size_)
        rsp.actualSize = actual_size
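        # a qcow2 source was converted to raw by the qemu-img step above,
        # so report the stored format as raw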
        if image_format == "qcow2":
            rsp.format = "raw"
        else:
            rsp.format = image_format

        self._set_capacity_to_response(rsp)
        return jsonobject.dumps(rsp)
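
The _getRealSize helper used above is not shown in this excerpt. A minimal sketch of such a parser, assuming curl and wget report sizes either as plain byte counts or with K/M/G/T suffixes (the name is hypothetical):

def _get_real_size_sketch(size_str):
    # convert strings such as '1048576' or '3.5M' into a byte count
    size_str = size_str.strip()
    if not size_str:
        return 0
    units = {'K': 1024, 'M': 1024 ** 2, 'G': 1024 ** 3, 'T': 1024 ** 4}
    suffix = size_str[-1].upper()
    if suffix in units:
        return int(float(size_str[:-1]) * units[suffix])
    return int(float(size_str))
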
Example #47
def stream_body(task, fpath, entity, boundary):
    def _progress_consumer(total):
        task.downloadedSize = total

    @thread.AsyncThread
    def _do_import(task, fpath):
        shell.call("cat %s | rbd import --image-format 2 - %s" %
                   (fpath, task.tmpPath))

    while True:
        headers = cherrypy._cpreqbody.Part.read_headers(entity.fp)
        p = CustomPart(entity.fp, headers, boundary, fpath, _progress_consumer)
        if not p.filename:
            continue

        # start consumer
        _do_import(task, fpath)
        try:
            p.process()
        except Exception as e:
            logger.warn('process image %s failed: %s' %
                        (task.imageUuid, str(e)))
        finally:
            if p.wfd is not None:
                p.wfd.close()
        break

    if task.downloadedSize != task.expectedSize:
        task.fail('incomplete upload, got %d, expect %d' %
                  (task.downloadedSize, task.expectedSize))
        shell.call('rbd rm %s' % task.tmpPath)
        return

    file_format = None

    try:
        file_format = linux.get_img_fmt('rbd:' + task.tmpPath)
    except Exception as e:
        task.fail('upload image %s failed: %s' % (task.imageUuid, str(e)))
        return

    if file_format == 'qcow2':
        if linux.qcow2_get_backing_file('rbd:' + task.tmpPath):
            task.fail('Qcow2 image %s has backing file' % task.imageUuid)
            shell.call('rbd rm %s' % task.tmpPath)
            return

        conf_path = None
        try:
            with open('/etc/ceph/ceph.conf', 'r') as fd:
                conf = fd.read()
                conf = '%s\n%s\n' % (conf, 'rbd default format = 2')
                conf_path = linux.write_to_temp_file(conf)

            shell.call(
                'qemu-img convert -f qcow2 -O rbd rbd:%s rbd:%s:conf=%s' %
                (task.tmpPath, task.dstPath, conf_path))
            shell.call('rbd rm %s' % task.tmpPath)
        finally:
            if conf_path:
                os.remove(conf_path)
    else:
        shell.call('rbd mv %s %s' % (task.tmpPath, task.dstPath))

    task.success()
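
The linux.get_img_fmt and linux.qcow2_get_backing_file helpers are not part of this excerpt. A rough standalone equivalent, assuming qemu-img was built with rbd support (the function name is illustrative):

import json
import subprocess

def probe_rbd_image_sketch(path):
    # path is a qemu-img target such as 'rbd:pool/image-name'
    out = subprocess.check_output(['qemu-img', 'info', '--output=json', path])
    info = json.loads(out)
    # 'backing-filename' only appears when the image has a backing file
    return info.get('format'), info.get('backing-filename')
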
Example #48
def lichbd_add_disks(monHostname):
    shell.call('ssh %s /opt/fusionstack/lich/bin/lich.node --raid_add all' %
               monHostname)
    shell.call('ssh %s /opt/fusionstack/lich/bin/lich.node --disk_add all' %
               monHostname)
Example #49
def lichbd_create_cluster(monHostnames, sshPasswords):
    if len(monHostnames) < 3:
        raise Exception('creating a stor cluster requires at least three nodes')

    nodes = ''
    for monHostname in monHostnames:
        nodes = nodes + ' ' + monHostname

    shell.call(
        "echo '#hosts list for nohost mode' > /opt/fusionstack/etc/hosts.conf")
    for monHostname in monHostnames:
        hostname = shell.call('ssh %s hostname' % monHostname)
        hostname = hostname.strip()
        shell.call('echo %s %s >> /opt/fusionstack/etc/hosts.conf' %
                   (monHostname, hostname))

    fields = monHostnames[0].split('.')
    field1 = int(fields[0])
    if field1 > 0 and field1 < 127:
        net = fields[0] + '.0.0.0'
    elif field1 > 127 and field1 <= 191:
        net = fields[0] + '.' + fields[1] + '.0.0'
    elif field1 > 191 and field1 < 224:
        net = fields[0] + '.' + fields[1] + '.' + fields[2] + '.0'
    else:
        raise Exception('invalid ip')

    shell.call(
        "sed -i 's/^\s*\([0-9]\{1,3\}\).*/                %s\/24;/g' /opt/fusionstack/etc/lich.conf"
        % net)
    shell.call(
        "sed -i 's/^\s*\#nohosts on;/       nohosts on;/g' /opt/fusionstack/etc/lich.conf"
    )

    shell.call('/opt/fusionstack/lich/bin/lich prep %s -p "%s"' %
               (nodes, sshPasswords[0]))
    shell.call('/opt/fusionstack/lich/bin/lich create %s' % nodes)
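
The network derivation above assumes classful addressing based on the first mon IP. Factored out as a standalone helper (the name is hypothetical), the same logic reads:

def classful_network_sketch(ip):
    # '10.1.2.3' -> '10.0.0.0', '172.16.1.2' -> '172.16.0.0', '192.168.1.2' -> '192.168.1.0'
    fields = ip.split('.')
    first = int(fields[0])
    if 0 < first < 127:          # class A
        return '%s.0.0.0' % fields[0]
    elif 127 < first <= 191:     # class B
        return '%s.%s.0.0' % (fields[0], fields[1])
    elif 191 < first < 224:      # class C
        return '%s.%s.%s.0' % (fields[0], fields[1], fields[2])
    raise Exception('invalid ip: %s' % ip)

Note that the sed command above then writes the result with a fixed /24 suffix regardless of which class matched.
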
Example #50
 def _1():
     shell.call(
         "iptables -D INPUT -d %s -p tcp --dport %s --syn -j DROP" %
         (to.vip, to.loadBalancerPort))
    def _start_multi_nodes(self, restart = False):
        nodes = []
        threads = []
        node1 = self.nodes[0]
        for node in self.nodes:
            #The reserved node is used by test cases. 
            if not restart and node.reserve__:
                continue

            if not node.dockerImage__:
                print 'Deploy node in hosts'
                # some zstack-server nodes run inside VMs, where startup is slow,
                # so increase the timeout to 180s
                if linux.is_ip_existing(node.ip_):
                    cmd = 'zstack-ctl stop; nohup zstack-ctl start'
                    thread = threading.Thread(target=shell_cmd_thread, args=(cmd, True, ))
                elif not linux.is_ip_existing(node1.ip_):
                # when node1's IP is not local, it usually means woodpecker is running on a host other than the MN
                    cmd = 'zstack-ctl stop_node --host=%s ; zstack-ctl start_node --host=%s --timeout=180' % (node.ip_, node.ip_)
                    thread = threading.Thread(target=ssh.execute, args=(cmd, node1.ip_, node1.username_, node1.password_, ))
                else:
                    cmd = 'zstack-ctl stop_node --host=%s ; zstack-ctl start_node --host=%s --timeout=180' % (node.ip_, node.ip_)
                    thread = threading.Thread(target=shell_cmd_thread, args=(cmd, True, ))
                threads.append(thread)
            else:
                print 'Deploy node in docker'
                docker_node = DockerNode(self)
                docker_node.set_docker_image(node.dockerImage__)
                docker_node.set_node_ip(node.ip__)
                docker_node.prepare_node()
                nodes.append(docker_node)
                thread = threading.Thread(target=docker_node.start_node)
                threads.append(thread)

        for thread in threads:
            thread.start()

        self._wait_for_thread_completion('start management node', 200)
        time.sleep(10)

        if node_exception:
            print 'node start hit an exception:'
            info1 = node_exception[0][1]
            info2 = node_exception[0][2]
            raise info1, None, info2
                
        current_time = time.time()
        # the overall timeout for multi-node startup is 300s
        timeout_time = current_time + 300
        for node in self.nodes:
            #The reserved node is used by test cases. 
            if node.reserve__:
                continue
            new_time = time.time() 
            if new_time >= timeout_time:
                new_timeout = 1
            else:
                new_timeout = timeout_time - new_time

            if not linux.wait_callback_success(\
                    node_ops.is_management_node_start, \
                    node.ip_, timeout=new_timeout, interval=0.5):
                raise ActionError('management node did not start up on host: %s' \
                        % node.ip_)

        zstack_home = '%s/apache-tomcat/webapps/zstack/' % self.install_path
        cmd = 'zstack-ctl setenv ZSTACK_HOME=%s' % zstack_home
        shell.call(cmd)
 def _configure_bridge(self):
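     # Let bridged traffic traverse iptables and enable IPv4 forwarding, so that
     # host firewall rules apply to traffic crossing the bridge.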
     shell.call('modprobe br_netfilter || true')
     shell.call('echo 1 > /proc/sys/net/bridge/bridge-nf-call-iptables')
     shell.call('echo 1 > /proc/sys/net/bridge/bridge-nf-filter-vlan-tagged')
     shell.call('echo 1 > /proc/sys/net/ipv4/conf/default/forwarding')
    def _extra_deployment(self):
        if not os.path.exists(EXTRA_DEPLOY_SCRIPT):
            return

        shell.call('%s %s' % (EXTRA_DEPLOY_SCRIPT, self.catalina_home))
        print('Extra deployment by %s' % EXTRA_DEPLOY_SCRIPT)
Example #54
 def delete_image(_):
     # check existence first so an already-deleted image does not make us wait for a timeout
     img = "%s/%s" % (pool, image_name)
     shell.call('rbd info %s && rbd rm %s' % (img, img))
     return True
Example #55
 def _get_file_size(self, path):
     o = shell.call('rbd --format json info %s' % path)
     o = jsonobject.loads(o)
     return long(o.size_)
Example #56
    def _refresh(self, to):
        conf = '''global
    maxconn {{maxConnection}}
    log 127.0.0.1 local1
    user haproxy
    group haproxy
    daemon

listen {{listenerUuid}}
    mode {{mode}}
    timeout client {{connectionIdleTimeout}}s
    timeout server {{connectionIdleTimeout}}s
    timeout connect 60s
    balance {{balancerAlgorithm}}
    bind {{vip}}:{{loadBalancerPort}}
    {% for ip in nicIps %}
    server nic-{{ip}} {{ip}}:{{loadBalancerPort}} check port {{checkPort}} inter {{healthCheckInterval}}s rise {{healthyThreshold}} fall {{unhealthyThreshold}}
    {% endfor %}
'''

        pid_file = self._make_pid_file_path(to.lbUuid, to.listenerUuid)
        if not os.path.exists(pid_file):
            shell.call('touch %s' % pid_file)

        @rollbackable
        def _0():
            shell.call('rm -f %s' % pid_file)

        _0()

        conf_file = self._make_conf_file_path(to.lbUuid, to.listenerUuid)

        context = {}
        context.update(to.__dict__)
        for p in to.parameters:
            k, v = p.split('::')
            if k == 'healthCheckTarget':
                check_method, check_port = v.split(':')
                if check_port == 'default':
                    context['checkPort'] = to.instancePort
                else:
                    context['checkPort'] = check_port

            context[k] = v

        conf_tmpt = Template(conf)
        conf = conf_tmpt.render(context)
        with open(conf_file, 'w') as fd:
            fd.write(conf)

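        # Temporarily drop new SYN packets to the listener so the reload below does not
        # race with incoming connections; the rule is removed once the new haproxy
        # process has taken over (the rollback removes it on failure as well).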
        shell.call(
            "iptables -I INPUT -d %s -p tcp --dport %s --syn -j DROP && sleep 0.5"
            % (to.vip, to.loadBalancerPort))

        @rollbackable
        def _1():
            shell.call(
                "iptables -D INPUT -d %s -p tcp --dport %s --syn -j DROP" %
                (to.vip, to.loadBalancerPort))

        _1()

        shell.call('haproxy -D -f %s -p %s -sf $(cat %s)' %
                   (conf_file, pid_file, pid_file))
        shell.call("iptables -D INPUT -d %s -p tcp --dport %s --syn -j DROP" %
                   (to.vip, to.loadBalancerPort))

        ipt = iptables.from_iptables_save()
        chain_name = self._make_chain_name(to)
        ipt.add_rule('-A INPUT -d %s/32 -j %s' % (to.vip, chain_name))
        ipt.add_rule('-A %s -p tcp -m tcp --dport %s -j ACCEPT' %
                     (chain_name, to.loadBalancerPort))
        ipt.iptable_restore()
Example #57
 def do_deletion():
     shell.call('rbd rm %s' % path)
Example #58
                'When you create a stor cluster, the root password must be the same on all nodes.'
            )

    for monHostname in monHostnames:
        try:
            socket.inet_aton(monHostname)
        except Exception, e:
            raise Exception(
                'Invalid IP address; only IP addresses are supported for now, DNS will be supported in the future'
            )

    nodes = ''
    for monHostname in monHostnames:
        nodes = nodes + ' ' + monHostname

    shell.call('/opt/fusionstack/lich/bin/lich sshkey %s -p "%s"' %
               (nodes, sshPasswords[0]))

    if os.path.exists('/opt/fusionstack/etc/cluster.conf'):
        fusionstorIsReady = True
    else:
        for monHostname in monHostnames:
            return_code = shell.run(
                'ssh %s ls /opt/fusionstack/etc/cluster.conf' % monHostname)
            if return_code == 0:
                fusionstorIsReady = True
                break

    return fusionstorIsReady


def lichbd_check_node_in_cluster(fusionstorIsReady=False, monHostnames=None):
Example #59
 def _0():
     tpath = "%s/%s" % (pool, tmp_image_name)
     shell.call('rbd info %s > /dev/null && rbd rm %s' % (tpath, tpath))
Example #60
 def _1():
     shell.call('rbd rm %s/%s' % (pool, tmp_image_name))