Code Example #1
File: create_cloud.py  Project: LDoctor/flask_auto
def get_or_set_system_info():
    if request.method == 'GET':
        """
            字符串拼接ip=192.168.102.91
            返回对应服务器的数据
        """
        ip = request.args.get("ip")
        data =get_system_info(ip)
        print 'cpu总数: ', GetModel.cpu_count()
        return json.dumps({"code": 0, "data": data})

    elif request.method == 'POST':
        """
        设置节点信息
        # _ = [{
        #     bigdata: 2
        #     cpu: 2
        #     ip: "192.168.66.11"
        #     mem: 2
        #     netmask: "255.255.255.0"
        #     network_ip: "192.168.56.66"
        #     network_name: "eno3"
        #     node-name: "node-1"
        # }]
        """
        r_data = request.json
        current_app.logger.info("server accept node data: {}".format(r_data))
        node_obj = r_data['node']
        print('Received node data: {}'.format(node_obj))
        for item in node_obj:
            if isinstance(item, list):
                return json.dumps({"code": 1, 'message': '数据有误'})
        current_app.logger.info(r_data)
        model.update_or_create('node', json.dumps(node_obj))
        return json.dumps({"code": 0, 'message': 'Successful'})
Code Example #2
def set_system_info(ip, cpu, mem):
    """设置nova配置文件 cpu和内存"""
    vcpu_pin_set = '4-{}'.format(GetModel.cpu_count() - cpu)
    mem_set = (mem + 8) * 1024
    out_cpu = ssh_popen_1(ip, "cat /etc/nova/nova.conf | grep vcpu_pin_set")
    out_mem = ssh_popen_1(
        ip, "cat /etc/nova/nova.conf | grep reserved_host_memory_mb")
    if out_cpu:
        ssh_popen_1(
            ip, "sed -i 's/{}/vcpu_pin_set = {}/' {file}".format(
                out_cpu, vcpu_pin_set, file="/etc/nova/nova.conf"))
    else:
        ssh_popen_1(
            ip, "echo vcpu_pin_set = {} >> {file}".format(
                vcpu_pin_set, file="/etc/nova/nova.conf"))
    # Set reserved memory, in MB
    if out_mem:
        ssh_popen_1(
            ip, "sed -i 's/{}/reserved_host_memory_mb = {}/' {file}".format(
                out_mem, mem_set, file="/etc/nova/nova.conf"))
    else:
        ssh_popen_1(
            ip, "echo reserved_host_memory_mb = {} >> {file}".format(
                mem_set, file="/etc/nova/nova.conf"))
    current_app.logger.info('nova configuration update finished')
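
As a rough worked example (all numbers assumed): on a 32-core host with cpu=2 and mem=4, the function above would compute the following values.

# Worked example with assumed inputs: a 32-core host, cpu=2, mem=4 (GB).
cpu_count = 32                                  # what GetModel.cpu_count() might return
vcpu_pin_set = '4-{}'.format(cpu_count - 2)     # -> '4-30'
mem_set = (4 + 8) * 1024                        # -> 12288 MB reserved for the host
# The resulting nova.conf lines would then read:
#   vcpu_pin_set = 4-30
#   reserved_host_memory_mb = 12288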
Code Example #3
def get_system_info(ip):
    """获取物理机的详情"""
    # 字节转换GB单位
    mem = int(
        ssh_popen_1(ip,
                    "cat /proc/meminfo | grep MemTotal").split(':')[1].replace(
                        ' ', '').split('kB')[0]) / 1024**2
    _cpu_count = ssh_popen_1(
        ip, "cat /proc/cpuinfo | grep 'processor' | wc -l")  # 可用的总数
    available_disk = ssh_popen_1(ip, "ceph osd df | grep TOTAL")
    if available_disk:
        _a_disk = available_disk.split(' ')[1:]
        disk_data = []
        for item in _a_disk:
            if item:
                if 'G' in item:
                    disk_data.append(int(item.split('G')[0]))
                elif 'M' in item:
                    disk_data.append(int(item.split('M')[0]) / 1024.0)

        data = {
            "cpu": "4/{}".format(_cpu_count),
            "mem": "8/{}".format(mem),
            "disk": "{}/{}".format(disk_data[1], int(disk_data[0]))
        }

        model.update_or_create('count_cpu', int(_cpu_count) - 1)
        model.update_or_create('count_mem', mem)
        model.update_or_create('count_disk', int(disk_data[0]))
        current_app.logger.info('-----model panaocs-----{}'.format(
            GetModel.cpu_count()))
        return data
    return []
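
Every example in this listing calls a helper named ssh_popen_1 that is not shown here. A minimal sketch of what such a helper could look like, assuming key-based SSH access and returning the command's stdout as a stripped string (the real implementation in flask_auto may differ):

# Hypothetical stand-in for the project's ssh_popen_1 helper.
import subprocess

def ssh_popen_1(ip, cmd):
    # Run the command on the remote host over ssh and return stripped stdout.
    proc = subprocess.Popen(['ssh', ip, cmd],
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    out, _err = proc.communicate()
    return out.decode('utf-8').strip()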
Code Example #4
def get_nodes():
    db_update_panaocs()
    insert_node = {}
    node = GetModel.get_node()
    count_cpu = GetModel.cpu_count()
    for item in node:
        # Write the node data into the database
        insert_node[item['node-name']] = {
            "cpu_start": int(count_cpu) - int(item['cpu']) + 1,
            "mem": item['mem'],
            "cpu_stop": count_cpu
        }
    current_app.logger.info('Node data to be written to the database >>> {}'.format(insert_node))
    model.update_or_create('node', insert_node)
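
For example, with a total of 32 CPUs and a single node requesting 2 CPUs (values assumed for illustration), the dictionary persisted by get_nodes would look roughly like this:

# Assumed example of the structure written under the 'node' key.
insert_node = {
    "node-1": {
        "cpu_start": 31,   # 32 - 2 + 1
        "mem": 2,
        "cpu_stop": 32,
    }
}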
Code Example #5
def deploy_bigdata():
    """
    上传lxc安装包,安装lxc环境
    :d  网关
    :y  预留集群ip数
    :network_name 网卡名字
    :netmask      子网掩码
    :bigdata_size 存储大小
    """
    # 获取cpu总数
    count_cpu = GetModel.cpu_count()
    panaocs_data = db_update_panaocs()
    node = GetModel.get_node()
    current_app.logger.info('node information >>> {}'.format(node))
    insert_node = {}
    deploy_path = PathDir.deploy()
    exclude_path = PathDir.exclude_list()
    q.put({"type": "大数据", "size": "上传大小", "speed": "start", "percentage": "{}".format(generate_random(5, 13))})
    _start = 14
    _stop = 17
    for item in node:
        # Write the node data into the database
        insert_node[item['node-name']] = {
            "cpu_start": int(count_cpu) - int(item['cpu']) + 1,
            "mem": item['mem'],
            "cpu_stop": count_cpu
        }
        set_system_info(item['ip'], item['cpu'], item['mem'])
        q.put({"type": "大数据", "size": "上传大小", "speed": "start", "percentage": "{}".format(generate_random(_start, _start))})
        _start += 4
        _stop += 11

        namenode = ssh_popen_1(item['ip'], "hostname")
        install_path = '/var/deploy/install.py'
        if namenode == "node-1":
            current_app.logger.info('node-1: moving deploy to the /var directory')
            from app_projects.deploy.get_config import move_file
            move_file()
            # if os.path.exists('/var/deploy'):
            #     shutil.rmtree('/var/deploy')
                # shutil.copytree(deploy_path, '/var/deploy')
            # else:
                # shutil.copytree(deploy_path, '/var/deploy')
            shutil.copy(PathDir.install(), '/var/deploy')
            current_app.logger.info('node-1 project copy finished')

            q.put({"type": "大数据", "size": "", "speed": "start", "percentage": "{}".format(generate_random(_start, _start))})
            _start += 4
            _stop += 11
        else:
            current_app.logger.info("{node} move deploy to /var".format(node=item['node-name']))
            os.system('rsync -av -e ssh  {deploy} --exclude-from={exclude_path} {node_name}:/var/'.format(
                exclude_path=exclude_path, deploy='/var/deploy', node_name=item['node-name']
            ))
            q.put({"type": "大数据", "size": "上传大小", "speed": "start", "percentage": "{}".format(generate_random(_start, _start))})
            _start += 4
            _stop += 11

            current_app.logger.info(
                'rsync -av -e ssh  {deploy} --exclude-from={exclude_path} {node_name}:/var/'.format(
                    exclude_path=exclude_path, deploy='/var/deploy', node_name=item['node-name']
                )
            )
            current_app.logger.info(item['node-name'] + ' >>> file move finished')

        q.put({"type": "大数据", "size": "上传大小", "speed": "start",
               "percentage": "{}".format(generate_random(_start, _start))})
        _start += 4
        _stop += 11

        # install_cmd = 'sh /var/deploy/install.sh -d {namenode} {bigdata_size} {network_name} {ip} {netmask} {d} {y}'
        install_cmd = 'python {py_shell} {size} {network_name} {ip} {netmask} {geteway} {repo_ip} {pack_path}'
        dev_path = None
        if isinstance(item.get('dev_path'), list) and item.get('dev_path'):
            dev_path = ','.join(item.get('dev_path'))
            install_cmd = 'python {py_shell} {size} {network_name} {ip} {netmask} {geteway} {repo_ip} {pack_path} --disk={disk}'
        install_yum_or_create_network = install_cmd.format(
            py_shell=install_path,
            size=item.get('bigdata', None),
            network_name=item.get('network_name'),
            ip=item.get('network_ip'),  # LXC NIC ip
            netmask=item.get('netmask'),
            disk=dev_path,
            geteway=panaocs_data.get('network_segment').replace('*', '1'),
            repo_ip=panaocs_data.get('network_segment').replace('*', str(panaocs_data.get('repo_ip'))),
            pack_path='/var/deploy'
        )

        q.put({"type": "大数据", "size": "上传大小", "speed": "start",
               "percentage": "{}".format(generate_random(_start, _start))})
        _start += 4
        _stop += 11

        current_app.logger.info('deploy bigdata install params: {}'.format(install_yum_or_create_network))
        ssh_popen_1(item['ip'], "\cp /var/deploy/rsync.pass /usr/local/udsafe/")
        ssh_popen_1(item['ip'], "\cp /var/deploy/lxc-create-bigdata.py /usr/local/udsafe/")
        if namenode == "node-1":
            os.system(install_yum_or_create_network)
        else:
            os.system(ssh_node.format(item['ip'], install_yum_or_create_network))

        q.put({"type": "大数据", "speed": "start", "percentage": "{}".format(generate_random(_start, _start))})
        _start += 4
        _stop += 11

        r = ssh_popen_1(item['ip'], "lxc-ls")
        if r == "repo":
            current_app.logger.info("""
                    ——————————————————————————————————————————————————
                    |               Data already exists              |
                    ——————————————————————————————————————————————————
                """)
    current_app.logger.info('Node data to be written to the database >>> {}'.format(insert_node))
    model.update_or_create('node', insert_node)
    current_app.logger.info('update node information')
    q.put({"type": "大数据", "size": "上传大小", "speed": "start", "percentage": "{}".format(generate_random(_start, _start))})
    from app_projects.deploy.get_config import copy_template
    copy_template()
    push_bigdata_db()
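
To make the generated install command concrete, with assumed node values (bigdata=2, network_name=eno3, network_ip=192.168.56.66, netmask=255.255.255.0, network_segment=192.168.66.*, repo_ip=200), the install_yum_or_create_network string built above would look something like this:

# Illustration only -- every value below is assumed, not taken from the project.
install_yum_or_create_network = (
    'python /var/deploy/install.py 2 eno3 192.168.56.66 255.255.255.0 '
    '192.168.66.1 192.168.66.200 /var/deploy'
)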