def update_db():
    if os.path.exists("/usr/local/udsafe/automatedkvm.txt"):
        with open('/usr/local/udsafe/automatedkvm.txt', 'r') as f:
            ip = f.read()
        model.update_or_create('kvm_ip', ip)
    push_bigdata_db()
    return json.dumps({"code": 0, 'message': 'Successful'})

def db_update_panaocs():
    """
    Write the assembled panaocs data to SQLite and return it
    :return:
    """
    node = GetModel.get_node()
    # Back up the previous node data to a file before it is overwritten
    with open('/usr/local/udsafe/parent_bigdata_info', 'w') as f:
        f.write(json.dumps(node))
    try:
        max_node_ip = max(int(i["network_ip"].split('.')[-1]) for i in node)
    except Exception:
        # Older records wrap the node list as {"node": [...]}; unwrap it so the
        # lookups below (node[0]['netmask'], node[0]['network_ip']) still work
        node = node['node']
        max_node_ip = max(int(i["network_ip"].split('.')[-1]) for i in node)
    panaocs_data = {
        "zfs_pool": "bigdata",
        "repo_ip": str(max_node_ip + 1),
        "netmask": node[0]['netmask'],
        "start_ip": str(max_node_ip + 1 + 5),
        "link": "br0",
        "panacube_ip": str(max_node_ip + 2),
        "panacube_nic_name": "eth2",
        "network_segment": '.'.join(node[0]['network_ip'].split('.')[:-1]) + '.*'
    }
    current_app.logger.info(panaocs_data)
    model.update_or_create('panaocs', panaocs_data)
    current_app.logger.info('big data configuration written to file')
    return panaocs_data

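# Illustrative sketch only (not called by the code above): how db_update_panaocs()
# derives its addressing from the node list. The three sample nodes below are
# assumptions for demonstration, not project data.
def _panaocs_addressing_example():
    sample_nodes = [
        {"network_ip": "192.168.56.11", "netmask": "255.255.255.0"},
        {"network_ip": "192.168.56.12", "netmask": "255.255.255.0"},
        {"network_ip": "192.168.56.13", "netmask": "255.255.255.0"},
    ]
    max_octet = max(int(n["network_ip"].split('.')[-1]) for n in sample_nodes)  # 13
    return {
        "repo_ip": str(max_octet + 1),       # "14": one above the highest node octet
        "panacube_ip": str(max_octet + 2),   # "15": two above the highest node octet
        "start_ip": str(max_octet + 1 + 5),  # "19": repo_ip plus five reserved IPs
        "network_segment": '.'.join(
            sample_nodes[0]["network_ip"].split('.')[:-1]) + '.*',  # "192.168.56.*"
    }
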
def get_or_set_system_info():
    if request.method == 'GET':
        # ip is passed as a query parameter, e.g. ip=192.168.102.91;
        # return that server's details
        ip = request.args.get("ip")
        data = get_system_info(ip)
        print 'total cpu count: ', GetModel.cpu_count()
        return json.dumps({"code": 0, "data": data})
    elif request.method == 'POST':
        # Set node information. Expected payload:
        # _ = [{
        #     bigdata: 2
        #     cpu: 2
        #     ip: "192.168.66.11"
        #     mem: 2
        #     netmask: "255.255.255.0"
        #     network_ip: "192.168.56.66"
        #     network_name: "eno3"
        #     node-name: "node-1"
        # }]
        r_data = request.json
        current_app.logger.info("server accept node data: {}".format(r_data))
        node_obj = r_data['node']
        print 'received node data', node_obj
        for item in node_obj:
            if isinstance(item, list):
                return json.dumps({"code": 1, 'message': 'invalid data'})
        current_app.logger.info(r_data)
        model.update_or_create('node', json.dumps(node_obj))
        return json.dumps({"code": 0, 'message': 'Successful'})

def start_deploy():
    # Deployment type: kvm or docker
    _type = request.json.get('type')
    if _type in ('docker', 'kvm'):
        model.update_or_create('type', _type)
    else:
        return json.dumps({'code': 1, 'message': "invalid data type"})
    return json.dumps({"code": 0, 'message': 'Successful'})

def verify_ip():
    """
    Check whether the IP is available
    :return:
    """
    ip = request.args.get("ip")
    network_id = request.args.get("network_id")
    model.update_or_create('network_id', network_id)
    model.update_or_create('kvm_ip', ip)
    data = ClearAwCloud(GetModel.get_auth_header()).get_network_card(ip, network_id)
    return json.dumps({"code": 0, "data": data})

def get_nodes():
    db_update_panaocs()
    insert_node = {}
    node = GetModel.get_node()
    count_cpu = GetModel.cpu_count()
    for item in node:
        # Build the per-node record to write into the database
        insert_node[item['node-name']] = {
            "cpu_start": int(count_cpu) - int(item['cpu']) + 1,
            "mem": item['mem'],
            "cpu_stop": count_cpu
        }
    current_app.logger.info('node data to be written to the database >>> {}'.format(insert_node))
    model.update_or_create('node', insert_node)

def warp(*args, **kwargs):
    # Inner wrapper of the login-check decorator; `fun` is presumably the
    # wrapped callable supplied by the enclosing decorator (not shown here).
    is_login = fun(*args, **kwargs)
    if isinstance(is_login, dict) and 'code' in is_login:
        if is_login['code'] == '0':
            return is_login['data']['data']
        if is_login['code'] == '00010102':
            pass
        if is_login['code'] == '00010105':
            # The cached token is no longer valid; log in again and refresh it
            manager = AWCloudManage()
            manager._login()
            model.update_or_create('get_auth_header', manager.get_auth_header())
        else:
            return is_login
    else:
        return is_login
    return is_login

def get_usb_mount(lsscsi_id):
    cmd = "lsscsi | grep '{id}'".format(id=lsscsi_id.strip('[]'))
    dev_path = get_cmd(cmd).split(' ')[-2]
    current_app.logger.info("get usb dev path: {}".format(dev_path))
    # If the device is already mounted we can read its mount point; otherwise mount it
    mount_num = 0
    u_path = get_cmd('mount | grep {}'.format(dev_path))
    if u_path:
        current_app.logger.info({'code': 1, 'message': 'this USB drive is already mounted'})
        current_app.logger.info('current USB mount path ---- {}'.format(u_path.split(' ')[2]))
        model.update_or_create('u_path', u_path.split(' ')[2])
        current_app.logger.info('----- USB path stored in model ----- {}'.format(model.get_params('u_path')))
        return True
    else:
        mount_num += 1
        get_cmd('sudo mount {usb}1 /mnt{num}'.format(usb=dev_path, num=mount_num))
        model.update_or_create('u_path', '/mnt{}'.format(mount_num))
        current_app.logger.info('----- USB path stored in model ----- {}'.format(model.get_params('u_path')))
        return True

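# `get_cmd` is not defined in this module. Below is a minimal sketch of its
# assumed behaviour (run a shell command and return its stdout as a stripped
# string); the real helper elsewhere in the project may differ. The name
# `_get_cmd_sketch` is hypothetical and only for illustration.
import subprocess


def _get_cmd_sketch(cmd):
    """Hypothetical stand-in for get_cmd(cmd)."""
    proc = subprocess.Popen(cmd, shell=True,
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, _ = proc.communicate()
    return out.strip()
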
def accept_info():
    """
    Accept the 2.0 or 3.0 account credentials -> GET returns the external network information
    # {
    #     "awcloud_ip": "",
    #     "username": "******",
    #     "password": "******",
    #     "panacube_ip": 3
    # }
    :return:
    """
    if request.method == "GET":
        r_data = ClearAwCloud(GetModel.get_auth_header()).get_external_network()
        if 'code' in r_data:
            return json.dumps({'code': 1})
        return json.dumps({'data': r_data, 'code': 0})
    r_data = request.json
    model.update_or_create('panacube', r_data)
    return json.dumps({"code": 0, 'message': 'Successful'})

def kvm_image_upload():
    """
    Upload the panacube image; requires the 2.0 account credentials
    :return:
    """
    id2 = ClearAwCloud(GetModel.get_auth_header()).upload_image(
        PathDir.image_path(None), 'panacube3.0')
    q.put({
        "type": "image upload",
        "size": "upload size",
        "speed": "conducting",
        "percentage": "{}".format(generate_random(4, 15))
    })
    current_app.logger.info('upload kvm image: {}'.format(id2))
    model.update_or_create('panacube_img_id', id2['id'])
    q.put({
        "type": "image upload",
        "size": "upload size",
        "speed": "conducting",
        "percentage": "{}".format(generate_random(16, 32))
    })

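# `generate_random` and the progress queue `q` are defined elsewhere in the
# project. Below is a minimal sketch of the assumed behaviour of
# `generate_random` (a random integer in the given inclusive range, used for
# coarse progress percentages); the real helper may differ.
import random


def _generate_random_sketch(start, stop):
    """Hypothetical stand-in for generate_random(start, stop)."""
    return random.randint(start, stop)
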
def get_system_info(ip):
    """Get the details of a physical host"""
    # /proc/meminfo reports kB; convert to GB
    mem = int(
        ssh_popen_1(ip, "cat /proc/meminfo | grep MemTotal").split(':')[1].replace(
            ' ', '').split('kB')[0]) / 1024 ** 2
    _cpu_count = ssh_popen_1(
        ip, "cat /proc/cpuinfo | grep 'processor' | wc -l")
    # Total available disk space
    available_disk = ssh_popen_1(ip, "ceph osd df | grep TOTAL")
    if available_disk:
        _a_disk = available_disk.split(' ')[1:]
        disk_data = []
        for item in _a_disk:
            if item:
                if 'G' in item:
                    disk_data.append(int(item.split('G')[0]))
                elif 'M' in item:
                    disk_data.append(int(item.split('M')[0]) / 1024.0)
        data = {
            "cpu": "4/{}".format(_cpu_count),
            "mem": "8/{}".format(mem),
            "disk": "{}/{}".format(disk_data[1], int(disk_data[0]))
        }
        model.update_or_create('count_cpu', int(_cpu_count) - 1)
        model.update_or_create('count_mem', mem)
        model.update_or_create('count_disk', int(disk_data[0]))
        current_app.logger.info('-----model panaocs-----{}'.format(
            GetModel.cpu_count()))
        return data
    return []

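# `ssh_popen_1` is not defined in this module. Below is a minimal sketch of its
# assumed behaviour (run a command on a remote host over ssh and return its
# stdout as a stripped string); the real helper may differ, e.g. it may rely on
# paramiko or extra ssh options. The name `_ssh_popen_1_sketch` is hypothetical.
import subprocess


def _ssh_popen_1_sketch(ip, cmd):
    """Hypothetical stand-in for ssh_popen_1(ip, cmd)."""
    proc = subprocess.Popen(['ssh', ip, cmd],
                            stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    out, _ = proc.communicate()
    return out.strip()
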
def get_usb_info():
    """
    Get USB drive information
    :return:
    """
    data = []
    k_usb = get_cmd("lsscsi | egrep 'Kingston | SanDisk | My Passport 261B'")
    if not k_usb:
        raise ValueError('no USB device found')
        # return {'code': 1, 'message': 'no matching USB device was found'}
    for item_usb in k_usb.split('\n'):
        k_id = item_usb.split(' ')[0].replace('[', '').split(':')[0]
        if os.path.exists('/proc/scsi/usb-storage/{}'.format(k_id)):
            r = get_cmd("cat /proc/scsi/usb-storage/{}".format(k_id))
            k_to_v = ','.join(r.split('\n')).replace(' ', '')
            d_data = {}
            for i in k_to_v.split(','):
                d_data['id'] = item_usb.split(' ')[0]
                d_data[i.split(':')[0]] = i.split(':')[1]
            data.append(d_data)
        else:
            r = get_cmd("sdparm -i {dev} | grep 'vendor specific: My Passport'"
                        .format(dev=item_usb.split(' ')[-2]))
            try:
                d_data = dict(SerialNumber=r.split(' ')[-2].split(' ')[-1],
                              id=item_usb.split(' ')[0],
                              Vendor=item_usb.split(' ')[-7] +
                              item_usb.split(' ')[-6] +
                              item_usb.split(' ')[-5])
            except Exception as e:
                raise e
            data.append(d_data)
    cmd = "lsscsi | grep '{id}'".format(id=item_usb.split(' ')[0].strip('[]'))
    dev_path = get_cmd(cmd).split(' ')[-2]
    current_app.logger.info("get dev path {}".format(dev_path))
    u_path = get_cmd('mount | grep {}'.format(dev_path))
    model.update_or_create('u_path', u_path.split(' ')[2])
    os.system("chmod 600 {}".format(PathDir.panacube_idrsa()))
    return data

def get_usb():
    """
    Get USB information
    :return:
    """
    model.update_or_create('u_path', '/mnt')
    if request.method == "POST":
        # _ = {
        #     "usb": "",
        #     "message": ""
        # }
        data = request.json
        d = os.popen("which sdparm").read().strip("\n")
        if not d:
            return json.dumps({"code": 1, "message": "the sdparm package is not installed"})
        else:
            if data.get("usb_id", None) and data.get('lsscsi_id'):
                if verify_usb(data.get('lsscsi_id'), data.get('usb_id')):
                    get_usb_mount(data.get('lsscsi_id'))
                    return json.dumps({'code': 0, 'message': 'Successful'})
                return json.dumps({'code': 1, 'message': 'USB drive verification failed'})
            return json.dumps({"code": 1, "message": "invalid parameters"})
    return json.dumps(get_usb_info())

def get_auth_header():
    data = model.get_params('get_auth_header')
    if data:
        return data
    # Local import; AWCloudManage is only needed when no cached header exists
    from app_projects.tools.get_awcloud_img import AWCloudManage
    model.update_or_create('username', 'admin')
    model.update_or_create('password', 'P@ssw0rd')
    manager = AWCloudManage()
    manager._login()
    model.update_or_create('get_auth_header', manager.get_auth_header())
    return GetModel.get_auth_header()

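# `model` is the project's SQLite-backed key/value store, defined elsewhere
# (see db_update_panaocs, which says it writes to SQLite). Below is a minimal
# sketch of the assumed interface, for reference only; the real implementation
# (table layout, serialisation, file path 'params.db') may differ and the class
# name is hypothetical.
import json
import sqlite3


class _KVModelSketch(object):
    """Hypothetical stand-in for model.update_or_create / model.get_params."""

    def __init__(self, path='params.db'):
        self.conn = sqlite3.connect(path)
        self.conn.execute(
            "CREATE TABLE IF NOT EXISTS params (key TEXT PRIMARY KEY, value TEXT)")

    def update_or_create(self, key, value):
        # Store any value as JSON text, inserting or replacing the row
        self.conn.execute(
            "INSERT OR REPLACE INTO params (key, value) VALUES (?, ?)",
            (key, json.dumps(value)))
        self.conn.commit()

    def get_params(self, key):
        row = self.conn.execute(
            "SELECT value FROM params WHERE key = ?", (key,)).fetchone()
        return json.loads(row[0]) if row else None
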
def verify_code_img():
    """
    Get and verify the captcha -> verify connectivity
    :return:
    """
    if request.method == 'POST':
        r_data = request.json
        model.update_or_create('username', r_data.get('username'))
        model.update_or_create('password', base64.b64decode(r_data.get('password')))
        manager = AWCloudManage(r_data.get('code_key'), r_data.get('code'))
        login_data = manager._login()
        if login_data['code'] != 0:
            return json.dumps(login_data)
        model.update_or_create('get_auth_header', manager.get_auth_header())
        return json.dumps({'code': 0})

def create_project(self):
    """Create the project"""
    project_info = {
        "project": {
            "name": "panacube_3.0",
            "nameNe": "panacube_3.0",
            "domainUid": self.headers.get('domain_id'),
            "enabled": True,
            "description": "panacube3.0 project"
        },
        "quotas": [
            {
                "enterpriseUid": self.headers.get('enterprise_id'),
                "type": "project_quota",
                "name": "cores",
                "isShow": True,
                "hardLimit": CPU_QUOTA,
                "domainUid": self.headers.get('domain_id')
            },
            {
                "enterpriseUid": self.headers.get('enterprise_id'),
                "type": "project_quota",
                "name": " ",
                "isShow": True,
                "hardLimit": MEMORY_QUOTA,  # memory quota
                "domainUid": self.headers.get('domain_id')
            },
            {
                "enterpriseUid": self.headers.get('enterprise_id'),
                "type": "project_quota",
                "name": "gigabytes",
                "isShow": True,
                "hardLimit": STORAGE_QUOTA,  # storage quota
                "domainUid": self.headers.get('domain_id')
            }
        ]
    }
    CREATE_PROJECT_URL = (
        "http://{}/awstack-user/v1/enterprises/66666666666666666666666666666666"
        "/projects/check?enterpriseUid=66666666666666666666666666666666"
    ).format(GetModel.awcloud_ip())
    current_app.logger.info('start creating the project')
    try:
        requests.keep_alive = False
        resp = requests.post(CREATE_PROJECT_URL,
                             data=json.dumps(project_info),
                             headers=self.headers,
                             verify=False)
    except Exception as e:
        raise e
    if resp.json()['code'] == "##":
        re = requests.get(
            "http://{}/awstack-user/v1/enterprises/66666666666666666666666666666666/projects"
            .format(GetModel.awcloud_ip()),
            headers=self.headers,
            verify=False)
        for i in re.json()['data']['data']:
            if i['name'] == 'panacube_3.0':
                self.project_id = i['projectUid']
                self.project_name = i['name']
    elif resp.json()['code'] == '00010105':
        # A docker restart invalidates the token; log in again and retry
        manager = AWCloudManage()
        login_data = manager._login()
        if login_data['code'] == 0:
            self.headers = manager.get_auth_header()
            model.update_or_create('get_auth_header', manager.get_auth_header())
            self.create_project()
            time.sleep(2)
    else:
        self.project_id = resp.json()['data']['data']['project']['projectUid']
        self.project_name = resp.json()['data']['data']['project']['name']

def deploy_bigdata():
    """
    Upload the lxc installation package and install the lxc environment
    :d gateway
    :y number of cluster IPs to reserve
    :network_name NIC name
    :netmask subnet mask
    :bigdata_size storage size
    """
    # Total number of CPUs
    count_cpu = GetModel.cpu_count()
    panaocs_data = db_update_panaocs()
    node = GetModel.get_node()
    current_app.logger.info('node information >>> {}'.format(node))
    insert_node = {}
    deploy_path = PathDir.deploy()
    exclude_path = PathDir.exclude_list()
    q.put({"type": "big data", "size": "upload size", "speed": "start",
           "percentage": "{}".format(generate_random(5, 13))})
    _start = 14
    _stop = 17
    for item in node:
        # Build the per-node record to write into the database
        insert_node[item['node-name']] = {
            "cpu_start": int(count_cpu) - int(item['cpu']) + 1,
            "mem": item['mem'],
            "cpu_stop": count_cpu
        }
        set_system_info(item['ip'], item['cpu'], item['mem'])
        q.put({"type": "big data", "size": "upload size", "speed": "start",
               "percentage": "{}".format(generate_random(_start, _start))})
        _start += 4
        _stop += 11
        namenode = ssh_popen_1(item['ip'], "hostname")
        install_path = '/var/deploy/install.py'
        if namenode == "node-1":
            current_app.logger.info('node-1: start moving deploy to /var')
            from app_projects.deploy.get_config import move_file
            move_file()
            # if os.path.exists('/var/deploy'):
            #     shutil.rmtree('/var/deploy')
            #     shutil.copytree(deploy_path, '/var/deploy')
            # else:
            #     shutil.copytree(deploy_path, '/var/deploy')
            shutil.copy(PathDir.install(), '/var/deploy')
            current_app.logger.info('node-1: project copy finished')
            q.put({"type": "big data", "size": "", "speed": "start",
                   "percentage": "{}".format(generate_random(_start, _start))})
            _start += 4
            _stop += 11
        else:
            current_app.logger.info("{node} move deploy to /var".format(node=item['node-name']))
            os.system('rsync -av -e ssh {deploy} --exclude-from={exclude_path} {node_name}:/var/'.format(
                exclude_path=exclude_path,
                deploy='/var/deploy',
                node_name=item['node-name']
            ))
            q.put({"type": "big data", "size": "upload size", "speed": "start",
                   "percentage": "{}".format(generate_random(_start, _start))})
            _start += 4
            _stop += 11
            current_app.logger.info(
                'rsync -av -e ssh {deploy} --exclude-from={exclude_path} {node_name}:/var/'.format(
                    exclude_path=exclude_path,
                    deploy='/var/deploy',
                    node_name=item['node-name']
                )
            )
            current_app.logger.info(item['node-name'] + ' >>> file move finished')
            q.put({"type": "big data", "size": "upload size", "speed": "start",
                   "percentage": "{}".format(generate_random(_start, _start))})
            _start += 4
            _stop += 11
        # install_cmd = 'sh /var/deploy/install.sh -d {namenode} {bigdata_size} {network_name} {ip} {netmask} {d} {y}'
        install_cmd = 'python {py_shell} {size} {network_name} {ip} {netmask} {geteway} {repo_ip} {pack_path}'
        dev_path = None
        if isinstance(item.get('dev_path'), list) and item.get('dev_path'):
            dev_path = ','.join(item.get('dev_path'))
            install_cmd = 'python {py_shell} {size} {network_name} {ip} {netmask} {geteway} {repo_ip} {pack_path} --disk={disk}'
        install_yum_or_create_network = install_cmd.format(
            py_shell=install_path,
            size=item.get('bigdata', None),
            network_name=item.get('network_name'),
            ip=item.get('network_ip'),  # lxc NIC ip
            netmask=item.get('netmask'),
            disk=dev_path,
            geteway=panaocs_data.get('network_segment').replace('*', '1'),
            repo_ip=panaocs_data.get('network_segment').replace('*', str(panaocs_data.get('repo_ip'))),
            pack_path='/var/deploy'
        )
        q.put({"type": "big data", "size": "upload size", "speed": "start",
               "percentage": "{}".format(generate_random(_start, _start))})
        _start += 4
        _stop += 11
        current_app.logger.info('deploy bigdata install params: {}'.format(install_yum_or_create_network))
        ssh_popen_1(item['ip'], "\cp /var/deploy/rsync.pass /usr/local/udsafe/")
        ssh_popen_1(item['ip'], "\cp /var/deploy/lxc-create-bigdata.py /usr/local/udsafe/")
        if namenode == "node-1":
            os.system(install_yum_or_create_network)
        else:
            os.system(ssh_node.format(item['ip'], install_yum_or_create_network))
        q.put({"type": "big data", "speed": "start",
               "percentage": "{}".format(generate_random(_start, _start))})
        _start += 4
        _stop += 11
        r = ssh_popen_1(item['ip'], "lxc-ls")
        if r == "repo":
            current_app.logger.info("""
            ——————————————————————————————————————————————————
            |              data already exists               |
            ——————————————————————————————————————————————————
            """)
    current_app.logger.info('node data to be written to the database >>> {}'.format(insert_node))
    model.update_or_create('node', insert_node)
    current_app.logger.info('update node information')
    q.put({"type": "big data", "size": "upload size", "speed": "start",
           "percentage": "{}".format(generate_random(_start, _start))})
    from app_projects.deploy.get_config import copy_template
    copy_template()
    push_bigdata_db()

def update_storage_image():
    """
    Update the intelligent storage image
    :return:
    """
    q.put({
        "type": "image upload",
        "size": "upload size",
        "speed": "conducting",
        "percentage": "{}".format(generate_random(1, 6))
    })
    if GetModel.deploy_type() == 'kvm':
        awcloud = ClearAwCloud(GetModel.get_auth_header())
        q.put({
            "type": "image upload",
            "size": "upload size",
            "speed": "conducting",
            "percentage": "{}".format(generate_random(17, 36))
        })
        image_obj = awcloud.upload_image(PathDir.image_path())
        print 'uploaded image info: %s' % image_obj
        image_id = image_obj['id']
        q.put({
            "type": "image upload",
            "size": "upload size",
            "speed": "conducting",
            "percentage": "{}".format(generate_random(38, 52))
        })
        model.update_or_create('storage_img_id', image_id)
        q.put({
            "type": "image upload",
            "size": "upload size",
            "speed": "conducting",
            "percentage": "{}".format(generate_random(57, 78))
        })
        f = l = 79
        while True:
            resp = awcloud.update_image_info(image_id)
            f += 4
            l += 10
            if l >= 90:
                q.put({
                    "type": "image upload",
                    "size": "upload size",
                    "speed": "conducting",
                    "percentage": "{}".format(generate_random(f, l))
                })
            else:
                f -= 2
                l -= 3
                q.put({
                    "type": "image upload",
                    "size": "upload size",
                    "speed": "conducting",
                    "percentage": "{}".format(generate_random(f, l))
                })
            if resp.get('status') != 415:
                if resp['code'] == '0':
                    current_app.logger.info('update storage image property successful')
                else:
                    current_app.logger.info('update storage image property fail')
                break
            else:
                time.sleep(2)
        os.system("sh {set_tag} {openstack} {id}".format(
            set_tag=PathDir.tag_shell_path(),
            openstack=PathDir.openstack(),
            id=image_id))
    else:
        awcloud = ClearAwCloud(GetModel.get_auth_header())
        q.put({
            "type": "image upload",
            "size": "upload size",
            "speed": "conducting",
            "percentage": "23"
        })
        model.update_or_create('awcloud_ip', GetModel.awcloud_ip())
        q.put({
            "type": "image upload",
            "size": "upload size",
            "speed": "conducting",
            "percentage": "48"
        })
        image_id = awcloud.upload_image(PathDir.image_path()).get('id')
        q.put({
            "type": "image upload",
            "size": "upload size",
            "speed": "conducting",
            "percentage": "{}".format(generate_random(56, 88))
        })
        model.update_or_create('storage_img_id', image_id)
        while True:
            resp = awcloud.update_image_info(image_id)
            if resp.get('status') != 415:
                if resp['code'] == '0':
                    current_app.logger.info('update storage image property successful')
                else:
                    current_app.logger.info('update storage image property fail')
                break
            else:
                time.sleep(2)
        os.system("sh {set_tag} {openstack} {id}".format(
            set_tag=PathDir.tag_shell_path(),
            openstack=PathDir.openstack(),
            id=image_id))
        from app_projects.deploy.get_config import move_storage_code
        storage_code_name = move_storage_code()
        node_list = os.popen("consul members | awk '{ print $1 }'").read().split('\n')[1:-1]
        for item in node_list:
            os.system("scp -r /var/deploy/%s %s:/usr/local/udsafe/%s" % (
                storage_code_name, item, storage_code_name))
    q.put({
        "type": "image upload",
        "size": "upload size",
        "speed": "conducting",
        "percentage": "{}".format(generate_random(89, 96))
    })