def delete_tenant(self, tenant_id, internal=False):
    """Delete a tenant, its quotas, and its ecloud registration.

    When ``internal`` is True only the local region's quota records are
    removed (this is the form invoked region-to-region); otherwise every
    other region is asked to delete its side first, then the tenant itself
    is removed locally.

    Fix: the multi-region branch read ``q.get("quota_used")`` without the
    ``if q`` None-guard that the internal branch uses, so a missing quota
    record raised AttributeError instead of being skipped.

    :param tenant_id: id of the tenant to delete.
    :param internal: True when called by another region, not an end user.
    :raises TenantNotExist: tenant id is unknown.
    :raises TenantDeleteFailed: a region still has used quota, or cleanup failed.
    """
    tenant = yield identify.get_tenant_by_id(tenant_id)
    if not tenant:
        raise TenantNotExist
    if internal:
        try:
            # Refuse to delete while any quota is still in use locally.
            for name in identify.get_quota_names():
                q = yield identify.get_quota(tenant_id, name)
                if q and q.get("quota_used") != 0:
                    region = yield rg.list_region(CONF.keystone.region_name)
                    raise TenantDeleteFailed(region[0].get("displayname"))
            yield identify.delete_quotas(tenant_id)
            yield delete_tnr_from_ecloud(tenant_id)
        except Exception as e:
            LOG.error("delete tenant error:%s" % e)
            LOG.error(trace())
            raise e
    else:
        regions = yield rg.list_region()
        for region in regions:
            if region["region"] == CONF.keystone.region_name:
                # Local region: check quota usage here, no remote call needed.
                for name in identify.get_quota_names():
                    q = yield identify.get_quota(tenant_id, name)
                    # Guard against a missing quota record (was unguarded).
                    if q and q.get("quota_used") != 0:
                        raise TenantDeleteFailed()
                continue
            # Ask the remote region to clean up its side of the tenant.
            servers_url = "%s/tenant/%s?internal=true" % \
                (region['url'], tenant_id)
            try:
                res = yield async_request(url=servers_url,
                                          token=get_token(self.request),
                                          body=None,
                                          method="DELETE")
                if res and res.get("success") is False and res.get(
                        "msg") == TenantDeleteFailed.msg:
                    raise TenantDeleteFailed
            except Exception as e:
                LOG.error(
                    "delete tenant quota from another region error:%s" % e)
                LOG.error(trace())
                if e.message == TenantDeleteFailed.msg:
                    raise TenantDeleteFailed(args=[region['displayname']])
                raise rg.RegionException(args=[region['displayname']])
        try:
            yield identify.delete_quotas(tenant_id)
            yield delete_tnr_from_ecloud(tenant_id)
            yield identify.delete_tenant(tenant_id)
        except Exception as e:
            LOG.error("delete tenant error:%s" % e)
            LOG.error(trace())
            region = yield rg.list_region(CONF.keystone.region_name)
            raise TenantDeleteFailed(region[0].get("displayname"))
    optLog.write(self.request, Type.TENANT, tenant['name'],
                 Operator.DELETE, '')
    self.response(Response())
def post(self, chunk):
    """Dispatch a manor POST request by the action encoded in the path."""
    try:
        keys = self.get_key_params(self.request.path)
        action = keys[0]
        if action == 'create_instance':
            seq = yield sequence.number_seq('CL', '')
            rs = template.create_instance(self.request, keys[1], keys[2],
                                          'CL-%s' % seq)
            self.write(json.dumps(rs))
        elif action == 'delete_instance':
            app_service.do_delete_app(keys[1])
            self.write('{"result":"ok"}')
        elif action == 'delete_stack':
            stack_util.delete_stack(keys[1])
            self.write('{"result":"ok"}')
        elif action == 'send_message':
            self.log.debug('send_message : \n %s' % self.request.body)
            msg = json.loads(self.request.body)
            send_message(self.log, msg)
            self.write('{"result":"ok"}')
        elif action == 'create_vm':
            tmp = json.loads(self.request.body)
            rs = yield _create(tmp)
            self.log.debug(rs)
            self.write(json.dumps(rs))
    except:
        # Any failure is reported uniformly to the client as an error result.
        self.log.error(generals.trace())
        self.write('{"result":"error"}')
def execute(self):
    """Collect the target nodes of every stack and send the script to each.

    Guarded so repeated invocations are no-ops once the step has run.
    """
    try:
        if self.executed:
            return
        self.executed = True
        self.log.debug('params:')
        self.log.debug(pyaml.dumps(self.params))
        script_content = self.params['execute_script_content']
        script_params = self.params['script_params']
        if 'info_token' in self.params:
            self.info_token = self.params['info_token']
        rows = yield self.get_stack_id_list()
        self.log.debug('stack id: %s' % str(rows))
        for stack_id in [row['stack_id'] for row in rows]:
            resources = yield get_stack_resources(stack_id)
            self.command_params = self.command_params + resources
            stack_roles = yield get_roles(stack_id)
            self.roles = self.roles + stack_roles
        # Narrow the target set according to the script parameters.
        self.filter_by_group(script_params)
        self.filter_by_ip(script_params)
        self.log.debug(self.command_params)
        self.ips = [target['ip'] for target in self.command_params]
        for command_p in self.command_params:
            self._send_msg(command_p, script_content, script_params)
    except:
        self.log.error(generals.trace())
def execute_action(self):
    """Run the stream from each of its origin nodes (there may be several)."""
    action_tuple = self.parser.get_streams(self.action_name)
    origin = self.parser.get_origin_streamlet(action_tuple)
    action_type = self.parser.get_action_type(action_tuple)
    self.log.debug('action type : %s' % action_type)
    redis_conn = redis_tool.get_it()
    try:
        self.log.debug(len(origin))
        for node_id in origin:
            yield self.execute_multi_streamlet(node_id, action_tuple)
        if action_type == 'deploy':
            self.update_status('normal')
    except Exception as e:
        self.log.error('stream execute error :\n %s' % generals.trace())
        self.interrupt = True
        if action_type == 'deploy':
            self.update_status('failure')
        # Redis is used for cross-process consistency: monitor threads run in
        # several processes and only report to their own process, so the error
        # must be published somewhere every monitor can read it.
        redis_conn.set("manage_error_$_%s" % self.serial, json.dumps({
            'action': self.action_name,
            'msg': str(e)
        }))
        raise e
def update_app(self, app_serial, body):
    """Rename an app instance and propagate the new name to its VMs.

    Fix: the UPDATE statement was assembled with %-string interpolation of
    the user-supplied name/description, making it SQL-injectable; it now
    uses the parameterized form of execute() (same calling convention as
    create_action uses for its INSERT).

    :param app_serial: serial of the app instance being updated.
    :param body: request body dict with 'name' and 'description'.
    :raises Exception: re-raised after logging on any failure.
    """
    try:
        app_name = body['name']
        description = body['description']
        execute(
            ("UPDATE manor.manor_app_instance "
             "SET app_name=%s,app_description=%s "
             "WHERE app_serial=%s"),
            (app_name, description, app_serial))
        vms = yield list_app_resources(app_serial)
        for vm in vms:
            yield update_vm_display_name(vm['vm_id'], app_name)
        # NOTE(review): this query still interpolates app_serial; it comes
        # from the URL path rather than the body, but should also be
        # parameterized if DBUtil().query supports it — confirm.
        rs = yield DBUtil().query(
            ("select * from manor.manor_app_instance "
             "where app_serial='%s'") % app_serial)
        seq = rs[0]['app_id']
        optLog.write(self.request, optLog.Type.APP_INSTANCE, seq,
                     Operator.UPDATE, '%s' % app_name)
        self.response(generals.gen_response({'result': 'ok'}))
    except Exception as detail:
        logging.getLogger('manor').error(generals.trace())
        raise detail
def get_state_count():
    """Count app instances in normal vs failure state.

    Apps marked 'normal' in the DB are re-checked live over the status
    websocket (waiting out transient 'working' states); everything else
    counts as failure.

    Fix: a leftover Python-2 ``print app`` debug statement wrote raw DB rows
    to stdout; it is now routed through the 'manor' logger like the rest of
    this module.

    :returns: (via gen.Return) dict {'normal': int, 'failure': int}.
    """
    url = 'wss://%s:8443/manor/socket/app/status' % generals.get_ip()
    request = HTTPRequest(url=url, validate_cert=False)
    conn = yield websocket_connect(request)
    try:
        normal = 0
        failure = 0
        apps = yield DBUtil().query("SELECT * FROM manor.manor_app_instance")
        for app in apps:
            if app['state'] == 'normal':
                logging.getLogger('manor').debug(app)
                conn.write_message(json.dumps({
                    "app_serial": app['app_serial']
                }))
                msg = yield conn.read_message()
                status = json.loads(msg)['status']
                # Wait out transitional states before classifying the app.
                while status == 'working':
                    yield gen.sleep(1)
                    msg = yield conn.read_message()
                    status = json.loads(msg)['status']
                if status in ['normal', 'part', 'offline']:
                    normal += 1
                else:
                    failure += 1
            else:
                failure += 1
        conn.close()
        logging.getLogger('manor').debug({'normal': normal,
                                          'failure': failure})
    except:
        # Best-effort: log, close the socket, and fall through to return
        # whatever counts were accumulated so far.
        logging.getLogger('manor').error(generals.trace())
        conn.close()
    raise gen.Return({'normal': normal, 'failure': failure})
def check_finish(self):
    """Report whether node creation has completed.

    NOTE: this runs inside a polling thread, once per second.
    """
    try:
        self.log.debug('create_nodes step. check finish. stack_id %s'
                       % self.stack_id)
        if self.stack_id is None:
            return False
        if self.stack_status == CREATING_FLAG:
            # Stack still building: refresh status and keep waiting.
            self.log.debug('the stack stack_status is %s' % self.stack_status)
            self.get_stack_status()
            return False
        if self.stack_status != SUCCESS_FLAG:
            self.get_stack_status()
            return False
        # Stack reached success: lazily resolve the created resources/ips.
        if len(self.created_resources) == 0:
            self.calculate_created_resources()
        if len(self.ips) == 0:
            self.ips = [res['ip'] for res in self.created_resources]
        checked = [ip for ip in self.ips if ip in self.__get_road_map()]
        self.log.debug('%s - %s' % (self.ips, checked))
        return len(self.ips) > 0 and self.ips == checked
    except:
        self.log.error(generals.trace())
        raise Exception('error.manor.stream.check.create.node.finish')
def decrypt(self, text):
    """AES-decrypt a hex-encoded string and strip trailing NUL padding."""
    try:
        cipher = AES.new(self.key, self.mode, self.iv)
        decoded = cipher.decrypt(a2b_hex(text))
        return decoded.rstrip('\0')
    except Exception as err:
        LOG.error("decrypt error:%s" % err)
        LOG.error(trace())
        raise err
def __get_secret():
    """Read and return the raw license file content."""
    try:
        with open(LICENSE_PATH, "r") as fp:
            return fp.read()
    except Exception as err:
        LOG.error("__get_secret error:%s" % err)
        LOG.error(trace())
        raise err
def _d(sql, second_p=None):
    """Run *fn* with a fresh connection; best-effort (errors are logged,
    never raised) and the connection is always closed."""
    connection = None
    try:
        connection = get_connection()
        fn(sql, second_p, _con=connection)
    except:
        logging.getLogger('manor').error(generals.trace())
    finally:
        if connection:
            connection.close()
def get_hardinfo(self, export):
    """Decrypt an exported license blob and evaluate it into an object."""
    try:
        decrypted = self.decrypt(export)
        # SECURITY NOTE(review): eval() on decrypted license content will
        # execute arbitrary expressions; consider ast.literal_eval if the
        # payload is always a plain literal — confirm before changing.
        info = eval(decrypted)
        return info
    except Exception as err:
        LOG.error("get_hardinfo error: %s" % err)
        LOG.error(trace())
        raise LicenseAnalysisError
def message_sending(self):
    """Monitor loop: pushes app-status messages to every registered client.

    Runs in its own thread; polls twice per second.  Exits (and removes this
    monitor from the registry) once status becomes 'interrupt'.
    """
    while True:
        time.sleep(0.5)
        if self.msg != self.current_msg:
            # A new message was produced: enrich it with the current
            # resources and broadcast to all clients watching this serial.
            self.msg = self.current_msg
            send_msg = json.loads(self.msg)
            info = list_app_resources(self.serial)
            # Busy-wait for the future; presumably resolves quickly — TODO
            # confirm this cannot spin indefinitely.
            while not info.done():
                pass
            send_msg['info'] = info.result()
            send_msg = json.dumps(send_msg)
            clients = []
            for c in _clients.values():
                if self.serial in set(c['serial']):
                    c['client'].write_message(send_msg)
                    clients.append(1)
        # No client is watching this serial any more -> stop the monitor.
        if len([
            _ for _ in _clients.values() if self.serial in _['serial']
        ]) == 0:
            self.status = 'interrupt'
        # NOTE: the position of this check matters — it must run after the
        # client scan above and before the state handling below.
        if self.status == 'interrupt':
            del _monitors[self.serial]
            break
        if self.status == 'building':
            def do_result(rows):
                if len(rows) > 0:
                    self.status = rows[0]['state']
                    self.log.debug('check state from db : %s' % self.status)
            execute_query(("SELECT * FROM manor.manor_app_instance "
                           "where app_serial='%s'") % self.serial, do_result)
        if self.status == 'failure':
            try:
                # Pull the error recorded by the executing process (see
                # execute_action) and forward it to the clients.
                r = redis_tool.get_it()
                error = r.get('manage_error_$_%s' % self.serial)
                if error is None:
                    error = '""'
                error = json.loads(error)
                self.current_msg = '{"serial":"%s","status":"failure","msg":[],"error":%s}' % (
                    self.serial, json.dumps(error))
                thread.start_new(self.delete_error_msg, (r, ))
            except:
                self.log.error(generals.trace())
                self.status = 'interrupt'
        if self.status == 'thread_error':
            self.current_msg = '{"serial":"%s","status":"error","msg":[]}' % self.serial
            self.status = 'interrupt'
def execute(self):
    """Assemble the node-creation data module and launch the stack."""
    if self.executed:
        return
    self.executed = True
    # todo: check input parameters...
    self.log.debug('params:')
    self.log.debug(self.params)
    data_module = {
        'name': 'create node',
        'resources': {},
        'group_name': self.get_resource('group_name')
    }
    self.log.debug('calculate data module ..')
    try:
        if self.get_resource('group_name') == '':
            raise Exception('group name is empty.')
        # Enforce the optional per-group instance ceiling.
        if self.get_resource('max') != '':
            ceiling = int(self.get_resource('max'))
            group_name = self.get_resource('group_name')
            existing = yield list_app_resources(self.serial)
            existing = [row for row in existing
                        if row['group_name'] == group_name]
            if len(existing) >= ceiling:
                raise Exception('manor.create.node.upper.limited')
        os_name = yield download_path(self.get_resource('image'))
        data_module['resources'][self.get_resource('group_name')] = {
            "count": self.get_resource('amount'),
            "group_name": self.get_resource('group_name'),
            "image": self.get_resource('image'),
            'flavor': self.get_resource('flavors'),
            "memory": self.get_resource('memory'),
            "cores": self.get_resource('cores'),
            'tenant': self.get_resource('tenant'),
            'size': self.get_resource('disk_capacity'),
            "os": os_name,
            "network": [
                {
                    "network": self.get_resource('network'),
                    "subnet": self.get_resource('subnet')
                }
            ]
        }
        self.log.debug(data_module)
        self.stack_id = yield stack_util.create_action(data_module,
                                                       self.serial)
    except Exception as e:
        self.log.error(generals.trace())
        raise e
def get_pri_key(self, pri_key):
    """Normalize a private key to exactly 16 characters.

    Shorter keys are right-padded with NULs; longer keys are truncated.
    """
    try:
        if len(pri_key) > 16:
            return pri_key[:16]
        return pri_key + "\0" * (16 - len(pri_key))
    except Exception as err:
        LOG.error("get_pri_key error:%s" % err)
        LOG.error(trace())
        raise err
def __check_license_type(private_key, export):
    """Decrypt the license with *private_key* and validate its structure."""
    try:
        context = License(private_key).get_hardinfo(export)
        # Reject license payloads that lack the required fields.
        if check_license_context(context) is False:
            raise LicenseNotStandard
    except Exception as err:
        LOG.error("__check_license_type error:%s" % err)
        LOG.error(trace())
        raise err
def execute_multi_streamlet(self, node_id, action_tuple):
    """Execute one stream node after making sure its predecessors finished.

    Notes (translated from the original):
    *: Ensure all instances of the previous step exist before the next step
       runs, otherwise fetching the previous step's output fails.
    *: Steps execute asynchronously, so a step must never be invoked twice.
       The recursive traversal will visit nodes repeatedly by design, so
       duplicate execution has to be guarded explicitly via the 'executed'
       status flag.
    *: The algorithm requires each predecessor to fully finish before its
       successors start.

    :param node_id: node id (node names and ids are identical).
    :param action_tuple: intermediate structure holding the stream data.
    """
    try:
        self.log.debug('prepare to EXECUTE streamlet : %s' % node_id)
        p_s_list = self.parser.get_previous_streamlet(
            node_id, action_tuple)
        for p_node_name in p_s_list:
            if p_node_name != 'start':
                self.log.debug('check previous node : %s' % p_node_name)
                params = self.parser.get_streamlet_params(
                    p_node_name, action_tuple)
                p_s = self.get_instance(params, p_node_name, self.serial)
                if p_s['status'] != 'executed':
                    # Mark executed BEFORE running to avoid double starts.
                    p_s["status"] = 'executed'
                    yield self.run_streamlet(p_s["instance"])
                self.log.debug('waiting previous node : %s' % p_node_name)
                yield self.check_finish_streamlet(p_s['instance'])
                self.log.debug('previous step %s finished ' % p_node_name)
        params = self.parser.get_streamlet_params(node_id, action_tuple)
        self.log.debug('get streamlet instance ..')
        streamlet = self.get_instance(params, node_id, self.serial)
        self.log.debug('streamlet is %s' % streamlet)
        # Prevent a node that already ran from running again.
        if streamlet["status"] != 'executed':
            # Set the flag before executing to avoid concurrent re-entry.
            streamlet["status"] = 'executed'
            yield self.run_streamlet(streamlet["instance"])
        # The last step must also fully finish before the stream ends.
        if self.parser.is_last(node_id, action_tuple):
            yield self.check_finish_streamlet(streamlet["instance"])
            # self.update_status('normal')
        next_nodes = self.parser.get_next_streamlet(node_id, action_tuple)
        for next_node in next_nodes:
            self.log.debug('NEXT streamlet (node_id) is %s' % next_node)
            yield self.execute_multi_streamlet(next_node, action_tuple)
    except Exception as e:
        self.log.error(generals.trace())
        raise e
def get_pub_key(self):
    """Derive a 32-character-aligned public key from the machine's hostid.

    Fixes:
    - Previously returned None silently when the ``hostid`` command failed
      (no else-branch); now raises GetHostIdError, consistent with
      hostid_get().
    - Padding used ``32 - len % 32`` unconditionally, which appended a full
      extra 32 NULs when the length was already a multiple of 32.

    :returns: hostid right-padded with NULs to a multiple of 32 chars.
    :raises GetHostIdError: the hostid command exited non-zero.
    """
    try:
        status, hostid = commands.getstatusoutput("hostid")
        if status != 0:
            raise GetHostIdError
        pad = (32 - len(hostid) % 32) % 32
        return hostid + pad * "\0"
    except Exception as e:
        LOG.error("get_pub_key error:%s" % e)
        LOG.error(trace())
        raise e
def license_exists():
    """Return True if the license file is present on disk.

    :return: bool
    """
    try:
        return os.path.exists(LICENSE_PATH)
    except Exception as err:
        LOG.error("check license exists error:%s" % err)
        LOG.error(trace())
        raise err
def list_app_template_detail(self):
    """Load every app template file under the configured template path and
    respond with the collected list (empty list on failure)."""
    templates = []
    try:
        app_path = cfgutils.getval('app', 'template_path')
        self.log.debug('get templates path: %s' % app_path)
        for name in os.listdir(app_path):
            # Skip directories and the bare '.yaml' entry.
            if not os.path.isdir(app_path + '/' + name) and name != '.yaml':
                templates.append(load_template(app_path, name))
        self.log.debug('get templates: %s' % json.dumps(templates))
        templates = generals.gen_response(templates)
    except:
        self.log.error(generals.trace())
    self.response(templates)
def create_action(tmp, serial):
    """Create a stack from the data module *tmp* and record its mapping.

    :returns: (via gen.Return) the new stack id.
    """
    try:
        log_manor().debug(tmp)
        grp = tmp['group_name']
        stack_id = yield create_stack(tmp['resources'][grp], serial, grp)
        execute(
            ("INSERT INTO manor.manor_stacks (stack_id,app_serial,group_name)"
             " VALUES (%s,%s,%s)"),
            (stack_id, serial, grp))
    except Exception as err:
        log_manor().error(generals.trace())
        raise err
    raise gen.Return(stack_id)
def check_finish(self):
    """Return True once every expected ip appears in the road map.

    Waits an extra 10 seconds after the last node reports in, giving the
    agents time to settle before the step is declared finished.
    """
    try:
        checked = [ip for ip in self.ips if ip in self.__get_road_map()]
        self.log.debug('check finish: %s - %s' % (checked, self.ips))
        if len(self.ips) > 0 and self.ips == checked:
            for remaining in range(10):
                self.log.debug('finish count down:%s' % remaining)
                time.sleep(1)
            self.log.debug('finished ...')
            return True
        return False
    except:
        self.log.error(generals.trace())
        return False
def run_streamlet(self, instance):
    """Start a streamlet and block until its future resolves, re-raising
    any error the streamlet produced."""
    self.log.debug('execute streamlet %s start ...' % instance.node_id)
    future = instance.execute()
    self.log.debug(future)
    # Poll once per second until the streamlet's future completes.
    while not future.done():
        self.log.debug(future.done())
        time.sleep(1)
    self.log.debug(future.done())
    try:
        future.result()
    except Exception as err:
        self.log.error(generals.trace())
        raise err
def on_message(self, message):
    """Register this client for an app serial and push the current status."""
    try:
        self.log.debug(_monitors)
        payload = json.loads(message)
        self.log.debug('receive message:%s' % payload)
        app_serial = payload['app_serial']
        _clients[self.id]['serial'].append(app_serial)
        self.log.debug(app_serial)
        self.log.debug(_monitors.keys())
        # Lazily start a monitor the first time a serial is watched.
        if app_serial not in _monitors.keys():
            _monitors[app_serial] = Monitor(app_serial)
        self.write_message(_monitors[app_serial].msg)
    except:
        self.log.error(generals.trace())
        self.write_message('error')
def get_instance(self, params, node_id, serial):
    """Lazily build and cache the streamlet instance for *node_id*.

    :returns: dict {"instance": streamlet, "status": current status}.
    """
    self.log.debug('step %s instances %s' % (node_id,
                                             self._streamlet_instance))
    try:
        if node_id not in self._streamlet_instance:
            # Node ids are '<module>$<suffix>'; the prefix names the module.
            dotted_name = 'manor.streamlet.' + node_id.split('$')[0]
            self.log.debug('import %s' % dotted_name)
            streamlet_module = importlib.import_module(dotted_name)
            self.log.debug('load module %s' % streamlet_module)
            self._streamlet_instance[node_id] = {
                "instance": streamlet_module.get_instance(params, node_id,
                                                          serial),
                "status": 'init'
            }
    except Exception as err:
        self.log.error(generals.trace())
        raise err
    return self._streamlet_instance[node_id]
def pri_key_get():
    """Return the stored private key, or None when absent or empty.

    :raises LicenseAnalysisError: on any read failure.
    """
    try:
        if not os.path.exists(PRIVATE_KEY_PATH):
            return None
        with open(PRIVATE_KEY_PATH, 'r') as fileopt:
            pri_key = fileopt.read()
        return pri_key if pri_key else None
    except Exception as err:
        LOG.error("get license pri key error:%s" % err)
        LOG.error(trace())
        raise LicenseAnalysisError
def check_finish_streamlet(self, streamlet):
    """Poll *streamlet* once per second until it finishes, the stream is
    interrupted, or the configured timeout elapses.

    :raises Exception: 'manor.error.execute.time.out' on timeout.
    """
    self.log.debug('check streamlet state .. : %s ' % streamlet.node_id)
    try:
        elapsed = 0
        while True:
            elapsed += 1
            self.log.debug('time out: %s ...' % elapsed)
            if self.interrupt:
                break
            if elapsed >= int(C.TIME_OUT):
                self.log.error('streamlet running timeout.')
                raise Exception('manor.error.execute.time.out')
            if streamlet.check_finish():
                self.log.info('streamlet %s finished!' % streamlet.node_id)
                break
            time.sleep(1)
    except Exception as err:
        self.log.error(generals.trace())
        raise err
def hostid_get():
    """Return the machine identifier reported by the ``hostid`` command.

    :raises GetHostIdError: the command failed or anything else went wrong.
    """
    try:
        LOG.debug(
            "==========================enter hostid_get============================="
        )
        status, hostid = commands.getstatusoutput("hostid")
        LOG.debug("status=%s, output=%s", status, hostid)
        if status != 0:
            raise GetHostIdError
        return hostid
    except Exception as err:
        LOG.error("get hostid error:%s" % err)
        LOG.error(trace())
        raise GetHostIdError
def pri_key_update(pri_key):
    """Persist *pri_key* to PRIVATE_KEY_PATH, replacing any previous key.

    Fixes: removed a commented-out duplicate of the ``touch`` call and the
    ``touch`` subprocess itself — ``open(..., 'wb')`` already creates the
    file when it is missing, so the shell-out was redundant.

    :param pri_key: key material to store; must be non-empty.
    :raises PrivateKeyNotExists: *pri_key* is empty.
    """
    if len(pri_key) == 0:
        raise PrivateKeyNotExists
    try:
        with open(PRIVATE_KEY_PATH, 'wb') as fileopt:
            fileopt.write(pri_key)
    except Exception as e:
        LOG.error(e)
        LOG.error(trace())
        raise e
def check_finish(self):
    """Check whether the restart-style step has finished.

    First waits until every target node has left the ACTIVE state, then
    issues start commands; afterwards waits for all ips to appear in the
    road map, with a 10-second settle delay before declaring success.

    Fix: the debug message used ``'{0:s}'.format(list)`` — an ``s`` format
    spec applied to a list, which raises TypeError on Python 3 and is
    deprecated on Python 2.  Replaced with plain ``%s`` formatting, matching
    the sibling check_finish implementations.
    """
    try:
        # NOTE(review): `t` is never read afterwards; the loop is kept only
        # because get_resources_info() refreshes self.stack_resources —
        # confirm before removing.
        t = []
        for stack_id in self.stack_ids:
            self.get_resources_info(stack_id)
            t = t + [_['status'] for _ in self.stack_resources]
        # First verify that all target machines have shut down.
        if not self.enter_check_finish:
            tt = []
            for stack_id in self.stack_ids:
                self.get_resources_info(stack_id)
                tt = tt + [_['status'] for _ in self.stack_resources]
            self.log.debug('check nodes shut down : %s' % tt)
            if len(self.command_params) > 0 and len(
                    self.command_params) == len(
                        [_ for _ in tt if _ != 'ACTIVE']):
                self.enter_check_finish = True
                for command in self.command_params:
                    self.log.debug('send start command to %s' % command['ip'])
                    compute_util.start_server(command['resource_id'])
            else:
                return False
        checked = [_ for _ in self.ips if _ in self.__get_road_map()]
        self.log.debug('check finish: %s - %s' % (checked, self.ips))
        if self.ips == checked:
            for x in range(10):
                self.log.debug('finish count down:%s' % x)
                time.sleep(1)
            self.log.debug('finished ...')
            return True
        return False
    except:
        self.log.error(generals.trace())
        return False
def license_get():
    """Read, decrypt and summarize the installed license.

    :returns: one-element list with hostid, start/end dates and node count.
    :raises PrivateKeyNotExists: no private key is stored.
    :raises LicenseNotExists: no license file is installed.
    :raises LicenseAnalysisError: any other decode/analysis failure.
    """
    try:
        LOG.debug(
            "============================enter license_get=================================="
        )
        if not license_exists():
            raise LicenseNotExists
        pri_key = pri_key_get()
        LOG.debug("pri_key=%s", pri_key)
        if not pri_key:
            raise PrivateKeyNotExists
        license_obj = License(pri_key)
        export = __get_secret()
        LOG.debug("export=%s", export)
        context = license_obj.get_hardinfo(export)
        LOG.debug("context=%s", context)
        out_put = {
            "hostid": context.get("hostid"),
            "startdate": __strdate2int(str(context.get("startdate"))),
            "enddate": __strdate2int(str(context.get("enddate"))),
            "nodes": context.get("nodes"),
        }
        LOG.debug("out_put=%s", out_put)
        return [out_put]
    except Exception as err:
        # Re-raise the two "expected" conditions unchanged; everything else
        # is logged and reported as an analysis failure.
        if err.message == PrivateKeyNotExists.msg:
            raise PrivateKeyNotExists
        if err.message == LicenseNotExists.msg:
            raise LicenseNotExists
        LOG.error("get license error:%s" % err)
        LOG.error(trace())
        raise LicenseAnalysisError