def floating_ip_unbind(self, floatingip):
    try:
        # get the floating ip uuid
        floatingip_uuid = self.db.db_get_floatingip_uuid(floatingip)
        if len(floatingip_uuid[0]) == 0:
            return request_result(1058)
        else:
            floatingip_uuid = floatingip_uuid[0][0]

        # check whether the floating ip needs to be unbound
        db_check = self.db.db_check_floatingip_bind(floatingip_uuid)
        if db_check[0][0] == 0:
            log.info('no unbind needed')
            return request_result(0)

        # get the vm_uuid the floating ip is bound to
        db_message = self.db.db_get_floatingip_addr(floatingip_uuid)
        floatingip_addr = db_message[0][0]
        vm_uuid = db_message[0][1]
        fixed_ip_address = db_message[0][2]
        if fixed_ip_address == 'None':
            fixed_ip_address = None
    except Exception, e:
        log.error('check whether the floating ip is unbound (db) error, '
                  'reason is: %s' % e)
        return request_result(403)
def create_apps(self, token, con, context, cost):
    log.info('the inner recover data is: %s' % context)
    try:
        to_kuber = self.elements_explain(context)
    except Exception, e:
        log.error('use the elements_explain error, reason is: %s' % e)
        return request_result(404)
def osdisk_create(self, context, parameters):
    try:
        token = context['token']
        source_ip = context.get('source_ip')
        user_info = token_auth(context['token'])['result']
        user_uuid = user_info.get('user_uuid')
        team_uuid = user_info.get('team_uuid')
        project_uuid = user_info.get('project_uuid')
        log.info('the token is: %s, source_ip is: %s, user_uuid is: %s, '
                 'team_uuid is: %s, project_uuid is: %s'
                 % (token, source_ip, user_uuid, team_uuid, project_uuid))

        name = parameters.get('name')
        description = parameters.get('description')
        volume_uuid = parameters.get('volume_uuid')
        v_type = 'system'
        size = parameters.get('size')
        conn_to = parameters.get('conn_to')
        image_uuid = parameters.get('image_uuid')
        if name is None:
            name = volume_uuid

        parameter_check(volume_uuid, exist='yes')
        parameter_check(conn_to, exist='yes')
        parameter_check(image_uuid, exist='yes')
    except Exception, e:
        log.error('parameters error, reason is: %s' % e)
        return request_result(101)
def osdisk_delete(self, context, volume_uuid):
    log.info('osdisk delete, the context is: %s' % context)
    try:
        parameter_check(volume_uuid, exist='yes')
    except Exception, e:
        log.error('parameters error, reason is: %s' % e)
        return request_result(101)
def update_image_id(self, image_id, service_name, project_uuid):
    sql = "update replicationcontrollers SET image_id='%s' " \
          "WHERE uuid=(SELECT rc_uuid from font_service " \
          "WHERE service_name='%s' AND project_uuid='%s')" \
          % (image_id, service_name, project_uuid)
    log.info('update the database sql is: %s' % sql)
    return super(ServiceDB, self).exec_update_sql(sql)
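# Sketch only (assumes a pymysql connection; the project's exec_update_sql
# helper is not shown here): the same UPDATE expressed with driver-side
# parameters instead of string interpolation, which avoids quoting/escaping
# problems in image_id, service_name and project_uuid. This is an
# illustrative alternative, not part of the original module.
import pymysql


def update_image_id_param(conn, image_id, service_name, project_uuid):
    sql = ("update replicationcontrollers SET image_id=%s "
           "WHERE uuid=(SELECT rc_uuid from font_service "
           "WHERE service_name=%s AND project_uuid=%s)")
    with conn.cursor() as cur:
        # the driver performs the quoting/escaping of the three values
        cur.execute(sql, (image_id, service_name, project_uuid))
    conn.commit()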
def output_network(self):
    result = []
    network = []
    for host in self.device_ip:
        try:
            device_list = self.get_devices(host)
            inside = self.get_date(host, 'IF-MIB::ifInOctets')
            outside = self.get_date(host, 'IF-MIB::ifOutOctets')
        except Exception, e:
            log.error('get the device network monitor message error, '
                      'reason is: %s' % e)
            raise Exception('get the device network monitor message '
                            'happen exception')

        # for i in range(len(inside)):
        for i in range(len(device_list)):
            if device_list[i] not in ['eth0', 'eth1', 'eth2']:
                continue
            network.append({
                'net_device': device_list[i],
                'RX': inside[i],
                'TX': outside[i]
            })
        # add_result = {'ip': host,
        #               'network': {'net_device': device_list[i],
        #                           'RX': inside[i], 'TX': outside[i]}}
        add_result = {'ip': host, 'network': network}
        result.append(add_result)
        network = []
        log.info('get the network data is: %s' % add_result)
def create_esjson(json_list, log_info):
    namespace = json_list.get("metadata").get('namespace')
    service_name = json_list.get("metadata").get('name')
    host = "http://*****:*****@timestamp": str(get_now_time_ss_z())
    }
    return msg_json, es_url
def attachment_delete_wait(self, attachment_uuid, server_uuid):
    result = self.attachment_delete(attachment_uuid, server_uuid)
    if result.get('status') != 0:
        return result

    timeout = 0
    while True:
        timeout = timeout + 1
        if timeout >= 16:
            # give up after roughly 3 seconds (16 polls at 0.2s intervals)
            return request_result(1012)
        log.info('volume detail start execute')
        op_status = self.volume_detail(attachment_uuid)
        log.info('volume detail end execute')
        if op_status.get('status') != 0:
            sleep(0.2)
            continue
        else:
            status = op_status.get('result')
            if status == 'available':
                return result
            if status == 'detaching':
                sleep(0.2)
                continue
            else:
                return request_result(1012)
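# A hypothetical helper (not part of the original module) that captures the
# same poll-until-status pattern used above: 16 attempts at 0.2s intervals is
# roughly a 3-second budget before giving up.
from time import sleep


def wait_for_status(fetch_status, done_states, retry_states,
                    interval=0.2, max_tries=16):
    for _ in range(max_tries):
        status = fetch_status()
        if status in done_states:
            return True
        if status in retry_states:
            # transient state, wait and poll again
            sleep(interval)
            continue
        # unexpected state, stop immediately
        return False
    # budget exhausted
    return False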
def router_list(self, user_uuid, project_uuid, team_uuid, team_priv,
                project_priv, page_size, page_num):
    ret = []
    try:
        if ((project_priv is not None) and ('R' in project_priv)) \
                or ((team_priv is not None) and ('R' in team_priv)):
            db_result = self.db.db_router_list_project(team_uuid,
                                                       project_uuid,
                                                       page_size, page_num)
            db_count = self.db.router_pro_count(team_uuid, project_uuid)
            count = db_count[0][0]
        else:
            db_result = self.db.db_router_list_user(team_uuid, project_uuid,
                                                    user_uuid, page_size,
                                                    page_num)
            db_count = self.db.router_usr_count(team_uuid, project_uuid,
                                                user_uuid)
            count = db_count[0][0]
        log.info(db_result)
    except Exception, e:
        log.error('Database select error, reason=%s' % e)
        return request_result(403)
class ServicesApi(Resource):
    def __init__(self):
        self.kubernetes = KubernetesRpcClient()

    @time_log
    def post(self):
        try:
            token = request.headers.get('token')
            token_ret = token_auth(token)
            source_ip = request.headers.get('X-Real-IP')
            if source_ip is None:
                source_ip = request.remote_addr
        except Exception, e:
            log.error('Token check error, reason=%s' % e)
            return request_result(201)

        try:
            parameters = json.loads(request.get_data())
            log.info('parameters body is: %s' % parameters)
            parameters['token'] = token
            token_rets = token_ret.get('result')
            if 'service_name' in token_rets.keys():
                del token_rets['service_name']
            parameters.update(token_rets)
            log.info('parameters body(1) is: %s' % parameters)
            if parameters.get('service_name') is None:
                return request_result(101)
        except Exception, e:
            log.error('parameters error, reason=%s' % e)
            return request_result(101)
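# Registration sketch (the route path and the Flask-RESTful wiring are
# assumptions, not taken from this file): a resource class like ServicesApi
# is typically attached to the application roughly as follows.
from flask import Flask
from flask_restful import Api

app = Flask(__name__)
api = Api(app)
api.add_resource(ServicesApi, '/api/v1.0/services')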
def neo4j_http_test():
    url = "http://localhost:7474/db/data/transaction/commit"
    a = "neo4j:qwe123"
    a = base64.b64encode(a)
    log.info('basic auth token: %s' % a)
    h = {"Authorization": a}
    dict_data = {
        "statements": [
            {
                "statement": "LOAD CSV WITH HEADERS FROM \"file:///aaa.csv\" "
                             "AS line MERGE (p:person{id:line.id,"
                             "name:line.name,age:line.age,sex:line.sex})"
            },
            # {
            #     "statement": "CREATE (n {props}) RETURN n",
            #     "parameters": {
            #         "props": {
            #             "name": "My Node"
            #         }
            #     }
            # }
        ]
    }
    result = json.loads(
        requests.post(url, json.dumps(dict_data), headers=h,
                      timeout=5).text)
    log.info('the request result is: %s' % result)
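# Note (assumption, not from the original code): Neo4j's HTTP transactional
# endpoint expects standard HTTP Basic auth, i.e. the Authorization value is
# normally prefixed with "Basic ". A minimal sketch of that construction;
# with requests the header can also be built automatically via the auth tuple.
import base64
import requests


def neo4j_basic_auth_headers(user='neo4j', password='qwe123'):
    token = base64.b64encode('%s:%s' % (user, password))
    return {"Authorization": "Basic " + token,
            "Content-Type": "application/json"}

# equivalently:
#   requests.post(url, data=json.dumps(dict_data),
#                 auth=(user, password), timeout=5)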
def send_email(self, email_list, service_uuid):
    email = self.get_email_and_phone(service_uuid)
    if email.get('status') != 0:
        return request_result(404)
    email = email.get('result')
    log.info('send to the email is: %s' % email)

    usage = ""
    for i in email_list:
        usage = usage + "<tr><td>%s</td><td>%s</td><td>%s</td></tr>" \
                        % (i.get('service_name'), i.get('usage'),
                           i.get('time'))
    html_body = ("<table bgcolor=\"#FF5151\" border=\"2\">"
                 "<th>service name</th>"
                 "<th>alarm message</th>"
                 "<th>alarm time</th>"
                 "%s"
                 "</table>") % str(usage)
    data = {
        "to": email,
        "title": "Service alarm",
        "text": None,
        "html": html_body
    }
    email_send = self.email_driver(data).get('status')
    if int(email_send) != 0:
        return request_result(601)
def rolling_update(self, context):
    try:
        log.info('to rolling update, the data is: %s' % context)
        self.rbtmq.rpc_cast_client(self.queue, context)
    except Exception, e:
        log.error('Rpc client exec error, reason=%s' % e)
        return request_result(598)
def db_network_list_user(self, team_uuid, project_uuid, user_uuid,
                         page_size, page_num):
    start_position = (page_num - 1) * page_size
    sql = "select a.name, b.name as subnet_name, b.cidr, a.description, " \
          "a.is_shared, a.is_router_external, a.size, a.status, " \
          "a.is_admin_state_up, a.create_time, a.uuid, a.update_time " \
          "from network a, subnet b, " \
          "resources_acl c where a.is_show=1 and a.uuid=b.network_uuid " \
          "and a.uuid=c.resource_uuid and c.user_uuid='%s' " \
          "and c.project_uuid='%s' and c.team_uuid='%s' " \
          "UNION " \
          "select a.name, '' as subnet_name, '' as cidr, a.description, " \
          "a.is_shared, a.is_router_external, a.size, a.status, " \
          "a.is_admin_state_up, a.create_time, a.uuid, a.update_time " \
          "from network a, " \
          "subnet b, resources_acl c where a.is_show=1 " \
          "and a.uuid not in (select network_uuid from subnet) " \
          "and a.uuid=c.resource_uuid and c.user_uuid='%s' " \
          "and c.project_uuid='%s' and c.team_uuid='%s' " \
          "order by create_time DESC limit %d, %d" \
          % (user_uuid, project_uuid, team_uuid,
             user_uuid, project_uuid, team_uuid,
             start_position, page_size)
    log.info('user network list sql is: %s' % sql)
    return super(NetworkDB, self).exec_select_sql(sql)
class EventsDriver(object):
    def __init__(self):
        self.k8s_driver = K8sDriver()

    def app_events_es(self, project_uuid, rc_name):
        event = []
        try:
            ns_events_info = self.k8s_driver.app_events_info(project_uuid)
            if ns_events_info.get('status') != 0:
                log.debug('Get events from k8s error')
            events_info = json.loads(ns_events_info['result'])
            events_list = events_info['items']
        except Exception, e:
            log.error('get the events error, reason is: %s' % e)
            raise Exception('get the events error')

        for i in events_list:
            if i.get('involvedObject').get('kind') == 'ReplicationController' \
                    and i.get('involvedObject').get('name') == rc_name:
                log.info('will post to es is: %s' % i.get('message'))
                event.append(i.get('message'))
            if i.get('involvedObject').get('kind') == 'Pod' and (
                    rc_name in i.get('involvedObject').get('name')[:-6]):
                log.info('will post to es is: %s' % i.get('message'))
                event.append(i.get('message'))
        return event
class RpcAPI(object):
    def __init__(self):
        self.app_resources = {}

    def add_resource(self, api, resource):
        self.app_resources[api] = resource

    def rpcapp_run(self, dict_data):
        try:
            api = dict_data['api']
            context = dict_data['context']
            parameters = dict_data['parameters']
        except Exception, e:
            log.error('parameters error: %s' % e)
            return request_result(101)
        try:
            log.info('dispatch target: %s' % self.app_resources[api])
            log.info('context: %s' % context)
            log.info('parameters: %s' % parameters)
            return self.app_resources[api](context, parameters)
        except Exception, e:
            log.error('RPC API routing error: %s' % e)
            return request_result(102)
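# A minimal usage sketch (the handler and api key names are hypothetical,
# not from the original code): register a handler under an api key, then
# dispatch a request dict through rpcapp_run.
def demo_handler(context, parameters):
    return {'status': 0, 'context': context, 'parameters': parameters}


rpc_api = RpcAPI()
rpc_api.add_resource('service_create', demo_handler)
print rpc_api.rpcapp_run({'api': 'service_create',
                          'context': {'token': 'abc'},
                          'parameters': {'service_name': 'demo'}})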
def db_subnet_create(self, subnet_uuid, name, description, is_dhcp_enabled,
                     network_uuid, ip_version, gateway_ip, allocation_pools,
                     cidr, dns_nameservers=[], host_routes=[],
                     user_uuid=None, project_uuid=None, team_uuid=None):
    sql_acl = "insert into resources_acl(resource_uuid, resource_type," \
              "admin_uuid, team_uuid, project_uuid, user_uuid) " \
              "values ('%s','%s','%s','%s','%s','%s')" \
              % (subnet_uuid, 'network', '0', team_uuid, project_uuid,
                 user_uuid)
    sql = "insert into subnet(uuid, name, description, is_dhcp_enabled," \
          "network_uuid, ip_version, gateway_ip, allocation_pools, " \
          "cidr, dns_nameservers, host_routes) values('%s','%s','%s'," \
          "%d,'%s','%s','%s','%s'," \
          "'%s','%s','%s')" % (subnet_uuid, name, description,
                               is_dhcp_enabled, network_uuid, ip_version,
                               gateway_ip, allocation_pools, cidr,
                               dns_nameservers, host_routes)
    log.info('create sql is: (%s;%s)' % (sql_acl, sql))
    return super(NetworkDB, self).exec_update_sql(sql_acl, sql)
def update_alarm(self, dict_data):
    sql_alarming = ""
    service_uuid = dict_data.get('service_uuid')
    email = dict_data.get('email')
    phone = dict_data.get('phone')
    up_or_in = dict_data.get('up_or_in')
    a_uuid, user_uuid, service_uuide, wise, cpu_unit, cpu_value, \
        memory_unit, memory_value, network_unit, network_value, \
        storage_unit, storage_value, time_span, alarm_time, \
        alarm_uuid = self.element_ex.insert_alarm_ex(dict_data)
    if up_or_in == 'insert':
        sql_alarming = "insert INTO alarming(uuid,wise,cpu_unit,cpu_value," \
                       "memory_unit,memory_value,network_unit," \
                       "network_value,storage_unit,storage_value," \
                       "time_span,alarm_time) " \
                       "VALUES ('%s','%s','%s','%s','%s','%s','%s','%s'," \
                       "'%s','%s','%s','%s')" \
                       % (a_uuid, wise, cpu_unit, cpu_value, memory_unit,
                          memory_value, network_unit, network_value,
                          storage_unit, storage_value, time_span,
                          alarm_time)
    elif up_or_in == 'update':
        sql_alarming = "update alarming SET wise=0,cpu_unit='%s'," \
                       "cpu_value='%s',memory_unit='%s',memory_value='%s'," \
                       "network_unit='%s',network_value='%s'," \
                       "storage_unit='%s',storage_value='%s'," \
                       "time_span='%s',alarm_time='%s' WHERE uuid=(SELECT " \
                       "alarm_uuid from alarm_service_rules WHERE " \
                       "service_uuid='%s')" \
                       % (cpu_unit, cpu_value, memory_unit, memory_value,
                          network_unit, network_value, storage_unit,
                          storage_value, time_span, alarm_time,
                          service_uuid)
    sql_rules = "update alarm_service_rules set email='%s', phone='%s', " \
                "alarm_uuid='%s' WHERE service_uuid='%s'" \
                % (email, phone, a_uuid, service_uuid)
    log.info('update the rules sql is: %s, and update the alarming sql is: '
             '%s' % (sql_rules, sql_alarming))
    return super(AlarmDB, self).exec_update(sql_rules, sql_alarming)
def update_container(self, dict_data):
    project_uuid, service_name = normal_call(dict_data)
    container = dict_data.get('container')
    sql_delete = "delete from containers where rc_uuid=(select rc_uuid " \
                 "from font_service where project_uuid='%s' " \
                 "and service_name='%s')" % (project_uuid, service_name)
    super(ServiceDB, self).exec_update_sql(sql_delete)

    for i in container:
        uuid_c = uuid_ele()
        container_uuid, rc_uuid, container_port, protocol, access_mode, \
            access_scope, tcp_port, http_domain, \
            tcp_domain = container_element(i)
        sql_insert = "insert into containers(uuid, rc_uuid, container_port," \
                     "protocol, access_mode, access_scope, tcp_port," \
                     "http_domain, cname, tcp_domain, private_domain," \
                     "identify) VALUES " \
                     "('%s',(select rc_uuid from font_service where " \
                     "service_name='%s' and project_uuid='%s')," \
                     "%d,'%s','%s','%s','%s','%s','%s','%s','%s'," \
                     "'%s')" % (uuid_c, service_name, project_uuid,
                                int(container_port), protocol, access_mode,
                                access_scope, tcp_port, http_domain,
                                http_domain, tcp_domain, i.get('domain'),
                                i.get('identify'))
        log.info('update the container sql is: %s' % sql_insert)
        super(ServiceDB, self).exec_update_sql(sql_insert)

    sql_update_time = "update font_service SET service_update_time=now() " \
                      "WHERE rc_uuid='%s'" % rc_uuid
    super(ServiceDB, self).exec_update_sql(sql_update_time)
def update_zero_services(self, teams_list):
    sql = "update font_service set lifecycle='stop'," \
          "service_update_time=now() where team_uuid in " \
          + self.element_explain(teams_list)
    # sql = "update font_service set lifecycle='stop' where " \
    #       "team_uuid='99acbcae-76f0-42b4-90e8-279a6f96c327'"
    log.info('update the db sql is: %s, type is: %s' % (sql, type(sql)))
    return super(ServiceDB, self).exec_update_sql(sql)
def show_namespace(self, json_list):
    url = '%s/namespaces/%s' % (self.host_address,
                                json_list.get('namespace'))
    response = requests.get(url, headers=self.HEADERS, verify=False)
    log.info('show the namespace result is %s, type is %s'
             % (response.text, type(response.text)))
    return response.text
def service_list_user(self, dict_data):
    project_uuid = dict_data.get('project_uuid')
    user_uuid = dict_data.get('user_uuid')
    page_size = int(dict_data.get('page_size'))
    page_num = int(dict_data.get('page_num'))
    start_position = (page_num - 1) * page_size
    sql = "SELECT a.uuid service_uuid, a.service_name, b.http_domain, " \
          "b.tcp_domain, b.container_port, a.service_status, a.image_dir, " \
          "a.service_create_time ltime, a.description, " \
          "b.private_domain domain, b.identify, b.access_mode " \
          "FROM font_service a join containers b " \
          "WHERE (a.rc_uuid = b.rc_uuid AND a.project_uuid='%s' " \
          "AND a.user_uuid='%s' " \
          "AND ((b.http_domain is not NULL and b.http_domain != '' " \
          "AND b.http_domain != 'None') " \
          "OR (b.tcp_domain is not NULL AND b.tcp_domain != 'None' " \
          "AND b.tcp_domain != ''))) " \
          "and (a.lifecycle is NULL or a.lifecycle='') " \
          "ORDER BY ltime DESC " \
          "limit %d, %d" % (project_uuid, user_uuid,
                            start_position, page_size)
    # variant without the domain filters, kept for reference:
    # "SELECT a.uuid service_uuid, a.service_name, b.http_domain,
    #  b.tcp_domain, b.container_port, a.service_status, a.image_dir,
    #  a.service_create_time ltime, a.description,
    #  b.private_domain domain, b.identify, b.access_mode
    #  FROM font_service a join containers b
    #  WHERE (a.rc_uuid = b.rc_uuid AND a.project_uuid='%s'
    #  AND a.user_uuid='%s') and (a.lifecycle is NULL or a.lifecycle='')
    #  ORDER BY ltime DESC limit %d, %d"
    log.info('get the service_list for user sql is: %s' % sql)
    # sql_count = "select count(*) from font_service a where " \
    #             "a.project_uuid='%s' AND a.user_uuid='%s'" \
    #             % (project_uuid, user_uuid)
    return super(ServiceDB, self).exec_select_sql(sql)
def service_create(self, context, parameters):
    log.info('rpc server get the data is: %s' % parameters)
    token = parameters.get('token')
    cost = parameters.get('cost')
    return self.recover.create_apps(token, context, parameters, cost)
def delete_namespace(self, dict_data):
    log.info('delete the ns dict is: %s' % dict_data)
    namespace = dict_data.get('namespace')
    url = '%s/namespaces/%s' % (self.host_address, namespace)
    msg = requests.delete(url, headers=self.HEADERS, verify=False)
    log.info('delete the kubernetes ns result is: %s' % msg.text)
    return json.loads(msg.text)
def os_disks_snapshot_create(self, description, name, vm_uuid, metadata,
                             user_uuid, team_uuid, project_uuid):
    snapshot_uuid = str(uuid4())
    # system disk snapshot
    if vm_uuid is not None:
        size = 0
        try:
            self.qemu.os_snap_create(vm_uuid, name)
        except Exception, e:
            log.error('create the osdisk snapshot error, '
                      'reason is: %s' % e)
            return request_result(1001)
        try:
            snap_info = self.qemu.os_snap_detail(vm_uuid)
            for i in snap_info:
                log.info('get the size of the snapshot, '
                         'the items is: %s' % i)
                if name in i:
                    size = i.split(' ')[1].rstrip()
                    log.info('get the size from qemu is: %s' % size)
                    size = int(size)
        except Exception, e:
            log.error('get the snap detail error, reason is: %s' % e)
            # rollback: remove the snapshot that was just created
            try:
                self.qemu.os_snap_delete(name, vm_uuid)
            except Exception, e:
                log.error('recovery the snapshot error, reason is: %s' % e)
def photo_dir(dict_data):
    photo_url = 'http://%s/api/v1.0/pictures' % conf.IMAGE_S
    file_url = 'http://101.201.56.57:8765/api/v1.0/files/%s/ServiceAvatars' \
               '/%s/boxlinker' % (dict_data.get('team_uuid'),
                                  dict_data.get('service_uuid'))
    service_name = dict_data.get('service_name')
    token = dict_data.get('token')
    photo_data = {'name': service_name}
    header = {'token': token}
    try:
        photo_ret = requests.post(photo_url, json.dumps(photo_data),
                                  headers=header, timeout=5).text
        log.info('make the photo, url is: %s, result is: %s, type is: %s'
                 % (photo_url, photo_ret, type(photo_ret)))
        photo_ret = json.loads(photo_ret)
        if photo_ret.get('status') == 0:
            image_dir = photo_ret.get('result').get('image_url')
        else:
            raise Exception('make the photo error')
    except Exception, e:
        log.error('make the photo error, reason is: %s' % e)
        raise Exception('request the photo url error')
def storage_status(self, dict_data):
    response = ""
    volume = dict_data.get("volume")
    if volume is None or volume == '':
        return True
    if dict_data.get("action").upper() == "POST":
        for i in volume:
            volume_uuid = i.get("volume_uuid")
            json_status = {
                "volume_uuid": volume_uuid,
                "volume_status": "using"
            }
            dict_data.update(json_status)
            response = self.put_using(dict_data)
    if dict_data.get("action").upper() == "PUT":
        for i in volume:
            volume_uuid = i.get("volume_uuid")
            json_status = {
                "volume_uuid": volume_uuid,
                "volume_status": "unused"
            }
            dict_data.update(json_status)
            response = self.put_using(dict_data)
    log.info('delete the volumes data is: %s' % dict_data)
    return response
def get_alarm_svc(self):
    sql = "select c.service_uuid,a.cpu_value,a.memory_value," \
          "a.network_value,a.storage_value,a.time_span,a.alarm_time FROM " \
          "alarming a,alarm_service_rules c WHERE a.uuid=c.alarm_uuid"
    log.info('query the alarm message sql is: %s' % sql)
    return super(AlarmDB, self).exec_select_sql(sql)
def service_delete(self, context, token, source_ip, resource_uuid):
    log.info('the data(in) when delete service is: %s' % context)
    try:
        context = self.service_db.get_service_name(context)
    except Exception, e:
        log.error('get the service name error, reason=%s' % e)
        return request_result(404)
def post(self):
    try:
        context = json.loads(request.get_data())
        log.info('parameters body is: %s' % context)
    except Exception, e:
        log.error('parameters error, reason=%s' % e)
        return json.dumps(request_result(101))