def stopCluster(clustername):
    userid = session['userid']
    username = session['username']
    status, clusterid = app.userMgr.get_clusterid(clustername, userid)
    if not status:
        return redirect('/dashboard/')
    status, clusterinfo = app.userMgr.get_clusterinfo(clusterid)
    if clusterinfo['status'] == 'stopped':
        return redirect('/dashboard/')
    # send event to DistGear Master to stop cluster
    ret = requests.post(app.master, data=json.dumps({
            'event': 'StopCluster',
            'parameters': {
                'clustername': clustername,
                'clusterid': clusterid,
                'userid': clusterinfo['userid'],
                'containers': clusterinfo['containers']
            }
        }))
    print('result from master: %s' % str(ret.text))
    try:
        result = json.loads(ret.text)
    except json.JSONDecodeError:
        print('result from master is not json')
        result = {'status': 'fail', 'result': 'result not json--%s' % ret.text}
    if result['status'] == 'success':
        app.userMgr.set_cluster_status(clusterid, 'stopped')
        proxytool.delete_route('go/%s/%s' % (username, clustername))
    else:
        print('ERROR : stop cluster failed')
    return redirect('/dashboard/')
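# A minimal, self-contained sketch of the reply handling used in stopCluster
# above. The master's exact reply schema is an assumption; the handler only
# reads the 'status' field and treats an unparseable body as a failure.
import json

def parse_master_reply(text):
    # mirrors the try/except fallback in stopCluster
    try:
        return json.loads(text)
    except json.JSONDecodeError:
        return {'status': 'fail', 'result': 'result not json--%s' % text}

print(parse_master_reply('{"status": "success", "result": "ok"}'))
print(parse_master_reply('<html>502 Bad Gateway</html>'))  # falls back to fail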
def stop_cluster(self, clustername, username):
    [status, info] = self.get_clusterinfo(clustername, username)
    if not status:
        return [False, "cluster not found"]
    if info['status'] == 'stopped':
        return [False, 'cluster is already stopped']
    if self.distributedgw == 'True':
        worker = self.nodemgr.ip_to_rpc(info['proxy_server_ip'])
        worker.delete_route("/" + info['proxy_public_ip'] + '/go/' + username + '/' + clustername)
    else:
        proxytool.delete_route("/" + info['proxy_public_ip'] + '/go/' + username + '/' + clustername)
    for container in info['containers']:
        self.delete_all_port_mapping(username, clustername, container['containername'])
        worker = xmlrpc.client.ServerProxy("http://%s:%s" % (container['host'], env.getenv("WORKER_PORT")))
        if worker is None:
            return [False, "The worker can't be found or has been stopped."]
        worker.stop_container(container['containername'])
    [status, info] = self.get_clusterinfo(clustername, username)
    info['status'] = 'stopped'
    info['start_time'] = "------"
    infofile = open(self.fspath + "/global/users/" + username + "/clusters/" + clustername, 'w')
    infofile.write(json.dumps(info))
    infofile.close()
    return [True, "stop cluster"]
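# A hedged sketch of what proxytool.delete_route presumably does against the
# configurable-http-proxy REST API (the proxy named in the request handlers
# further below). The base URL, port 8001, and the omitted auth-token header
# are assumptions drawn from configurable-http-proxy's documented
# DELETE /api/routes/<path> interface, not from docklet's actual proxytool.
import requests

PROXY_API = "http://127.0.0.1:8001/api/routes"  # CHP's default API port

def delete_route(path):
    # CHP answers 204 No Content when the route was removed
    resp = requests.delete("%s/%s" % (PROXY_API, path.lstrip('/')))
    return resp.status_code == 204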
def deleteproxy(self, username, clustername):
    [status, clusterinfo] = self.get_clusterinfo(clustername, username)
    if 'proxy_ip' not in clusterinfo:
        return [False, "proxy not exists"]
    clusterinfo.pop('proxy_ip')
    clusterfile = open(self.fspath + "/global/users/" + username + "/clusters/" + clustername, 'w')
    clusterfile.write(json.dumps(clusterinfo))
    clusterfile.close()
    proxytool.delete_route("/_web/" + username + "/" + clustername)
    return [True, clusterinfo]
def deleteproxy(self, username, clustername):
    [status, clusterinfo] = self.get_clusterinfo(clustername, username)
    if 'proxy_ip' not in clusterinfo:
        return [True, clusterinfo]
    clusterinfo.pop('proxy_ip')
    clusterfile = open(self.fspath + "/global/users/" + username + "/clusters/" + clustername, 'w')
    clusterfile.write(json.dumps(clusterinfo))
    clusterfile.close()
    proxytool.delete_route("/_web/" + username + "/" + clustername)
    return [True, clusterinfo]
def deleteproxy(self, username, clustername):
    [status, clusterinfo] = self.get_clusterinfo(clustername, username)
    if 'proxy_ip' not in clusterinfo:
        return [True, clusterinfo]
    clusterinfo.pop('proxy_ip')
    if self.distributedgw == 'True':
        worker = self.nodemgr.ip_to_rpc(clusterinfo['proxy_server_ip'])
        worker.delete_route("/" + clusterinfo['proxy_public_ip'] + "/_web/" + username + "/" + clustername)
    else:
        proxytool.delete_route("/" + clusterinfo['proxy_public_ip'] + "/_web/" + username + "/" + clustername)
    clusterfile = open(self.fspath + "/global/users/" + username + "/clusters/" + clustername, 'w')
    clusterfile.write(json.dumps(clusterinfo))
    clusterfile.close()
    return [True, clusterinfo]
def stop_cluster(self, clustername, username):
    [status, info] = self.get_clusterinfo(clustername, username)
    if not status:
        return [False, "cluster not found"]
    if info['status'] == 'stopped':
        return [False, 'cluster is already stopped']
    if self.distributedgw == 'True':
        worker = self.nodemgr.ip_to_rpc(info['proxy_server_ip'])
        worker.delete_route("/" + info['proxy_public_ip'] + '/go/' + username + '/' + clustername)
    else:
        proxytool.delete_route("/" + info['proxy_public_ip'] + '/go/' + username + '/' + clustername)
    for container in info['containers']:
        self.delete_all_port_mapping(username, clustername, container['containername'])
        worker = self.nodemgr.ip_to_rpc(container['host'])
        if worker is None:
            return [False, "The worker can't be found or has been stopped."]
        worker.stop_container(container['containername'])
    [status, vcluster] = self.get_vcluster(clustername, username)
    vcluster.status = 'stopped'
    vcluster.start_time = "------"
    db.session.commit()
    return [True, "stop cluster"]
def stop_cluster(self, clustername, username):
    [status, info] = self.get_clusterinfo(clustername, username)
    if not status:
        return [False, "cluster not found"]
    if info['status'] == 'stopped':
        return [False, 'cluster is already stopped']
    if self.distributedgw == 'True':
        worker = self.nodemgr.ip_to_rpc(info['proxy_server_ip'])
        worker.delete_route("/" + info['proxy_public_ip'] + '/go/' + username + '/' + clustername)
    else:
        proxytool.delete_route("/" + info['proxy_public_ip'] + '/go/' + username + '/' + clustername)
    for container in info['containers']:
        worker = xmlrpc.client.ServerProxy("http://%s:%s" % (container['host'], env.getenv("WORKER_PORT")))
        if worker is None:
            return [False, "The worker can't be found or has been stopped."]
        worker.stop_container(container['containername'])
    info['status'] = 'stopped'
    info['start_time'] = "------"
    infofile = open(self.fspath + "/global/users/" + username + "/clusters/" + clustername, 'w')
    infofile.write(json.dumps(info))
    infofile.close()
    return [True, "stop cluster"]
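# A minimal sketch of the worker-side endpoint that the stop_cluster variants
# above talk to via xmlrpc.client.ServerProxy. The real docklet worker
# registers many more methods and reads its port from WORKER_PORT; the port
# number and the stop logic here are illustrative placeholders only.
from xmlrpc.server import SimpleXMLRPCServer

def stop_container(containername):
    # placeholder for the real container-stop logic on the worker host
    print("stopping %s" % containername)
    return True

server = SimpleXMLRPCServer(("0.0.0.0", 1817), allow_none=True)
server.register_function(stop_container)
server.serve_forever()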
def do_POST(self):
    global G_vclustermgr
    global G_usermgr
    #logger.info ("get request, header content:\n%s" % self.headers)
    #logger.info ("read request content:\n%s" % self.rfile.read(int(self.headers["Content-Length"])))
    logger.info("get request, path: %s" % self.path)
    # for test
    if self.path == '/test':
        logger.info("return welcome for test")
        self.response(200, {'success':'true', 'message':'welcome to docklet'})
        return [True, 'test ok']
    # check for non-null content
    if 'Content-Length' not in self.headers:
        logger.info("request content is null")
        self.response(401, {'success':'false', 'message':'request content is null'})
        return [False, 'content is null']
    # auth the user
    # cgi.FieldStorage needs fp/headers/environ (see /usr/lib/python3.4/cgi.py)
    form = cgi.FieldStorage(fp=self.rfile, headers=self.headers, environ={'REQUEST_METHOD':'POST'})
    cmds = self.path.strip('/').split('/')
    if cmds[0] == 'register' and form.getvalue('activate', None) == None:
        logger.info("handle request : user register")
        username = form.getvalue('username', '')
        password = form.getvalue('password', '')
        email = form.getvalue('email', '')
        description = form.getvalue('description', '')
        if (username == '' or password == '' or email == ''):
            self.response(500, {'success':'false'})
            return [False, "register parameter is null"]
        newuser = G_usermgr.newuser()
        newuser.username = form.getvalue('username')
        newuser.password = form.getvalue('password')
        newuser.e_mail = form.getvalue('email')
        newuser.student_number = form.getvalue('studentnumber')
        newuser.department = form.getvalue('department')
        newuser.nickname = form.getvalue('truename')
        newuser.truename = form.getvalue('truename')
        newuser.description = form.getvalue('description')
        newuser.status = "init"
        newuser.auth_method = "local"
        result = G_usermgr.register(user=newuser)
        self.response(200, result)
        return [True, "register succeed"]
    if cmds[0] == 'login':
        logger.info("handle request : user login")
        user = form.getvalue("user")
        key = form.getvalue("key")
        if user == None or key == None:
            self.response(401, {'success':'false', 'message':'user or key is null'})
            return [False, "auth failed"]
        auth_result = G_usermgr.auth(user, key)
        if auth_result['success'] == 'false':
            self.response(401, {'success':'false', 'message':'auth failed'})
            return [False, "auth failed"]
        self.response(200, {'success':'true', 'action':'login', 'data': auth_result['data']})
        return [True, "auth succeeded"]
    if cmds[0] == 'pkulogin':
        logger.info("handle request : PKU user login")
        url = "https://iaaa.pku.edu.cn/iaaaWS/TokenValidation?WSDL"
        remoteaddr = form.getvalue("ip")
        appid = "iwork"
        token = form.getvalue("token")
        key = "2CF3E3B1A57A13BFE0530100007FB96E"
        msg = remoteaddr + appid + token + key
        m = hashlib.md5()
        m.update(msg.encode("utf8"))
        msg = m.hexdigest()
        client = Client(url)
        result = dict(client.service.validate(remoteaddr, appid, token, msg))
        if (result['Status'] != 0):
            self.response(200, {'success':'false', 'result': result})
            return result
        result = G_usermgr.auth_iaaa(result['Info'])
        self.response(200, result)
        return result
    token = form.getvalue("token")
    if token == None:
        self.response(401, {'success':'false', 'message':'token is null'})
        return [False, "auth failed"]
    cur_user = G_usermgr.auth_token(token)
    if cur_user == None:
        self.response(401, {'success':'false', 'message':'token failed or expired', 'Unauthorized': 'True'})
        return [False, "auth failed"]
    user = cur_user.username
    # parse the url to decide the action:
    # /cluster/list
    # /cluster/create & clustername
    # /cluster/start & clustername
    # /cluster/stop & clustername
    # /cluster/delete & clustername
    # /cluster/info & clustername
    if cmds[0] == 'cluster':
        clustername = form.getvalue('clustername')
        # check for 'clustername' : all actions except 'list' need 'clustername'
        if (cmds[1] != 'list') and clustername == None:
            self.response(401, {'success':'false', 'message':'clustername is null'})
            return [False, "clustername is null"]
        if cmds[1] == 'create':
            image = {}
            image['name'] = form.getvalue("imagename")
            image['type'] = form.getvalue("imagetype")
            image['owner'] = form.getvalue("imageowner")
            onenode = form.getvalue('onenode')
            multinodes = form.getvalue('multinodes')
            #logger.info('onenode : %s, multinodes : %s' % (onenode, multinodes))
            logger.info("handle request : create cluster %s with image %s" % (clustername, image['name']))
            [status, result] = G_vclustermgr.create_cluster(clustername, user, image, onenode, multinodes)
            if status:
                self.response(200, {'success':'true', 'action':'create cluster', 'message':result})
            else:
                self.response(200, {'success':'false', 'action':'create cluster', 'message':result})
        elif cmds[1] == 'scaleout':
            logger.info("handle request : scale out %s" % clustername)
            image = {}
            image['name'] = form.getvalue("imagename")
            image['type'] = form.getvalue("imagetype")
            image['owner'] = form.getvalue("imageowner")
            extensive = form.getvalue("extensive")
            onenode = form.getvalue("onenode")
            logger.debug("imagename:" + image['name'])
            logger.debug("imagetype:" + image['type'])
            logger.debug("imageowner:" + image['owner'])
            [status, result] = G_vclustermgr.scale_out_cluster(clustername, user, image, extensive, onenode)
            if status:
                self.response(200, {'success':'true', 'action':'scale out', 'message':result})
            else:
                self.response(200, {'success':'false', 'action':'scale out', 'message':result})
        elif cmds[1] == 'scalein':
            logger.info("handle request : scale in %s" % clustername)
            containername = form.getvalue("containername")
            [status, result] = G_vclustermgr.scale_in_cluster(clustername, user, containername)
            if status:
                self.response(200, {'success':'true', 'action':'scale in', 'message':result})
            else:
                self.response(200, {'success':'false', 'action':'scale in', 'message':result})
        elif cmds[1] == 'start':
            logger.info("handle request : start cluster %s" % clustername)
            [status, result] = G_vclustermgr.start_cluster(clustername, user)
            if status:
                # add proxy route in configurable-http-proxy
                [infostatus, clusterinfo] = G_vclustermgr.get_clusterinfo(clustername, user)
                if clusterinfo['size'] > 0:
                    target = 'http://' + clusterinfo['containers'][0]['ip'].split('/')[0] + ":10000"
                    proxytool.set_route('go/' + user + '/' + clustername, target)
                self.response(200, {'success':'true', 'action':'start cluster', 'message':result})
            else:
                self.response(200, {'success':'false', 'action':'start cluster', 'message':result})
        elif cmds[1] == 'stop':
            logger.info("handle request : stop cluster %s" % clustername)
            [status, result] = G_vclustermgr.stop_cluster(clustername, user)
            if status:
                # delete proxy route in configurable-http-proxy
                [infostatus, clusterinfo] = G_vclustermgr.get_clusterinfo(clustername, user)
                # target = 'http://' + clusterinfo['containers'][0]['ip'].split('/')[0] + ":10000"
                proxytool.delete_route('go/' + user + '/' + clustername)
                self.response(200, {'success':'true', 'action':'stop cluster', 'message':result})
            else:
                self.response(200, {'success':'false', 'action':'stop cluster', 'message':result})
        elif cmds[1] == 'delete':
            logger.info("handle request : delete cluster %s" % clustername)
            [status, result] = G_vclustermgr.delete_cluster(clustername, user)
            if status:
                self.response(200, {'success':'true', 'action':'delete cluster', 'message':result})
            else:
                self.response(200, {'success':'false', 'action':'delete cluster', 'message':result})
        elif cmds[1] == 'info':
            logger.info("handle request : info cluster %s" % clustername)
            [status, result] = G_vclustermgr.get_clusterinfo(clustername, user)
            if status:
                self.response(200, {'success':'true', 'action':'info cluster', 'message':result})
            else:
                self.response(200, {'success':'false', 'action':'info cluster', 'message':result})
        elif cmds[1] == 'list':
            logger.info("handle request : list clusters for %s" % user)
            [status, clusterlist] = G_vclustermgr.list_clusters(user)
            if status:
                self.response(200, {'success':'true', 'action':'list cluster', 'clusters':clusterlist})
            else:
                self.response(400, {'success':'false', 'action':'list cluster', 'message':clusterlist})
        elif cmds[1] == 'flush':
            from_lxc = form.getvalue('from_lxc')
            G_vclustermgr.flush_cluster(user, clustername, from_lxc)
            self.response(200, {'success':'true', 'action':'flush'})
        elif cmds[1] == 'save':
            imagename = form.getvalue("image")
            description = form.getvalue("description")
            containername = form.getvalue("containername")
            isforce = (form.getvalue("isforce") == "true")
            [status, message] = G_vclustermgr.create_image(user, clustername, containername, imagename, description, isforce)
            if status:
                logger.info("image has been saved")
                self.response(200, {'success':'true', 'action':'save'})
            else:
                logger.debug(message)
                self.response(400, {'success':'false', 'message':message})
        else:
            logger.warning("request not supported ")
            self.response(400, {'success':'false', 'message':'not supported request'})
    elif cmds[0] == 'image':
        if cmds[1] == 'list':
            images = G_imagemgr.list_images(user)
            self.response(200, {'success':'true', 'images': images})
        elif cmds[1] == 'description':
            image = {}
            image['name'] = form.getvalue("imagename")
            image['type'] = form.getvalue("imagetype")
            image['owner'] = form.getvalue("imageowner")
            description = G_imagemgr.get_image_description(user, image)
            self.response(200, {'success':'true', 'message':description})
        elif cmds[1] == 'share':
            image = form.getvalue('image')
            G_imagemgr.shareImage(user, image)
            self.response(200, {'success':'true', 'action':'share'})
        elif cmds[1] == 'unshare':
            image = form.getvalue('image')
            G_imagemgr.unshareImage(user, image)
            self.response(200, {'success':'true', 'action':'unshare'})
        elif cmds[1] == 'delete':
            image = form.getvalue('image')
            G_imagemgr.removeImage(user, image)
            self.response(200, {'success':'true', 'action':'delete'})
        else:
            logger.warning("request not supported ")
            self.response(400, {'success':'false', 'message':'not supported request'})
    # Request for Monitor
    elif cmds[0] == 'monitor':
        logger.info("handle request: monitor")
        res = {}
        if cmds[1] == 'real':
            com_id = cmds[2]
            fetcher = monitor.Fetcher(etcdaddr, G_clustername, com_id)
            if cmds[3] == 'meminfo':
                res['meminfo'] = fetcher.get_meminfo()
            elif cmds[3] == 'cpuinfo':
                res['cpuinfo'] = fetcher.get_cpuinfo()
            elif cmds[3] == 'cpuconfig':
                res['cpuconfig'] = fetcher.get_cpuconfig()
            elif cmds[3] == 'diskinfo':
                res['diskinfo'] = fetcher.get_diskinfo()
            elif cmds[3] == 'osinfo':
                res['osinfo'] = fetcher.get_osinfo()
            elif cmds[3] == 'containers':
                res['containers'] = fetcher.get_containers()
            elif cmds[3] == 'status':
                res['status'] = fetcher.get_status()
            elif cmds[3] == 'containerslist':
                res['containerslist'] = fetcher.get_containerslist()
            elif cmds[3] == 'containersinfo':
                res = []
                conlist = fetcher.get_containerslist()
                for container in conlist:
                    ans = {}
                    confetcher = monitor.Container_Fetcher(etcdaddr, G_clustername)
                    ans = confetcher.get_basic_info(container)
                    ans['cpu_use'] = confetcher.get_cpu_use(container)
                    ans['mem_use'] = confetcher.get_mem_use(container)
                    res.append(ans)
            else:
                self.response(400, {'success':'false', 'message':'not supported request'})
                return
            self.response(200, {'success':'true', 'monitor':res})
        elif cmds[1] == 'node':
            fetcher = monitor.Container_Fetcher(etcdaddr, G_clustername)
            if cmds[3] == 'cpu_use':
                res['cpu_use'] = fetcher.get_cpu_use(cmds[2])
            elif cmds[3] == 'mem_use':
                res['mem_use'] = fetcher.get_mem_use(cmds[2])
            elif cmds[3] == 'basic_info':
                res['basic_info'] = fetcher.get_basic_info(cmds[2])
            self.response(200, {'success':'true', 'monitor':res})
        elif cmds[1] == 'user':
            if not user == 'root':
                self.response(400, {'success':'false', 'message':'Root Required'})
                return [False, "Root Required"]
            if cmds[3] == 'clustercnt':
                flag = True
                clutotal = 0
                clurun = 0
                contotal = 0
                conrun = 0
                [status, clusterlist] = G_vclustermgr.list_clusters(cmds[2])
                if status:
                    for clustername in clusterlist:
                        clutotal += 1
                        [status2, result] = G_vclustermgr.get_clusterinfo(clustername, cmds[2])
                        if status2:
                            contotal += result['size']
                            if result['status'] == 'running':
                                clurun += 1
                                conrun += result['size']
                else:
                    flag = False
                if flag:
                    res = {}
                    res['clutotal'] = clutotal
                    res['clurun'] = clurun
                    res['contotal'] = contotal
                    res['conrun'] = conrun
                    self.response(200, {'success':'true', 'monitor':{'clustercnt':res}})
                else:
                    self.response(200, {'success':'false', 'message':clusterlist})
            elif cmds[3] == 'cluster':
                if cmds[4] == 'list':
                    [status, clusterlist] = G_vclustermgr.list_clusters(cmds[2])
                    if status:
                        self.response(200, {'success':'true', 'monitor':{'clusters':clusterlist}})
                    else:
                        self.response(400, {'success':'false', 'message':clusterlist})
                elif cmds[4] == 'info':
                    clustername = form.getvalue('clustername')
                    logger.info("handle request : info cluster %s" % clustername)
                    [status, result] = G_vclustermgr.get_clusterinfo(clustername, user)
                    if status:
                        self.response(200, {'success':'true', 'monitor':{'info':result}})
                    else:
                        self.response(200, {'success':'false', 'message':result})
            else:
                self.response(400, {'success':'false', 'message':'not supported request'})
        elif cmds[1] == 'listphynodes':
            res['allnodes'] = G_nodemgr.get_allnodes()
            self.response(200, {'success':'true', 'monitor':res})
    # Request for User
    elif cmds[0] == 'user':
        logger.info("handle request: user")
        if cmds[1] == 'modify':
            #user = G_usermgr.query(username = form.getvalue("username"), cur_user = cur_user).get('token', None)
            result = G_usermgr.modify(newValue=form, cur_user=cur_user)
            self.response(200, result)
        if cmds[1] == 'groupModify':
            result = G_usermgr.groupModify(newValue=form, cur_user=cur_user)
            self.response(200, result)
        if cmds[1] == 'query':
            result = G_usermgr.query(ID=form.getvalue("ID"), cur_user=cur_user)
            if (result.get('success', None) == None or result.get('success', None) == "false"):
                self.response(301, result)
            else:
                result = G_usermgr.queryForDisplay(user=result['token'])
                self.response(200, result)
        elif cmds[1] == 'add':
            user = G_usermgr.newuser(cur_user=cur_user)
            user.username = form.getvalue('username')
            user.password = form.getvalue('password')
            user.e_mail = form.getvalue('e_mail', '')
            user.status = "normal"
            result = G_usermgr.register(user=user, cur_user=cur_user)
            self.response(200, result)
        elif cmds[1] == 'groupadd':
            result = G_usermgr.groupadd(name=form.getvalue('name', None), cur_user=cur_user)
            self.response(200, result)
        elif cmds[1] == 'data':
            logger.info("handle request: user/data")
            result = G_usermgr.userList(cur_user=cur_user)
            self.response(200, result)
        elif cmds[1] == 'groupNameList':
            result = G_usermgr.groupListName(cur_user=cur_user)
            self.response(200, result)
        elif cmds[1] == 'groupList':
            result = G_usermgr.groupList(cur_user=cur_user)
            self.response(200, result)
        elif cmds[1] == 'groupQuery':
            result = G_usermgr.groupQuery(ID=form.getvalue("ID", '3'), cur_user=cur_user)
            if (result.get('success', None) == None or result.get('success', None) == "false"):
                self.response(301, result)
            else:
                self.response(200, result)
        elif cmds[1] == 'selfQuery':
            result = G_usermgr.selfQuery(cur_user=cur_user)
            self.response(200, result)
        elif cmds[1] == 'selfModify':
            result = G_usermgr.selfModify(cur_user=cur_user, newValue=form)
            self.response(200, result)
    elif cmds[0] == 'register':
        # activate an applying account
        logger.info("handle request: user/activate")
        newuser = G_usermgr.newuser()
        newuser.username = cur_user.username
        newuser.nickname = cur_user.truename
        newuser.status = 'applying'
        newuser.user_group = cur_user.user_group
        newuser.auth_method = cur_user.auth_method
        newuser.e_mail = form.getvalue('email', '')
        newuser.student_number = form.getvalue('studentnumber', '')
        newuser.department = form.getvalue('department', '')
        newuser.truename = form.getvalue('truename', '')
        newuser.tel = form.getvalue('tel', '')
        result = G_usermgr.register(user=newuser)
        self.response(200, result)
    elif cmds[0] == 'service':
        logger.info('handle request: service')
        if cmds[1] == 'list':
            imagename = form.getvalue('imagename', 'base')
            username = form.getvalue('username', 'base')
            isshared = form.getvalue('isshared', 'base')
            result = G_servicemgr.list_service(imagename, username, isshared)
            self.response(200, result)
        elif cmds[1] == 'list2':
            imagename = form.getvalue('imagename', 'base')
            username = form.getvalue('username', 'base')
            isshared = form.getvalue('isshared', 'base')
            clustername = form.getvalue('clustername', '')
            result = G_servicemgr.list_service2(user, clustername, imagename, username, isshared)
            self.response(200, result)
        elif cmds[1] == 'list3':
            clustername = form.getvalue('clustername', '')
            containername = form.getvalue('containername', '')
            result = G_servicemgr.list_service3(user, clustername, containername)
            self.response(200, result)
        elif cmds[1] == 'list4':
            imagename = form.getvalue('imagename', 'base')
            imageowner = form.getvalue('imageowner', 'base')
            imagetype = form.getvalue('imagetype', 'base')
            result = G_servicemgr.list_service4(imagename, imageowner, imagetype)
            self.response(200, result)
        elif cmds[1] == 'config':
            clustername = form.getvalue('clustername', '')
            containername = form.getvalue('containername', '')
            services = form.getvalue('services', '')
            result = G_servicemgr.config_service(user, clustername, containername, services)
            self.response(200, result)
        elif cmds[1] == 'combine':
            imagename = form.getvalue('imagename', 'base')
            imageowner = form.getvalue('imageowner', 'base')
            imagetype = form.getvalue('imagetype', 'base')
            services = form.getvalue('services', '')
            result = G_servicemgr.combine_service(imagename, imageowner, imagetype, services)
            self.response(200, result)
    else:
        logger.warning("request not supported ")
        self.response(400, {'success':'false', 'message':'not supported request'})
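# A minimal client-side sketch for the /cluster/stop route handled in do_POST
# above. The host and port are hypothetical; the handler authenticates every
# cluster action through the 'token' form field and requires 'clustername'.
import requests

resp = requests.post('http://localhost:9000/cluster/stop',
                     data={'token': '<auth-token>', 'clustername': 'mycluster'})
print(resp.json())  # e.g. {'success': 'true', 'action': 'stop cluster', ...}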