Code Example #1
File: vclustermgr.py Project: K45V/docklet
 def start_cluster(self, clustername, username):
     [status, info] = self.get_clusterinfo(clustername, username)
     if not status:
         return [False, "cluster not found"]
     if info['status'] == 'running':
         return [False, "cluster is already running"]
     # check gateway for user
     # after a reboot, the user gateway goes down and loses its configuration,
     # so this check is necessary
     self.networkmgr.check_usergw(username)
     # set proxy 
     try:
         target = 'http://'+info['containers'][0]['ip'].split('/')[0]+":10000" 
         proxytool.set_route('/go/'+username+'/'+clustername, target)
     except Exception:
         return [False, "start cluster failed: setting proxy failed"]
     for container in info['containers']:
         worker = self.nodemgr.ip_to_rpc(container['host'])
         worker.start_container(container['containername'])
         worker.start_services(container['containername'])
     info['status']='running'
     info['start_time']=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
     infofile = open(self.fspath+"/global/users/"+username+"/clusters/"+clustername, 'w')
     infofile.write(json.dumps(info))
     infofile.close()
     return [True, "start cluster"]
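All of the docklet examples on this page follow the same convention: methods return a [status, payload] pair. A minimal caller sketch (hypothetical: mgr stands for an already-configured VclusterMgr instance, and the cluster and user names are placeholders):

    [ok, msg] = mgr.start_cluster("mycluster", "alice")
    if ok:
        print(msg)                      # "start cluster"
    else:
        print("start failed: " + msg)   # e.g. "cluster not found"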
Code Example #2
 def start_cluster(self, clustername, username):
     [status, info] = self.get_clusterinfo(clustername, username)
     if not status:
         return [False, "cluster not found"]
     if info['status'] == 'running':
         return [False, "cluster is already running"]
     # check gateway for user
     # after a reboot, the user gateway goes down and loses its configuration,
     # so this check is necessary
     self.networkmgr.check_usergw(username)
     # set proxy
     try:
         target = 'http://' + info['containers'][0]['ip'].split(
             '/')[0] + ":10000"
         proxytool.set_route('/go/' + username + '/' + clustername, target)
     except Exception:
         return [False, "start cluster failed: setting proxy failed"]
     for container in info['containers']:
         worker = self.nodemgr.ip_to_rpc(container['host'])
         worker.start_container(container['containername'])
         worker.start_services(container['containername'])
     info['status'] = 'running'
     info['start_time'] = datetime.datetime.now().strftime(
         "%Y-%m-%d %H:%M:%S")
     infofile = open(
         self.fspath + "/global/users/" + username + "/clusters/" +
         clustername, 'w')
     infofile.write(json.dumps(info))
     infofile.close()
     return [True, "start cluster"]
Code Example #3
File: web.py Project: tpppppub/Docklet-DistGear
def startCluster(clustername):
    userid = session['userid']
    username = session['username']
    status, clusterid = app.userMgr.get_clusterid(clustername, userid)
    if not status:
        return redirect('/dashboard/')
    status, clusterinfo = app.userMgr.get_clusterinfo(clusterid)
    if clusterinfo['status'] == 'running':
        return redirect('/dashboard/')
    """
    send event to DistGear Master to start cluster
    """
    ret = requests.post(app.master, data = json.dumps({
            'event':'StartCluster',
            'parameters':{
                    'clustername':clustername,
                    'clusterid':clusterid,
                    'userid':clusterinfo['userid'],
                    'containers':clusterinfo['containers']
                }
        }))
    print('result from master: %s'%str(ret.text))
    try:
        result = json.loads(ret.text)
    except json.JSONDecodeError:
        print('result from master is not json')
        result = {'status':'fail', 'result':'result not json--%s'%ret.text}
    if result['status'] == 'success':
        app.userMgr.set_cluster_status(clusterid, 'running')
        target = 'http://'+clusterinfo['containers'][0]['ip'].split('/')[0]+':10000'
        proxytool.set_route('go/%s/%s'%(username, clustername), target)
    else:
        print('ERROR : start cluster failed')
    return redirect('/dashboard/')
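The handler above delegates the actual start to a DistGear master over HTTP. A hedged sketch of the event it posts (the master URL, the ids, and the container list are placeholders; the event and parameter names come from the code):

    import json
    import requests

    payload = {
        'event': 'StartCluster',
        'parameters': {
            'clustername': 'mycluster',
            'clusterid': 'c-0001',      # placeholder id
            'userid': 'u-0001',         # placeholder id
            'containers': [],           # normally clusterinfo['containers']
        },
    }
    ret = requests.post('http://master:8000', data=json.dumps(payload))  # assumed master address
    result = json.loads(ret.text)       # expected: {'status': 'success', ...}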
Code Example #4
File: vclustermgr.py Project: iteratorlee/docklet
 def recover_cluster(self, clustername, username, uid, input_rate_limit, output_rate_limit):
     [status, info] = self.get_clusterinfo(clustername, username)
     if not status:
         return [False, "cluster not found"]
     if not "proxy_server_ip" in info.keys():
         info['proxy_server_ip'] = self.addr
         self.write_clusterinfo(info,clustername,username)
         [status, info] = self.get_clusterinfo(clustername, username)
     if not "proxy_public_ip" in info.keys():
         self.update_proxy_ipAndurl(clustername,username,info['proxy_server_ip'])
         [status, info] = self.get_clusterinfo(clustername, username)
         self.update_cluster_baseurl(clustername,username,info['proxy_server_ip'],info['proxy_public_ip'])
     if not 'port_mapping' in info.keys():
         info['port_mapping'] = []
         self.write_clusterinfo(info,clustername,username)
     if info['status'] == 'stopped':
         return [True, "cluster no need to start"]
     # recover proxy of cluster
     try:
         target = 'http://'+info['containers'][0]['ip'].split('/')[0]+":10000"
         if self.distributedgw == 'True':
             worker = self.nodemgr.ip_to_rpc(info['proxy_server_ip'])
             # check public ip
             if not self.check_public_ip(clustername,username):
                 [status, info] = self.get_clusterinfo(clustername, username)
             worker.set_route("/" + info['proxy_public_ip'] + '/go/'+username+'/'+clustername, target)
         else:
             if not info['proxy_server_ip'] == self.addr:
                 logger.info("%s %s proxy_server_ip has been changed, base_url need to be modified."%(username,clustername))
                 oldpublicIP= info['proxy_public_ip']
                 self.update_proxy_ipAndurl(clustername,username,self.addr)
                 [status, info] = self.get_clusterinfo(clustername, username)
                 self.update_cluster_baseurl(clustername,username,oldpublicIP,info['proxy_public_ip'])
             # check public ip
             if not self.check_public_ip(clustername,username):
                 [status, info] = self.get_clusterinfo(clustername, username)
             proxytool.set_route("/" + info['proxy_public_ip'] + '/go/'+username+'/'+clustername, target)
     except Exception:
         return [False, "start cluster failed: setting proxy failed"]
     # need to check and recover gateway of this user
     self.networkmgr.check_usergw(input_rate_limit, output_rate_limit, username, uid, self.nodemgr,self.distributedgw=='True')
     # recover containers of this cluster
     for container in info['containers']:
         # set up gre from user's gateway host to container's host.
         self.networkmgr.check_usergre(username, uid, container['host'], self.nodemgr, self.distributedgw=='True')
         worker = xmlrpc.client.ServerProxy("http://%s:%s" % (container['host'], env.getenv("WORKER_PORT")))
         if worker is None:
             return [False, "The worker can't be found or has been stopped."]
         worker.recover_container(container['containername'])
         namesplit = container['containername'].split('-')
         portname = namesplit[1] + '-' + namesplit[2]
         worker.recover_usernet(portname, uid, info['proxy_server_ip'], container['host']==info['proxy_server_ip'])
     # recover ports mapping
     [success, msg] = self.recover_port_mapping(username,clustername)
     if not success:
         return [False, msg]
     return [True, "start cluster"]
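recover_usernet derives its port name from the container name. A small illustration of the split used above (the name is hypothetical but follows the <user>-<clusterid>-<index> pattern the code implies):

    containername = 'alice-12-0'        # hypothetical example name
    namesplit = containername.split('-')
    portname = namesplit[1] + '-' + namesplit[2]
    print(portname)                     # '12-0'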
Code Example #5
File: vclustermgr.py Project: K45V/docklet
 def addproxy(self,username,clustername,ip,port):
     [status, clusterinfo] = self.get_clusterinfo(clustername, username)
     if 'proxy_ip' in clusterinfo:
         return [False, "proxy already exists"]
     target = "http://" + ip + ":" + port
     clusterinfo['proxy_ip'] = ip + ":" + port 
     clusterfile = open(self.fspath + "/global/users/" + username + "/clusters/" + clustername, 'w')
     clusterfile.write(json.dumps(clusterinfo))
     clusterfile.close()
     proxytool.set_route("/_web/" + username + "/" + clustername, target)
     return [True, clusterinfo]        
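These examples persist cluster metadata by opening, writing, and closing the JSON file by hand. A with-block is the more idiomatic equivalent (a sketch; the path layout comes from the code above, while the concrete values are placeholders):

    import json
    import os

    fspath = "/opt/docklet"                           # placeholder for self.fspath
    username, clustername = "alice", "mycluster"
    clusterinfo = {"proxy_ip": "192.168.1.10:8888"}   # placeholder metadata
    path = os.path.join(fspath, "global", "users", username, "clusters", clustername)
    with open(path, 'w') as clusterfile:              # closed even if the write raises
        clusterfile.write(json.dumps(clusterinfo))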
Code Example #6
File: vclustermgr.py Project: iteratorlee/docklet
 def start_cluster(self, clustername, username, user_info):
     uid = user_info['data']['id']
     input_rate_limit = user_info['data']['groupinfo']['input_rate_limit']
     output_rate_limit = user_info['data']['groupinfo']['output_rate_limit']
     [status, info] = self.get_clusterinfo(clustername, username)
     if not status:
         return [False, "cluster not found"]
     if info['status'] == 'running':
         return [False, "cluster is already running"]
     # set proxy
     if not "proxy_server_ip" in info.keys():
         info['proxy_server_ip'] = self.addr
     try:
         target = 'http://'+info['containers'][0]['ip'].split('/')[0]+":10000"
         if self.distributedgw == 'True':
             worker = self.nodemgr.ip_to_rpc(info['proxy_server_ip'])
             # check public ip
             if not self.check_public_ip(clustername,username):
                 [status, info] = self.get_clusterinfo(clustername, username)
             worker.set_route("/" + info['proxy_public_ip'] + '/go/'+username+'/'+clustername, target)
         else:
             if not info['proxy_server_ip'] == self.addr:
                 logger.info("%s %s proxy_server_ip has been changed, base_url need to be modified."%(username,clustername))
                 oldpublicIP= info['proxy_public_ip']
                 self.update_proxy_ipAndurl(clustername,username,self.addr)
                 [status, info] = self.get_clusterinfo(clustername, username)
                 self.update_cluster_baseurl(clustername,username,oldpublicIP,info['proxy_public_ip'])
             # check public ip
             if not self.check_public_ip(clustername,username):
                 [status, info] = self.get_clusterinfo(clustername, username)
             proxytool.set_route("/" + info['proxy_public_ip'] + '/go/'+username+'/'+clustername, target)
     except Exception:
         logger.info(traceback.format_exc())
         return [False, "start cluster failed: setting proxy failed"]
     # check gateway for user
     # after a reboot, the user gateway goes down and loses its configuration,
     # so this check is necessary
     self.networkmgr.check_usergw(input_rate_limit, output_rate_limit, username, uid, self.nodemgr,self.distributedgw=='True')
     # start containers
     for container in info['containers']:
         # set up gre from user's gateway host to container's host.
         self.networkmgr.check_usergre(username, uid, container['host'], self.nodemgr, self.distributedgw=='True')
         worker = xmlrpc.client.ServerProxy("http://%s:%s" % (container['host'], env.getenv("WORKER_PORT")))
         if worker is None:
             return [False, "The worker can't be found or has been stopped."]
         worker.start_container(container['containername'])
         worker.start_services(container['containername'])
         namesplit = container['containername'].split('-')
         portname = namesplit[1] + '-' + namesplit[2]
         worker.recover_usernet(portname, uid, info['proxy_server_ip'], container['host']==info['proxy_server_ip'])
     info['status']='running'
     info['start_time']=datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
     self.write_clusterinfo(info,clustername,username)
     return [True, "start cluster"]
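This variant reads the uid and bandwidth limits out of a nested user_info dict. A hedged sketch of the shape it expects (the field names come from the code; the values are placeholders):

    user_info = {
        'data': {
            'id': 42,                            # uid
            'groupinfo': {
                'input_rate_limit': 10000,       # placeholder limits
                'output_rate_limit': 10000,
            },
        },
    }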
Code Example #7
File: vclustermgr.py Project: SouvenirD/docklet
 def addproxy(self,username,clustername,ip,port):
     [status, clusterinfo] = self.get_clusterinfo(clustername, username)
     if 'proxy_ip' in clusterinfo:
         return [False, "proxy already exists"]
     target = "http://" + ip + ":" + port
     clusterinfo['proxy_ip'] = ip + ":" + port 
     clusterfile = open(self.fspath + "/global/users/" + username + "/clusters/" + clustername, 'w')
     clusterfile.write(json.dumps(clusterinfo))
     clusterfile.close()
     proxytool.set_route("/_web/" + username + "/" + clustername, target)
     return [True, clusterinfo]        
Code Example #8
File: vclustermgr.py Project: anbo225/docklet
 def addproxy(self,username,clustername,ip,port):
     [status, clusterinfo] = self.get_clusterinfo(clustername, username)
     if 'proxy_ip' in clusterinfo:
         return [False, "proxy already exists"]
     target = "http://" + ip + ":" + port + "/"
     clusterinfo['proxy_ip'] = ip + ":" + port
     if self.distributedgw == 'True':
         worker = self.nodemgr.ip_to_rpc(clusterinfo['proxy_server_ip'])
         worker.set_route("/"+ clusterinfo['proxy_public_ip'] + "/_web/" + username + "/" + clustername, target)
     else:
         proxytool.set_route("/" + clusterinfo['proxy_public_ip'] + "/_web/" + username + "/" + clustername, target)
     clusterfile = open(self.fspath + "/global/users/" + username + "/clusters/" + clustername, 'w')
     clusterfile.write(json.dumps(clusterinfo))
     clusterfile.close()
     return [True, clusterinfo]
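Unlike Code Examples #5 and #7, this version prefixes the route with the cluster's public proxy IP and, when the distributed gateway is enabled, registers it on the proxy node itself. The resulting path, sketched with placeholder values:

    proxy_public_ip = '162.105.0.1'     # placeholder
    username, clustername = 'alice', 'mycluster'
    path = '/' + proxy_public_ip + '/_web/' + username + '/' + clustername
    print(path)                         # '/162.105.0.1/_web/alice/mycluster'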
Code Example #9
File: vclustermgr.py Project: monumentality/docklet
 def recover_cluster(self, clustername, username):
     [status, info] = self.get_clusterinfo(clustername, username)
     if not status:
         return [False, "cluster not found"]
     if info['status'] == 'stopped':
         return [True, "cluster no need to start"]
     # need to check and recover gateway of this user
     self.networkmgr.check_usergw(username)
     # recover proxy of cluster
     try:
         target = 'http://'+info['containers'][0]['ip'].split('/')[0]+":10000" 
         proxytool.set_route('/go/'+username+'/'+clustername, target)
     except Exception:
         return [False, "start cluster failed: setting proxy failed"]
     # recover containers of this cluster
     for container in info['containers']:
         worker = self.nodemgr.ip_to_rpc(container['host'])
         worker.recover_container(container['containername'])
     return [True, "start cluster"]
Code Example #10
File: vclustermgr.py Project: SouvenirD/docklet
 def recover_cluster(self, clustername, username):
     [status, info] = self.get_clusterinfo(clustername, username)
     if not status:
         return [False, "cluster not found"]
     if info['status'] == 'stopped':
         return [True, "cluster no need to start"]
     # need to check and recover gateway of this user
     self.networkmgr.check_usergw(username)
     # recover proxy of cluster
     try:
         target = 'http://'+info['containers'][0]['ip'].split('/')[0]+":10000" 
         proxytool.set_route('/go/'+username+'/'+clustername, target)
     except Exception:
         return [False, "start cluster failed: setting proxy failed"]
     # recover containers of this cluster
     for container in info['containers']:
         worker = self.nodemgr.ip_to_rpc(container['host'])
         worker.recover_container(container['containername'])
     return [True, "start cluster"]
Code Example #11
File: vclustermgr.py Project: assmdx/docklet
 def addproxy(self, username, clustername, ip, port):
     [status, clusterinfo] = self.get_clusterinfo(clustername, username)
     if 'proxy_ip' in clusterinfo:
         return [False, "proxy already exists"]
     target = "http://" + ip + ":" + port + "/"
     clusterinfo['proxy_ip'] = ip + ":" + port
     if self.distributedgw == 'True':
         worker = self.nodemgr.ip_to_rpc(clusterinfo['proxy_server_ip'])
         worker.set_route(
             "/" + clusterinfo['proxy_public_ip'] + "/_web/" + username +
             "/" + clustername, target)
     else:
         proxytool.set_route(
             "/" + clusterinfo['proxy_public_ip'] + "/_web/" + username +
             "/" + clustername, target)
     clusterfile = open(
         self.fspath + "/global/users/" + username + "/clusters/" +
         clustername, 'w')
     clusterfile.write(json.dumps(clusterinfo))
     clusterfile.close()
     return [True, clusterinfo]
Code Example #12
File: guest_control.py Project: liukai0322/docklet
 def work(self):
     image = {}
     image['name'] = "base"
     image['type'] = "base"
     image['owner'] = "docklet"
     while len(self.nodemgr.get_rpcs()) < 1:
         time.sleep(10)
     if not os.path.isdir(self.fspath+"/global/users/guest"):
         subprocess.getoutput(self.libpath+"/userinit.sh guest")
     self.G_vclustermgr.create_cluster("guestspace", "guest", image)
     [infostatus, clusterinfo] = self.G_vclustermgr.get_clusterinfo("guestspace", "guest")
     target = 'http://' + clusterinfo['containers'][0]['ip'].split('/')[0]+":10000"
     while True:
         [status, out] = proxytool.set_route('go/guest/guestspace',target)
         if status:
             break
         else:
             time.sleep(30)
     while True:
         self.G_vclustermgr.start_cluster("guestspace", "guest")
         time.sleep(3600)
         self.G_vclustermgr.stop_cluster("guestspace", "guest")
         fspath = self.fspath + "/global/local/volume/guest-1-0/upper/"
         subprocess.getoutput("(cd %s && rm -rf *)" % fspath)
Code Example #13
File: httprest.py Project: liukai0322/docklet
    def do_POST(self):
        global G_vclustermgr
        global G_usermgr
        #logger.info ("get request, header content:\n%s" % self.headers)
        #logger.info ("read request content:\n%s" % self.rfile.read(int(self.headers["Content-Length"])))
        logger.info ("get request, path: %s" % self.path)
        # for test
        if self.path == '/test':
            logger.info ("return welcome for test")
            self.response(200, {'success':'true', 'message':'welcome to docklet'})
            return [True, 'test ok']

        # check for not null content
        if 'Content-Length' not in self.headers:
            logger.info ("request content is null")
            self.response(401, {'success':'false', 'message':'request content is null'})
            return [False, 'content is null']

        # auth the user
        # cgi.FieldStorage need fp/headers/environ. (see /usr/lib/python3.4/cgi.py)
        form = cgi.FieldStorage(fp=self.rfile, headers=self.headers,environ={'REQUEST_METHOD':'POST'})
        cmds = self.path.strip('/').split('/')
        if cmds[0] == 'register' and form.getvalue('activate', None) == None:
            logger.info ("handle request : user register")
            username = form.getvalue('username', '')
            password = form.getvalue('password', '')
            email = form.getvalue('email', '')
            description = form.getvalue('description','')
            if (username == '' or password == '' or email == ''):
                self.response(500, {'success':'false'})
                return [False, "register failed"]
            newuser = G_usermgr.newuser()
            newuser.username = form.getvalue('username')
            newuser.password = form.getvalue('password')
            newuser.e_mail = form.getvalue('email')
            newuser.student_number = form.getvalue('studentnumber')
            newuser.department = form.getvalue('department')
            newuser.nickname = form.getvalue('truename')
            newuser.truename = form.getvalue('truename')
            newuser.description = form.getvalue('description')
            newuser.status = "init"
            newuser.auth_method = "local"
            result = G_usermgr.register(user = newuser)
            self.response(200, result)
            return [True, "register succeed"]
        if cmds[0] == 'login':
            logger.info ("handle request : user login")
            user = form.getvalue("user")
            key = form.getvalue("key")
            if user == None or key == None:
                self.response(401, {'success':'false', 'message':'user or key is null'})
                return [False, "auth failed"]
            auth_result = G_usermgr.auth(user, key)
            if auth_result['success'] == 'false':
                self.response(401, {'success':'false', 'message':'auth failed'})
                return [False, "auth failed"]
            self.response(200, {'success':'true', 'action':'login', 'data': auth_result['data']})
            return [True, "auth succeeded"]
        if cmds[0] == 'pkulogin':
            logger.info ("handle request : PKU user login")
            url = "https://iaaa.pku.edu.cn/iaaaWS/TokenValidation?WSDL"
            remoteaddr = form.getvalue("ip")
            appid = "iwork"
            token = form.getvalue("token")
            key = "2CF3E3B1A57A13BFE0530100007FB96E"
            msg = remoteaddr + appid + token + key
            m = hashlib.md5()
            m.update(msg.encode("utf8"))
            msg = m.hexdigest()

            client = Client(url)
            result = dict(client.service.validate(remoteaddr, appid, token, msg))
            if (result['Status'] != 0):
                self.response(200, {'success':'false',  'result': result})
                return result
            result = G_usermgr.auth_iaaa(result['Info'])
            self.response(200, result)
            return result

        token = form.getvalue("token")
        if token == None:
            self.response(401, {'success':'false', 'message':'user or key is null'})
            return [False, "auth failed"]
        cur_user = G_usermgr.auth_token(token)
        if cur_user == None:
            self.response(401, {'success':'false', 'message':'token failed or expired', 'Unauthorized': 'True'})
            return [False, "auth failed"]



        user = cur_user.username
        # parse the url and get to do actions
        # /cluster/list
        # /cluster/create  &  clustername
        # /cluster/start    &  clustername
        # /cluster/stop    &  clustername
        # /cluster/delete    &  clustername
        # /cluster/info    &  clustername


        if cmds[0] == 'cluster':
            clustername = form.getvalue('clustername')
            # check for 'clustername' : all actions except 'list' need 'clustername'
            if (cmds[1] != 'list') and clustername == None:
                self.response(401, {'success':'false', 'message':'clustername is null'})
                return [False, "clustername is null"]
            if cmds[1] == 'create':
                image = {}
                image['name'] = form.getvalue("imagename")
                image['type'] = form.getvalue("imagetype")
                image['owner'] = form.getvalue("imageowner")
                onenode = form.getvalue('onenode')
                multinodes = form.getvalue('multinodes')
                #logger.info ('onende : %s, multinodes : %s' % (onenode, multinodes))
                logger.info ("handle request : create cluster %s with image %s " % (clustername, image['name']))
                [status, result] = G_vclustermgr.create_cluster(clustername, user, image, onenode, multinodes)
                if status:
                    self.response(200, {'success':'true', 'action':'create cluster', 'message':result})
                else:
                    self.response(200, {'success':'false', 'action':'create cluster', 'message':result})
            elif cmds[1] == 'scaleout':
                logger.info("handle request : scale out %s" % clustername)
                image = {}
                image['name'] = form.getvalue("imagename")
                image['type'] = form.getvalue("imagetype")
                image['owner'] = form.getvalue("imageowner")
                extensive = form.getvalue("extensive")
                onenode = form.getvalue("onenode")
                logger.debug("imagename:" + image['name'])
                logger.debug("imagetype:" + image['type'])
                logger.debug("imageowner:" + image['owner'])
                [status, result] = G_vclustermgr.scale_out_cluster(clustername, user, image, extensive, onenode)
                if status:
                    self.response(200, {'success':'true', 'action':'scale out', 'message':result})
                else:
                    self.response(200, {'success':'false', 'action':'scale out', 'message':result})
            elif cmds[1] == 'scalein':
                logger.info("handle request : scale in %s" % clustername)
                containername = form.getvalue("containername")
                [status, result] = G_vclustermgr.scale_in_cluster(clustername, user, containername)
                if status:
                    self.response(200, {'success':'true', 'action':'scale in', 'message':result})
                else:
                    self.response(200, {'success':'false', 'action':'scale in', 'message':result})
            elif cmds[1] == 'start':
                logger.info ("handle request : start cluster %s" % clustername)
                [status, result] = G_vclustermgr.start_cluster(clustername, user)
                if status:
                    # add proxy in configurable-http-proxy
                    [infostatus, clusterinfo] = G_vclustermgr.get_clusterinfo(clustername, user)
                    if clusterinfo['size'] > 0:
                        target = 'http://' + clusterinfo['containers'][0]['ip'].split('/')[0]+":10000"
                        proxytool.set_route('go/'+user+'/'+clustername, target)
                    self.response(200, {'success':'true', 'action':'start cluster', 'message':result})
                else:
                    self.response(200, {'success':'false', 'action':'start cluster', 'message':result})
            elif cmds[1] == 'stop':
                logger.info ("handle request : stop cluster %s" % clustername)
                [status, result] = G_vclustermgr.stop_cluster(clustername, user)
                if status:
                    # delete proxy in configurable-http-proxy
                    [infostatus, clusterinfo] = G_vclustermgr.get_clusterinfo(clustername, user)
                    # target = 'http://' + clusterinfo['containers'][0]['ip'].split('/')[0]+":10000"
                    proxytool.delete_route('go/'+user+'/'+clustername)
                    self.response(200, {'success':'true', 'action':'stop cluster', 'message':result})
                else:
                    self.response(200, {'success':'false', 'action':'stop cluster', 'message':result})
            elif cmds[1] == 'delete':
                logger.info ("handle request : delete cluster %s" % clustername)
                [status, result] = G_vclustermgr.delete_cluster(clustername, user)
                if status:
                    self.response(200, {'success':'true', 'action':'delete cluster', 'message':result})
                else:
                    self.response(200, {'success':'false', 'action':'delete cluster', 'message':result})
            elif cmds[1] == 'info':
                logger.info ("handle request : info cluster %s" % clustername)
                [status, result] = G_vclustermgr.get_clusterinfo(clustername, user)
                if status:
                    self.response(200, {'success':'true', 'action':'info cluster', 'message':result})
                else:
                    self.response(200, {'success':'false', 'action':'info cluster', 'message':result})
            elif cmds[1] == 'list':
                logger.info ("handle request : list clusters for %s" % user)
                [status, clusterlist] = G_vclustermgr.list_clusters(user)
                if status:
                    self.response(200, {'success':'true', 'action':'list cluster', 'clusters':clusterlist})
                else:
                    self.response(400, {'success':'false', 'action':'list cluster', 'message':clusterlist})

            elif cmds[1] == 'flush':
                from_lxc = form.getvalue('from_lxc')
                G_vclustermgr.flush_cluster(user,clustername,from_lxc)
                self.response(200, {'success':'true', 'action':'flush'})

            elif cmds[1] == 'save':
                imagename = form.getvalue("image")
                description = form.getvalue("description")
                containername = form.getvalue("containername")
                isforce = form.getvalue("isforce")
                if isforce == "true":
                    isforce = True
                else:
                    isforce = False
                [status,message] = G_vclustermgr.create_image(user,clustername,containername,imagename,description,isforce)
                if status:
                    logger.info("image has been saved")
                    self.response(200, {'success':'true', 'action':'save'})
                else:
                    logger.debug(message)
                    self.response(400, {'success':'false', 'message':message})

            else:
                logger.warning ("request not supported ")
                self.response(400, {'success':'false', 'message':'not supported request'})

        elif cmds[0] == 'image':
            if cmds[1] == 'list':
                images = G_imagemgr.list_images(user)
                self.response(200, {'success':'true', 'images': images})
            elif cmds[1] == 'description':
                image = {}
                image['name'] = form.getvalue("imagename")
                image['type'] = form.getvalue("imagetype")
                image['owner'] = form.getvalue("imageowner")
                description = G_imagemgr.get_image_description(user,image)
                self.response(200, {'success':'true', 'message':description})
            elif cmds[1] == 'share':
                image = form.getvalue('image')
                G_imagemgr.shareImage(user,image)
                self.response(200, {'success':'true', 'action':'share'})
            elif cmds[1] == 'unshare':
                image = form.getvalue('image')
                G_imagemgr.unshareImage(user,image)
                self.response(200, {'success':'true', 'action':'unshare'})
            elif cmds[1] == 'delete':
                image = form.getvalue('image')
                G_imagemgr.removeImage(user,image)
                self.response(200, {'success':'true', 'action':'delete'})
            else:
                logger.warning("request not supported ")
                self.response(400, {'success':'false', 'message':'not supported request'})
        # Request for Monitor
        elif cmds[0] == 'monitor':
            logger.info("handle request: monitor")
            res = {}
            if cmds[1] == 'real':
                com_id = cmds[2]
                fetcher = monitor.Fetcher(etcdaddr,G_clustername,com_id)
                if cmds[3] == 'meminfo':
                    res['meminfo'] = fetcher.get_meminfo()
                elif cmds[3] == 'cpuinfo':
                    res['cpuinfo'] = fetcher.get_cpuinfo()
                elif cmds[3] == 'cpuconfig':
                    res['cpuconfig'] = fetcher.get_cpuconfig()
                elif cmds[3] == 'diskinfo':
                    res['diskinfo'] = fetcher.get_diskinfo()
                elif cmds[3] == 'osinfo':
                    res['osinfo'] = fetcher.get_osinfo()
                elif cmds[3] == 'containers':
                    res['containers'] = fetcher.get_containers()
                elif cmds[3] == 'status':
                    res['status'] = fetcher.get_status()
                elif cmds[3] == 'containerslist':
                    res['containerslist'] = fetcher.get_containerslist()
                elif cmds[3] == 'containersinfo':
                    res = []
                    conlist = fetcher.get_containerslist()
                    for container in conlist:
                        ans = {}
                        confetcher = monitor.Container_Fetcher(etcdaddr,G_clustername)
                        ans = confetcher.get_basic_info(container)
                        ans['cpu_use'] = confetcher.get_cpu_use(container)
                        ans['mem_use'] = confetcher.get_mem_use(container)
                        res.append(ans)
                else:
                    self.response(400, {'success':'false', 'message':'not supported request'})
                    return

                self.response(200, {'success':'true', 'monitor':res})
            elif cmds[1] == 'node':
                fetcher = monitor.Container_Fetcher(etcdaddr,G_clustername)
                if cmds[3] == 'cpu_use':
                    res['cpu_use'] = fetcher.get_cpu_use(cmds[2])
                elif cmds[3] == 'mem_use':
                    res['mem_use'] = fetcher.get_mem_use(cmds[2])
                elif cmds[3] == 'basic_info':
                    res['basic_info'] = fetcher.get_basic_info(cmds[2])
                self.response(200, {'success':'true', 'monitor':res})
            elif cmds[1] == 'user':
                if user != 'root':
                    self.response(400, {'success':'false', 'message':'Root Required'})
                    return [False, "Root Required"]
                if cmds[3] == 'clustercnt':
                    flag = True
                    clutotal = 0
                    clurun = 0
                    contotal = 0
                    conrun = 0
                    [status, clusterlist] = G_vclustermgr.list_clusters(cmds[2])
                    if status:
                        for clustername in clusterlist:
                            clutotal += 1
                            [status2, result] = G_vclustermgr.get_clusterinfo(clustername, cmds[2])
                            if status2:
                                contotal += result['size']
                                if result['status'] == 'running':
                                    clurun += 1
                                    conrun += result['size']
                    else:
                        flag = False
                    if flag:
                        res = {}
                        res['clutotal'] = clutotal
                        res['clurun'] = clurun
                        res['contotal'] = contotal
                        res['conrun'] = conrun
                        self.response(200, {'success':'true', 'monitor':{'clustercnt':res}})
                    else:
                        self.response(200, {'success':'false','message':clusterlist})
                elif cmds[3] == 'cluster':
                    if cmds[4] == 'list':
                        [status, clusterlist] = G_vclustermgr.list_clusters(cmds[2])
                        if status:
                            self.response(200, {'success':'true', 'monitor':{'clusters':clusterlist}})
                        else:
                            self.response(400, {'success':'false', 'message':clusterlist})
                    elif cmds[4] == 'info':
                        clustername = form.getvalue('clustername')
                        logger.info ("handle request : info cluster %s" % clustername)
                        [status, result] = G_vclustermgr.get_clusterinfo(clustername, user)
                        if status:
                            self.response(200, {'success':'true', 'monitor':{'info':result}})
                        else:
                            self.response(200, {'success':'false','message':result})
                    else:
                        self.response(400, {'success':'false', 'message':'not supported request'})

            elif cmds[1] == 'listphynodes':
                res['allnodes'] = G_nodemgr.get_allnodes()
                self.response(200, {'success':'true', 'monitor':res})
        # Request for User
        elif cmds[0] == 'user':
            logger.info("handle request: user")
            if cmds[1] == 'modify':
                #user = G_usermgr.query(username = form.getvalue("username"), cur_user = cur_user).get('token',  None)
                result = G_usermgr.modify(newValue = form, cur_user = cur_user)
                self.response(200, result)
            if cmds[1] == 'groupModify':
                result = G_usermgr.groupModify(newValue = form, cur_user = cur_user)
                self.response(200, result)
            if cmds[1] == 'query':
                result = G_usermgr.query(ID = form.getvalue("ID"), cur_user = cur_user)
                if (result.get('success', None) == None or result.get('success', None) == "false"):
                    self.response(301,result)
                else:
                    result = G_usermgr.queryForDisplay(user = result['token'])
                    self.response(200,result)

            elif cmds[1] == 'add':
                user = G_usermgr.newuser(cur_user = cur_user)
                user.username = form.getvalue('username')
                user.password = form.getvalue('password')
                user.e_mail = form.getvalue('e_mail', '')
                user.status = "normal"
                result = G_usermgr.register(user = user, cur_user = cur_user)
                self.response(200, result)
            elif cmds[1] == 'groupadd':
                result = G_usermgr.groupadd(name = form.getvalue('name', None), cur_user = cur_user)
                self.response(200, result)
            elif cmds[1] == 'data':
                logger.info("handle request: user/data")
                result = G_usermgr.userList(cur_user = cur_user)
                self.response(200, result)
            elif cmds[1] == 'groupNameList':
                result = G_usermgr.groupListName(cur_user = cur_user)
                self.response(200, result)
            elif cmds[1] == 'groupList':
                result = G_usermgr.groupList(cur_user = cur_user)
                self.response(200, result)
            elif cmds[1] == 'groupQuery':
                result = G_usermgr.groupQuery(ID = form.getvalue("ID", '3'), cur_user = cur_user)
                if (result.get('success', None) == None or result.get('success', None) == "false"):
                    self.response(301,result)
                else:
                    self.response(200,result)
            elif cmds[1] == 'selfQuery':
                result = G_usermgr.selfQuery(cur_user = cur_user)
                self.response(200,result)
            elif cmds[1] == 'selfModify':
                result = G_usermgr.selfModify(cur_user = cur_user, newValue = form)
                self.response(200,result)
        elif cmds[0] == 'register' :
            #activate
            logger.info("handle request: user/activate")
            newuser = G_usermgr.newuser()
            newuser.username = cur_user.username
            newuser.nickname = cur_user.truename
            newuser.status = 'applying'
            newuser.user_group = cur_user.user_group
            newuser.auth_method = cur_user.auth_method
            newuser.e_mail = form.getvalue('email','')
            newuser.student_number = form.getvalue('studentnumber', '')
            newuser.department = form.getvalue('department', '')
            newuser.truename = form.getvalue('truename', '')
            newuser.tel = form.getvalue('tel', '')
            result = G_usermgr.register(user = newuser)
            self.response(200,result)
        elif cmds[0] == 'service' :
            logger.info('handle request: service')
            if cmds[1] == 'list' :
                imagename = form.getvalue('imagename', 'base')
                username = form.getvalue('username', 'base')
                isshared = form.getvalue('isshared', 'base')
                result = G_servicemgr.list_service(imagename, username, isshared)
                self.response(200, result)
            elif cmds[1] == 'list2' :
                imagename = form.getvalue('imagename', 'base')
                username = form.getvalue('username', 'base')
                isshared = form.getvalue('isshared', 'base')
                clustername = form.getvalue('clustername', '')
                result = G_servicemgr.list_service2(user, clustername, imagename, username, isshared)
                self.response(200, result)
            elif cmds[1] == 'list3' :
                clustername = form.getvalue('clustername', '')
                containername = form.getvalue('containername', '')
                result = G_servicemgr.list_service3(user, clustername, containername)
                self.response(200, result)
            elif cmds[1] == 'list4' :
                imagename = form.getvalue('imagename', 'base')
                imageowner = form.getvalue('imageowner', 'base')
                imagetype = form.getvalue('imagetype', 'base')
                result = G_servicemgr.list_service4(imagename, imageowner, imagetype)
                self.response(200, result)
            elif cmds[1] == 'config' :
                clustername = form.getvalue('clustername', '')
                containername = form.getvalue('containername', '')
                services = form.getvalue('services', '')
                result = G_servicemgr.config_service(user, clustername, containername, services)
                self.response(200, result)
            elif cmds[1] == 'combine' :
                imagename = form.getvalue('imagename', 'base')
                imageowner = form.getvalue('imageowner', 'base')
                imagetype = form.getvalue('imagetype', 'base')
                services = form.getvalue('services', '')
                result = G_servicemgr.combine_service(imagename, imageowner, imagetype, services)
                self.response(200, result)
        else:
            logger.warning ("request not supported ")
            self.response(400, {'success':'false', 'message':'not supported request'})
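The dispatcher above expects form-encoded POSTs that carry a token field. A hedged client sketch for the /cluster/start path (host, port, and token are placeholders; the field names come from the handler):

    import requests

    resp = requests.post('http://localhost:9000/cluster/start',            # assumed address
                         data={'token': 'TOKEN', 'clustername': 'mycluster'})
    print(resp.json())  # {'success': 'true', 'action': 'start cluster', ...} on success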
Code Example #14
File: vclustermgr.py Project: assmdx/docklet
 def recover_cluster(self, clustername, username, uid, input_rate_limit,
                     output_rate_limit):
     [status, info] = self.get_clusterinfo(clustername, username)
     if not status:
         return [False, "cluster not found"]
     if not "proxy_server_ip" in info.keys():
         info['proxy_server_ip'] = self.addr
         self.write_clusterinfo(info, clustername, username)
         [status, info] = self.get_clusterinfo(clustername, username)
     if not "proxy_public_ip" in info.keys():
         self.update_proxy_ipAndurl(clustername, username,
                                    info['proxy_server_ip'])
         [status, info] = self.get_clusterinfo(clustername, username)
         self.update_cluster_baseurl(clustername, username,
                                     info['proxy_server_ip'],
                                     info['proxy_public_ip'])
     if not 'port_mapping' in info.keys():
         info['port_mapping'] = []
         self.write_clusterinfo(info, clustername, username)
     if info['status'] == 'stopped':
         return [True, "cluster no need to start"]
     # recover proxy of cluster
     try:
         target = 'http://' + info['containers'][0]['ip'].split(
             '/')[0] + ":10000"
         if self.distributedgw == 'True':
             worker = self.nodemgr.ip_to_rpc(info['proxy_server_ip'])
             # check public ip
             if not self.check_public_ip(clustername, username):
                 [status,
                  info] = self.get_clusterinfo(clustername, username)
             worker.set_route(
                 "/" + info['proxy_public_ip'] + '/go/' + username + '/' +
                 clustername, target)
         else:
             if not info['proxy_server_ip'] == self.addr:
                 logger.info(
                     "%s %s proxy_server_ip has been changed, base_url need to be modified."
                     % (username, clustername))
                 oldpublicIP = info['proxy_public_ip']
                 self.update_proxy_ipAndurl(clustername, username,
                                            self.addr)
                 [status,
                  info] = self.get_clusterinfo(clustername, username)
                 self.update_cluster_baseurl(clustername, username,
                                             oldpublicIP,
                                             info['proxy_public_ip'])
             # check public ip
             if not self.check_public_ip(clustername, username):
                 [status,
                  info] = self.get_clusterinfo(clustername, username)
             proxytool.set_route(
                 "/" + info['proxy_public_ip'] + '/go/' + username + '/' +
                 clustername, target)
     except Exception:
         return [False, "start cluster failed: setting proxy failed"]
     # need to check and recover gateway of this user
     self.networkmgr.check_usergw(input_rate_limit, output_rate_limit,
                                  username, uid, self.nodemgr,
                                  self.distributedgw == 'True')
     # recover containers of this cluster
     for container in info['containers']:
         # set up gre from user's gateway host to container's host.
         self.networkmgr.check_usergre(username, uid, container['host'],
                                       self.nodemgr,
                                       self.distributedgw == 'True')
         worker = xmlrpc.client.ServerProxy(
             "http://%s:%s" %
             (container['host'], env.getenv("WORKER_PORT")))
         if worker is None:
             return [
                 False, "The worker can't be found or has been stopped."
             ]
         worker.recover_container(container['containername'])
         namesplit = container['containername'].split('-')
         portname = namesplit[1] + '-' + namesplit[2]
         worker.recover_usernet(
             portname, uid, info['proxy_server_ip'],
             container['host'] == info['proxy_server_ip'])
     # recover ports mapping
     [success, msg] = self.recover_port_mapping(username, clustername)
     if not success:
         return [False, msg]
     return [True, "start cluster"]
Code Example #15
File: vclustermgr.py Project: assmdx/docklet
 def start_cluster(self, clustername, username, user_info):
     uid = user_info['data']['id']
     input_rate_limit = user_info['data']['groupinfo']['input_rate_limit']
     output_rate_limit = user_info['data']['groupinfo']['output_rate_limit']
     [status, info] = self.get_clusterinfo(clustername, username)
     if not status:
         return [False, "cluster not found"]
     if info['status'] == 'running':
         return [False, "cluster is already running"]
     # set proxy
     if not "proxy_server_ip" in info.keys():
         info['proxy_server_ip'] = self.addr
     try:
         target = 'http://' + info['containers'][0]['ip'].split(
             '/')[0] + ":10000"
         if self.distributedgw == 'True':
             worker = self.nodemgr.ip_to_rpc(info['proxy_server_ip'])
             # check public ip
             if not self.check_public_ip(clustername, username):
                 [status,
                  info] = self.get_clusterinfo(clustername, username)
             worker.set_route(
                 "/" + info['proxy_public_ip'] + '/go/' + username + '/' +
                 clustername, target)
         else:
             if not info['proxy_server_ip'] == self.addr:
                 logger.info(
                     "%s %s proxy_server_ip has been changed, base_url need to be modified."
                     % (username, clustername))
                 oldpublicIP = info['proxy_public_ip']
                 self.update_proxy_ipAndurl(clustername, username,
                                            self.addr)
                 [status,
                  info] = self.get_clusterinfo(clustername, username)
                 self.update_cluster_baseurl(clustername, username,
                                             oldpublicIP,
                                             info['proxy_public_ip'])
             # check public ip
             if not self.check_public_ip(clustername, username):
                 [status,
                  info] = self.get_clusterinfo(clustername, username)
             proxytool.set_route(
                 "/" + info['proxy_public_ip'] + '/go/' + username + '/' +
                 clustername, target)
     except Exception:
         logger.info(traceback.format_exc())
         return [False, "start cluster failed: setting proxy failed"]
     # check gateway for user
     # after a reboot, the user gateway goes down and loses its configuration,
     # so this check is necessary
     self.networkmgr.check_usergw(input_rate_limit, output_rate_limit,
                                  username, uid, self.nodemgr,
                                  self.distributedgw == 'True')
     # start containers
     for container in info['containers']:
         # set up gre from user's gateway host to container's host.
         self.networkmgr.check_usergre(username, uid, container['host'],
                                       self.nodemgr,
                                       self.distributedgw == 'True')
         worker = xmlrpc.client.ServerProxy(
             "http://%s:%s" %
             (container['host'], env.getenv("WORKER_PORT")))
         if worker is None:
             return [
                 False, "The worker can't be found or has been stopped."
             ]
         worker.start_container(container['containername'])
         worker.start_services(container['containername'])
         namesplit = container['containername'].split('-')
         portname = namesplit[1] + '-' + namesplit[2]
         worker.recover_usernet(
             portname, uid, info['proxy_server_ip'],
             container['host'] == info['proxy_server_ip'])
     info['status'] = 'running'
     info['start_time'] = datetime.datetime.now().strftime(
         "%Y-%m-%d %H:%M:%S")
     self.write_clusterinfo(info, clustername, username)
     return [True, "start cluster"]