def get_postgresql_cluster_standby_ip(conn, cluster_id):
    """Return the standby node's IP of a PostgreSQL app cluster, or None.

    Queries the cluster's "node_details" display tab and scans each row for a
    Standby/standby marker; the IP is read from column 1 of the matching row.
    """
    print("get_postgresql_cluster_standby_ip cluster_id == %s" % (cluster_id))
    found_ip = None
    # DescribeClusterDisplayTabs
    action = const.ACTION_DESCRIBE_CLUSTER_DISPLAY_TABS
    print("action == %s" % (action))
    ret = conn.describe_cluster_display_tabs(
        cluster=cluster_id, verbose=1, display_tabs="node_details")
    print("describe_cluster_display_tabs ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    tabs = ret['display_tabs']
    if not tabs:
        print("describe_cluster_display_tabs display_tabs is None")
        return None
    rows = tabs['data']
    print("datas == %s" % (rows))
    for row in rows:
        print("data == %s" % (row))
        if "Standby" in row or "standby" in row:
            # Column layout assumed: index 1 holds the node IP — TODO confirm.
            found_ip = row[1]
    print("standby_ip == %s" % (found_ip))
    return found_ip
def create_new_volume(conn, user_id, volume_type):
    """Create one 200 GB volume named "vdi-portal-nas" for *user_id*.

    Polls the returned job for up to 300 seconds and returns the 'volumes'
    field of the CreateVolumes response (presumably a list of volume ids —
    TODO confirm against the SDK).
    """
    print("create_new_volume user_id == %s volume_type == %s" % (user_id, volume_type))
    volume_id = ""
    # CreateVolumes
    action = const.ACTION_CREATE_VOLUMES
    print("action == %s" % (action))
    ret = conn.create_volumes(owner=user_id, volume_name="vdi-portal-nas",
                              volume_type=volume_type, count=1, size=200,
                              target_user=user_id)
    print("create_volumes ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    job_id = ret['job_id']
    volume_id = ret['volumes']
    print("job_id == %s" % (job_id))
    print("volume_id == %s" % (volume_id))
    # Poll the asynchronous job once per second, at most 300 times.
    for tick in range(1, 301):
        print("num == %d" % (tick))
        time.sleep(1)
        state = Common.get_job_status(conn, job_id)
        if state == "successful":
            print("create_volumes successful")
            break
        print("status == %s" % (state))
    return volume_id
def get_loadbalancer_listeners(conn, user_id, loadbalancer_id):
    """Return the (deduplicated) listener ids of a load balancer, or None.

    Only the first 5 listeners are fetched (limit=5 in the request).
    """
    print("get_loadbalancer_listeners user_id == %s loadbalancer_id == %s" %
          (user_id, loadbalancer_id))
    listener_ids = []
    # DescribeLoadBalancerListeners
    action = const.ACTION_DESCRIBE_LOADBALANCER_LISTENERS
    print("action == %s" % (action))
    ret = conn.describe_loadbalancer_listeners(
        owner=user_id, offset=0, limit=5, loadbalancer=loadbalancer_id)
    print("describe_loadbalancer_listeners ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    listeners = ret['loadbalancer_listener_set']
    if not listeners:
        print(
            "describe_loadbalancer_listeners loadbalancer_listener_set is None"
        )
        return None
    for entry in listeners:
        lid = entry.get("loadbalancer_listener_id")
        if lid not in listener_ids:
            listener_ids.append(lid)
    print("loadbalancer_listeners_ids == %s" % (listener_ids))
    return listener_ids
def get_cloned_instance_ip(conn, user_id, instance_id):
    """Return the private IP of the last vxnet of the described instance(s).

    *instance_id* may be a single id or a list.  Exits the process when the
    instance set comes back empty.
    """
    print("get_cloned_instance_ip user_id == %s instance_id == %s" %
          (user_id, instance_id))
    addr = ""
    if instance_id and not isinstance(instance_id, list):
        instance_id = [instance_id]
    print("instance_id == %s" % (instance_id))
    # DescribeInstances
    action = const.ACTION_DESCRIBE_INSTANCES
    print("action == %s" % (action))
    ret = conn.describe_instances(owner=user_id, instances=instance_id, verbose=1)
    print("describe_instances ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    instances = ret['instance_set']
    if not instances:
        print("describe_instances instance_set is None")
        exit(-1)
    for inst in instances:
        for net in inst.get("vxnets"):
            addr = net.get("private_ip")
    print("private_ip == %s" % (addr))
    return addr
def get_instance_vxnet_id(conn, user_id, resource_id):
    """Return the vxnet id of the last vxnet attached to *resource_id*.

    *resource_id* may be a single id or a list.  Exits the process when the
    instance set comes back empty; returns None if no vxnet is found.
    """
    print("get_instance_vxnet_id user_id == %s resource_id == %s" %
          (user_id, resource_id))
    if resource_id and not isinstance(resource_id, list):
        resource_id = [resource_id]
    print("resource_id == %s" % (resource_id))
    found = None
    # DescribeInstances
    action = const.ACTION_DESCRIBE_INSTANCES
    print("action == %s" % (action))
    ret = conn.describe_instances(owner=user_id, instances=resource_id, verbose=1)
    print("describe_instances ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    instances = ret['instance_set']
    if not instances:
        print("describe_instances instance_set is None")
        exit(-1)
    for inst in instances:
        for net in inst.get("vxnets"):
            found = net.get("vxnet_id")
    return found
def delete_s2server(conn, user_id, s2_servers_id):
    """Delete S2 (NAS) servers and wait for the job (worker-thread entry).

    Polls the delete job once per second for at most 300 seconds.
    """
    print("子线程启动")
    print("delete_s2server user_id == %s s2_servers_id == %s" %
          (user_id, s2_servers_id))
    if s2_servers_id and not isinstance(s2_servers_id, list):
        s2_servers_id = [s2_servers_id]
    print("s2_servers_id == %s" % (s2_servers_id))
    # DeleteS2Servers
    action = const.ACTION_DELETE_S2_SERVERS
    print("action == %s" % (action))
    ret = conn.delete_s2_servers(s2_servers=s2_servers_id, owner=user_id)
    print("delete_s2_servers ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    job_id = ret['job_id']
    print("job_id == %s" % (job_id))
    for tick in range(1, 301):
        print("num == %d" % (tick))
        time.sleep(1)
        state = Common.get_job_status(conn, job_id)
        if state == "successful":
            print("delete_s2_servers successful")
            break
        print("status == %s" % (state))
    if state == "successful":
        print("delete_s2_servers s2_servers successful")
    print("子线程结束")
def describe_apps(conn, user_id, app_ids):
    """Check whether the PostgreSQL / Memcached appcenter apps are available.

    Worker-thread entry.  For each app id, runs DescribeApps and writes the
    availability flag (True/False) to a status file under /opt so that later
    stages can read it.

    :param conn: API connection object.
    :param user_id: owner account id (currently unused in the requests below).
    :param app_ids: single app id or list of app ids to probe.
    """
    print("子线程启动")
    print("describe_apps")
    app_type = ['cluster']
    # Normalize a scalar id into a one-element list.
    if app_ids and not isinstance(app_ids, list):
        app_ids = [app_ids]
    print("app_ids == %s" %(app_ids))
    print("app_type == %s" % (app_type))
    for app_id in app_ids:
        # DescribeApps
        action = const.ACTION_DESCRIBE_APPS
        print("action == %s" % (action))
        ret = conn.describe_apps(app=app_id, app_type=app_type)
        print("describe_apps ret == %s" % (ret))
        Common.check_ret_code(ret, action)
        # get total
        # NOTE(review): ret.get('total_count') may be None, which would make
        # the "%d" format below raise TypeError — confirm the API always
        # returns total_count.
        total_count = ret.get('total_count')
        print("total_count == %d" %(total_count))
        # NOTE(review): app_id (a single id) is compared against
        # const.POSTGRESQL_APP_IDS / MEMCACHED_APP_IDS — the plural names
        # suggest these might be lists; verify the constants are scalars.
        if app_id == const.POSTGRESQL_APP_IDS:
            # A non-zero total_count means the app exists in the appcenter.
            if total_count:
                print("appcenter postgresql app %s is available" % (app_id))
                appcenter_postgreql_status = True
            else:
                print("appcenter postgresql app %s isn't available" % (app_id))
                appcenter_postgreql_status = False
            # write appcenter_postgreql_status to file
            appcenter_postgreql_status_conf = "/opt/appcenter_postgreql_status_conf"
            with open(appcenter_postgreql_status_conf, "w+") as f1:
                f1.write("APPCENTER_POSTGRESQL_STATUS %s" % (appcenter_postgreql_status))
        elif app_id == const.MEMCACHED_APP_IDS:
            if total_count:
                print("appcenter memcached app %s is available" % (app_id))
                appcenter_memcached_status = True
            else:
                print("appcenter memcached app %s isn't available" % (app_id))
                appcenter_memcached_status = False
            # write appcenter_memcached_status to file
            appcenter_memcached_status_conf = "/opt/appcenter_memcached_status_conf"
            with open(appcenter_memcached_status_conf, "w+") as f1:
                f1.write("APPCENTER_MEMCACHED_STATUS %s" % (appcenter_memcached_status))
        else:
            # Unknown app id: mark both apps unavailable and write both files.
            print("app_id %s is unsupport" % (app_id))
            appcenter_postgreql_status = False
            appcenter_memcached_status = False
            # write appcenter_postgreql_status to file
            appcenter_postgreql_status_conf = "/opt/appcenter_postgreql_status_conf"
            with open(appcenter_postgreql_status_conf, "w+") as f1:
                f1.write("APPCENTER_POSTGRESQL_STATUS %s" % (appcenter_postgreql_status))
            # write appcenter_memcached_status to file
            appcenter_memcached_status_conf = "/opt/appcenter_memcached_status_conf"
            with open(appcenter_memcached_status_conf, "w+") as f1:
                f1.write("APPCENTER_MEMCACHED_STATUS %s" % (appcenter_memcached_status))
    print("子线程结束")
def delete_loadbalancer(conn, user_id, loadbalancer_id):
    """Delete load balancer(s) and wait for the job (worker-thread entry).

    Polls the delete job once per second for at most 300 seconds.

    :param conn: API connection object.
    :param user_id: owner account id.
    :param loadbalancer_id: single id or list of load balancer ids.
    """
    print("子线程启动")
    print("delete_loadbalancer user_id == %s loadbalancer_id == %s" %
          (user_id, loadbalancer_id))
    if loadbalancer_id and not isinstance(loadbalancer_id, list):
        loadbalancer_id = [loadbalancer_id]
    print("loadbalancer_id == %s" % (loadbalancer_id))
    # DeleteLoadBalancers
    # BUG FIX: the original used const.ACTION_CREATE_LOADBALANCER here even
    # though the request below is delete_loadbalancers, so error reporting via
    # check_ret_code named the wrong action.  Use the delete action instead.
    # (Assumes const defines ACTION_DELETE_LOADBALANCERS, matching the
    # ACTION_DELETE_* naming used elsewhere in this file — TODO confirm.)
    action = const.ACTION_DELETE_LOADBALANCERS
    print("action == %s" % (action))
    ret = conn.delete_loadbalancers(loadbalancers=loadbalancer_id, owner=user_id)
    print("delete_loadbalancers ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    # check job status
    job_id = ret['job_id']
    print("job_id == %s" % (job_id))
    num = 0
    while num < 300:
        num = num + 1
        print("num == %d" % (num))
        time.sleep(1)
        status = Common.get_job_status(conn, job_id)
        if status == "successful":
            print("delete_loadbalancers successful")
            break
        print("status == %s" % (status))
    if status == "successful":
        print("delete_loadbalancers loadbalancer successful")
    print("子线程结束")
def get_s2server_ip(conn, user_id, s2_servers_id):
    """Return the private IP of the last described S2 server, or exit on empty."""
    print("get_s2server_ip user_id == %s s2_servers_id == %s" %
          (user_id, s2_servers_id))
    addr = None
    if s2_servers_id and not isinstance(s2_servers_id, list):
        s2_servers_id = [s2_servers_id]
    print("s2_servers_id == %s" % (s2_servers_id))
    # DescribeS2Servers
    action = const.ACTION_DESCRIBE_S2_SERVERS
    print("action == %s" % (action))
    ret = conn.describe_s2_servers(owner=user_id, s2_servers=s2_servers_id,
                                   verbose=1)
    print("describe_s2_servers ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    servers = ret['s2_server_set']
    if not servers:
        print("describe_s2_servers s2_server_set is None")
        exit(-1)
    for srv in servers:
        addr = srv.get("private_ip")
    return addr
def get_instance_class(conn, user_id, vdi_resource_id):
    """Return the instance_class of the last described instance.

    Exits the process when the instance set comes back empty.
    """
    print("get_instance_class user_id == %s vdi_resource_id == %s" %
          (user_id, vdi_resource_id))
    klass = 0
    if vdi_resource_id and not isinstance(vdi_resource_id, list):
        vdi_resource_id = [vdi_resource_id]
    print("vdi_resource_id == %s" % (vdi_resource_id))
    # DescribeInstances
    action = const.ACTION_DESCRIBE_INSTANCES
    print("action == %s" % (action))
    ret = conn.describe_instances(owner=user_id, instances=vdi_resource_id,
                                  verbose=1)
    print("describe_instances ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    instances = ret['instance_set']
    if not instances:
        print("describe_instances instance_set is None")
        exit(-1)
    for inst in instances:
        klass = inst.get("instance_class")
    return klass
def get_postgresql_cluster_master_rdb_instance_id(conn, cluster_id):
    """Return the master node's id of a PostgreSQL app cluster, or None.

    Scans the "node_details" display tab for a Master/primary marker; the id
    is read from column 0 of the matching row.
    """
    print("get_postgresql_cluster_master_rdb_instance_id cluster_id == %s" %
          (cluster_id))
    master_id = None
    # DescribeClusterDisplayTabs
    action = const.ACTION_DESCRIBE_CLUSTER_DISPLAY_TABS
    print("action == %s" % (action))
    ret = conn.describe_cluster_display_tabs(
        cluster=cluster_id, verbose=1, display_tabs="node_details")
    print("describe_cluster_display_tabs ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    tabs = ret['display_tabs']
    if not tabs:
        print("describe_cluster_display_tabs display_tabs is None")
        return None
    rows = tabs['data']
    print("datas == %s" % (rows))
    for row in rows:
        print("data == %s" % (row))
        if "Master" in row or "primary" in row:
            # Column layout assumed: index 0 holds the node id — TODO confirm.
            master_id = row[0]
    print("master_rdb_instance_id == %s" % (master_id))
    return master_id
def create_s2_shared_target(conn, user_id, vxnet_id, s2_server_id, instance_class):
    """Create an NFS shared target on an S2 server (worker-thread entry).

    Flow: map the instance class to a volume type, create a fresh 200 GB
    volume via create_new_volume(), attach it to a new NFS shared target
    (export /mnt/nas), and persist the shared target id to
    /opt/shared_target_id_conf.  Exits the process if volume creation fails.

    :param conn: API connection object.
    :param user_id: owner account id.
    :param vxnet_id: vxnet in which the target is exposed.
    :param s2_server_id: S2 (NAS) server that will host the target.
    :param instance_class: key into const.INSTANCE_CLASS_VOLUME_TYPE_MAP.
    """
    print("子线程启动")
    print("create_s2_shared_target user_id == %s vxnet_id == %s s2_server_id == %s instance_class == %s" % (user_id,vxnet_id,s2_server_id,instance_class))
    # get the volume_type corresponding to the instance class
    # (raises KeyError for an unmapped instance_class)
    volume_type = const.INSTANCE_CLASS_VOLUME_TYPE_MAP[instance_class]
    print("instance_class == %s" % (instance_class))
    print("volume_type == %s" % (volume_type))
    # get available volume_id
    volume_id = create_new_volume(conn,user_id,volume_type)
    print("create_new_volume volume_id == %s" % (volume_id))
    if not volume_id:
        print("volume_id is not available. and create volume failed")
        exit(-1)
    # Normalize a scalar id into a one-element list for the API call.
    if volume_id and not isinstance(volume_id, list):
        volume_id = [volume_id]
    print("volume_id == %s" %(volume_id))
    # CreateS2SharedTarget
    action = const.ACTION_CREATE_S2_SHARED_TARGET
    print("action == %s" % (action))
    ret = conn.create_s2_shared_target(owner=user_id,vxnet=vxnet_id,s2_server_id=s2_server_id,target_type='NFS',export_name_nfs='nas',export_name='/mnt/nas',volumes=volume_id)
    print("create_s2_shared_target ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    s2_shared_target_id = ret['s2_shared_target']
    print("s2_shared_target_id == %s" % (s2_shared_target_id))
    # write s2_shared_target_id to file
    shared_target_id_conf = "/opt/shared_target_id_conf"
    with open(shared_target_id_conf, "w+") as f:
        f.write("SHARED_TARGET_ID %s" % (s2_shared_target_id))
    print("子线程结束")
def get_rdb_topslave_ip(conn, user_id, rdb_id):
    """Return the private IP of the RDB instance whose role is "topslave".

    Exits the process when the rdb set comes back empty; returns None when
    no topslave instance exists.
    """
    print("get_rdb_topslave_ip user_id == %s rdb_id == %s" % (user_id, rdb_id))
    if rdb_id and not isinstance(rdb_id, list):
        rdb_id = [rdb_id]
    print("rdb_id == %s" % (rdb_id))
    topslave_addr = None
    # DescribeRDBs
    action = const.ACTION_DESCRIBE_RDBS
    print("action == %s" % (action))
    ret = conn.describe_rdbs(owner=user_id, rdbs=rdb_id, verbose=1)
    print("describe_rdbs ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    rdbs = ret['rdb_set']
    if not rdbs:
        print("describe_rdbs rdb_set is None")
        exit(-1)
    for entry in rdbs:
        members = entry.get("rdb_instances")
        print("rdb_instances == %s" % (members))
        for member in members:
            print("rdb_instance == %s" % (member))
            if member["rdb_instance_role"] == "topslave":
                topslave_addr = member["private_ip"]
    return topslave_addr
def terminate_instances(conn, user_id, instance_id):
    """Terminate instances (direct cease) and poll the job up to 300 s."""
    print("terminate_instances user_id == %s instance_id == %s" %
          (user_id, instance_id))
    if instance_id and not isinstance(instance_id, list):
        instance_id = [instance_id]
    # TerminateInstances
    action = const.ACTION_TERMINATE_INSTANCES
    print("action == %s" % (action))
    ret = conn.terminate_instances(instances=instance_id, owner=user_id,
                                   direct_cease=1)
    print("terminate_instances ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    # Poll the asynchronous job once per second, at most 300 times.
    job_id = ret['job_id']
    print("job_id == %s" % (job_id))
    for tick in range(1, 301):
        print("num == %d" % (tick))
        time.sleep(1)
        state = Common.get_job_status(conn, job_id)
        if state == "successful":
            print("terminate_instances successful")
            break
        print("status == %s" % (state))
def get_loadbalancer_listeners_port(conn, user_id, loadbalancer_listeners_id):
    """Return the listener_port of the given listener, or None when absent."""
    print("get_loadbalancer_listeners_port user_id == %s loadbalancer_listeners_id == %s" %
          (user_id, loadbalancer_listeners_id))
    port = None
    if loadbalancer_listeners_id and not isinstance(loadbalancer_listeners_id, list):
        loadbalancer_listeners_id = [loadbalancer_listeners_id]
    print("loadbalancer_listeners_id == %s" % (loadbalancer_listeners_id))
    # DescribeLoadBalancerListeners
    action = const.ACTION_DESCRIBE_LOADBALANCER_LISTENERS
    print("action == %s" % (action))
    ret = conn.describe_loadbalancer_listeners(
        owner=user_id, offset=0, limit=1,
        loadbalancer_listeners=loadbalancer_listeners_id)
    print("describe_loadbalancer_listeners ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    listeners = ret['loadbalancer_listener_set']
    if not listeners:
        print("describe_loadbalancer_listeners loadbalancer_listener_set is None")
        return None
    for entry in listeners:
        port = entry.get("listener_port")
    print("listener_port == %s" % (port))
    return port
def get_loadbalancer_ip(conn, user_id, loadbalancer_id):
    """Return the private IP of the load balancer's vxnet, or None when absent."""
    print("get_loadbalancer_ip user_id == %s loadbalancer_id == %s" %
          (user_id, loadbalancer_id))
    lb_addr = None
    if loadbalancer_id and not isinstance(loadbalancer_id, list):
        loadbalancer_id = [loadbalancer_id]
    print("loadbalancer_id == %s" % (loadbalancer_id))
    # DescribeLoadBalancers
    action = const.ACTION_DESCRIBE_LOADBALANCERS
    print("action == %s" % (action))
    ret = conn.describe_loadbalancers(owner=user_id, offset=0, limit=1,
                                      loadbalancers=loadbalancer_id)
    print("describe_loadbalancers ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    balancers = ret['loadbalancer_set']
    if not balancers:
        print("describe_loadbalancers loadbalancer_set is None")
        return None
    for lb in balancers:
        # Note the API key is singular "vxnet" (a dict), not "vxnets".
        net = lb.get("vxnet")
        lb_addr = net.get("private_ip")
    print("loadbalancer_ip == %s" % (lb_addr))
    return lb_addr
def add_backends_to_listener(conn, user_id, loadbalancer_listeners_ids, resource_id):
    """Register *resource_id* as a backend on each listener and collect the ids.

    Skips listeners whose port cannot be resolved, and skips port 10080
    (reserved: it does not get a backend).  Returns the list of created
    backend ids (deduplicated).

    :param conn: API connection object.
    :param user_id: owner account id.
    :param loadbalancer_listeners_ids: iterable of listener ids.
    :param resource_id: instance id to register as the backend.
    """
    print("add_backends_to_listener user_id == %s loadbalancer_listeners_ids == %s resource_id == %s" %
          (user_id, loadbalancer_listeners_ids, resource_id))
    loadbalancer_backends_ids = []
    for loadbalancer_listeners_id in loadbalancer_listeners_ids:
        print("loadbalancer_listeners_id == %s" % (loadbalancer_listeners_id))
        # get listener_port
        listener_port = get_loadbalancer_listeners_port(conn, user_id,
                                                        loadbalancer_listeners_id)
        print("get_loadbalancer_listeners_port listener_port == %s" % (listener_port))
        if not listener_port:
            print("get_loadbalancer_listeners_port listener_port failed")
            continue
        if 10080 == listener_port:
            # BUG FIX: the original printed the literal "[%d]" because the
            # format string had no % argument; supply listener_port.
            print("get_loadbalancer_listeners_port listener_port [%d]" % (listener_port))
            print("10080 listener_port does not need to add a listener backend")
            continue
        # AddLoadBalancerBackends
        action = const.ACTION_ADD_LOADBALANCER_BACKENDS
        print("action == %s" % (action))
        backends_list = [{"loadbalancer_backend_name": "backend-desktop-server",
                          "resource_id": resource_id,
                          "port": listener_port,
                          "weight": "1"}]
        print("backends_list == %s" % (backends_list))
        ret = conn.add_backends_to_listener(
            loadbalancer_listener=loadbalancer_listeners_id,
            backends=backends_list, owner=user_id)
        print("add_backends_to_listener ret == %s" % (ret))
        Common.check_ret_code(ret, action)
        loadbalancer_backends_id = ret['loadbalancer_backends']
        if loadbalancer_backends_id not in loadbalancer_backends_ids:
            loadbalancer_backends_ids.append(loadbalancer_backends_id)
    print("loadbalancer_backends_ids == %s" % (loadbalancer_backends_ids))
    return loadbalancer_backends_ids
def create_s2_account_vdi_host(conn, user_id, g_vdi_ip_list):
    """Create an NFS S2 account for every VDI host IP and return the account ids.

    For each IP: look up the (first) NFS_GROUP S2 group, then create an
    account bound to that group with rw access.  Failed creations are logged
    and skipped.  Exits the process if no S2 group exists.

    :param conn: API connection object.
    :param user_id: owner account id.
    :param g_vdi_ip_list: single IP or list of VDI host IPs.
    :returns: list of created s2_account_id values (deduplicated).
    """
    print("create_s2_account_vdi_host user_id == %s g_vdi_ip_list == %s" %
          (user_id, g_vdi_ip_list))
    if g_vdi_ip_list and not isinstance(g_vdi_ip_list, list):
        g_vdi_ip_list = [g_vdi_ip_list]
    print("g_vdi_ip_list == %s" % (g_vdi_ip_list))
    s2_account_id_list = []
    for vdi_ip in g_vdi_ip_list:
        print("vdi_ip == %s" % (vdi_ip))
        # DescribeS2Groups
        action = const.ACTION_DESCRIBE_S2_GROUPS
        print("action == %s" % (action))
        ret = conn.describe_s2_groups(owner=user_id, offset=0, limit=1,
                                      verbose=1, group_types=['NFS_GROUP'])
        Common.check_ret_code(ret, action)
        # get s2_group_id (limit=1, so at most one group is returned)
        s2_group_set = ret['s2_group_set']
        if s2_group_set is None or len(s2_group_set) == 0:
            print("describe_s2_groups s2_group_set is None")
            exit(-1)
        for s2_group in s2_group_set:
            s2_group_id = s2_group.get("group_id")
            print("s2_group_id == %s" % (s2_group_id))
        # CreateS2Account
        action = const.ACTION_CREATE_S2_ACCOUNT
        print("action == %s" % (action))
        s2_groups_list = [{"group_id": s2_group_id, "rw_flag": "rw"}]
        print("s2_groups_list == %s" % (s2_groups_list))
        ret = conn.create_s2_account(
            owner=user_id, account_name='vdi-portal-account',
            account_type='NFS', nfs_ipaddr=vdi_ip, s2_group=s2_group_id,
            opt_parameters='squash=no_root_squash,sync=sync',
            s2_groups=s2_groups_list)
        ret_code = ret.get("ret_code")
        if ret_code != 0:
            # Best-effort: log and move on to the next IP.
            print("%s failed" % (action))
            continue
        # get s2_account_id
        s2_account_id = ret.get("s2_account_id")
        if s2_account_id not in s2_account_id_list:
            s2_account_id_list.append(s2_account_id)
    print("s2_account_id_list == %s" % (s2_account_id_list))
    # BUG FIX: the original returned None, discarding the collected account
    # ids; return the list so callers can use it (callers that ignored the
    # None return are unaffected).
    return s2_account_id_list
def leave_vxnet(conn, user_id, instance_id):
    """Detach instance(s) from their vxnet and wait for the job.

    Two-phase flow: DescribeInstances to discover the vxnet currently
    attached (the last vxnet of the last instance wins), then LeaveVxnet and
    poll the job for up to 300 seconds.

    :param conn: API connection object.
    :param user_id: owner account id.
    :param instance_id: single id or list of instance ids.
    :returns: the vxnet id that was left, or None when no vxnet was attached.
    """
    print("leave_vxnet user_id == %s instance_id == %s" %(user_id,instance_id))
    if instance_id and not isinstance(instance_id, list):
        instance_id = [instance_id]
    vxnet_id = None
    # DescribeInstances
    action = const.ACTION_DESCRIBE_INSTANCES
    print("action == %s" % (action))
    ret = conn.describe_instances(instances=instance_id,owner=user_id,verbose=1)
    print("describe_instances ret == %s" % (ret))
    Common.check_ret_code(ret,action)
    # get vxnet_id
    instance_set = ret['instance_set']
    if instance_set is None or len(instance_set) == 0:
        print("describe_instances instance_set is None")
        exit(-1)
    for instance in instance_set:
        vxnets = instance.get("vxnets")
        for vxnet in vxnets:
            vxnet_id = vxnet.get("vxnet_id")
            print("vxnet_id == %s" % (vxnet_id))
    if not vxnet_id:
        print("describe_instances no vxnet")
        return None
    # LeaveVxnet
    action = const.ACTION_LEAVE_VXNET
    print("action == %s" % (action))
    ret = conn.leave_vxnet(instances=instance_id,vxnet=vxnet_id,owner=user_id)
    print("leave_vxnet ret == %s" % (ret))
    Common.check_ret_code(ret,action)
    # check job status: poll once per second, at most 300 times
    job_id = ret['job_id']
    print("job_id == %s" % (job_id))
    num = 0
    while num < 300:
        num = num + 1
        print("num == %d" % (num))
        time.sleep(1)
        status = Common.get_job_status(conn, job_id)
        if status == "successful":
            print("leave_vxnet successful")
            break
        print("status == %s" % (status))
    return vxnet_id
def create_loadbalancer(conn, user_id, vxnet_id, private_ips):
    """Create the desktop-management load balancer (worker-thread entry).

    Creates a 2-node load balancer in *vxnet_id* (with a specific private IP
    when *private_ips* is given), polls the job up to 300 seconds, and on
    success publishes the id into the module-global g_loadbalancer_id and
    attaches a dated tag to the resource.

    :param conn: API connection object.
    :param user_id: owner account id.
    :param vxnet_id: vxnet the load balancer is created in.
    :param private_ips: optional private IP(s) to assign; falsy means
        let the platform pick.
    """
    print("子线程启动")
    print("create_loadbalancer user_id == %s vxnet_id == %s private_ips == %s" % (user_id,vxnet_id,private_ips))
    global g_loadbalancer_id
    # CreateLoadBalancer — two branches differ only in the private_ip kwarg.
    if not private_ips:
        print("private_ips is None")
        action = const.ACTION_CREATE_LOADBALANCER
        print("action == %s" % (action))
        ret = conn.create_loadbalancer(loadbalancer_name='桌面管理中心',loadbalancer_type=1,node_count=2,vxnet=vxnet_id,mode=1,owner=user_id)
        print("create_loadbalancer ret == %s" % (ret))
        Common.check_ret_code(ret, action)
    else:
        print("private_ips is %s" %(private_ips))
        action = const.ACTION_CREATE_LOADBALANCER
        print("action == %s" % (action))
        ret = conn.create_loadbalancer(loadbalancer_name='桌面管理中心',loadbalancer_type=1,node_count=2,vxnet=vxnet_id,mode=1,owner=user_id,private_ip=private_ips)
        print("create_loadbalancer ret == %s" % (ret))
        Common.check_ret_code(ret, action)
    # check job status: poll once per second, at most 300 times
    job_id = ret['job_id']
    loadbalancer_id = ret['loadbalancer_id']
    print("job_id == %s" % (job_id))
    print("loadbalancer_id == %s" % (loadbalancer_id))
    num = 0
    while num < 300:
        num = num + 1
        print("num == %d" % (num))
        time.sleep(1)
        status = Common.get_job_status(conn,job_id)
        if status == "successful":
            print("create_loadbalancer successful")
            break
        print("status == %s" % (status))
    if status == "successful":
        print("create_loadbalancer loadbalancer successful")
        # Publish the new id for other threads/stages.
        g_loadbalancer_id = loadbalancer_id
        print("g_loadbalancer_id == %s" % (g_loadbalancer_id))
        # attach tags (tag name includes today's date)
        current_time = time.strftime("%Y-%m-%d", time.localtime())
        tag_name = '桌面云负载均衡器 %s' %(current_time)
        Common.attach_tags_to_resource(conn,user_id=user_id,tag_name=tag_name,resource_type='loadbalancer',resource_id=loadbalancer_id)
    print("子线程结束")
def update_loadbalancers(conn, user_id, loadbalancer_id):
    """Apply pending load balancer changes and persist ip/id (worker-thread entry).

    Runs UpdateLoadBalancers, polls the job for up to 300 seconds and, on
    success, writes the load balancer IP to /opt/loadbalancer_ip_conf and the
    first id to /opt/loadbalancer_id_conf.

    :param conn: API connection object.
    :param user_id: owner account id.
    :param loadbalancer_id: single id or list of load balancer ids.
    """
    print("子线程启动")
    print("update_loadbalancers user_id == %s loadbalancer_id == %s" %
          (user_id, loadbalancer_id))
    if loadbalancer_id and not isinstance(loadbalancer_id, list):
        loadbalancer_id = [loadbalancer_id]
    print("loadbalancer_id == %s" % (loadbalancer_id))
    # UpdateLoadBalancers
    action = const.ACTION_UPDATE_LOADBALANCERS
    print("action == %s" % (action))
    ret = conn.update_loadbalancers(loadbalancers=loadbalancer_id, owner=user_id)
    print("update_loadbalancers ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    # check job status: poll once per second, at most 300 times
    job_id = ret['job_id']
    print("job_id == %s" % (job_id))
    num = 0
    while num < 300:
        num = num + 1
        print("num == %d" % (num))
        time.sleep(1)
        status = Common.get_job_status(conn, job_id)
        if status == "successful":
            print("update_loadbalancers successful")
            break
        print("status == %s" % (status))
    if status == "successful":
        print("update_loadbalancers loadbalancer successful")
        # write loadbalancer_ip to file
        loadbalancer_ip_conf = "/opt/loadbalancer_ip_conf"
        loadbalancer_ip = get_loadbalancer_ip(conn, user_id, loadbalancer_id)
        print("get_loadbalancer_ip loadbalancer_ip == %s" % (loadbalancer_ip))
        if loadbalancer_ip:
            with open(loadbalancer_ip_conf, "w+") as f1:
                f1.write("LOADBALANCER_IP %s" % (loadbalancer_ip))
        # write loadbalancer_id to file
        loadbalancer_id_conf = "/opt/loadbalancer_id_conf"
        with open(loadbalancer_id_conf, "w+") as f1:
            f1.write("LOADBALANCER_ID %s" % (loadbalancer_id[0]))
    print("子线程结束")
def delete_loadbalancer_backends(conn, user_id, loadbalancer_backends_ids):
    """Delete load balancer backends (worker-thread entry); fire-and-forget."""
    print("子线程启动")
    print(
        "delete_loadbalancer_backends user_id == %s loadbalancer_backends_ids == %s"
        % (user_id, loadbalancer_backends_ids))
    if loadbalancer_backends_ids and not isinstance(loadbalancer_backends_ids, list):
        loadbalancer_backends_ids = [loadbalancer_backends_ids]
    print("loadbalancer_backends_ids == %s" % (loadbalancer_backends_ids))
    # DeleteLoadBalancerBackends
    action = const.ACTION_DELETE_LOADBALANCER_BACKENDS
    print("action == %s" % (action))
    ret = conn.delete_loadbalancer_backends(
        loadbalancer_backends=loadbalancer_backends_ids, owner=user_id)
    print("delete_loadbalancer_backends ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    print("子线程结束")
def update_s2_servers(conn, user_id, s2_servers_id):
    """Apply pending S2 server changes and persist the server IP.

    Runs UpdateS2Servers, polls the job for up to 300 seconds and, on
    success, writes the S2 server IP to /opt/s2server_ip_conf.

    :param conn: API connection object.
    :param user_id: owner account id.
    :param s2_servers_id: single id or list of S2 server ids.
    :returns: always None.
    """
    print("update_s2_servers user_id == %s s2_servers_id == %s" %(user_id,s2_servers_id))
    if s2_servers_id and not isinstance(s2_servers_id, list):
        s2_servers_id = [s2_servers_id]
    print("s2_servers_id == %s" % (s2_servers_id))
    # UpdateS2Servers
    action = const.ACTION_UPDATE_S2_SERVERS
    print("action == %s" % (action))
    ret = conn.update_s2_servers(owner=user_id,s2_servers=s2_servers_id)
    print("update_s2_servers ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    job_id = ret['job_id']
    print("job_id == %s" % (job_id))
    # check job status: poll once per second, at most 300 times
    num = 0
    while num < 300:
        num = num + 1
        print("num == %d" % (num))
        time.sleep(1)
        status = Common.get_job_status(conn, job_id)
        if status == "successful":
            print("update_s2_servers successful")
            break
        print("status == %s" % (status))
    # Result is written to file
    if status == "successful":
        print("update_s2_servers s2_servers successful")
        # write s2server_ip to file
        s2server_ip_conf = "/opt/s2server_ip_conf"
        s2server_ip = get_s2server_ip(conn,user_id,s2_servers_id)
        print("get_s2server_ip s2server_ip == %s" %(s2server_ip))
        if s2server_ip:
            with open(s2server_ip_conf, "w+") as f1:
                f1.write("S2SERVER_ADDRESS %s" %(s2server_ip))
    print("子线程结束")
    return None
def get_cache_node_id(conn, user_id, cache_id):
    """Return the id of the last described cache node; exit on empty set."""
    print("get_cache_node_id user_id == %s cache_id == %s" % (user_id, cache_id))
    node_id = None
    # DescribeCacheNodes
    action = const.ACTION_DESCRIBE_CACHE_NODES
    print("action == %s" % (action))
    ret = conn.describe_cache_nodes(owner=user_id, cache=cache_id, verbose=1)
    print("describe_cache_nodes ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    nodes = ret['cache_node_set']
    if not nodes:
        print("describe_cache_nodes cache_node_set is None")
        exit(-1)
    for node in nodes:
        node_id = node.get("cache_node_id")
    print("cache_node_id == %s" % (node_id))
    return node_id
def get_memcached_cluster_private_ip(conn, cluster_id):
    """Return the private IP of the first node of a memcached app cluster.

    Exits the process when the node set comes back empty (limit=1 request).
    """
    print("get_memcached_cluster_private_ip cluster_id == %s" % (cluster_id))
    addr = None
    # DescribeClusterNodes
    action = const.ACTION_DESCRIBE_CLUSTER_NODES
    print("action == %s" % (action))
    ret = conn.describe_cluster_nodes(cluster=cluster_id, verbose=1, limit=1)
    print("describe_cluster_nodes ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    nodes = ret['node_set']
    if not nodes:
        print("describe_cluster_nodes node_set is None")
        exit(-1)
    for node in nodes:
        addr = node.get("private_ip")
    print("private_ip == %s" % (addr))
    return addr
def get_rdb_master_ip(conn, user_id, rdb_id):
    """Return the master_ip of the last described RDB; exit on empty set."""
    print("get_rdb_master_ip user_id == %s rdb_id == %s" % (user_id, rdb_id))
    if rdb_id and not isinstance(rdb_id, list):
        rdb_id = [rdb_id]
    print("rdb_id == %s" % (rdb_id))
    master_addr = None
    # DescribeRDBs
    action = const.ACTION_DESCRIBE_RDBS
    print("action == %s" % (action))
    ret = conn.describe_rdbs(owner=user_id, rdbs=rdb_id, verbose=1)
    print("describe_rdbs ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    rdbs = ret['rdb_set']
    if not rdbs:
        print("describe_rdbs rdb_set is None")
        exit(-1)
    for entry in rdbs:
        master_addr = entry.get("master_ip")
    return master_addr
def get_user_quota_left(resource_type, user_id, conn):
    """Return the remaining quota for *resource_type*, or 0 when unknown.

    Note the parameter order (resource_type, user_id, conn) differs from the
    other helpers in this module.
    """
    print("get_user_quota_left resource_type == %s user_id == %s" %
          (resource_type, user_id))
    if resource_type and not isinstance(resource_type, list):
        resource_type = [resource_type]
    print("resource_type == %s" % (resource_type))
    remaining = 0
    # GetQuotaLeft
    action = const.ACTION_GET_QUOTA_LEFT
    print("action == %s" % (action))
    ret = conn.get_quota_left(resource_types=resource_type, owner=user_id)
    print("get_quota_left ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    quotas = ret['quota_left_set']
    if not quotas:
        print("get_quota_left quota_left_set is None")
        return 0
    for entry in quotas:
        remaining = entry.get("left")
    return remaining
def get_cache_master_ip(conn, user_id, cache_id):
    """Return the private IP of the last node of the described cache(s).

    Exits the process when the cache set comes back empty.
    """
    print("get_cache_master_ip user_id == %s cache_id == %s" % (user_id, cache_id))
    if cache_id and not isinstance(cache_id, list):
        cache_id = [cache_id]
    print("cache_id == %s" % (cache_id))
    addr = None
    # DescribeCaches
    action = const.ACTION_DESCRIBE_CACHES
    print("action == %s" % (action))
    ret = conn.describe_caches(owner=user_id, caches=cache_id, verbose=1)
    print("describe_caches ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    caches = ret['cache_set']
    if not caches:
        print("describe_caches cache_set is None")
        exit(-1)
    for cache in caches:
        for node in cache.get("nodes"):
            addr = node.get("private_ip")
    return addr
def check_private_ip(conn, user_id, private_ips=None):
    """Return True when *private_ips* is not held by any NIC (i.e. available).

    Searches NICs in states "in-use"/"available" for the given IP string;
    an empty result means the IP can be used.

    :param conn: API connection object.
    :param user_id: owner account id.
    :param private_ips: IP (search word) to test; may be None.
    :returns: True when available, False otherwise.
    """
    print("check_private_ip user_id == %s private_ips == %s" % (user_id, private_ips))
    # DescribeNics
    action = const.ACTION_DESCRIBE_NICS
    print("action == %s" % (action))
    ret = conn.describe_nics(offset=0, limit=100, search_word=private_ips,
                             owner=user_id, status=["in-use", "available"])
    print("describe_nics ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    # get total
    # ROBUSTNESS FIX: default a missing 'total_count' to 0 — the original
    # ret.get('total_count') could return None, which makes the "%d" format
    # below raise TypeError.
    total_count = ret.get('total_count', 0) or 0
    print("total_count == %d" % (total_count))
    if not total_count:
        print("total_count is 0")
        print("private_ips:%s is available" % (private_ips))
        return True
    else:
        return False
def deploy_app_version(conn, user_id, vxnet_id, zone_id, app_ids,
                       primary_private_ip, standby_private_ip,
                       instance_class):
    """Deploy an app-center cluster (PostgreSQL or Memcached) and record its
    node details into fixed files under /opt.

    Flow: DescribeApps -> DescribeAppVersions (latest active version) ->
    GetGlobalUniqueId -> DeployAppVersion with a hand-built conf dict, then
    poll the job for up to 1000 seconds; on success, write cluster/node ids
    and IPs to /opt/*_conf files and tag the cluster.

    Runs as a worker thread (the Chinese prints mark thread start/end).

    conn               -- API connection object (project SDK)
    user_id            -- owner of every created resource
    vxnet_id           -- vxnet the cluster is attached to
    zone_id            -- target zone id
    app_ids            -- app id, scalar or one-element list; must equal
                          [const.POSTGRESQL_APP_IDS] or
                          [const.MEMCACHED_APP_IDS]
    primary_private_ip -- optional fixed IP for the primary node; when falsy
                          the cluster auto-assigns private IPs
    standby_private_ip -- optional fixed IP for the PG standby node (only
                          used in the PostgreSQL branch)
    instance_class     -- instance class applied to every node role
    """
    print("子线程启动")
    print("deploy_app_version")
    # Normalise a scalar app id into the list form used below.
    if app_ids and not isinstance(app_ids, list):
        app_ids = [app_ids]
    app_type = ["cluster"]
    status = ["active"]
    print("app_ids == %s" % (app_ids))
    print("primary_private_ip == %s" % (primary_private_ip))
    print("standby_private_ip == %s" % (standby_private_ip))
    print("instance_class == %s" % (instance_class))
    # DescribeApps — sanity check that the app exists (result unused beyond
    # the return-code check).
    action = const.ACTION_DESCRIBE_APPS
    print("action == %s" % (action))
    ret = conn.describe_apps(app=app_ids[0], app_type=app_type)
    print("describe_apps ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    # DescribeAppVersions — fetch the single newest active version.
    action = const.ACTION_DESCRIBE_APP_VERSIONS
    print("action == %s" % (action))
    ret = conn.describe_app_versions(app_ids=app_ids, status=status, limit=1)
    print("describe_app_versions ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    version_set = ret['version_set']
    if version_set is None or len(version_set) == 0:
        print("describe_app_versions version_set is None")
        # NOTE(review): exit() kills the whole process from a worker thread —
        # consider raising instead.
        exit(-1)
    # limit=1 above, so this keeps the single (latest) version id; with more
    # entries the last one would win.
    for version in version_set:
        version_id = version.get("version_id")
    # GetGlobalUniqueId — uuid required by the cluster conf below.
    action = const.ACTION_GET_GLOBAL_UNIQUE_ID
    print("action == %s" % (action))
    ret = conn.get_global_unique_id(owner=user_id, zone=zone_id)
    print("get_global_unique_id ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    global_uuid = ret['uuid']
    # DeployAppVersion — build the conf dict per app type.
    action = const.ACTION_DEPLOY_APP_VERSION
    print("action == %s" % (action))
    print("app_ids == %s version_id == %s" % (app_ids, version_id))
    if app_ids == [const.POSTGRESQL_APP_IDS]:
        if primary_private_ip:
            print(
                "primary_private_ip is not None.The cluster uses the specified private IP"
            )
            # Format expected by the API, e.g.
            # "private_ips":"192.168.15.100,192.168.15.101"
            private_ips_list = primary_private_ip + "," + standby_private_ip
            print("private_ips_list == %s" % (private_ips_list))
            conf = {
                "cluster": {
                    "name": "数据库服务",
                    "description": "postgresql",
                    "auto_backup_time": "-1",
                    "pg": {
                        "cpu": 2,
                        "memory": 4096,
                        "instance_class": instance_class,
                        "volume_size": 50
                    },
                    # Read-replica role, disabled (count 0).
                    "ri": {
                        "cpu": 2,
                        "memory": 4096,
                        "instance_class": instance_class,
                        "count": 0,
                        "volume_size": 20
                    },
                    # pgpool role, disabled (count 0).
                    "pgpool": {
                        "cpu": 2,
                        "memory": 4096,
                        "instance_class": instance_class,
                        "count": 0,
                        "volume_size": 20
                    },
                    "vxnet": vxnet_id,
                    "global_uuid": global_uuid
                },
                "version": version_id,
                "resource_group": "Standard",
                "zone": zone_id,
                "private_ips": [{
                    "role": "pg",
                    "private_ips": private_ips_list
                }],
                "env": {
                    "db_name": "test_vdi",
                    "user_name": "yunify",
                    "password": "******",
                    "pg_version": "11",
                    "serialize_accept": "off",
                    "pgpool_port": 9999,
                    "child_life_time": 300,
                    "connection_life_time": 600,
                    "client_idle_limit": 0,
                    "max_pool": 2,
                    "num_init_children": 100,
                    "sync_stream_repl": "Yes",
                    "load_read_request_to_primary": "Yes",
                    "auto_failover": "Yes",
                    "max_connections": "auto-optimized-conns",
                    "wal_buffers": "8MB",
                    "work_mem": "4MB",
                    "maintenance_work_mem": "64MB",
                    "effective_cache_size": "4GB",
                    "wal_keep_segments": 256,
                    "checkpoint_timeout": "5min",
                    "autovacuum": "on",
                    "vacuum_cost_delay": 0,
                    "autovacuum_naptime": "1min",
                    "vacuum_cost_limit": 200,
                    "bgwriter_delay": 200,
                    "bgwriter_lru_multiplier": 2,
                    "wal_writer_delay": 200,
                    "fsync": "on",
                    "commit_delay": 0,
                    "commit_siblings": 5,
                    "enable_bitmapscan": "on",
                    "enable_seqscan": "on",
                    "full_page_writes": "on",
                    "log_min_messages": "warning",
                    "deadlock_timeout": 1,
                    "log_lock_waits": "off",
                    "log_min_duration_statement": -1,
                    "temp_buffers": "8MB",
                    "max_prepared_transactions": 0,
                    "max_wal_senders": 10,
                    "bgwriter_lru_maxpages": 100,
                    "log_statement": "none",
                    "shared_preload_libraries": "passwordcheck",
                    "wal_level": "replica",
                    "shared_buffers": "auto-optimized-sharedbuffers",
                    "jit": "off"
                },
                "toggle_passwd": "on"
            }
        else:
            print(
                "primary_private_ip is None.The cluster uses automatically assigns private IP"
            )
            # Same conf as above minus the "private_ips" key — the platform
            # assigns addresses itself.
            conf = {
                "cluster": {
                    "name": "数据库服务",
                    "description": "postgresql",
                    "auto_backup_time": "-1",
                    "pg": {
                        "cpu": 2,
                        "memory": 4096,
                        "instance_class": instance_class,
                        "volume_size": 50
                    },
                    "ri": {
                        "cpu": 2,
                        "memory": 4096,
                        "instance_class": instance_class,
                        "count": 0,
                        "volume_size": 20
                    },
                    "pgpool": {
                        "cpu": 2,
                        "memory": 4096,
                        "instance_class": instance_class,
                        "count": 0,
                        "volume_size": 20
                    },
                    "vxnet": vxnet_id,
                    "global_uuid": global_uuid
                },
                "version": version_id,
                "resource_group": "Standard",
                "zone": zone_id,
                "env": {
                    "db_name": "test_vdi",
                    "user_name": "yunify",
                    "password": "******",
                    "pg_version": "11",
                    "serialize_accept": "off",
                    "pgpool_port": 9999,
                    "child_life_time": 300,
                    "connection_life_time": 600,
                    "client_idle_limit": 0,
                    "max_pool": 2,
                    "num_init_children": 100,
                    "sync_stream_repl": "Yes",
                    "load_read_request_to_primary": "Yes",
                    "auto_failover": "Yes",
                    "max_connections": "auto-optimized-conns",
                    "wal_buffers": "8MB",
                    "work_mem": "4MB",
                    "maintenance_work_mem": "64MB",
                    "effective_cache_size": "4GB",
                    "wal_keep_segments": 256,
                    "checkpoint_timeout": "5min",
                    "autovacuum": "on",
                    "vacuum_cost_delay": 0,
                    "autovacuum_naptime": "1min",
                    "vacuum_cost_limit": 200,
                    "bgwriter_delay": 200,
                    "bgwriter_lru_multiplier": 2,
                    "wal_writer_delay": 200,
                    "fsync": "on",
                    "commit_delay": 0,
                    "commit_siblings": 5,
                    "enable_bitmapscan": "on",
                    "enable_seqscan": "on",
                    "full_page_writes": "on",
                    "log_min_messages": "warning",
                    "deadlock_timeout": 1,
                    "log_lock_waits": "off",
                    "log_min_duration_statement": -1,
                    "temp_buffers": "8MB",
                    "max_prepared_transactions": 0,
                    "max_wal_senders": 10,
                    "bgwriter_lru_maxpages": 100,
                    "log_statement": "none",
                    "shared_preload_libraries": "passwordcheck",
                    "wal_level": "replica",
                    "shared_buffers": "auto-optimized-sharedbuffers",
                    "jit": "off"
                },
                "toggle_passwd": "on"
            }
    elif app_ids == [const.MEMCACHED_APP_IDS]:
        if primary_private_ip:
            print(
                "primary_private_ip is not None.The cluster uses the specified private IP"
            )
            # Single address, e.g. "private_ips":"192.168.15.102"
            private_ips_list = primary_private_ip
            print("private_ips_list == %s" % (private_ips_list))
            conf = {
                "cluster": {
                    "name": "缓存服务",
                    "description": "memcached",
                    "memcached_node": {
                        "cpu": 1,
                        "memory": 1024,
                        "instance_class": instance_class,
                        "count": 1
                    },
                    "vxnet": vxnet_id,
                    "global_uuid": global_uuid
                },
                "version": version_id,
                "zone": zone_id,
                "private_ips": [{
                    "role": "memcached_node",
                    "private_ips": private_ips_list
                }],
                # memcached command-line flags passed through as env.
                "env": {
                    "-p": 11211,
                    "-U": 11211,
                    "-c": 65000,
                    "-m": 716,
                    "-n": 48,
                    "-f": 1.25,
                    "-t": 1,
                    "-M": 0
                }
            }
        else:
            print(
                "primary_private_ip is None.The cluster uses automatically assigns private IP"
            )
            conf = {
                "cluster": {
                    "name": "缓存服务",
                    "description": "memcached",
                    "memcached_node": {
                        "cpu": 1,
                        "memory": 1024,
                        "instance_class": instance_class,
                        "count": 1
                    },
                    "vxnet": vxnet_id,
                    "global_uuid": global_uuid
                },
                "version": version_id,
                "zone": zone_id,
                "env": {
                    "-p": 11211,
                    "-U": 11211,
                    "-c": 65000,
                    "-m": 716,
                    "-n": 48,
                    "-f": 1.25,
                    "-t": 1,
                    "-M": 0
                }
            }
    else:
        # NOTE(review): when app_ids matches neither constant, conf is never
        # assigned and json.dumps(conf) below raises NameError — confirm the
        # caller validates app_ids beforehand.
        print("app_ids %s is invalid" % (app_ids))
    # Serialise the conf dict to the JSON string the API expects.
    jconf = json.dumps(conf)
    print("jconf == %s" % (jconf))
    ret = conn.deploy_app_version(app_type=app_type,
                                  app_id=app_ids,
                                  version_id=version_id,
                                  conf=jconf,
                                  charge_mode="elastic",
                                  debug=0,
                                  owner=user_id)
    print("deploy_app_version ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    cluster_id = ret['cluster_id']
    job_id = ret['job_id']
    print("cluster_id == %s" % (cluster_id))
    print("job_id == %s" % (job_id))
    # Poll job status once per second, up to 1000 seconds.
    num = 0
    while num < 1000:
        num = num + 1
        print("num == %d" % (num))
        time.sleep(1)
        status = Common.get_job_status(conn, job_id)
        if status == "successful":
            print("deploy_app_version successful")
            break
        print("status == %s" % (status))
    # Record node IPs/ids through files under /opt.
    if app_ids == [const.POSTGRESQL_APP_IDS]:
        if status == "successful":
            print("deploy_app_version postresql successful")
            # create_rdb ok
            create_rdb_status = "True"
            # Write create_rdb_status to file.
            create_rdb_status_conf = "/opt/create_rdb_status_conf"
            with open(create_rdb_status_conf, "w+") as f:
                f.write("CREATE_RDB_STATUS %s" % (create_rdb_status))
            # Write cluster_id to file.
            rdb_cluster_id_conf = "/opt/rdb_cluster_id_conf"
            with open(rdb_cluster_id_conf, "w+") as f:
                f.write("RDB_CLUSTER_ID %s" % (cluster_id))
            # Write cluster_id to file (second copy under a different key).
            rdb_id_conf = "/opt/rdb_id_conf"
            with open(rdb_id_conf, "w+") as f:
                f.write("RDB_ID %s" % (cluster_id))
            # Write rdb_master_ip to file.
            rdb_master_ip_conf = "/opt/rdb_master_ip_conf"
            rdb_master_ip = get_postgresql_cluster_primary_ip(conn, cluster_id)
            print("get_postgresql_cluster_primary_ip == %s" % (rdb_master_ip))
            if rdb_master_ip:
                with open(rdb_master_ip_conf, "w+") as f:
                    f.write("POSTGRESQL_ADDRESS %s" % (rdb_master_ip))
            # Write rdb_topslave_ip to file.
            rdb_topslave_ip_conf = "/opt/rdb_topslave_ip_conf"
            rdb_topslave_ip = get_postgresql_cluster_standby_ip(
                conn, cluster_id)
            print("get_postgresql_cluster_standby_ip rdb_topslave_ip == %s" %
                  (rdb_topslave_ip))
            if rdb_topslave_ip:
                with open(rdb_topslave_ip_conf, "w+") as f:
                    f.write("RDB_TOPSLAVE_IP %s" % (rdb_topslave_ip))
            # Write master_rdb_instance_id to file.
            master_rdb_instance_id_conf = "/opt/master_rdb_instance_id_conf"
            master_rdb_instance_id = get_postgresql_cluster_master_rdb_instance_id(
                conn, cluster_id)
            print(
                "get_postgresql_cluster_master_rdb_instance_id master_rdb_instance_id == %s"
                % (master_rdb_instance_id))
            if master_rdb_instance_id:
                with open(master_rdb_instance_id_conf, "w+") as f:
                    f.write("MASTER_RDB_INSTANCE_ID %s" %
                            (master_rdb_instance_id))
            # Write topslave_rdb_instance_id to file.
            topslave_rdb_instance_id_conf = "/opt/topslave_rdb_instance_id_conf"
            topslave_rdb_instance_id = get_postgresql_cluster_topslave_rdb_instance_id(
                conn, cluster_id)
            print(
                "get_postgresql_cluster_topslave_rdb_instance_id topslave_rdb_instance_id == %s"
                % (topslave_rdb_instance_id))
            if topslave_rdb_instance_id:
                with open(topslave_rdb_instance_id_conf, "w+") as f:
                    f.write("TOPSLAVE_RDB_INSTANCE_ID %s" %
                            (topslave_rdb_instance_id))
            # Attach a dated tag to the new cluster.
            current_time = time.strftime("%Y-%m-%d", time.localtime())
            tag_name = '桌面云数据库 %s' % (current_time)
            Common.attach_tags_to_resource(conn,
                                           user_id=user_id,
                                           tag_name=tag_name,
                                           resource_type='cluster',
                                           resource_id=cluster_id)
        else:
            print("deploy_app_version postresql timeout")
            create_rdb_status = "False"
            # Write create_rdb_status to file.
            create_rdb_status_conf = "/opt/create_rdb_status_conf"
            with open(create_rdb_status_conf, "w+") as f1:
                f1.write("CREATE_RDB_STATUS %s" % (create_rdb_status))
    elif app_ids == [const.MEMCACHED_APP_IDS]:
        if status == "successful":
            print("deploy_app_version memcached successful")
            # create_memcached ok
            create_memcached_status = "True"
            create_memcached_status_conf = "/opt/create_memcached_status_conf"
            with open(create_memcached_status_conf, "w+") as f:
                f.write("CREATE_MEMCACHED_STATUS %s" %
                        (create_memcached_status))
            # Write cluster_id to file.
            cache_cluster_id_conf = "/opt/cache_cluster_id_conf"
            with open(cache_cluster_id_conf, "w+") as f:
                f.write("CACHE_CLUSTER_ID %s" % (cluster_id))
            # Write cache_id to file.
            cache_id_conf = "/opt/cache_id_conf"
            with open(cache_id_conf, "w+") as f:
                f.write("CACHE_ID %s" % (cluster_id))
            # Write cache_master_ip to file.
            cache_master_ip_conf = "/opt/cache_master_ip_conf"
            cache_master_ip = get_memcached_cluster_private_ip(
                conn, cluster_id)
            print("get_memcached_cluster_private_ip cache_master_ip == %s" %
                  (cache_master_ip))
            if cache_master_ip:
                with open(cache_master_ip_conf, "w+") as f:
                    f.write("MEMCACHED_ADDRESS %s" % (cache_master_ip))
            # Write cache_node_id to file.
            cache_node_id_conf = "/opt/cache_node_id_conf"
            cache_node_id = get_memcached_cluster_cache_node_id(
                conn, cluster_id)
            print("get_memcached_cluster_cache_node_id cache_node_id == %s" %
                  (cache_node_id))
            if cache_node_id:
                with open(cache_node_id_conf, "w+") as f:
                    f.write("CACHE_NODE_ID %s" % (cache_node_id))
            # Attach a dated tag to the new cluster.
            current_time = time.strftime("%Y-%m-%d", time.localtime())
            tag_name = '桌面云缓存 %s' % (current_time)
            Common.attach_tags_to_resource(conn,
                                           user_id=user_id,
                                           tag_name=tag_name,
                                           resource_type='cluster',
                                           resource_id=cluster_id)
        else:
            print("deploy_app_version memcached timeout")
            create_memcached_status = "False"
            # Write create_memcached_status to file.
            create_memcached_status_conf = "/opt/create_memcached_status_conf"
            with open(create_memcached_status_conf, "w+") as f1:
                f1.write("CREATE_MEMCACHED_STATUS %s" %
                         (create_memcached_status))
    else:
        print("app_ids %s doesn't support" % (app_ids))
        exit(-1)
    print("子线程结束")