def delete_s2server(conn,user_id,s2_servers_id):
    """Delete the given S2 (vNAS) servers and poll the async job until it
    succeeds or ~300 seconds elapse.

    Args:
        conn: IaaS API connection object.
        user_id: Owner of the S2 servers.
        s2_servers_id: A single server id or a list of ids.
    """
    print("子线程启动")
    print("delete_s2server user_id == %s s2_servers_id == %s" % (user_id, s2_servers_id))
    # The API expects a list of ids; wrap a lone id.
    if s2_servers_id and not isinstance(s2_servers_id, list):
        s2_servers_id = [s2_servers_id]
    print("s2_servers_id == %s" % (s2_servers_id))
    # Issue the DeleteS2Servers request.
    action = const.ACTION_DELETE_S2_SERVERS
    print("action == %s" % (action))
    ret = conn.delete_s2_servers(s2_servers=s2_servers_id, owner=user_id)
    print("delete_s2_servers ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    # Poll the job once per second, at most 300 attempts.
    job_id = ret['job_id']
    print("job_id == %s" % (job_id))
    status = None
    for attempt in range(1, 301):
        print("num == %d" % (attempt))
        time.sleep(1)
        status = Common.get_job_status(conn, job_id)
        if status == "successful":
            print("delete_s2_servers successful")
            break
        print("status == %s" % (status))
    if status == "successful":
        print("delete_s2_servers s2_servers successful")
    print("子线程结束")
def delete_loadbalancer(conn,user_id,loadbalancer_id):
    """Delete the given load balancers and poll the async job until it
    succeeds or ~300 seconds elapse.

    Args:
        conn: IaaS API connection object.
        user_id: Owner of the load balancers.
        loadbalancer_id: A single load balancer id or a list of ids.
    """
    print("子线程启动")
    print("delete_loadbalancer user_id == %s loadbalancer_id == %s" % (user_id,loadbalancer_id))
    # The API expects a list of ids; wrap a lone id.
    if loadbalancer_id and not isinstance(loadbalancer_id, list):
        loadbalancer_id = [loadbalancer_id]
    print("loadbalancer_id == %s" %(loadbalancer_id))
    # DeleteLoadBalancers
    # BUG FIX: this was const.ACTION_CREATE_LOADBALANCER (copy-paste from
    # create_loadbalancer), so check_ret_code reported the wrong action for
    # this delete request.
    action = const.ACTION_DELETE_LOADBALANCERS
    print("action == %s" % (action))
    ret = conn.delete_loadbalancers(loadbalancers=loadbalancer_id,owner=user_id)
    print("delete_loadbalancers ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    # Poll the job once per second, at most 300 attempts.
    job_id = ret['job_id']
    print("job_id == %s" % (job_id))
    num = 0
    while num < 300:
        num = num + 1
        print("num == %d" % (num))
        time.sleep(1)
        status = Common.get_job_status(conn,job_id)
        if status == "successful":
            print("delete_loadbalancers successful")
            break
        print("status == %s" % (status))
    if status == "successful":
        print("delete_loadbalancers loadbalancer successful")
    print("子线程结束")
def terminate_instances(conn,user_id,instance_id):
    """Terminate the given instances (direct cease) and poll the async job
    until it succeeds or ~300 seconds elapse."""
    print("terminate_instances user_id == %s instance_id == %s" % (user_id, instance_id))
    # The API expects a list of ids; wrap a lone id.
    if instance_id and not isinstance(instance_id, list):
        instance_id = [instance_id]
    # Issue the TerminateInstances request.
    action = const.ACTION_TERMINATE_INSTANCES
    print("action == %s" % (action))
    ret = conn.terminate_instances(instances=instance_id, owner=user_id, direct_cease=1)
    print("terminate_instances ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    # Poll the job once per second, at most 300 attempts.
    job_id = ret['job_id']
    print("job_id == %s" % (job_id))
    for attempt in range(1, 301):
        print("num == %d" % (attempt))
        time.sleep(1)
        status = Common.get_job_status(conn, job_id)
        if status == "successful":
            print("terminate_instances successful")
            break
        print("status == %s" % (status))
def create_new_volume(conn,user_id,volume_type):
    """Create one 200 GB volume named "vdi-portal-nas", wait for the job,
    and return the 'volumes' value from the create response."""
    print("create_new_volume user_id == %s volume_type == %s" %(user_id,volume_type))
    # Issue the CreateVolumes request.
    action = const.ACTION_CREATE_VOLUMES
    print("action == %s" % (action))
    ret = conn.create_volumes(owner=user_id,
                              volume_name="vdi-portal-nas",
                              volume_type=volume_type,
                              count=1,
                              size=200,
                              target_user=user_id)
    print("create_volumes ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    job_id = ret['job_id']
    volume_id = ret['volumes']
    print("job_id == %s" % (job_id))
    print("volume_id == %s" % (volume_id))
    # Poll the job once per second, at most 300 attempts.
    for attempt in range(1, 301):
        print("num == %d" % (attempt))
        time.sleep(1)
        status = Common.get_job_status(conn, job_id)
        if status == "successful":
            print("create_volumes successful")
            break
        print("status == %s" % (status))
    return volume_id
def leave_vxnet(conn,user_id,instance_id):
    """Detach the given instances from their current vxnet.

    Returns the vxnet id that was left, or None when the instances are not
    attached to any vxnet. Exits the process if DescribeInstances returns an
    empty instance set.
    """
    print("leave_vxnet user_id == %s instance_id == %s" %(user_id,instance_id))
    # The API expects a list of ids; wrap a lone id.
    if instance_id and not isinstance(instance_id, list):
        instance_id = [instance_id]
    # Look the instances up to discover which vxnet they belong to.
    action = const.ACTION_DESCRIBE_INSTANCES
    print("action == %s" % (action))
    ret = conn.describe_instances(instances=instance_id, owner=user_id, verbose=1)
    print("describe_instances ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    instance_set = ret['instance_set']
    if not instance_set:
        print("describe_instances instance_set is None")
        exit(-1)
    # Keep the last vxnet id seen across all instances (same as original).
    vxnet_id = None
    for instance in instance_set:
        for vxnet in instance.get("vxnets"):
            vxnet_id = vxnet.get("vxnet_id")
            print("vxnet_id == %s" % (vxnet_id))
    if not vxnet_id:
        print("describe_instances no vxnet")
        return None
    # Issue the LeaveVxnet request.
    action = const.ACTION_LEAVE_VXNET
    print("action == %s" % (action))
    ret = conn.leave_vxnet(instances=instance_id, vxnet=vxnet_id, owner=user_id)
    print("leave_vxnet ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    # Poll the job once per second, at most 300 attempts.
    job_id = ret['job_id']
    print("job_id == %s" % (job_id))
    for attempt in range(1, 301):
        print("num == %d" % (attempt))
        time.sleep(1)
        status = Common.get_job_status(conn, job_id)
        if status == "successful":
            print("leave_vxnet successful")
            break
        print("status == %s" % (status))
    return vxnet_id
def create_loadbalancer(conn,user_id,vxnet_id,private_ips):
    """Create the desktop-management-center load balancer, wait for the
    async job, then publish the id via g_loadbalancer_id and tag it.

    When private_ips is given it is passed through as the LB's private_ip.
    """
    print("子线程启动")
    print("create_loadbalancer user_id == %s vxnet_id == %s private_ips == %s" % (user_id,vxnet_id,private_ips))
    global g_loadbalancer_id
    # Build the CreateLoadBalancer arguments; private_ip only when supplied.
    request_args = dict(loadbalancer_name='桌面管理中心',
                        loadbalancer_type=1,
                        node_count=2,
                        vxnet=vxnet_id,
                        mode=1,
                        owner=user_id)
    if not private_ips:
        print("private_ips is None")
    else:
        print("private_ips is %s" %(private_ips))
        request_args['private_ip'] = private_ips
    action = const.ACTION_CREATE_LOADBALANCER
    print("action == %s" % (action))
    ret = conn.create_loadbalancer(**request_args)
    print("create_loadbalancer ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    job_id = ret['job_id']
    loadbalancer_id = ret['loadbalancer_id']
    print("job_id == %s" % (job_id))
    print("loadbalancer_id == %s" % (loadbalancer_id))
    # Poll the job once per second, at most 300 attempts.
    for attempt in range(1, 301):
        print("num == %d" % (attempt))
        time.sleep(1)
        status = Common.get_job_status(conn,job_id)
        if status == "successful":
            print("create_loadbalancer successful")
            break
        print("status == %s" % (status))
    if status == "successful":
        print("create_loadbalancer loadbalancer successful")
        g_loadbalancer_id = loadbalancer_id
        print("g_loadbalancer_id == %s" % (g_loadbalancer_id))
        # Tag the new load balancer with today's date.
        current_time = time.strftime("%Y-%m-%d", time.localtime())
        tag_name = '桌面云负载均衡器 %s' %(current_time)
        Common.attach_tags_to_resource(conn,user_id=user_id,tag_name=tag_name,resource_type='loadbalancer',resource_id=loadbalancer_id)
    print("子线程结束")
def update_loadbalancers(conn, user_id, loadbalancer_id):
    """Apply pending changes to the given load balancers, wait for the async
    job, then persist the LB ip and id into files under /opt."""
    print("子线程启动")
    print("update_loadbalancers user_id == %s loadbalancer_id == %s" % (user_id, loadbalancer_id))
    # The API expects a list of ids; wrap a lone id.
    if loadbalancer_id and not isinstance(loadbalancer_id, list):
        loadbalancer_id = [loadbalancer_id]
    print("loadbalancer_id == %s" % (loadbalancer_id))
    # Issue the UpdateLoadBalancers request.
    action = const.ACTION_UPDATE_LOADBALANCERS
    print("action == %s" % (action))
    ret = conn.update_loadbalancers(loadbalancers=loadbalancer_id, owner=user_id)
    print("update_loadbalancers ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    # Poll the job once per second, at most 300 attempts.
    job_id = ret['job_id']
    print("job_id == %s" % (job_id))
    for attempt in range(1, 301):
        print("num == %d" % (attempt))
        time.sleep(1)
        status = Common.get_job_status(conn, job_id)
        if status == "successful":
            print("update_loadbalancers successful")
            break
        print("status == %s" % (status))
    if status == "successful":
        print("update_loadbalancers loadbalancer successful")
        # Write the load balancer ip to its config file.
        loadbalancer_ip = get_loadbalancer_ip(conn, user_id, loadbalancer_id)
        print("get_loadbalancer_ip loadbalancer_ip == %s" % (loadbalancer_ip))
        if loadbalancer_ip:
            with open("/opt/loadbalancer_ip_conf", "w+") as fh:
                fh.write("LOADBALANCER_IP %s" % (loadbalancer_ip))
        # Write the load balancer id to its config file.
        with open("/opt/loadbalancer_id_conf", "w+") as fh:
            fh.write("LOADBALANCER_ID %s" % (loadbalancer_id[0]))
    print("子线程结束")
def update_s2_servers(conn,user_id,s2_servers_id):
    """Apply pending changes to the given S2 servers, wait for the async
    job, then persist the server address into a file under /opt.

    Always returns None.
    """
    print("update_s2_servers user_id == %s s2_servers_id == %s" %(user_id,s2_servers_id))
    # The API expects a list of ids; wrap a lone id.
    if s2_servers_id and not isinstance(s2_servers_id, list):
        s2_servers_id = [s2_servers_id]
    print("s2_servers_id == %s" % (s2_servers_id))
    # Issue the UpdateS2Servers request.
    action = const.ACTION_UPDATE_S2_SERVERS
    print("action == %s" % (action))
    ret = conn.update_s2_servers(owner=user_id, s2_servers=s2_servers_id)
    print("update_s2_servers ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    job_id = ret['job_id']
    print("job_id == %s" % (job_id))
    # Poll the job once per second, at most 300 attempts.
    for attempt in range(1, 301):
        print("num == %d" % (attempt))
        time.sleep(1)
        status = Common.get_job_status(conn, job_id)
        if status == "successful":
            print("update_s2_servers successful")
            break
        print("status == %s" % (status))
    # On success, record the server address for later provisioning steps.
    if status == "successful":
        print("update_s2_servers s2_servers successful")
        s2server_ip = get_s2server_ip(conn,user_id,s2_servers_id)
        print("get_s2server_ip s2server_ip == %s" %(s2server_ip))
        if s2server_ip:
            with open("/opt/s2server_ip_conf", "w+") as fh:
                fh.write("S2SERVER_ADDRESS %s" %(s2server_ip))
    print("子线程结束")
    return None
def deploy_app_version(conn, user_id, vxnet_id, zone_id, app_ids, primary_private_ip, standby_private_ip, instance_class):
    """Deploy the latest active version of a cluster app (PostgreSQL or
    Memcached), wait for the job, then record cluster details in /opt files
    and tag the cluster.

    Args:
        conn: IaaS API connection object.
        user_id: Owner of the deployed cluster.
        vxnet_id: Vxnet the cluster nodes join.
        zone_id: Deployment zone.
        app_ids: A single app id or a list; must equal
            [const.POSTGRESQL_APP_IDS] or [const.MEMCACHED_APP_IDS].
        primary_private_ip: Optional fixed private IP for the primary node.
        standby_private_ip: Optional fixed private IP for the standby node
            (only used for the PostgreSQL app).
        instance_class: Instance class for all cluster node roles.
    """
    print("子线程启动")
    print("deploy_app_version")
    # The API expects a list of app ids; wrap a lone id.
    if app_ids and not isinstance(app_ids, list):
        app_ids = [app_ids]
    app_type = ["cluster"]
    status = ["active"]
    print("app_ids == %s" % (app_ids))
    print("primary_private_ip == %s" % (primary_private_ip))
    print("standby_private_ip == %s" % (standby_private_ip))
    print("instance_class == %s" % (instance_class))
    # DescribeApps: sanity-check the app exists (only return code is used).
    action = const.ACTION_DESCRIBE_APPS
    print("action == %s" % (action))
    ret = conn.describe_apps(app=app_ids[0], app_type=app_type)
    print("describe_apps ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    # DescribeAppVersions: fetch the single latest active version.
    action = const.ACTION_DESCRIBE_APP_VERSIONS
    print("action == %s" % (action))
    ret = conn.describe_app_versions(app_ids=app_ids, status=status, limit=1)
    print("describe_app_versions ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    version_set = ret['version_set']
    if version_set is None or len(version_set) == 0:
        print("describe_app_versions version_set is None")
        exit(-1)
    # With limit=1 this loop keeps the (only) version's id.
    for version in version_set:
        version_id = version.get("version_id")
    # GetGlobalUniqueId: obtain a uuid used inside the cluster config.
    action = const.ACTION_GET_GLOBAL_UNIQUE_ID
    print("action == %s" % (action))
    ret = conn.get_global_unique_id(owner=user_id, zone=zone_id)
    print("get_global_unique_id ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    global_uuid = ret['uuid']
    # DeployAppVersion: build the per-app cluster configuration.
    action = const.ACTION_DEPLOY_APP_VERSION
    print("action == %s" % (action))
    print("app_ids == %s version_id == %s" % (app_ids, version_id))
    if app_ids == [const.POSTGRESQL_APP_IDS]:
        if primary_private_ip:
            print(
                "primary_private_ip is not None.The cluster uses the specified private IP"
            )
            # Comma-joined pair, e.g. "192.168.15.100,192.168.15.101".
            private_ips_list = primary_private_ip + "," + standby_private_ip
            print("private_ips_list == %s" % (private_ips_list))
            conf = {
                "cluster": {
                    "name": "数据库服务",
                    "description": "postgresql",
                    "auto_backup_time": "-1",
                    "pg": {
                        "cpu": 2,
                        "memory": 4096,
                        "instance_class": instance_class,
                        "volume_size": 50
                    },
                    "ri": {
                        "cpu": 2,
                        "memory": 4096,
                        "instance_class": instance_class,
                        "count": 0,
                        "volume_size": 20
                    },
                    "pgpool": {
                        "cpu": 2,
                        "memory": 4096,
                        "instance_class": instance_class,
                        "count": 0,
                        "volume_size": 20
                    },
                    "vxnet": vxnet_id,
                    "global_uuid": global_uuid
                },
                "version": version_id,
                "resource_group": "Standard",
                "zone": zone_id,
                # Pin the pg role nodes to the caller-supplied IPs.
                "private_ips": [{
                    "role": "pg",
                    "private_ips": private_ips_list
                }],
                "env": {
                    "db_name": "test_vdi",
                    "user_name": "yunify",
                    "password": "******",
                    "pg_version": "11",
                    "serialize_accept": "off",
                    "pgpool_port": 9999,
                    "child_life_time": 300,
                    "connection_life_time": 600,
                    "client_idle_limit": 0,
                    "max_pool": 2,
                    "num_init_children": 100,
                    "sync_stream_repl": "Yes",
                    "load_read_request_to_primary": "Yes",
                    "auto_failover": "Yes",
                    "max_connections": "auto-optimized-conns",
                    "wal_buffers": "8MB",
                    "work_mem": "4MB",
                    "maintenance_work_mem": "64MB",
                    "effective_cache_size": "4GB",
                    "wal_keep_segments": 256,
                    "checkpoint_timeout": "5min",
                    "autovacuum": "on",
                    "vacuum_cost_delay": 0,
                    "autovacuum_naptime": "1min",
                    "vacuum_cost_limit": 200,
                    "bgwriter_delay": 200,
                    "bgwriter_lru_multiplier": 2,
                    "wal_writer_delay": 200,
                    "fsync": "on",
                    "commit_delay": 0,
                    "commit_siblings": 5,
                    "enable_bitmapscan": "on",
                    "enable_seqscan": "on",
                    "full_page_writes": "on",
                    "log_min_messages": "warning",
                    "deadlock_timeout": 1,
                    "log_lock_waits": "off",
                    "log_min_duration_statement": -1,
                    "temp_buffers": "8MB",
                    "max_prepared_transactions": 0,
                    "max_wal_senders": 10,
                    "bgwriter_lru_maxpages": 100,
                    "log_statement": "none",
                    "shared_preload_libraries": "passwordcheck",
                    "wal_level": "replica",
                    "shared_buffers": "auto-optimized-sharedbuffers",
                    "jit": "off"
                },
                "toggle_passwd": "on"
            }
        else:
            print(
                "primary_private_ip is None.The cluster uses automatically assigns private IP"
            )
            # Same config as above, but without "private_ips" so the platform
            # assigns node IPs automatically.
            conf = {
                "cluster": {
                    "name": "数据库服务",
                    "description": "postgresql",
                    "auto_backup_time": "-1",
                    "pg": {
                        "cpu": 2,
                        "memory": 4096,
                        "instance_class": instance_class,
                        "volume_size": 50
                    },
                    "ri": {
                        "cpu": 2,
                        "memory": 4096,
                        "instance_class": instance_class,
                        "count": 0,
                        "volume_size": 20
                    },
                    "pgpool": {
                        "cpu": 2,
                        "memory": 4096,
                        "instance_class": instance_class,
                        "count": 0,
                        "volume_size": 20
                    },
                    "vxnet": vxnet_id,
                    "global_uuid": global_uuid
                },
                "version": version_id,
                "resource_group": "Standard",
                "zone": zone_id,
                "env": {
                    "db_name": "test_vdi",
                    "user_name": "yunify",
                    "password": "******",
                    "pg_version": "11",
                    "serialize_accept": "off",
                    "pgpool_port": 9999,
                    "child_life_time": 300,
                    "connection_life_time": 600,
                    "client_idle_limit": 0,
                    "max_pool": 2,
                    "num_init_children": 100,
                    "sync_stream_repl": "Yes",
                    "load_read_request_to_primary": "Yes",
                    "auto_failover": "Yes",
                    "max_connections": "auto-optimized-conns",
                    "wal_buffers": "8MB",
                    "work_mem": "4MB",
                    "maintenance_work_mem": "64MB",
                    "effective_cache_size": "4GB",
                    "wal_keep_segments": 256,
                    "checkpoint_timeout": "5min",
                    "autovacuum": "on",
                    "vacuum_cost_delay": 0,
                    "autovacuum_naptime": "1min",
                    "vacuum_cost_limit": 200,
                    "bgwriter_delay": 200,
                    "bgwriter_lru_multiplier": 2,
                    "wal_writer_delay": 200,
                    "fsync": "on",
                    "commit_delay": 0,
                    "commit_siblings": 5,
                    "enable_bitmapscan": "on",
                    "enable_seqscan": "on",
                    "full_page_writes": "on",
                    "log_min_messages": "warning",
                    "deadlock_timeout": 1,
                    "log_lock_waits": "off",
                    "log_min_duration_statement": -1,
                    "temp_buffers": "8MB",
                    "max_prepared_transactions": 0,
                    "max_wal_senders": 10,
                    "bgwriter_lru_maxpages": 100,
                    "log_statement": "none",
                    "shared_preload_libraries": "passwordcheck",
                    "wal_level": "replica",
                    "shared_buffers": "auto-optimized-sharedbuffers",
                    "jit": "off"
                },
                "toggle_passwd": "on"
            }
    elif app_ids == [const.MEMCACHED_APP_IDS]:
        if primary_private_ip:
            print(
                "primary_private_ip is not None.The cluster uses the specified private IP"
            )
            # Single IP, e.g. "192.168.15.102".
            private_ips_list = primary_private_ip
            print("private_ips_list == %s" % (private_ips_list))
            conf = {
                "cluster": {
                    "name": "缓存服务",
                    "description": "memcached",
                    "memcached_node": {
                        "cpu": 1,
                        "memory": 1024,
                        "instance_class": instance_class,
                        "count": 1
                    },
                    "vxnet": vxnet_id,
                    "global_uuid": global_uuid
                },
                "version": version_id,
                "zone": zone_id,
                # Pin the memcached node to the caller-supplied IP.
                "private_ips": [{
                    "role": "memcached_node",
                    "private_ips": private_ips_list
                }],
                "env": {
                    "-p": 11211,
                    "-U": 11211,
                    "-c": 65000,
                    "-m": 716,
                    "-n": 48,
                    "-f": 1.25,
                    "-t": 1,
                    "-M": 0
                }
            }
        else:
            print(
                "primary_private_ip is None.The cluster uses automatically assigns private IP"
            )
            # Same config minus "private_ips": the platform assigns the IP.
            conf = {
                "cluster": {
                    "name": "缓存服务",
                    "description": "memcached",
                    "memcached_node": {
                        "cpu": 1,
                        "memory": 1024,
                        "instance_class": instance_class,
                        "count": 1
                    },
                    "vxnet": vxnet_id,
                    "global_uuid": global_uuid
                },
                "version": version_id,
                "zone": zone_id,
                "env": {
                    "-p": 11211,
                    "-U": 11211,
                    "-c": 65000,
                    "-m": 716,
                    "-n": 48,
                    "-f": 1.25,
                    "-t": 1,
                    "-M": 0
                }
            }
    else:
        # NOTE(review): falls through with `conf` unbound, so json.dumps(conf)
        # below raises NameError for unknown app_ids — confirm intended.
        print("app_ids %s is invalid" % (app_ids))
    # Serialize the config dict to JSON for the API call.
    jconf = json.dumps(conf)
    print("jconf == %s" % (jconf))
    ret = conn.deploy_app_version(app_type=app_type, app_id=app_ids, version_id=version_id, conf=jconf, charge_mode="elastic", debug=0, owner=user_id)
    print("deploy_app_version ret == %s" % (ret))
    Common.check_ret_code(ret, action)
    cluster_id = ret['cluster_id']
    job_id = ret['job_id']
    print("cluster_id == %s" % (cluster_id))
    print("job_id == %s" % (job_id))
    # Poll the job once per second, at most 1000 attempts (cluster deploys
    # take longer than the 300s used elsewhere in this file).
    num = 0
    while num < 1000:
        num = num + 1
        print("num == %d" % (num))
        time.sleep(1)
        status = Common.get_job_status(conn, job_id)
        if status == "successful":
            print("deploy_app_version successful")
            break
        print("status == %s" % (status))
    # Record cluster node details into /opt files for later steps.
    if app_ids == [const.POSTGRESQL_APP_IDS]:
        if status == "successful":
            print("deploy_app_version postresql successful")
            # Mark the RDB creation as succeeded.
            create_rdb_status = "True"
            # Write create_rdb_status to file.
            create_rdb_status_conf = "/opt/create_rdb_status_conf"
            with open(create_rdb_status_conf, "w+") as f:
                f.write("CREATE_RDB_STATUS %s" % (create_rdb_status))
            # Write cluster_id to file.
            rdb_cluster_id_conf = "/opt/rdb_cluster_id_conf"
            with open(rdb_cluster_id_conf, "w+") as f:
                f.write("RDB_CLUSTER_ID %s" % (cluster_id))
            # Write cluster_id to file (legacy rdb-id location).
            rdb_id_conf = "/opt/rdb_id_conf"
            with open(rdb_id_conf, "w+") as f:
                f.write("RDB_ID %s" % (cluster_id))
            # Write rdb_master_ip to file.
            rdb_master_ip_conf = "/opt/rdb_master_ip_conf"
            rdb_master_ip = get_postgresql_cluster_primary_ip(conn, cluster_id)
            print("get_postgresql_cluster_primary_ip == %s" % (rdb_master_ip))
            if rdb_master_ip:
                with open(rdb_master_ip_conf, "w+") as f:
                    f.write("POSTGRESQL_ADDRESS %s" % (rdb_master_ip))
            # Write rdb_topslave_ip to file.
            rdb_topslave_ip_conf = "/opt/rdb_topslave_ip_conf"
            rdb_topslave_ip = get_postgresql_cluster_standby_ip(
                conn, cluster_id)
            print("get_postgresql_cluster_standby_ip rdb_topslave_ip == %s" %
                  (rdb_topslave_ip))
            if rdb_topslave_ip:
                with open(rdb_topslave_ip_conf, "w+") as f:
                    f.write("RDB_TOPSLAVE_IP %s" % (rdb_topslave_ip))
            # Write master_rdb_instance_id to file.
            master_rdb_instance_id_conf = "/opt/master_rdb_instance_id_conf"
            master_rdb_instance_id = get_postgresql_cluster_master_rdb_instance_id(
                conn, cluster_id)
            print(
                "get_postgresql_cluster_master_rdb_instance_id master_rdb_instance_id == %s"
                % (master_rdb_instance_id))
            if master_rdb_instance_id:
                with open(master_rdb_instance_id_conf, "w+") as f:
                    f.write("MASTER_RDB_INSTANCE_ID %s" %
                            (master_rdb_instance_id))
            # Write topslave_rdb_instance_id to file.
            topslave_rdb_instance_id_conf = "/opt/topslave_rdb_instance_id_conf"
            topslave_rdb_instance_id = get_postgresql_cluster_topslave_rdb_instance_id(
                conn, cluster_id)
            print(
                "get_postgresql_cluster_topslave_rdb_instance_id topslave_rdb_instance_id == %s"
                % (topslave_rdb_instance_id))
            if topslave_rdb_instance_id:
                with open(topslave_rdb_instance_id_conf, "w+") as f:
                    f.write("TOPSLAVE_RDB_INSTANCE_ID %s" %
                            (topslave_rdb_instance_id))
            # Tag the new cluster with today's date.
            current_time = time.strftime("%Y-%m-%d", time.localtime())
            tag_name = '桌面云数据库 %s' % (current_time)
            Common.attach_tags_to_resource(conn, user_id=user_id, tag_name=tag_name, resource_type='cluster', resource_id=cluster_id)
        else:
            print("deploy_app_version postresql timeout")
            create_rdb_status = "False"
            # Write create_rdb_status to file.
            create_rdb_status_conf = "/opt/create_rdb_status_conf"
            with open(create_rdb_status_conf, "w+") as f1:
                f1.write("CREATE_RDB_STATUS %s" % (create_rdb_status))
    elif app_ids == [const.MEMCACHED_APP_IDS]:
        if status == "successful":
            print("deploy_app_version memcached successful")
            # Mark the memcached creation as succeeded.
            create_memcached_status = "True"
            create_memcached_status_conf = "/opt/create_memcached_status_conf"
            with open(create_memcached_status_conf, "w+") as f:
                f.write("CREATE_MEMCACHED_STATUS %s" %
                        (create_memcached_status))
            # Write cluster_id to file.
            cache_cluster_id_conf = "/opt/cache_cluster_id_conf"
            with open(cache_cluster_id_conf, "w+") as f:
                f.write("CACHE_CLUSTER_ID %s" % (cluster_id))
            # Write cache_id to file.
            cache_id_conf = "/opt/cache_id_conf"
            with open(cache_id_conf, "w+") as f:
                f.write("CACHE_ID %s" % (cluster_id))
            # Write cache_master_ip to file.
            cache_master_ip_conf = "/opt/cache_master_ip_conf"
            cache_master_ip = get_memcached_cluster_private_ip(
                conn, cluster_id)
            print("get_memcached_cluster_private_ip cache_master_ip == %s" %
                  (cache_master_ip))
            if cache_master_ip:
                with open(cache_master_ip_conf, "w+") as f:
                    f.write("MEMCACHED_ADDRESS %s" % (cache_master_ip))
            # Write cache_node_id to file.
            cache_node_id_conf = "/opt/cache_node_id_conf"
            cache_node_id = get_memcached_cluster_cache_node_id(
                conn, cluster_id)
            print("get_memcached_cluster_cache_node_id cache_node_id == %s" %
                  (cache_node_id))
            if cache_node_id:
                with open(cache_node_id_conf, "w+") as f:
                    f.write("CACHE_NODE_ID %s" % (cache_node_id))
            # Tag the new cluster with today's date.
            current_time = time.strftime("%Y-%m-%d", time.localtime())
            tag_name = '桌面云缓存 %s' % (current_time)
            Common.attach_tags_to_resource(conn, user_id=user_id, tag_name=tag_name, resource_type='cluster', resource_id=cluster_id)
        else:
            print("deploy_app_version memcached timeout")
            create_memcached_status = "False"
            # Write create_memcached_status to file.
            create_memcached_status_conf = "/opt/create_memcached_status_conf"
            with open(create_memcached_status_conf, "w+") as f1:
                f1.write("CREATE_MEMCACHED_STATUS %s" %
                         (create_memcached_status))
    else:
        print("app_ids %s doesn't support" % (app_ids))
        exit(-1)
    print("子线程结束")
def create_rdb(conn, user_id, vxnet_id, master_private_ip, topslave_private_ip):
    """Create a 2-node PostgreSQL RDB, wait for the async job, then record
    the RDB's ids/IPs into files under /opt and tag the resource.

    Args:
        conn: IaaS API connection object.
        user_id: Owner of the RDB.
        vxnet_id: Vxnet the RDB joins.
        master_private_ip: Optional fixed private IP for the master node;
            when falsy the platform assigns IPs automatically.
        topslave_private_ip: Fixed private IP for the topslave node (only
            used when master_private_ip is given).
    """
    print("子线程启动")
    print(
        "create_rdb user_id == %s vxnet_id == %s master_private_ip == %s topslave_private_ip == %s"
        % (user_id, vxnet_id, master_private_ip, topslave_private_ip))
    if not master_private_ip:
        print("master_private_ip is None")
        # CreateRDB without fixed private IPs.
        action = const.ACTION_CREATE_RDB
        print("action == %s" % (action))
        ret = conn.create_rdb(owner=user_id,
                              vxnet=vxnet_id,
                              rdb_engine='psql',
                              engine_version='9.4',
                              rdb_username='******',
                              rdb_password='******',
                              rdb_type=2,
                              storage_size=10,
                              rdb_name='数据库服务',
                              description='数据库')
        print("create_rdb ret == %s" % (ret))
        Common.check_ret_code(ret, action)
    else:
        print("master_private_ip is %s" % (master_private_ip))
        # CreateRDB with the caller-specified master/topslave IPs.
        action = const.ACTION_CREATE_RDB
        print("action == %s" % (action))
        private_ips_list = {
            "master": master_private_ip,
            "topslave": topslave_private_ip
        }
        print("private_ips_list == %s" % (private_ips_list))
        ret = conn.create_rdb(owner=user_id,
                              vxnet=vxnet_id,
                              rdb_engine='psql',
                              engine_version='9.4',
                              rdb_username='******',
                              rdb_password='******',
                              rdb_type=2,
                              storage_size=10,
                              rdb_name='数据库服务',
                              description='数据库',
                              private_ips=[private_ips_list])
        print("create_rdb ret == %s" % (ret))
        Common.check_ret_code(ret, action)
    job_id = ret['job_id']
    rdb_id = ret['rdb']
    print("job_id == %s" % (job_id))
    print("rdb_id == %s" % (rdb_id))
    # Poll the job once per second, at most 300 attempts.
    num = 0
    while num < 300:
        num = num + 1
        print("num == %d" % (num))
        time.sleep(1)
        status = Common.get_job_status(conn, job_id)
        if status == "successful":
            print("create_rdb successful")
            break
        print("status == %s" % (status))
    if status == "successful":
        print("create_rdb rdb successful")
        # Mark the RDB creation as succeeded.
        create_rdb_status = "True"
        # Write create_rdb_status to file.
        create_rdb_status_conf = "/opt/create_rdb_status_conf"
        with open(create_rdb_status_conf, "w+") as f:
            f.write("CREATE_RDB_STATUS %s" % (create_rdb_status))
        # Write rdb_id to file.
        rdb_id_conf = "/opt/rdb_id_conf"
        with open(rdb_id_conf, "w+") as f:
            f.write("RDB_ID %s" % (rdb_id))
        # Write rdb_master_ip to file.
        rdb_master_ip_conf = "/opt/rdb_master_ip_conf"
        rdb_master_ip = get_rdb_master_ip(conn, user_id, rdb_id)
        print("get_rdb_master_ip rdb_master_ip == %s" % (rdb_master_ip))
        if rdb_master_ip:
            with open(rdb_master_ip_conf, "w+") as f:
                f.write("POSTGRESQL_ADDRESS %s" % (rdb_master_ip))
        # Write rdb_topslave_ip to file.
        rdb_topslave_ip_conf = "/opt/rdb_topslave_ip_conf"
        rdb_topslave_ip = get_rdb_topslave_ip(conn, user_id, rdb_id)
        print("get_rdb_topslave_ip rdb_topslave_ip == %s" % (rdb_topslave_ip))
        if rdb_topslave_ip:
            with open(rdb_topslave_ip_conf, "w+") as f:
                f.write("RDB_TOPSLAVE_IP %s" % (rdb_topslave_ip))
        # Write master_rdb_instance_id to file.
        master_rdb_instance_id_conf = "/opt/master_rdb_instance_id_conf"
        master_rdb_instance_id = get_master_rdb_instance_id(
            conn, user_id, rdb_id)
        print("get_master_rdb_instance_id master_rdb_instance_id == %s" %
              (master_rdb_instance_id))
        if master_rdb_instance_id:
            with open(master_rdb_instance_id_conf, "w+") as f:
                f.write("MASTER_RDB_INSTANCE_ID %s" % (master_rdb_instance_id))
        # Write topslave_rdb_instance_id to file.
        topslave_rdb_instance_id_conf = "/opt/topslave_rdb_instance_id_conf"
        topslave_rdb_instance_id = get_topslave_rdb_instance_id(
            conn, user_id, rdb_id)
        print("get_topslave_rdb_instance_id topslave_rdb_instance_id == %s" %
              (topslave_rdb_instance_id))
        if topslave_rdb_instance_id:
            with open(topslave_rdb_instance_id_conf, "w+") as f:
                f.write("TOPSLAVE_RDB_INSTANCE_ID %s" %
                        (topslave_rdb_instance_id))
        # Tag the new RDB with today's date.
        current_time = time.strftime("%Y-%m-%d", time.localtime())
        tag_name = '桌面云数据库 %s' % (current_time)
        Common.attach_tags_to_resource(conn, user_id=user_id, tag_name=tag_name, resource_type='rdb', resource_id=rdb_id)
    print("子线程结束")
def create_cache(conn,user_id,vxnet_id,private_ips=None):
    """Create a 1 GB memcached cache service, wait for the async job, then
    record the cache id/ip/node id into files under /opt and tag it.

    Args:
        conn: IaaS API connection object.
        user_id: Owner of the cache.
        vxnet_id: Vxnet the cache joins.
        private_ips: Optional fixed private IP for the master cache node;
            when falsy the platform assigns one automatically.
    """
    print("子线程启动")
    print("create_cache user_id == %s vxnet_id == %s private_ips == %s" % (user_id,vxnet_id,private_ips))
    if not private_ips:
        print("private_ips is None")
        # CreateCache without a fixed private IP.
        action = const.ACTION_CREATE_CACHE
        print("action == %s" % (action))
        ret = conn.create_cache(owner=user_id,vxnet=vxnet_id,cache_size=1,cache_type='memcached1.4.13',cache_name='缓存服务',description='缓存')
        print("create_cache ret == %s" % (ret))
        Common.check_ret_code(ret, action)
    else:
        print("private_ips is %s" %(private_ips))
        # CreateCache with the caller-specified master IP.
        action = const.ACTION_CREATE_CACHE
        print("action == %s" % (action))
        private_ips_list = {"cache_role": "master", "private_ips": private_ips}
        ret = conn.create_cache(owner=user_id,vxnet=vxnet_id,cache_size=1,cache_type='memcached1.4.13',cache_name='缓存服务',description='缓存',private_ips=[private_ips_list])
        print("create_cache ret == %s" % (ret))
        Common.check_ret_code(ret, action)
    job_id = ret['job_id']
    cache_id = ret['cache_id']
    print("job_id == %s" % (job_id))
    print("cache_id == %s" % (cache_id))
    # Poll the job once per second, at most 300 attempts.
    num = 0
    while num < 300:
        num = num + 1
        print("num == %d" % (num))
        time.sleep(1)
        status = Common.get_job_status(conn,job_id)
        if status == "successful":
            print("create_cache successful")
            break
        print("status == %s" % (status))
    if status == "successful":
        print("create_cache cache successful")
        # Write cache_id to file.
        cache_id_conf = "/opt/cache_id_conf"
        with open(cache_id_conf, "w+") as f:
            f.write("CACHE_ID %s" % (cache_id))
        # Write cache_master_ip to file.
        cache_master_ip_conf = "/opt/cache_master_ip_conf"
        cache_master_ip = get_cache_master_ip(conn,user_id,cache_id)
        print("get_cache_master_ip cache_master_ip == %s" %(cache_master_ip))
        if cache_master_ip:
            with open(cache_master_ip_conf, "w+") as f:
                f.write("MEMCACHED_ADDRESS %s" %(cache_master_ip))
        # Write cache_node_id to file.
        cache_node_id_conf = "/opt/cache_node_id_conf"
        cache_node_id = get_cache_node_id(conn,user_id,cache_id)
        print("get_cache_node_id cache_node_id == %s" %(cache_node_id))
        if cache_node_id:
            with open(cache_node_id_conf, "w+") as f:
                f.write("CACHE_NODE_ID %s" %(cache_node_id))
        # Tag the new cache with today's date.
        current_time = time.strftime("%Y-%m-%d", time.localtime())
        tag_name = '桌面云缓存 %s' %(current_time)
        Common.attach_tags_to_resource(conn,user_id=user_id,tag_name=tag_name,resource_type='cache',resource_id=cache_id)
    print("子线程结束")
def clone_instances(conn, user_id, resource_id, vxnet_id, private_ips=None, hostname=None):
    """Clone an instance into the given vxnet, wait for the job and the
    clone's IP, then record them in per-hostname /opt files and the module
    globals g_resource_ids / g_cloned_instance_ips, and tag the clone.

    Args:
        conn: IaaS API connection object.
        user_id: Owner of the instances.
        resource_id: Source instance id (or one-element list).
        vxnet_id: Vxnet the clone joins.
        private_ips: Optional fixed private IP for the clone.
        hostname: Label used in the output file names and tag.
    """
    print("子线程启动")
    print(
        "clone_instances user_id == %s resource_id == %s vxnet_id == %s private_ips == %s hostname == %s"
        % (user_id, resource_id, vxnet_id, private_ips, hostname))
    # The API expects a list of ids; wrap a lone id.
    if resource_id and not isinstance(resource_id, list):
        resource_id = [resource_id]
    print("resource_id == %s" % (resource_id))
    global g_resource_ids
    global g_cloned_instance_ips
    if not private_ips:
        print("private_ips is None")
        # CloneInstances; vxnets entry format is "instance|vxnet".
        action = const.ACTION_CLONE_INSTANCES
        print("action == %s" % (action))
        vxnets_list = resource_id[0] + "|" + vxnet_id
        print("vxnets_list == %s" % (vxnets_list))
        ret = conn.clone_instances(owner=user_id, instances=resource_id, vxnets=[vxnets_list])
        print("clone_instances ret == %s" % (ret))
        Common.check_ret_code(ret, action)
    else:
        print("private_ips is %s" % (private_ips))
        # CloneInstances; vxnets entry format is "instance|vxnet|private_ip".
        action = const.ACTION_CLONE_INSTANCES
        print("action == %s" % (action))
        vxnets_list = resource_id[0] + "|" + vxnet_id + "|" + private_ips
        print("vxnets_list == %s" % (vxnets_list))
        ret = conn.clone_instances(owner=user_id, instances=resource_id, vxnets=[vxnets_list])
        print("clone_instances ret == %s" % (ret))
        Common.check_ret_code(ret, action)
    job_id = ret['job_id']
    instance_id = ret['instances']
    cloned_instance_id = instance_id[0]
    print("cloned_instance_id == %s" % (cloned_instance_id))
    print("job_id == %s" % (job_id))
    # Poll the job once per second, at most 300 attempts.
    num = 0
    while num < 300:
        num = num + 1
        print("num == %d" % (num))
        time.sleep(1)
        status = Common.get_job_status(conn, job_id)
        if status == "successful":
            print("clone_instances successful")
            break
        print("status == %s" % (status))
    if status == "successful":
        print("clone_instances instance successful")
        cloned_instance_ip = get_cloned_instance_ip(conn, user_id, instance_id)
        # Poll again until the clone reports a non-empty IP (at most 300s).
        num = 0
        while num < 300:
            num = num + 1
            print("num == %d" % (num))
            time.sleep(1)
            cloned_instance_ip = get_cloned_instance_ip(
                conn, user_id, instance_id)
            if cloned_instance_ip != "":
                print("get_cloned_instance_ip successful")
                break
        # Write cloned_instance_ip to its per-hostname file.
        print("cloned_instance_ip == %s" % (cloned_instance_ip))
        cloned_instance_ip_conf = "/opt/cloned_%s_instance_ip_conf" % (
            hostname)
        with open(cloned_instance_ip_conf, "w+") as f1:
            f1.write("CLONED_%s_INSTANCE_IP %s" %
                     (hostname.upper(), cloned_instance_ip))
        if cloned_instance_ip not in g_cloned_instance_ips:
            g_cloned_instance_ips.append(cloned_instance_ip)
        # Write cloned_instance_id to its per-hostname file.
        print("cloned_instance_id == %s" % (cloned_instance_id))
        cloned_instance_id_conf = "/opt/cloned_%s_instance_id_conf" % (
            hostname)
        with open(cloned_instance_id_conf, "w+") as f2:
            f2.write("CLONED_%s_INSTANCE_ID %s" %
                     (hostname.upper(), cloned_instance_id))
        if cloned_instance_id not in g_resource_ids:
            g_resource_ids.append(cloned_instance_id)
        # Tag the clone with the hostname and today's date.
        current_time = time.strftime("%Y-%m-%d", time.localtime())
        tag_name = '桌面云服务器%s %s' % (hostname, current_time)
        Common.attach_tags_to_resource(conn, user_id=user_id, tag_name=tag_name, resource_type='instance', resource_id=cloned_instance_id)
    print("子线程结束")
def create_s2server(conn,user_id,vxnet_id,private_ips,instance_class):
    """Create a vNAS S2 file server, wait for the async job, then publish
    the id via g_s2_server_id and a file under /opt, and tag it.

    Exits the process if the create job does not finish successfully.

    Args:
        conn: IaaS API connection object.
        user_id: Owner of the S2 server.
        vxnet_id: Vxnet the server joins.
        private_ips: Optional fixed private IP for the server.
        instance_class: Key into const.INSTANCE_CLASS_S2_CLASS_MAP.
    """
    print("子线程启动")
    print("create_s2server user_id == %s vxnet_id == %s private_ips == %s instance_class == %s" % (user_id,vxnet_id,private_ips,instance_class))
    global g_s2_server_id
    # Map the instance class to the corresponding s2_class.
    s2_class = const.INSTANCE_CLASS_S2_CLASS_MAP[instance_class]
    print("instance_class == %s" % (instance_class))
    print("s2_class == %s" % (s2_class))
    if not private_ips:
        print("private_ips is None")
        # CreateS2Server without a fixed private IP.
        action = const.ACTION_CREATE_S2_SERVER
        print("action == %s" % (action))
        ret = conn.create_s2_server(owner=user_id,vxnet=vxnet_id,service_type='vnas',s2_server_name='文件服务器',s2_server_type=0,description='文件存储vNAS',s2_class=s2_class)
        print("create_s2_server ret == %s" % (ret))
        Common.check_ret_code(ret, action)
    else:
        print("private_ips is %s" %(private_ips))
        # CreateS2Server with the caller-specified private IP.
        action = const.ACTION_CREATE_S2_SERVER
        print("action == %s" % (action))
        ret = conn.create_s2_server(owner=user_id,vxnet=vxnet_id,service_type='vnas',s2_server_name='文件服务器',s2_server_type=0,description='文件存储vNAS',s2_class=s2_class,private_ip=private_ips)
        print("create_s2_server ret == %s" % (ret))
        Common.check_ret_code(ret, action)
    job_id = ret['job_id']
    s2_server_id = ret['s2_server']
    print("job_id == %s" % (job_id))
    print("s2_server_id == %s" % (s2_server_id))
    # Poll the job once per second, at most 300 attempts.
    num = 0
    while num < 300:
        num = num + 1
        print("num == %d" % (num))
        time.sleep(1)
        status = Common.get_job_status(conn,job_id)
        if status == "successful":
            print("create_s2_server successful")
            break
        print("status == %s" % (status))
    if status == "successful":
        print("create_s2_server s2_server successful")
        g_s2_server_id = s2_server_id
        print("g_s2_server_id == %s" % (g_s2_server_id))
        # Write s2_server_id to file.
        s2server_id_conf = "/opt/s2server_id_conf"
        with open(s2server_id_conf, "w+") as f1:
            f1.write("S2SERVER_ID %s" % (s2_server_id))
        # Tag the new file server with today's date.
        current_time = time.strftime("%Y-%m-%d", time.localtime())
        tag_name = '桌面云文件服务器 %s' %(current_time)
        Common.attach_tags_to_resource(conn,user_id=user_id,tag_name=tag_name,resource_type='s2_server',resource_id=s2_server_id)
    else:
        print("create_s2_server s2_server failed")
        exit(-1)
    print("子线程结束")