def delete_data_volume(self):
    """
    Tear down this work's data volume end to end:
    1) read the volume record from etcd; 2) unmount it from every app node that
    has a mount record (best effort); 3) delete the volume on the storage
    service; 4) delete the volume record from etcd.
    Returns Result('done') when no volume_id is set; otherwise the last Result.
    """
    if not self.volume_id:
        return Result('done')
    rlt = VolumeDB.instance().read_volume_info(self.cluster_name, self.volume_id)
    if not rlt.success:
        Log(
            1, 'AddStorageClassWork.read_volume_info [%s][%s]from etcd fail,as[%s]' %
            (self.cluster_name, self.volume_id, rlt.message))
        return rlt
    volume = rlt.content
    app_node_list = []
    mount_id_list = []
    rlt = MountDB.instance().read_mount_list(self.cluster_name, self.pv_name)
    if not rlt.success:
        # Best effort: failing to read the mount list only skips the unmount step.
        Log(
            1, 'AddStorageClassWork.delete_data_volume read_mount_list[%s][%s]fail,as[%s]' %
            (self.cluster_name, self.pv_name, rlt.message))
    else:
        for node in rlt.content:
            mount_id_list.append(node.get(ID))
            app_node_list.append({
                'ip': node.get('ip'),
                'port': node.get('port')
            })
    if app_node_list:
        rlt = self.veclient.unmount_volume(volume, app_node_list)
        if not rlt.success:
            Log(
                1, 'AddStorageClassWork.delete_data_volume unmount_volume[%s][%s]fail,as[%s]' %
                (self.cluster_name, volume.get('name'), rlt.message))
            return rlt
        # Mount records are purged only after a successful unmount; the return
        # value of delete_mount_records is deliberately ignored (best effort).
        MountDB.instance().delete_mount_records(self.cluster_name, mount_id_list)
    rlt = self.veclient.delete_volume(volume['cluster_id'], volume.get('name'))
    if not rlt.success:
        Log(
            1, 'AddStorageClassWork.delete_data_volume delete_volume[%s][%s]fail,as[%s]' %
            (self.cluster_name, volume.get('name'), rlt.message))
        return rlt
    rlt = VolumeDB.instance().delete_volume(self.cluster_name, self.volume_id)
    if not rlt.success:
        Log(
            1, 'AddStorageClassWork.delete_data_volume delete_volume[%s][%s]from etcd fail,as[%s]' %
            (self.cluster_name, self.volume_id, rlt.message))
    return rlt
def run(self):
    """
    Worker-thread main loop: pull tasks from self.task_queue, run each one,
    and block until the task reports completion. Loops forever.
    """
    # Log(4, "Factory start-----------:{}-----------, status:{}".format(self.thread_name, self.task_queue.empty()))
    while True:
        try:
            # Tasks are dequeued asynchronously; Queue implements its own locking.
            # Log(4, "name:{}, run queue size:{}, id(self):{}, id(thread):{}".format(self.thread_name,
            #     self.task_queue.qsize(), id(self), id(self.thread_name)))
            if not self.task_queue.empty():
                task = self.task_queue.get(timeout=2)
                task.run()
                while True:
                    if task.is_finished():
                        # The task carries a status attribute marking completion;
                        # tell the queue the work item is done.
                        self.task_queue.task_done()
                        break
                    time.sleep(2)
            time.sleep(10)  # throttle the loop to keep CPU usage low
            Log(
                4, "factory run one finished. name:{}, at:{}".format(
                    self.thread_name, datetime.datetime.now()))
        except Queue.Empty:
            # get(timeout=2) can still race with another consumer even after
            # the empty() check above.
            Log(3, "Factory.run empty...")
        except Exception as e:
            PrintStack()
            Log(3, "Factory run error:{}".format(e.message))
def set_net_ws(self, post_data, **kwargs):
    """
    Set a subnet and assign it to a workspace.
    :param post_data: JSON string; must contain string fields
                      workspace/cluster_name/fa_ip/key/subnet
    :param kwargs: request context; kwargs['passport']['username'] is stored
                   as the record's creater
    :return: Result with the manager's content on success, error Result otherwise
    """
    try:
        # NOTE: "\'" is just "'", so this replace is a no-op kept for compatibility.
        data_info = json.loads(post_data.replace("'", "\'"))
        Log(3, "set_net_ws:{}".format(kwargs.get('passport')))
        data_info['creater'] = kwargs.get('passport', {}).get('username', '')
        # All five required fields must be strings.
        if not all([
            isinstance(data_info.get('workspace'), basestring),
            isinstance(data_info.get('cluster_name'), basestring),
            isinstance(data_info.get('fa_ip'), basestring),
            isinstance(data_info.get('key'), basestring),
            isinstance(data_info.get('subnet'), basestring)
        ]):
            return Result('', 400, 'param error', 400)
        rlt = NetworkMgr.instance().set_workspace(data_info)
        if not rlt.success:
            Log(3, 'set_net_ws error:{}'.format(rlt.message))
            return Result('', rlt.result, rlt.message, 400)
        return Result(rlt.content)
    except Exception as e:
        PrintStack()
        # BUGFIX: previously logged "get_ippool error" (copy-paste leftover).
        Log(1, "set_net_ws error:{}".format(e.message))
        return Result('', 500, e.message, 500)
def check_valid(self):
    """
    Validate this work's inputs and lazily construct the vespace and kube clients.
    Returns an error Result on any failed check; on valid input falls through
    and returns None implicitly.
    """
    try:
        if not StorageNodeDB.instance().is_app_node_exist(self.cluster_name, self.ip):
            return Result('', INVALID_PARAM_ERR, 'mount host is invalid')
        if self.veclient is None:
            self.veclient = VespaceMgr.instance().get_cluster_client(self.cluster_name)
        if not self.veclient.test():
            return Result('', INIT_VESPACE_CLIENT_FAILERR, 'init vespace client fail.')
        kube_client = KubeClientMgr.instance().get_cluster_client(self.cluster_name)
        if kube_client is None:
            Log(1, 'AddPVWork.check_valid get_cluster_client[%s]fail'%(self.cluster_name))
            return Result('', INVALID_PARAM_ERR, 'cluster_name is invalid')
        else:
            self.kubeclient = kube_client
        # Normalize unknown recovery models to 'Delete'.
        if self.recovery_model not in ['Retain', 'Recycle', 'Delete']:
            self.recovery_model = 'Delete'
        # NOTE(review): there is no explicit success return, so valid input
        # yields None -- confirm callers treat that as success.
    except InternalException as e:  # was Python-2-only `except InternalException,e`
        Log(1, "AddPVWork.check_valid except[%s]"%(e.value))
        return Result("AddPVWork", e.errid, e.value)
def connect(self):
    """
    Open an SSH session and an SFTP channel to self.host_ip using the
    credentials stored in Masterdb; stores them on self.sshclient/self.sftpclient.
    :return: Result('') on success, error Result otherwise
    """
    ip = self.host_ip
    rlt = Masterdb.instance().read_master(ip.replace('.', '-'))
    if not rlt.success:
        return rlt
    record = rlt.content
    username = record.get('username', None)
    passwd = record.get('userpwd', None)
    pkey = record.get('pkey', None)
    port = int(record.get('port', 22))
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        ssh.connect(ip, port, username=username, password=passwd, pkey=pkey, timeout=4)
    except Exception as e:
        Log(3, 'remoteparam connect error:{}'.format(e.message))
        return Result('', 400, 'connect host error:' + e.message)
    self.sshclient = ssh
    transport = paramiko.Transport((ip, port))
    try:
        # Key-based auth works the same way: password= would simply become pkey=.
        transport.connect(username=username, password=passwd, pkey=pkey)
        # Build the SFTP channel on top of the transport.
        sftp = paramiko.SFTPClient.from_transport(transport)
    except Exception as e:
        Log(3, 'remoteparam connect error:{}'.format(e.message))
        return Result('', 400, 'connect host error:' + e.message)
    self.sftpclient = sftp
    return Result('')
def get_node_pods(self, node_name):
    """
    Fetch all pods scheduled on a node via the apiserver proxy endpoint.
    :param node_name: node to query
    :return: Result(list of pod dicts) or an error Result
    """
    url = 'proxy/nodes/{}/pods'.format(node_name)
    started = datetime.datetime.now()
    Log(4, "******** now:{}".format(datetime.datetime.now()))
    try:
        resp = self.client.request(method='GET', url=url, timeout=self.timeout)
    except Exception as e:
        Log(2, 'get all pods list except{}'.format(e))
        return Result('', msg='get_pod_list except{}'.format(e), result=400)
    Log(4, "get_all_pods cost:{}".format(datetime.datetime.now() - started))
    if resp.status_code != 200:
        Log(1, 'get all pods list fail,as[%s]' % resp.text)
        return Result('', FAIL, resp.text)
    items = resp.json().get('items', [])
    if not items:
        Log(1, "get node pod None:{}".format(items))
        return Result([])
    return Result(items)
def get(self, container_id=None):
    """
    GET metrics from the cAdvisor endpoint on self.host_ip:4194.
    :param container_id: optional container id; a 'scheme//id' prefix is stripped
    :return: Result(parsed JSON) or an error Result carrying the HTTP status
    """
    try:
        base = 'http://{}:4194'.format(self.host_ip) + self.uri
        if container_id:
            if '//' in container_id:
                # Drop the runtime prefix (e.g. 'docker//<id>').
                container_id = container_id.split('//')[1]
            url = base + '/' + container_id
        else:
            url = base
        r = requests.get(url, timeout=self.timeout)
    except requests.exceptions.RequestException as e:
        Log(2, "can not get request from cadvisor:{}".format(e.message))
        return Result('', 500, e.message, 400)
    except Exception as e:
        Log(1, "cadvisor Exception:{}".format(e.message))
        return Result('', 500, e.message, 500)
    else:
        if r.status_code != 200:
            return Result('', r.status_code, r.text, r.status_code)
        try:
            return Result(r.json())
        except Exception as e:
            Log(3, "from cadvisor data.json() error:{}".format(e.message))
            PrintStack()
            return Result('', 500, e.message, 500)
def launch_task(self):
    """
    Poll the vespace service until it reports ready on two consecutive checks
    (up to 120 polls, 30s apart). Returns Result('ok') on success, the
    scheduler's failure Result, or a VESPACE_SERVICE_ABNORMAL_ERR Result on timeout.
    """
    Log(4, "CheckServiceTask.launch_task")
    try:
        ready = False
        for _ in range(120):
            rlt = self.workbench.schedule_status()
            if not rlt.success:
                Log(4, 'skip current action, as the schedule is failed')
                return rlt
            if self.workbench.is_service_ready():
                Log(3, 'The vespace service is ready')
                # Require two consecutive ready checks before declaring success.
                if ready:
                    return Result('ok')
                else:
                    ready = True
            self.add_progress(1)
            time.sleep(30)
        return Result('', VESPACE_SERVICE_ABNORMAL_ERR, 'The vespace service abnormal.')
    except InternalException as ex:  # was Python-2-only `except InternalException, ex`
        self.log(
            "CheckServiceTask test_strategy_service fail,as[%s]" % (ex.value), ex.errid)
        return Result(
            'InternalException', ex.errid,
            "CheckServiceTask launch_task fail,as[%s]" % (ex.value))
def get_host_all_pods(self, cluster_name, host_name):
    """
    Return every pod on a host, restricted to the namespaces recorded for
    the cluster in WorkSpacedb.
    :param cluster_name: cluster to query
    :param host_name: host whose pods are wanted
    :return: Result(list of pods) or an error Result
    """
    client = self.get_cluster_client(cluster_name)
    if client is None:
        Log(
            1, 'KubeClientMgr.get_host_pod_list[%s] fail,as[the cluster info invalid]' %
            (cluster_name))
        return Result('', FAIL, 'get cluster info fail')
    rlt = WorkSpacedb.instance().get_ns_by_cluster(cluster_name)
    if not rlt.success:
        Log(
            1, 'KubeClientMgr.get_host_pod_list get_ns_by_cluster[%s] fail,as[%s]' %
            (cluster_name, rlt.message))
        return rlt
    ns_name_list = [ns.get('name') for ns in rlt.content]
    rlt = client.get_host_pods(ns_name_list, host_name)
    if not rlt.success:
        return Result('', 400, rlt.message, 400)
    return Result(rlt.content)
def unmount_host(self):
    """
    Unmount this PV's volume from every app node that has a mount record,
    then delete those mount records. NFS volumes need no unmount and are skipped.
    :return: Result describing the outcome
    """
    if self.volume_type == STORAGE_SHARE_TYPE_NFS:
        return Result('nfs type volume')
    rlt = MountDB.instance().read_mount_list(self.cluster_name, self.pv_name)
    if not rlt.success:
        # No mount records found: nothing to unmount, treat as done.
        Log(1, 'AddPVWork.unmount_host read_mount_list[%s][%s]fail,as[%s]'%(self.cluster_name, self.pv_name, rlt.message))
        return Result('done')
    app_node_list = []
    mount_id_list = []
    for host in rlt.content:
        mount_id_list.append(host.get(ID))
        app_node_list.append({'ip': host.get('ip'), 'port': host.get('port')})
    if not app_node_list:
        return Result('not mount')
    data = {'name': self.pv_name, 'cluster_id': self.get_cluster_id()}
    rlt = self.veclient.unmount_volume(data, app_node_list)
    if not rlt.success:
        # BUGFIX: log previously said "mount_host mount_volume" (copy-paste leftover).
        Log(1, 'AddPVWork.unmount_host unmount_volume[%s]fail,as[%s]'%(str(data), rlt.message))
        return rlt
    rlt = MountDB.instance().delete_mount_records(self.cluster_name, mount_id_list)
    if not rlt.success:
        # BUGFIX: log previously said "mount_host save_mount_info" (copy-paste leftover).
        Log(1, 'AddPVWork.unmount_host delete_mount_records[%s]fail,as[%s]'%(str(data), rlt.message))
    return rlt
def add_node(self, post_data, **kwargs):
    """
    Add a node to a cluster, then register it as a storage node (best effort).
    Creation success can be confirmed by checking that the launcher's
    /clusters/<name>/masters data carries kubeletstatus and apiserverstatus
    both set to true.
    :param post_data: JSON string with ClusterName and HostIP
    :param kwargs: request context carrying 'passport'
    :return: Result('ok') on success, error Result otherwise
    """
    try:
        node_info = json.loads(post_data.replace("'", "\'"))
        passport = kwargs.get('passport', {})
        # creater = kwargs.get('passport', {}).get('username')
        cluster_name = node_info.get("ClusterName", "")
        host_ip = node_info.get("HostIP", "")
        rlt = self.clumgr.add_node(node_info, passport)
        if not rlt.success:
            Log(1, "cluster add node error:{}".format(rlt.message))
            return rlt
        rlt = StorageMgr.instance().add_storage_node(cluster_name, host_ip)
        if not rlt.success:
            # Storage registration failure is logged but does not fail the call.
            Log(
                1, 'Cluster.add_node add_storage_node[%s][%s] fail,as[%s]' %
                (cluster_name, host_ip, rlt.message))
        return Result('ok')
    except Exception as e:
        PrintStack()
        Log(1, "add_node:{}".format(e))
        return Result('', INVALID_JSON_DATA_ERR, str(e))
def syn_vip(self, clu_name):
    """
    Synchronize the cluster VIP: read it from the launcher, compare with the
    value stored in etcd, and save the new value when they differ.
    :param clu_name: cluster name
    :return: None (errors are logged and abort silently)
    """
    rlt = LauncherClient.instance().get_cluster_info(clu_name)
    if not rlt.success:
        Log(1, "syn_vip get_cluster_info error:{}".format(rlt.message))
        return
    json_data, ok = self.parse_json(rlt.content.get('info'))
    if not ok:
        Log(1, "syn_vip cluster_info can not parse to json:{}".format(rlt.content.get('info')))
        return
    vip = json_data.get('vip')
    rlt = Clusterdb.instance().get_vip(clu_name)
    if rlt.success:
        stored_vip, ok = self.parse_json(rlt.content)
        if not ok:
            Log(1, "syn_vip the info can not parse to json:{}".format(rlt.content))
            return
    elif rlt.result == ETCD_KEY_NOT_FOUND_ERR:
        # No stored VIP yet -- treat as empty so any real VIP triggers a save.
        stored_vip = ''
    else:
        Log(1, "syn_vip can get vip error:{}".format(rlt.message))
        return
    if vip != stored_vip:
        Clusterdb.instance().save_vip(clu_name, {'vip': vip})
    return
def delete_data_disk(self):
    """
    Remove every data disk registered for self.ip from the storage cluster,
    then purge the per-disk records from etcd.
    :return: Result(number of disks removed) or an error Result
    """
    rlt = DiskDB.instance().read_disk_list(self.ip)
    if not rlt.success:
        if rlt.result == ETCD_KEY_NOT_FOUND_ERR:
            # No disk records at all: nothing to remove.
            return Result(0)
        Log(1, 'DeleteStorageNodeWork.delete_data_disk read_disk_list fail,as[%s]'%(rlt.message))
        return rlt
    device_list = rlt.content
    if len(device_list) == 0:
        return Result('nothing to do')
    delete_disk_list = [dev['Path'] for dev in device_list]
    # Cluster/host coordinates are taken from the first record; all records
    # belong to the same node (self.ip).
    first = device_list[0]
    rlt = self.client.delete_data_disk(first['cluster_id'],
                                       first['domain_name'],
                                       first['ip'],
                                       first['store_api_port'],
                                       delete_disk_list)
    if rlt.success:
        LogDel(3, 'system', u'从存储集群[%s][%s]移除全部数据盘成功'%(self.cluster_name, self.ip))
    else:
        LogDel(3, 'system', u'从存储集群[%s][%s]移除全部数据盘失败,因为【%s】'%(self.cluster_name, self.ip, rlt.message))
    # Etcd cleanup is attempted for every disk regardless of the API outcome.
    for disk in device_list:
        rlt = DiskDB.instance().delete_disk(self.ip, disk['disk_id'])
        if not rlt.success:
            Log(1, 'DeleteStorageNodeWork.delete_data_disk delete_disk in etcd fail,as[%s]'%(rlt.message))
    return Result(len(delete_disk_list))
def log(self, act):
    """
    Log the HTTP outcome of an action: info level on 200, error level otherwise.
    :param act: short description of the action being reported
    """
    if self.status_code == HTTP_OK_200:
        Log(4, '[%s] success, return [%s]' % (act, self.respond_body))
    else:
        # BUGFIX: 'massage' -> 'message' typo in the failure log text.
        Log(
            1, '[%s] fail, return [%s],message[%s]' %
            (act, self.respond_body, self.message))
def create_mount_record(self, cluster_name, mount_info):
    """
    Allocate a new mount id and persist a mount record under <cluster_name>/<id>.
    :param cluster_name: cluster the mount belongs to
    :param mount_info: dict with ip/port/name/cluster_id/share_type/target_port
    :return: Result(new id) on success, error Result otherwise
    """
    rlt = self.get_identity_id()
    if not rlt.success:
        Log(
            1, 'MountDB.create_mount_record.get_identity_id fail,as[%s]' %
            (rlt.message))
        return Result(0, ETCD_CREATE_KEY_FAIL_ERR, 'get_identity_id fail.')
    _id = rlt.content
    record = {
        'create_time': NowMilli(),
        'ip': mount_info.get('ip'),
        'port': mount_info.get('port', APPLICATION_HOST_PORT),
        'name': mount_info.get('name'),
        'cluster_id': mount_info.get('cluster_id'),
        'share_type': mount_info.get('share_type'),
        'target_port': mount_info.get('target_port'),
    }
    rlt = self.set('%s/%s' % (cluster_name, _id), record)
    if not rlt.success:
        Log(
            1, 'MountDB.create_mount_record save info fail,as[%s]' %
            (rlt.message))
        return rlt
    return Result(_id)
def pod_set(self, resource_list):
    """
    Sum the memory limits of a pod's containers, expressed in Gi.
    :param resource_list: container specs whose resources.limits.memory is a
                          string like '<n>Gi', '<n>Mi' or '<n>m'
    :return: total limit in Gi, or None when no limit could be computed
    """
    mem_total = 0
    for con in resource_list:
        s_mem = con.get('resources', {}).get('limits', {}).get('memory', '')
        # Order matters: 'Mi' also contains 'm', so check the longer suffixes first.
        if 'Gi' in s_mem:
            mem_total += int(s_mem[:-2])
        elif 'Mi' in s_mem:
            # BUGFIX: use float division -- under Python 2, int/int truncated
            # every sub-Gi limit (e.g. 512Mi) to 0.
            mem_total += int(s_mem[:-2]) / 1024.0
        elif 'm' in s_mem:
            # Millibytes -> Gi; float division for the same reason as above.
            mem_total += int(s_mem[:-1]) / (1024.0 ** 3 * 1000)
        else:
            # Unrecognised unit: log and skip. (The old try/except ValueError
            # around this Log call was dead code and has been removed.)
            Log(1, "pod_set can not anly the mem data:{}".format(s_mem))
    Log(4, "pod all limit mem:{}, resource_list:{}".format(mem_total, resource_list))
    if not mem_total:
        Log(1, "get pod limit error.{}".format(mem_total))
        return None
    return mem_total
def compare_metrics(self, deploy, m_type, cur_data, min_data, max_data):
    """
    Scale a deployment by one replica when the current metric leaves the
    [min_data, max_data] band, respecting the HPA's min/max replica bounds.
    :param deploy: deployment dict carrying 'hpa' settings plus name/group/workspace
    :param m_type: metric type, used for logging only
    :param cur_data: current metric value
    :param min_data: scale-in threshold
    :param max_data: scale-out threshold
    """
    cur_replicas = deploy.get('hpa', {}).get('replicas')
    min_replicas = deploy.get('hpa', {}).get('minReplicas')
    max_replicas = deploy.get('hpa', {}).get('maxReplicas')
    if max_replicas == 0:
        # 0 means "no upper bound": substitute a huge sentinel.
        max_replicas = 10000000
    Log(3, "elastic compare_metrics deploy:{}, type:{}, current data:{}, set_min:{}, set_max:{}, cur_replicas:{},\
min_replicas:{}, max_replicas:{}".format(deploy.get('name'), m_type, cur_data, min_data, max_data, cur_replicas, min_replicas, max_replicas))
    if cur_data < min_data and cur_replicas - 1 >= min_replicas:
        # Scale in by one replica.
        r = DeployClient.instance().service_up(deploy.get('name'), deploy.get('group', ''), deploy.get('workspace', ''), -1)
        if r:
            Log(3, u"deploy {} , 缩容 -1 成功".format(deploy.get('name')))
        else:
            Log(1, u"deploy {}, 缩容 -1 失败".format(deploy.get('name')))
        return
    if cur_data > max_data and cur_replicas + 1 <= max_replicas:
        # Scale out by one replica.
        r = DeployClient.instance().service_up(deploy.get('name'), deploy.get('group', ''), deploy.get('workspace', ''), 1)
        if r:
            Log(3, u"deploy {}, 扩容 1 成功".format(deploy.get('name')))
        else:
            # BUGFIX: the failure log read the nonexistent 'deploy' key and
            # always printed an empty name; use 'name' like every other log here.
            Log(1, u"deploy {}, 扩容 1 失败".format(deploy.get('name')))
        return
    return
def add_license(self):
    """
    Register the license with the storage service, then persist the returned
    cluster info (plus member IPs) to etcd.
    :return: the last Result produced
    """
    cluster_id = self.get_cluster_id()
    rlt = self.client.add_license(cluster_id, self.license_str)
    if not rlt.success:
        Log(
            1, 'InitStorageWork.add_license [%s][%s]fail,as[%s]' %
            (self.cluster_name, self.ip0, rlt.message))
        return rlt
    cluster_info = rlt.content
    members = [self.ip0]
    if self.ip1:
        members.append(self.ip1)
    if self.ip2:
        members.append(self.ip2)
    cluster_info['ip'] = ','.join(members)
    # NOTE(review): self.cluster_id is stored here while the API call above
    # used get_cluster_id() -- confirm both hold the same value.
    cluster_info['cluster_id'] = self.cluster_id
    rlt = StoregeClusterDB.instance().create_cluster(
        self.cluster_name, cluster_info)
    if not rlt.success:
        Log(
            1, 'InitStorageWork.create_cluster[%s][%s]to etcd fail,as[%s]' %
            (self.cluster_name, self.ip0, rlt.message))
    return rlt
def delete_volume(self, cluster_name, volume_id, mount_node_list):
    """
    Unmount a volume from its mount nodes (if any), delete it on the storage
    service, then remove its record from etcd.
    :param cluster_name: cluster the volume belongs to
    :param volume_id: etcd id of the volume
    :param mount_node_list: mount records (dicts carrying ID/'ip'/'port')
    :return: the last Result produced
    """
    rlt = VolumeDB.instance().read_volume_info(cluster_name, volume_id)
    if not rlt.success:
        Log(1, 'DeleteStorageNodeWork.delete_volume read_volume_info[%s][%s]fail,as[%s]'%(cluster_name, volume_id, rlt.message))
        return rlt
    volume = rlt.content
    app_node_list = []
    mount_id_list = []
    # BUGFIX: iterate the mount_node_list parameter -- the old code read
    # rlt.mount_node_list, an attribute that does not exist on a Result
    # object and raised AttributeError whenever mounts were present.
    for node in mount_node_list:
        mount_id_list.append(node.get(ID))
        app_node_list.append({'ip': node.get('ip'), 'port': node.get('port')})
    if app_node_list:
        rlt = self.client.unmount_volume(volume, app_node_list)
        if not rlt.success:
            Log(1, 'DeleteStorageNodeWork.delete_volume unmount_volume[%s][%s]fail,as[%s]'%(cluster_name, volume.get('name'), rlt.message))
            return rlt
        # Best effort: the return value of the record cleanup is ignored.
        MountDB.instance().delete_mount_records(cluster_name, mount_id_list)
    cluster_id = self.get_cluster_id()
    rlt = self.client.delete_volume(cluster_id, volume.get('name'))
    if not rlt.success:
        Log(1, 'DeleteStorageNodeWork.delete_volume delete_volume[%s][%s]fail,as[%s]'%(cluster_name, volume.get('name'), rlt.message))
        return rlt
    rlt = VolumeDB.instance().delete_volume(cluster_name, volume_id)
    if not rlt.success:
        Log(1, 'DeleteStorageNodeWork.delete_volume delete_volume[%s][%s] from etcd fail,as[%s]'%(cluster_name, volume_id, rlt.message))
    return rlt
def create_cluster_full(self, cluster_name, cluster_info):
    """
    Create the etcd layout for a new cluster: its cluster_info, an empty
    member list, and a zero apply_num counter.
    :param cluster_name: key prefix for the cluster
    :param cluster_info: dict persisted under <cluster_name>/cluster_info
    :return: Result(cluster_name) on success, the failing Result otherwise
    """
    entries = (
        ('/cluster_info', cluster_info,
         'ClusterMgr.create_cluster save cluster_info fail,as[%s]'),
        ('/member', [],
         'ClusterMgr.create_cluster save member fail,as[%s]'),
        ('/apply_num', 0,
         'ClusterMgr.create_cluster save apply_num fail,as[%s]'),
    )
    for suffix, value, fail_msg in entries:
        rlt = self.set(cluster_name + suffix, value)
        if not rlt.success:
            Log(1, fail_msg % (rlt.message))
            return rlt
    return Result(cluster_name)
def umount_volume(self, cluster_name, volume_id, mount_node_list):
    """
    Unmount a volume from this host (self.ip) only, then delete the matching
    mount record. Returns Result('skip') when the volume is not mounted here.
    :param cluster_name: cluster the volume belongs to
    :param volume_id: etcd id of the volume
    :param mount_node_list: mount records (dicts carrying ID/'ip'/'port')
    :return: the last Result produced
    """
    matched = [node for node in mount_node_list if node.get('ip') == self.ip]
    node_list = [{'ip': node.get('ip'), 'port': node.get('port')} for node in matched]
    # Keep the id of the last matching record, mirroring the original loop.
    mount_id = matched[-1].get(ID) if matched else ''
    if not node_list:
        Log(1, 'umount_volume[%s][%s]fail,as the volume not mount to this host[%s]'%(cluster_name, volume_id, self.ip))
        return Result('skip')
    rlt = VolumeDB.instance().read_volume_info(cluster_name, volume_id)
    if not rlt.success:
        Log(1, 'DeleteStorageNodeWork.delete_volume read_volume_info[%s][%s]fail,as[%s]'%(cluster_name, volume_id, rlt.message))
        return rlt
    volume = rlt.content
    rlt = self.client.unmount_volume(volume, node_list)
    if not rlt.success:
        Log(1, 'DeleteStorageNodeWork.delete_volume unmount_volume[%s][%s]fail,as[%s]'%(cluster_name, volume.get('name'), rlt.message))
        return rlt
    rlt = MountDB.instance().delete_mount_record(cluster_name, mount_id)
    if not rlt.success:
        Log(1, 'DeleteStorageNodeWork.umount_volume delete_mount_record[%s][%s] from etcd fail,as[%s]'%(cluster_name, mount_id, rlt.message))
    return rlt
def get_host_pod_num(self, ns_list, host_ip):
    """
    Count running pods on host_ip across the given namespaces.
    :param ns_list: namespaces to scan
    :param host_ip: host whose pods are counted
    :return: Result(count) or an error Result on the first failed request
    """
    started = datetime.datetime.now()
    pod_num = 0
    for ns in ns_list:
        url = 'namespaces/%s/pods' % ns
        try:
            r = self.client.request(method='GET', url=url, timeout=self.timeout)
        except Exception as e:
            Log(1, 'get_pod_list except{}'.format(e))
            return Result('', msg='get_pod_list except{}'.format(e), result=400)
        Log(
            3, "get_host_pod_num host_ip:{}, time1:{}".format(
                host_ip, datetime.datetime.now() - started))
        if r.status_code != 200:
            Log(1, 'get_pod_list[%s]fail,as[%s]' % (ns_list, r.text))
            return Result('', FAIL, r.text)
        for pod in r.json().get('items', []):
            on_host = pod.get('status', {}).get('hostIP') == host_ip
            in_scope = pod.get('metadata', {}).get('namespace') in ns_list
            if on_host and in_scope:
                if self.parse_pod_info(pod)['status'] == 'running':
                    pod_num += 1
    return Result(pod_num)
def on_finish(self, task_rlt):
    '''
    Wrap-up hook: record the final outcome so the event is not re-triggered
    when the system restarts.
    '''
    level, text = (4, "Task Success.") if task_rlt.success else (3, "Task Fail.")
    Log(level, text)
def end_work(self, task_rlt):
    '''
    Wrap-up hook: mark the stored task as finished so it is not restarted
    when the system reboots.
    '''
    if task_rlt.success:
        Log(4, "Task Success.")
        return
    Log(3, "Task Fail.")
def run(self):
    """
    Sample host cpu/mem/disk/network usage once and store the datapoint in etcd
    under <hostip>/<times>. Sets self.status = 1 when done.
    """
    t1 = time.time()
    # Refresh host system info first.
    self.check_info()
    # cpu: sample the counters twice over 0.2s and compute busy percentage.
    cpu = 0
    alltime1, idletime1 = self.alltime_idletime()
    time.sleep(0.2)
    alltime2, idletime2 = self.alltime_idletime()
    total = alltime2 - alltime1
    idle = idletime2 - idletime1
    if total:
        # BUGFIX: 100.0 forces float arithmetic -- the counters are ints and
        # plain `/` truncated under Python 2, defeating round(..., 3).
        cpu = round(100.0 * (total - idle) / total, 3)
    # mem usage percentage
    mem = 0
    memtotal, memfree = self.get_meminfo()
    if memtotal:
        mem = round(100.0 * (memtotal - memfree) / memtotal, 3)
    # disk usage percentage
    disk = 0
    avaldisk, totaldisk = self.get_disk_stat()
    if totaldisk:
        disk = round(100.0 * (totaldisk - avaldisk) / totaldisk, 3)
    # net: sum rx/tx bytes over all ethernet interfaces
    ethernet_list = self.find_all_Ethernet_interface()
    Rx = 0
    Tx = 0
    for i in ethernet_list:
        d = self.get_network_data_o(i)
        Rx += d['RxBytes']
        Tx += d['TxBytes']
    data = {
        'network': {
            'rx': Rx,
            'tx': Tx
        },
        'mem': mem,
        'num': self.times,
        'disk': disk,
        'cpu': cpu,
        'datetime': time.time()
    }
    rlt = UfleetHostdb.instance().update_info(
        self.hostip + '/' + str(self.times), data)
    if not rlt.success:
        Log(1, "ufleethost monitor update data error:{}".format(rlt.message))
    self.status = 1
    Log(3, "ufleet host monitor finished. cost:{}".format(time.time() - t1))
    return
def end_work(self, task_rlt):
    """
    Record the final task result, notify the workbench of success or failure,
    and persist the updated state.
    """
    self.task_result = task_rlt.to_json()
    if not self.is_success():
        Log(1, "work failed.")
        self.error_code = task_rlt.result
        self.workbench.on_fail(task_rlt)
    else:
        Log(4, "work finished.")
        self.workbench.on_success()
    self.update()
def start(self, s_time=10):
    """
    Run the elastic-scaling loop forever, calling self.timeout() every
    s_time seconds; any exception aborts the loop after a stack dump.
    :param s_time: sleep interval between rounds, in seconds
    """
    try:
        while True:
            Log(3, "elastic #start start at:{}".format(datetime.datetime.now()))
            round_started = datetime.datetime.now()
            self.timeout()
            Log(3, 'elastic all cost:{}'.format(datetime.datetime.now() - round_started))
            time.sleep(s_time)
    except Exception:
        PrintStack()
        return
def delete_cluster(self):
    """
    Delete the storage cluster identified by self.cluster_id.
    Returns Result('done') when no cluster id is set (nothing to delete),
    otherwise the client's Result.
    """
    if not self.cluster_id:
        Log(4, 'delete_cluster skip, as the cluster id not exist.')
        return Result('done')
    rlt = self.client.delete_cluster(self.cluster_id)
    if not rlt.success:
        Log(
            1, 'DeleteStorageWork.delete_cluster[%s]fail,as[%s]' %
            (self.cluster_name, rlt.message))
    return rlt
def load_workbench(self, task_info):
    """
    Rebuild the workbench object referenced by task_info['workbench_id'].
    :param task_info: dict that should carry a 'workbench_id' key
    :return: a workbench instance, or None when the id is missing or unreadable
    """
    workbench_id = task_info.get("workbench_id", None)
    if workbench_id is None:
        Log(1, "Work.load_workbench fail,as[workbench_id not exist]")
        return None
    rlt = WorkDB.instance().read_work_info(workbench_id)
    if not (rlt.success and rlt.content):
        Log(1, "Work.read_work_info[%s] fail,as[%s]" % (workbench_id, rlt.message))
        return None
    return self.new_workbench(rlt.content)
def update_to_db(self, task_data=None):
    """
    Persist the task state; defaults to a fresh snapshot when no data is given.
    :param task_data: optional pre-built state dict to store
    """
    if not self._id:
        Log(1, "TaskData.update_to_db fail,as[The id is invalid]")
        return
    payload = self.snapshot() if task_data is None else task_data
    rlt = WorkDB.instance().update_work_part_info(self._id, payload)
    if not rlt.success:
        Log(1, "TaskData.update_work_part_info fail,as[%s]" % (rlt.message))