def associate_defns(self, site_id, group_id, def_type, def_ids, auth, op_level=None):
    error_desc = ""
    site = self.manager.getSite(site_id)
    group = self.manager.getGroup(auth, group_id)
    group_list = self.manager.getGroupList(auth, site_id)
    def_id_list = def_ids.split(",")
    for def_id in def_id_list:
        new_sd = DBSession.query(StorageDef).filter_by(id=def_id).first()
        node = None
        try:
            associate = True
            self.sync_manager.add_defn(new_sd, site, group, node, auth,
                                       to_unicode(constants.STORAGE),
                                       constants.ATTACH, "ADD_STORAGE_DEF",
                                       self.storage_manager, self.manager,
                                       op_level, associate)
            #matching disks on association of storage.
            vm_disks = self.manager.get_vm_disks_from_pool(auth, group_id)
            storage_disks = DBSession.query(StorageDisks).filter_by(storage_id=def_id)
            if storage_disks:
                for eachdisk in storage_disks:
                    self.manager.matching_disk_on_discover_storage(vm_disks, eachdisk.id)
        except Exception, ex:
            error_desc = to_str(ex)
            print_traceback()
            LOGGER.error(to_str(ex).replace("'", ""))
            #if we get any exception while adding/syncing the definition, remove the definition.
            add_mode = True
            try:
                self.sync_manager.remove_defn(new_sd, site, group, node, auth,
                                              to_unicode(constants.STORAGE),
                                              constants.DETACH, "REMOVE_STORAGE_DEF",
                                              self.storage_manager, self.manager,
                                              add_mode, group_list, op_level)
            except Exception, ex1:
                print_traceback()
                LOGGER.error(to_str(ex1).replace("'", ""))
                raise Exception(to_str(ex1))
    if error_desc:
        raise Exception(error_desc)
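# Usage sketch for associate_defns() above (the service name and ids are illustrative,
# not taken from the original source): def_ids is a comma-separated string of StorageDef
# ids, and op_level is one of the constants.SCOPE_* values used elsewhere in this module.
#
#   storage_service.associate_defns(site_id=1, group_id=5,
#                                   def_type=constants.STORAGE,
#                                   def_ids="10,11", auth=auth,
#                                   op_level=constants.SCOPE_SP)
#
# Each id is attached via sync_manager.add_defn(); on failure the same definition is
# rolled back with remove_defn() and the first recorded error is re-raised after the loop.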
def resume_work(self, context):
    """ on resume setting value from task context """
    execution_context = context["execution_context"]
    WRK_LOGGER.debug("RESUMING WORKER for :" + self.worker)
    if execution_context:
        self.start_time = execution_context.get("start_time", datetime.utcnow())
        self.worker_ids = execution_context.get("worker_ids", [])
        self.sp_list = execution_context.get("sp_list", [])
        ##validate all the worker ids are taken care of
        ets = DBSession.query(EntityTasks).filter(EntityTasks.worker == self.worker).\
            filter(not_(EntityTasks.worker_id.in_(self.worker_ids))).all()
        if len(ets) > 0:
            xtra_work_ids = [et.worker_id for et in ets]
            WRK_LOGGER.error("GOT ENT Tasks different from execution_context :" + self.worker +
                             ": CONTEXT WORKERS : " + str(self.worker_ids) +
                             ": XTRA WORKERS :" + str(xtra_work_ids))
            r = DBSession.query(EntityTasks.entity_id).\
                filter(EntityTasks.worker_id.in_(xtra_work_ids)).\
                filter(EntityTasks.worker == self.worker).\
                update(values=dict(worker_id=None, finished=True, end_time=datetime.utcnow()))
            transaction.commit()
            WRK_LOGGER.debug("Cleaned Up entity_tasks . worker:rows : " + self.worker + ":" + str(r))
    WRK_LOGGER.debug("RESUMING WORKER for :" + self.worker + ":" + str(self.start_time) +
                     ":" + str(self.worker_ids))
    self.do_work()
def get_storage_stats(self, vm_id=None):
    storage_stats = {}
    disk_stats = {}
    disk_detail = {}
    if not vm_id:
        vm_id = self.vm_id
    if vm_id:
        vm_disks = DBSession.query(VMDisks).filter_by(vm_id=vm_id)
        for vm_disk in vm_disks:
            disk_detail = {}
            disk_detail["DEV_TYPE"] = vm_disk.dev_type
            disk_detail["IS_LOCAL"] = self.get_remote(vm_disk.disk_name)
            disk_detail["DISK_SIZE"] = vm_disk.disk_size
            disk_detail["DISK_NAME"] = vm_disk.disk_name
            storage_disk_id = None
            vm_storage_link = DBSession.query(VMStorageLinks).filter_by(
                vm_disk_id=vm_disk.id).first()
            if vm_storage_link:
                storage_disk_id = vm_storage_link.storage_disk_id
            disk_detail["STORAGE_DISK_ID"] = storage_disk_id
            disk_stats[vm_disk.disk_name] = disk_detail
    storage_stats["LOCAL_ALLOCATION"] = 0
    storage_stats["SHARED_ALLOCATION"] = 0
    storage_stats["DISK_STATS"] = disk_stats
    return storage_stats
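# Shape of the dict returned by get_storage_stats() above. The keys come from the code;
# the disk path, size and ids below are illustrative assumptions, not from the source.
_example_storage_stats = {
    "LOCAL_ALLOCATION": 0,
    "SHARED_ALLOCATION": 0,
    "DISK_STATS": {
        "/var/lib/xen/images/vm01/disk.img": {
            "DEV_TYPE": "file",
            "IS_LOCAL": False,          # value returned by self.get_remote(disk_name)
            "DISK_SIZE": 8192,
            "DISK_NAME": "/var/lib/xen/images/vm01/disk.img",
            "STORAGE_DISK_ID": 42,      # None when no VMStorageLinks row exists
        },
    },
}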
def update_disks_size(self, auth):
    sites = DBSession.query(Site)
    if sites:
        for eachsite in sites:
            #site = DBSession.query(Sites).filter_by(id=eachsite.id).first()
            site_entity = auth.get_entity(eachsite.id)
            #get all groups in the site.
            group_entities = auth.get_entities(
                to_unicode(constants.SERVER_POOL), site_entity)
            #loop through each group in the site
            for eachgroup in group_entities:
                group = DBSession.query(ServerGroup).filter_by(
                    id=eachgroup.entity_id).first()
                if group:
                    group_entity = auth.get_entity(group.id)
                    #get all nodes in the group
                    node_entities = auth.get_entities(
                        to_unicode(constants.MANAGED_NODE), group_entity)
                    #loop through each node in the group
                    for eachnode in node_entities:
                        node = DBSession.query(ManagedNode).filter_by(
                            id=eachnode.entity_id).first()
                        server_def_link = DBSession.query(
                            ServerDefLink).filter_by(server_id=node.id)
                        if server_def_link:
                            for each_link in server_def_link:
                                defn = DBSession.query(StorageDef).filter_by(
                                    id=each_link.def_id).first()
                                if defn:
                                    self.test_storage_def(
                                        auth, node, group, eachsite, defn)
def getDefnsFromGroupList(self, auth, site_id, group_list, defType, defs_array):
    if group_list:
        #getting definitions from each group
        for group in group_list:
            resultset = DBSession.query(SPDefLink).filter_by(
                group_id=group.id, def_type=defType)
            for row in resultset:
                defn = self.get_defn(row.def_id)
                if defn:
                    #set the status here to return and display in grid with definition name.
                    #node_id is None here
                    defn.status = self.get_defn_status(
                        defn, defType, site_id, group.id, None)
                    defs_array.append(defn)
            #getting definitions from each server in the group
            for node in group.getNodeList(auth).itervalues():
                resultset = DBSession.query(ServerDefLink).filter_by(
                    server_id=node.id, def_type=defType)
                for row in resultset:
                    defn = DBSession.query(NwDef).filter_by(
                        id=row.def_id, scope=constants.SCOPE_S).first()
                    if defn:
                        #set the status here to return and display in grid with definition name.
                        defn.status = row.status
                        defs_array.append(defn)
    return defs_array
def get_vm_linked_with_storage(self, storage_disk_id):
    vm = None
    if storage_disk_id:
        vm_storage_link = DBSession.query(VMStorageLinks).filter_by(
            storage_disk_id=storage_disk_id).first()
        if vm_storage_link:
            vm_disk = DBSession.query(VMDisks).filter_by(
                id=vm_storage_link.vm_disk_id).first()
            if vm_disk:
                vm = DBSession.query(VM).filter_by(id=vm_disk.vm_id).first()
    return vm
def exec_task(self, auth, ctx, node_ids):
    try:
        LOGGER.debug('entered in exec task for NodesAvailability task')
        nodes = DBSession.query(ManagedNode).filter(ManagedNode.id.in_(node_ids)).all()
        node_names = ""
        port = 0
        for node in nodes:
            node_names += node.hostname + " "
            nport = node.get_connection_port()
            if port == 0:
                port = nport
            else:
                if port > 0 and nport != port:
                    port = -1
        strt = p_task_timing_start(AVL_LOGGER, "NodesAvailability",
                                   node_names.strip().split(' '))
        strt1 = p_task_timing_start(AVL_LOGGER, "PreProcess",
                                    node_names.strip().split(' ')[0])
        self.completed_nodes = []
        self.pending_nodes = [node_id for node_id in node_ids]
        self.exc_node_ids = [node_id for node_id in node_ids]
        index = 0
        node_id = self.get_next_node_id(index)
        use_nmap = eval(tg.config.get("use_nmap_for_heartbeat", "False"))
        if use_nmap == True and port > 0:
            strt2 = p_task_timing_start(AVL_LOGGER, "NodesNmap", node_names)
            (output, exit_code) = self.do_nmap_ping(node_names=node_names, port=port)
            p_task_timing_end(AVL_LOGGER, strt2)
        else:
            (output, exit_code) = ("", -1)
        p_task_timing_end(AVL_LOGGER, strt1)
        while node_id is not None:
            self.pending_nodes.remove(node_id)
            node = DBSession.query(ManagedNode).filter(ManagedNode.id == node_id).first()
            index += 1
            node_id = self.get_next_node_id(index)
            strt1 = p_task_timing_start(AVL_LOGGER, "NodeRefreshAvail", node.hostname)
            if node:
                self.current_node = node
                self.start_time = datetime.utcnow()
                try:
                    try:
                        node.refresh_avail(auth, exit_code=exit_code,
                                           isUp="(" + node.hostname + ")" in output)
                    except Exception, e:
                        LOGGER.error("Error updating Node availability . Server :" + node.hostname)
                        traceback.print_exc()
                finally:
                    self.completed_nodes.append(node.id)
                    p_task_timing_end(AVL_LOGGER, strt1)
    finally:
        self.check_if_hung()
    p_task_timing_end(AVL_LOGGER, strt)
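# Note on the availability check above (a reading of this code, not of other sources):
# do_nmap_ping() is expected to return (output, exit_code) for one scan covering all
# hostnames, and refresh_avail() treats a node as up when "(<hostname>)" appears in that
# output, e.g. in a grepable nmap-style line such as the illustrative one below.
_example_nmap_line = "Host: 192.168.1.10 (node01.example.com)  Status: Up"
_example_is_up = "(" + "node01.example.com" + ")" in _example_nmap_line  # True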
def vm_action(self, dom_id, node_id, action, date=None, time=None):
    self.authenticate()
    try:
        wait_time = None
        dom = DBSession().query(VM).filter(VM.id == dom_id).one()
        self.tc.vm_action(session['auth'], dom_id, node_id, action, date, time)
        if action == constants.START:
            wait_time = dom.get_wait_time('view_console')
    except Exception, ex:
        print_traceback()
        return "{success: false,msg:'" + to_str(ex).replace("'", "").replace("\n", "") + "'}"
def add_node_defn(self, node_id, def_id, def_type, status, details):
    #Check whether the record is already present...
    row = DBSession.query(ServerDefLink).filter_by(
        server_id=node_id, def_id=def_id).first()
    if not row:
        node_defn = ServerDefLink()
        node_defn.server_id = to_unicode(node_id)
        node_defn.def_type = to_unicode(def_type)
        node_defn.def_id = def_id
        node_defn.status = to_unicode(status)
        node_defn.details = to_unicode(details)
        node_defn.dt_time = datetime.utcnow()
        DBSession.add(node_defn)
def add_site_defn(self, site_id, def_id, def_type, status, oos_count):
    #Check whether the record is already present...
    row = DBSession.query(DCDefLink).filter_by(
        site_id=site_id, def_id=def_id).first()
    if not row:
        DCDL = DCDefLink()
        DCDL.site_id = site_id
        DCDL.def_type = def_type
        DCDL.def_id = def_id
        DCDL.status = to_unicode(status)
        DCDL.oos_count = oos_count
        DCDL.dt_time = datetime.utcnow()
        DBSession.add(DCDL)
def get_defns(self, defType, site_id, group_id, node_id=None, op_level=None,
              auth=None, group_list=None):
    sync_manager = SyncDef()
    defs_array = []
    if op_level == constants.SCOPE_DC:
        resultset = DBSession.query(DCDefLink).filter_by(site_id=site_id,
                                                         def_type=defType)
        for row in resultset:
            defn = DBSession.query(NwDef).filter_by(
                id=row.def_id).first()  #is_deleted=False
            if defn:
                #set the status here to return and display in grid with definition name.
                defn.status = row.status
                defs_array.append(defn)
        #getting definitions from each group
        #getting definitions from each server in the group
        defs_array = self.getDefnsFromGroupList(auth, site_id, group_list,
                                                defType, defs_array)
    elif op_level == constants.SCOPE_SP:
        #getting definitions from group and each server in the group
        defs_array = self.getDefnsFromGroupList(auth, site_id, group_list,
                                                defType, defs_array)
    elif op_level == constants.SCOPE_S:
        resultset = DBSession.query(ServerDefLink).filter_by(
            server_id=node_id, def_type=defType)
        for row in resultset:
            defn = DBSession.query(NwDef).filter_by(
                id=row.def_id).first()  #is_deleted=False
            if defn:
                #set the status here to return and display in grid with definition name.
                defn.status = row.status
                defs_array.append(defn)
    #Following condition is for NetworkService().get_available_nws() function.
    #when op_level is none then get all the networks created on the server
    #(networks present in serverdeflinks table for that server)
    elif not op_level:
        resultset = DBSession.query(NwDef)\
            .join((ServerDefLink, ServerDefLink.def_id == NwDef.id))\
            .filter(ServerDefLink.server_id == node_id)\
            .filter(ServerDefLink.def_type == defType)
        for defn in resultset:
            if defn:
                defs_array.append(defn)
    return defs_array
def getSiteDefListToAssociate(self, site_id, group_id, defType):
    sdArray = []
    if site_id:
        dc_rs = DBSession.query(DCDefLink).filter_by(site_id=site_id,
                                                     def_type=defType)
        for row in dc_rs:
            sp_def = DBSession.query(SPDefLink).filter_by(
                group_id=group_id, def_id=row.def_id, def_type=defType).first()
            if not sp_def:
                defn = DBSession.query(NwDef).filter_by(
                    id=row.def_id, scope=constants.SCOPE_DC).first()
                if defn:
                    defn.status = row.status
                    sdArray.append(defn)
    return sdArray
def add_group_defn(self, group_id, def_id, def_type, status, oos_count):
    #Check whether the record is already present...
    row = DBSession.query(SPDefLink).filter_by(
        group_id=group_id, def_id=def_id).first()
    if not row:
        SPDL = SPDefLink()
        SPDL.group_id = group_id
        SPDL.def_type = def_type
        SPDL.def_id = def_id
        SPDL.status = status
        SPDL.oos_count = oos_count
        SPDL.dt_time = datetime.utcnow()
        DBSession.add(SPDL)
def get_lock(self, sub_system, entity_id, operation, table_name):
    # select query with lock
    lock_m = DBSession.query(CMS_Locks).with_lockmode("update").\
        filter(CMS_Locks.sub_system == sub_system).\
        filter(CMS_Locks.entity_id == entity_id).\
        filter(CMS_Locks.operation == operation).\
        filter(CMS_Locks.table_name == table_name).all()
    # lock_m = DBSession.query(CMS_Locks).with_lockmode("update").\
    #     filter(CMS_Locks.table_name == table_name).first()
    if len(lock_m) == 0:
        lm = CMS_Locks(sub_system, entity_id, operation, table_name)
        DBSession.add(lm)
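# Sketch of how get_lock() above serializes concurrent writers (based on SQLAlchemy
# semantics, not on additional source code): with_lockmode("update") emits
# SELECT ... FOR UPDATE, so a second transaction calling get_lock() with the same
# (sub_system, entity_id, operation, table_name) blocks until the first one commits.
# The argument values below are illustrative only.
#
#   self.get_lock(u'storage', node_id, u'attach', u'storage_defs')
#   ... perform the guarded updates ...
#   transaction.commit()   # commit releases the row lock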
def update_execution_context(self):
    """ storing context in task for resume process """
    tid = TaskUtil.get_task_context()
    WRK_LOGGER.debug("in update_execution_context Parent task : " + str(tid) +
                     " : child tasks :" + str(self.worker_ids))
    task = Task.get_task(tid)
    if task is not None:
        self.execution_context["start_time"] = self.start_time
        self.execution_context["worker_ids"] = self.worker_ids
        task.context["execution_context"] = self.execution_context
        DBSession.add(task)
        WRK_LOGGER.debug("in update_execution_context updated Parent task : " + str(tid))
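# Round-trip of the worker execution context (keys taken from update_execution_context()
# here and resume_work() earlier; the values are illustrative, and this assumes the same
# datetime import used by the surrounding code): the dict is stored on the parent task
# and read back on restart. "sp_list" is restored in resume_work() but not written here,
# so it presumably comes from another writer.
_example_execution_context = {
    "start_time": datetime.utcnow(),
    "worker_ids": [101, 102],   # ids of child EntityTasks workers (illustrative)
}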
def get_disk_stat(self, vm_id, filename):
    disk_detail = {}
    storage_disk = DBSession.query(StorageDisks).filter_by(
        unique_path=filename).first()
    if storage_disk:
        vm_disk = DBSession.query(VMDisks).filter_by(
            vm_id=vm_id, disk_name=filename).first()
        if vm_disk:
            disk_detail = {}
            disk_detail["DEV_TYPE"] = vm_disk.dev_type
            disk_detail["IS_LOCAL"] = self.get_remote(vm_disk.disk_name)
            disk_detail["DISK_SIZE"] = vm_disk.disk_size
            disk_detail["DISK_NAME"] = vm_disk.disk_name
            disk_detail["STORAGE_DISK_ID"] = storage_disk.id
    return disk_detail
def send_deployment_stats(self):
    #task_service = self.svc_central.get_service(self.task_service_id)
    t = SendDeploymentStatsTask(u'Send Deployment Stats', {'quiet': True}, [],
                                dict(), None, u'admin')
    dc_ent = DBSession.query(Entity).filter(Entity.type_id == 1).first()
    t.set_entity_info(dc_ent)
    t.set_interval(TaskInterval(interval=None, next_execution=datetime.utcnow()))
    DBSession.add(t)
    import transaction
    transaction.commit()
    logger.debug("SendDeploymentStatsTask Submitted")
    return t.task_id
def on_remove_group(self, site_id, groupId, auth, def_manager):
    op = constants.DETACH
    defType = def_manager.getType()
    site = DBSession.query(Site).filter_by(id=site_id).first()
    group = DBSession.query(ServerGroup).filter_by(id=groupId).first()
    defn_list = []
    #get all the definitions from the group
    #getting pool level definitions here
    sp_defns = DBSession.query(SPDefLink).filter_by(group_id=groupId)
    if sp_defns:
        for eachdefn in sp_defns:
            defn = def_manager.get_defn(eachdefn.def_id)
            if defn:
                defn_list.append(defn)
    for each_defn in defn_list:
        group_defn = DBSession.query(SPDefLink).filter_by(
            def_id=each_defn.id, def_type=defType).first()
        if group_defn:
            DBSession.delete(group_defn)
        #delete only those definitions whose scope is server pool.
        #data center level definitions can not be deleted since we are removing the server pool only.
        if each_defn.scope == constants.SCOPE_SP:
            DBSession.delete(each_defn)
def check_if_hung(self):
    WRK_LOGGER.debug("Check if Task, " + self.name + " is hung? ")
    marked_hung = False
    try:
        marked_hung = self.mark_hung
        if marked_hung:
            WRK_LOGGER.debug("Task, " + self.name + "(" + str(self.task_id) +
                             ") was marked hung. updating entity_tasks")
            DBSession.query(EntityTasks).\
                filter(EntityTasks.worker_id == to_unicode(self.task_id)).\
                update(dict(worker_id=None, finished=True, end_time=datetime.utcnow()))
            # transaction.commit()
    except AttributeError, e:
        pass
def get_defn_status(self, defn, defType, site_id, group_id, node_id):
    status = None
    if defn.scope == constants.SCOPE_DC:
        dc_defn = DBSession.query(DCDefLink).filter_by(
            site_id=site_id, def_id=defn.id, def_type=defType).first()
        if dc_defn:
            status = dc_defn.status
    elif defn.scope == constants.SCOPE_SP:
        sp_defn = DBSession.query(SPDefLink).filter_by(
            group_id=group_id, def_id=defn.id, def_type=defType).first()
        if sp_defn:
            status = sp_defn.status
    elif defn.scope == constants.SCOPE_S:
        s_defn = DBSession.query(ServerDefLink).filter_by(
            server_id=node_id, def_id=defn.id, def_type=defType).first()
        if s_defn:
            status = s_defn.status
    return status
def get_storage_id(self, filename):
    storage_id = None
    storage_disk = DBSession.query(StorageDisks).filter_by(
        unique_path=filename).first()
    if storage_disk:
        storage_id = storage_disk.storage_id
    return storage_id
def get_entities(self, enttype_id):
    result = []
    entities = DBSession.query(Entity).filter(Entity.type_id == enttype_id)
    for ent in entities:
        result.append(dict(entid=ent.entity_id, entname=ent.name))
    return result
def user_login(self, args):
    try:
        username = args.get('login')
        password = args.get('password')
        user = DBSession.query(User).filter(User.user_name == username).first()
        if user:
            if user.status != True:
                msg = "User: " + username + " is not Active."
                LOGGER.info(msg)
                return dict(success=False, user=None, msg=msg)
            sqa_sts = user.validate_password(password)
            if not sqa_sts:
                msg = "Invalid password provided for CMS authentication."
                LOGGER.info(msg)
                return dict(success=False, user=None, msg=msg)
            if not len(user.groups):
                msg = "User should belong to a group."
                LOGGER.info(msg)
                return dict(success=False, user=None, msg=msg)
        else:
            msg = "Invalid username provided for CMS authentication."
            LOGGER.info(msg)
            return dict(success=False, user=None, msg=msg)
        return dict(success=True, user=username)
    except Exception, e:
        print "Exception", e
        LOGGER.error(e)
        return dict(success=False, user=None, msg=str(e))
def resume_task(self, auth, ctx, appliance_entry, image_store, group_id,
                image_name, platform, force):
    ###TODO:disk cleanup
    img = DBSession.query(Image).filter(Image.name == image_name).first()
    if img is None:
        raise Exception(constants.INCOMPLETE_TASK)
def exec_task(self, auth, ctx, node_ids):
    LOGGER.debug('entered in exec task for VMAvailability task')
    strt = p_task_timing_start(AVL_LOGGER, "VMAvailability", node_ids)
    try:
        self.completed_nodes = []
        self.pending_nodes = [node_id for node_id in node_ids]
        self.exc_node_ids = [node_id for node_id in node_ids]
        index = 0
        node_id = self.get_next_node_id(index)
        while node_id is not None:
            self.pending_nodes.remove(node_id)
            node = DBSession.query(ManagedNode).filter(ManagedNode.id == node_id).first()
            index += 1
            node_id = self.get_next_node_id(index)
            if node and node.is_up():
                self.current_node = node
                self.start_time = datetime.utcnow()
                try:
                    try:
                        strt1 = p_task_timing_start(AVL_LOGGER, "RefreshVMAvail", node.id)
                        node.refresh_vm_avail()
                        p_task_timing_end(AVL_LOGGER, strt1)
                    except Exception, e:
                        LOGGER.error("Error updating VM availability . Server :" + node.hostname)
                        traceback.print_exc()
                finally:
                    self.completed_nodes.append(node.id)
    finally:
        self.check_if_hung()
    p_task_timing_end(AVL_LOGGER, strt)
def on_add_node(self, nodeId, groupId, site_id, auth, def_manager):
    op = constants.ATTACH
    #If one of them is not present then return from here.
    if not (nodeId or groupId):
        return
    defn_list = []
    errs = []
    sync_manager = SyncDef()
    defType = def_manager.getType()
    #Link all the definitions in the server pool to this new server node.
    sp_defns = DBSession.query(SPDefLink).filter_by(group_id=to_unicode(groupId))
    if sp_defns:
        for eachdefn in sp_defns:
            defn = def_manager.get_defn(eachdefn.def_id)
            if defn:
                defn_list.append(defn)
                #Add these default values to this link definition. These values would get changed after the sync operation.
                status = to_unicode(constants.OUT_OF_SYNC)
                details = None
                sync_manager.add_node_defn(nodeId, defn.id, defType, status, details)
def get_imagegrp_summary_info(self, grp_id):
    result = []
    grp = DBSession.query(Entity).filter(Entity.entity_id == grp_id).one()
    count = len(grp.children)
    result.append(dict(name='Group Name', value=grp.name))
    result.append(dict(name='Total Templates', value=count))
    return result
def send_email_to_user(self, msg):
    # Query sender and password from the email credential table.
    # Query mail_server, port, use_secure from the email setup table for the currently logged in user.
    # receiver: to be queried from the users table.
    self.msg = msg
    curr_user_id = session.get('userid')
    #query users table to retrieve the email address of the currently logged in user
    userRecord = DBSession.query(User.email_address).filter(
        User.user_name == curr_user_id).first()
    if userRecord:
        self.receivers = userRecord.email_address
        emailservers = self.get_mailservers()
        for eachmailserver in emailservers:
            if eachmailserver:
                self.mail_server = eachmailserver['MailSetup'].mail_server
                self.port = int(eachmailserver['MailSetup'].port)
                self.secure_type = int(eachmailserver['MailSetup'].use_secure)
                self.cred_details = eachmailserver['Creds'].cred_details
                self.password = self.cred_details['password']
                self.sender = self.cred_details['user_email']
                result = False
                if self.secure_type == NONSECURE:
                    result = EmailManager().send_nonsecure(self.mail_server, self.port,
                                                           self.sender, self.receivers, msg)
                elif self.secure_type == TLS:
                    result = EmailManager().send_tls(self.mail_server, self.port,
                                                     self.sender, self.password,
                                                     self.receivers, msg)
                else:
                    result = EmailManager().send_ssl(self.mail_server, self.port,
                                                     self.sender, self.password,
                                                     self.receivers, msg)
                if result == True:
                    return "Test mail sent from " + eachmailserver['MailSetup'].mail_server
def delete_group(self, groupid):
    groupid = int(groupid)
    group = DBSession.query(Group).filter(Group.group_id == groupid).first()
    if group is not None:
        if group.group_name in constants.DEFAULT_GROUPS:
            raise Exception("Can not delete " + group.group_name + " group.")
    DBHelper().delete_all(Group, [], [Group.group_id == groupid])
def metric_cache(self, node_id, metric, metric_type, rollup_type, per_type,
                 date1, date2, period):
    """ Setting value for cache by checking the conditions """
    now = datetime.utcnow()
    status = False
    ent = DBSession.query(Entity).filter(Entity.entity_id == node_id).one()
    cache_key = (node_id, ent.type.name, metric, period)
    # checking whether the cache key already exists
    if self.cache.has_key(cache_key):
        cached_time = self.cache[cache_key].get("cached_time")
        if now > cached_time:
            status = True
    else:
        self.check_cache_limit(self.cache)
        status = True
    if status:
        # querying the result and setting it in the cache
        result = self.chart_service.get_metrics_specific_value(
            [node_id], metric, metric_type, rollup_type, per_type, date1, date2)
        cache_time = now + timedelta(minutes=int(tg.config.get(constants.CACHE_TIME)))
        self.cache[cache_key] = {"cached_time": cache_time, "value": result}
        self.cache[cache_key]["last_accessed"] = now
    return self.cache[cache_key].get("value")
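# Shape of one self.cache entry maintained by metric_cache() above. The key layout and
# field names come from the code; the concrete values (entity type, metric, period,
# numbers) are illustrative assumptions. Entries are refreshed once "cached_time"
# (now + constants.CACHE_TIME minutes) has passed.
_example_cache_key = (7, u'managed_node', u'cpu_utilization', u'hour')  # (node_id, entity type name, metric, period)
_example_cache_entry = {
    "cached_time": datetime.utcnow() + timedelta(minutes=5),   # refresh deadline
    "value": 42.0,                                             # result of get_metrics_specific_value()
    "last_accessed": datetime.utcnow(),
}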
def send_test_email(self, desc, servername, port, useremail, password, secure): self.sender = useremail Record = DBSession.query(User.email_address).filter(User.user_name =='admin').first() self.receivers =Record.email_address self.mail_server = servername if port: self.port = int(port) self.secure_type = int(secure) self.password = password self.subject = "Test Email" self.content="\Test message Sent on " + to_str(ct_time()) self.msg = MIMEText(self.content, self.text_subtype) self.msg['Subject']= "WishCloud Test Email" # SendSuccess = False try: if (self.secure_type== NONSECURE): EmailManager().send_nonsecure(servername,self.port,useremail,Record.email_address,self.msg.as_string()) elif (self.secure_type== TLS): EmailManager().send_tls(servername,self.port,useremail,password,Record.email_address,self.msg.as_string()) else: EmailManager().send_ssl(servername,self.port,useremail,password,Record.email_address,self.msg.as_string()) except Exception, ex: # traceback.print_exc() LOGGER.error("Error sending mails:"+to_str(ex).replace("'","")) raise ex
def get_server_def_list(self, site_id, group_id, def_id):
    try:
        server_def_list = []
        node_defns = self.sync_manager.get_node_defns(def_id,
                                                      to_unicode(constants.STORAGE))
        if node_defns:
            for eachdefn in node_defns:
                temp_dic = {}
                if eachdefn:
                    node = DBSession.query(ManagedNode).filter_by(
                        id=eachdefn.server_id).first()
                    temp_dic['id'] = eachdefn.server_id
                    if node:
                        temp_dic['name'] = node.hostname
                    else:
                        temp_dic['name'] = None
                    temp_dic['status'] = eachdefn.status
                    if eachdefn.details:
                        temp_dic['details'] = eachdefn.details
                    else:
                        temp_dic['details'] = None
                    server_def_list.append(temp_dic)
        return server_def_list
    except Exception, ex:
        LOGGER.error(to_str(ex).replace("'", ""))
        return "{success: false,msg: '" + to_str(ex).replace("'", "") + "'}"
def delete_user(self, userid):
    userid = int(userid)
    user = DBSession.query(User).filter(User.user_id == userid).first()
    if user is not None:
        if user.user_name in constants.DEFAULT_USERS:
            raise Exception("Can not delete " + user.user_name + " user.")
    DBHelper().delete_all(User, [], [User.user_id == userid])
def get_server_def_list(self, site_id, group_id, def_id):
    try:
        server_def_list = []
        node_defns = self.sync_manager.get_node_defns(def_id,
                                                      to_unicode(constants.NETWORK))
        if node_defns:
            for eachdefn in node_defns:
                temp_dic = {}
                if eachdefn:
                    node = DBSession.query(ManagedNode).filter_by(
                        id=eachdefn.server_id).first()
                    temp_dic['id'] = eachdefn.server_id
                    if node:
                        temp_dic['name'] = node.hostname
                    else:
                        temp_dic['name'] = None
                    temp_dic['status'] = eachdefn.status
                    if eachdefn.details:
                        temp_dic['details'] = eachdefn.details
                    else:
                        temp_dic['details'] = None
                    server_def_list.append(temp_dic)
        return server_def_list
    except Exception, ex:
        print_traceback()
        LOGGER.error(to_str(ex).replace("'", ""))
        return "{success: false,msg: '" + to_str(ex).replace("'", "") + "'}"
def set_entity_details(self, ent_id):
    ent = DBSession.query(Entity).filter(Entity.entity_id == ent_id).first()
    if ent is not None:
        self.entity_id = ent.entity_id
        self.entity_type = ent.type_id
        self.entity_name = ent.name
def __init__(self, config):
    self.config = config
    self.storage_stats = {}
    self.vm_id = None
    if self.config:
        vm = DBSession.query(VM).filter_by(id=to_unicode(config.id)).first()
        if vm:
            self.vm_id = vm.id
            self.storage_stats = self.get_storage_stats(vm.id)
    self.disk_stats = {}
    if self.storage_stats is not None:
        ds = self.storage_stats.get(self.DISK_STATS)
        if ds is None:
            self.storage_stats[self.DISK_STATS] = self.disk_stats  # initial value of {}
        else:
            self.disk_stats = ds
        self.local_allocation = self.storage_stats.get(self.LOCAL_ALLOC)
        if not self.local_allocation:
            self.local_allocation = 0
        self.shared_allocation = self.storage_stats.get(self.SHARED_ALLOC)
        if not self.shared_allocation:
            self.shared_allocation = 0
    self.storage_disk_id = None
def save_user_det(self, login, userid, username, fname, lname, displayname,
                  password, email, phone, status):
    user1 = DBSession.query(User).filter(User.user_name == username).first()
    if user1 is None:
        if not self.check_email(email):
            return 'Email_exist'
        result = []
        user = User()
        user.password = password
        user.firstname = fname
        user.lastname = lname
        user.display_name = displayname
        user.user_name = username
        user.phone_number = phone
        user.email_address = email
        user.created_by = login
        user.modified_by = login
        user.created_date = datetime.now()
        if status == "InActive":
            user.status = False
        # L = (groupids).split(',')
        # if groupids != "":
        #     for i in L:
        #         group = DBSession.query(Group).filter(Group.group_id == int(i)).first()
        #         user.groups.append(group)
        DBHelper().add(user)
        return result
    else:
        result = 'False'
        return result
def get_template_info(self):
    """
    Returns the template name, the template's current version and whether the vm and
    template versions match. For imported vms, only the template name is available
    from the config file.
    """
    template_info = {}
    template_info["template_name"] = self._config['image_name']
    template_info["template_version"] = '0.0'
    template_info["version_comment"] = ''
    try:
        if self.image_id is not None:
            from convirt.model.ImageStore import Image
            img = DBSession.query(Image).filter(Image.id == self.image_id).one()
            template_info["template_name"] = img.name
            template_info["template_version"] = to_str(self.template_version)
            template_info["version_comment"] = ''
            if self.template_version != img.version:
                template_info["version_comment"] = "*Current version of the Template is " + \
                    to_str(img.version)
    except Exception, e:
        LOGGER.error(e)
    return template_info
def check_user_updates(self, username):
    update_items = []
    dep = None
    try:
        from convirt.model import Deployment
        deps = DBSession.query(Deployment).all()
        if len(deps) > 0:
            dep = deps[0]
            user_config_filename = os.path.abspath(tg.config.get("user_config"))
            if not os.path.exists(user_config_filename):
                user_config_file = open(user_config_filename, "w")
                user_config_file.close()
            user_config = PyConfig(filename=user_config_filename)
            date = user_config.get(username)
            if date != None:
                p_r_date = time.strptime(date, "%Y-%m-%d %H:%M:%S")
                r_date = datetime(*p_r_date[0:5])
            else:
                r_date = datetime.utcnow()
            edition = get_product_edition()
            (update_items, max_dt) = self.get_new_updates(dep.deployment_id,
                                                          r_date, edition)
            user_config[username] = max_dt
            user_config.write()
        else:
            LOGGER.error("Deployment table is not set.Update can not proceed.")
            return
    except Exception, ex:
        traceback.print_exc()
        LOGGER.error("Error fetching updates:" + to_str(ex))
        return
def send_test_email(self, desc, servername, port, useremail, password, secure):
    self.sender = useremail
    Record = DBSession.query(User.email_address).filter(
        User.user_name == "admin").first()
    self.receivers = Record.email_address
    self.mail_server = servername
    if port:
        self.port = int(port)
    self.secure_type = int(secure)
    self.password = password
    self.subject = "Test Email"
    self.content = "Test message Sent on " + to_str(ct_time())
    self.msg = MIMEText(self.content, self.text_subtype)
    self.msg["Subject"] = "ConVirt Test Email"
    # SendSuccess = False
    try:
        if self.secure_type == NONSECURE:
            EmailManager().send_nonsecure(servername, self.port, useremail,
                                          Record.email_address, self.msg.as_string())
        elif self.secure_type == TLS:
            EmailManager().send_tls(servername, self.port, useremail, password,
                                    Record.email_address, self.msg.as_string())
        else:
            EmailManager().send_ssl(servername, self.port, useremail, password,
                                    Record.email_address, self.msg.as_string())
    except Exception, ex:
        # traceback.print_exc()
        LOGGER.error("Error sending mails:" + to_str(ex).replace("'", ""))
        raise ex