def on_remove_group(self, site_id, groupId, auth, def_manager):
    """Remove all server-pool level definition links for a group.

    Deletes every SPDefLink row recorded for *groupId* for this
    manager's definition type, and deletes the definitions themselves
    when their scope is server-pool.  Data-center scoped definitions
    are kept, since only the pool is being removed.

    :param site_id: id of the data center the pool belongs to (unused
        here beyond the signature; kept for interface compatibility).
    :param groupId: id of the server pool being removed.
    :param auth: caller's auth context (unused here).
    :param def_manager: definition manager providing getType()/get_defn().
    """
    defType = def_manager.getType()
    # Collect the definitions currently linked to this pool.
    # (The original also looked up the Site/ServerGroup rows and a
    # constants.DETACH op code but never used them - dead code removed.
    # It also guarded with `if sp_defns:`, which is always true for a
    # Query object; iterating directly is equivalent.)
    defn_list = []
    for link in DBSession.query(SPDefLink).filter_by(group_id=groupId):
        defn = def_manager.get_defn(link.def_id)
        if defn:
            defn_list.append(defn)
    for each_defn in defn_list:
        # NOTE(review): this lookup filters only by def_id/def_type,
        # not by group_id - if the same definition were linked to a
        # different pool this could remove that pool's link instead.
        # Preserved as-is; confirm intended before tightening.
        group_defn = DBSession.query(SPDefLink).filter_by(
            def_id=each_defn.id, def_type=defType).first()
        if group_defn:
            DBSession.delete(group_defn)
        # Delete only pool-scoped definitions; data-center level
        # definitions must survive the pool's removal.
        if each_defn.scope == constants.SCOPE_SP:
            DBSession.delete(each_defn)
def on_remove_group(self, site_id, groupId, auth, def_manager):
    """Detach server-pool level definitions when a group is removed.

    Pool-scoped definitions are deleted outright; data-center scoped
    ones only lose their SPDefLink row.
    """
    op = constants.DETACH
    defType = def_manager.getType()
    site = DBSession.query(Site).filter_by(id=site_id).first()
    group = DBSession.query(ServerGroup).filter_by(id=groupId).first()
    # Gather the pool-level definitions recorded for this group.
    sp_defns = DBSession.query(SPDefLink).filter_by(group_id=groupId)
    defn_list = []
    if sp_defns:
        for row in sp_defns:
            found = def_manager.get_defn(row.def_id)
            if found:
                defn_list.append(found)
    for current in defn_list:
        link = DBSession.query(SPDefLink).filter_by(
            def_id=current.id, def_type=defType).first()
        if link is not None:
            DBSession.delete(link)
        # Only pool-scoped definitions are deleted here; data-center
        # scoped ones stay because only the pool is going away.
        if current.scope == constants.SCOPE_SP:
            DBSession.delete(current)
def disassociate_defn(self, site, group, auth, defn, defType, add_mode, grid_manager):
    """Unlink *defn* from a server pool and from each of its servers.

    Removes per-server ServerDefLink rows, the single pool-level
    SPDefLink row, and the VM storage links/disks referencing the
    definition, then commits.
    """
    LOGGER.info("Disassociating definition...")
    # Drop the per-server link rows for every node in the pool.
    for member in group.getNodeList(auth).itervalues():
        if not member:
            continue
        link = DBSession.query(ServerDefLink).filter_by(
            server_id=member.id, def_id=defn.id,
            def_type=defType).first()
        if link:
            DBSession.delete(link)
    # A pool-level definition has at most one SPDefLink row.
    pool_link = DBSession.query(SPDefLink).filter_by(
        group_id=group.id, def_id=defn.id, def_type=defType).first()
    if pool_link:
        DBSession.delete(pool_link)
    # Collect the VMs on every node so their storage links and
    # vm_disks for this definition can be removed in one call.
    vm_ids = []
    for member in group.getNodeList(auth).itervalues():
        if member:
            for dom in grid_manager.get_node_doms(auth, member.id):
                if dom:
                    vm_ids.append(dom.id)
    grid_manager.remove_vm_links_to_storage(defn.id, vm_ids)
    transaction.commit()
def on_remove_node(self, nodeId, groupId, site_id, auth, def_manager, isTransfer=False):
    """Detach definitions from a server being removed (or transferred).

    Deletes every ServerDefLink row of the server for this manager's
    definition type.  The definitions themselves are deleted only when
    they are server-scoped AND the server is actually being deleted
    (not transferred to another pool).

    :param nodeId: id of the managed server.
    :param groupId: id of the pool the server belongs to; no-op if falsy.
    :param isTransfer: True when the server is moving pools rather than
        being deleted.
    """
    op = constants.DETACH
    # Without a group there is nothing to detach.
    if not groupId:
        return
    defType = def_manager.getType()
    node = DBSession.query(ManagedNode).filter_by(id=nodeId).first()
    if not node:
        return
    # The original built a defn_list in a first pass over this query
    # and never used it (and re-executed the query for the second
    # pass) - that dead pass is removed here.
    node_defns = DBSession.query(ServerDefLink).filter_by(
        server_id=nodeId, def_type=defType)
    for eachdefn in node_defns:
        defn = def_manager.get_defn(eachdefn.def_id)
        if not defn:
            continue
        # Links for non-server-scoped definitions are always removed
        # with the server.
        if defn.scope != constants.SCOPE_S:
            # An OUT_OF_SYNC link is still deleted, but leave a trace
            # in the log.  to_str() guards the concatenation against
            # non-string ids/types (the original would raise TypeError
            # here if node.id were an int).
            if eachdefn.status == constants.OUT_OF_SYNC:
                LOGGER.error(
                    "WARNING: The definition status is OUT_OF_SYNC. "
                    "Still the definition linking with the server is "
                    "getting deleted. server_id=" + to_str(node.id) +
                    ", def_id=" + to_str(eachdefn.def_id) +
                    ", def_type=" + to_str(eachdefn.def_type) +
                    ", details=" + to_str(eachdefn.details))
            DBSession.delete(eachdefn)
        # Server-level definitions are deleted only on real removal,
        # never while transferring the server.
        if defn.scope == constants.SCOPE_S and not isTransfer:
            DBSession.delete(defn)
def on_remove_node(self, nodeId, groupId, site_id, auth, def_manager, isTransfer=False):
    """Remove a server's definition links (and server-scoped defns)."""
    op = constants.DETACH
    # Nothing to do when no group id was supplied.
    if not groupId:
        return
    defType = def_manager.getType()
    node = DBSession.query(ManagedNode).filter_by(id=nodeId).first()
    if node is None:
        return
    node_defns = DBSession.query(ServerDefLink).filter_by(
        server_id=nodeId, def_type=defType)
    # First pass: gather the definitions attached to this server.
    defn_list = []
    if node_defns:
        for link in node_defns:
            found = def_manager.get_defn(link.def_id)
            if found:
                defn_list.append(found)
    # Second pass: delete the link rows, plus the definitions
    # themselves when server-scoped and not merely transferring.
    if node_defns:
        for link in node_defns:
            found = def_manager.get_defn(link.def_id)
            if not found:
                continue
            if found.scope != constants.SCOPE_S:
                # OUT_OF_SYNC links are still deleted; log a warning.
                if link.status == constants.OUT_OF_SYNC:
                    LOGGER.error("WARNING: The definition status is OUT_OF_SYNC. Still the definition linking with the server is getting deleted. server_id=" + node.id + ", def_id=" + link.def_id + ", def_type=" + link.def_type + ", details=" + to_str(link.details))
                DBSession.delete(link)
            if found.scope == constants.SCOPE_S and isTransfer == False:
                DBSession.delete(found)
def disassociate_defn(self, site, group, auth, defn, defType, add_mode, grid_manager):
    """Unlink *defn* from the pool *group* and from all of its servers.

    Removes the per-server ServerDefLink rows, the single pool-level
    SPDefLink row, and the VM storage links/vm_disks that reference
    the definition, then commits the transaction.
    """
    LOGGER.info("Disassociating definition...")
    # Fetch the node list once; the original called getNodeList(auth)
    # twice for the same pool, doubling a potentially expensive lookup.
    nodes = [n for n in group.getNodeList(auth).itervalues() if n]
    # Remove each server's link row for this definition.
    for node in nodes:
        node_defn = DBSession.query(ServerDefLink).filter_by(
            server_id=node.id, def_id=defn.id, def_type=defType).first()
        if node_defn:
            DBSession.delete(node_defn)
    # A pool-level definition has exactly one SPDefLink row.
    group_defn = DBSession.query(SPDefLink).filter_by(
        group_id=group.id, def_id=defn.id, def_type=defType).first()
    if group_defn:
        DBSession.delete(group_defn)
    # Collect VM ids so storage links and vm_disks tied to this
    # definition can be removed in one call.
    vm_id_list = []
    for node in nodes:
        for vm in grid_manager.get_node_doms(auth, node.id):
            if vm:
                vm_id_list.append(vm.id)
    grid_manager.remove_vm_links_to_storage(defn.id, vm_id_list)
    transaction.commit()
def delete(self, obj):
    """Mark *obj* for deletion in the current DBSession.

    Thin convenience wrapper around DBSession.delete; the actual row
    removal happens when the session flushes/commits.
    """
    DBSession.delete(obj)
def delete_defn(self, defn, site, group, node, auth, defType, def_manager, grid_manager, add_mode=False, group_list=None):
    """Physically delete a definition flagged is_deleted, plus its links.

    Behaviour depends on defn.scope:
      - SCOPE_S  (server level): delete the server link and the
        definition, but only when the link is IN_SYNC (or add_mode).
      - SCOPE_SP (server pool level): delete the pool link, every
        server link in the pool, and the definition.
      - SCOPE_DC (data center level): for each pool in *group_list*,
        delete the DC link, pool link and server links; the definition
        itself is deleted only once no links remain.
    Commits the transaction at the end.
    """
    LOGGER.info("Deleting definition...")
    scope = defn.scope
    # Only definitions already flagged as deleted are purged here.
    if defn.is_deleted == True:
        if scope == constants.SCOPE_S:  # server level
            allows_delete = False
            node_defn = DBSession.query(ServerDefLink).filter_by(
                server_id=node.id, def_id=defn.id, def_type=defType).first()
            if node_defn:
                if add_mode == True:
                    allows_delete = True
                else:
                    if node_defn.status == constants.IN_SYNC:
                        allows_delete = True
                    # OUT_OF_SYNC blocks the delete; just report it.
                    if node_defn.status == constants.OUT_OF_SYNC and add_mode == False:
                        LOGGER.info("Definition " + defn.name + " is OUT_OF_SYNC on the server " + node.hostname)
                if allows_delete == True:
                    LOGGER.info("Allowing to delete definition...")
                    # Delete the defn link with the server from the
                    # serverdeflinks table.
                    DBSession.delete(node_defn)
                    # Removes VM storage links as well as vm_disks
                    # related to the storage defn.
                    grid_manager.remove_vm_links_to_storage(defn.id)
                    # Delete entry from the storage disks table.
                    def_manager.remove_storage_disk(defn.id)
                    # Delete the definition row itself.
                    DBSession.delete(defn)
        elif scope == constants.SCOPE_SP:  # server pool level
            rowGroupDef = DBSession.query(SPDefLink).filter_by(
                group_id=group.id, def_id=defn.id, def_type=defType).first()
            if rowGroupDef:
                # NOTE(review): this re-runs the exact query that
                # produced rowGroupDef - group_defn is the same row.
                group_defn = DBSession.query(SPDefLink).filter_by(
                    group_id=group.id, def_id=defn.id, def_type=defType).first()
                if group_defn:
                    DBSession.delete(group_defn)
                # Delete every server's link row in the pool.
                # NOTE(review): loop variable shadows the `node`
                # parameter.
                for node in group.getNodeList(auth).itervalues():
                    node_defn = DBSession.query(ServerDefLink).filter_by(
                        server_id=node.id, def_id=defn.id, def_type=defType).first()
                    if node_defn:
                        DBSession.delete(node_defn)
                # Removes VM storage links as well as vm_disks related
                # to the storage defn.
                grid_manager.remove_vm_links_to_storage(defn.id)
                # Delete entry from the storage disks table.
                def_manager.remove_storage_disk(defn.id)
                # Delete the definition from the storage/network
                # definitions table.
                DBSession.delete(defn)
        elif scope == constants.SCOPE_DC:  # data center level
            #rowGroupDef = DBSession.query(DCDefLink).filter_by(site_id=site.id, def_id = defn.id, def_type = defType).first()
            #if rowGroupDef:
            # NOTE(review): loop variable shadows the `group`
            # parameter; the DCDefLink lookup inside the loop only
            # finds a row on the first iteration (there is one DC-level
            # record).
            for group in group_list:
                site_defn = DBSession.query(DCDefLink).filter_by(
                    site_id=site.id, def_id=defn.id, def_type=defType).first()
                if site_defn:
                    DBSession.delete(site_defn)
                # One pool-level link row per pool.
                group_defn = DBSession.query(SPDefLink).filter_by(
                    group_id=group.id, def_id=defn.id, def_type=defType).first()
                if group_defn:
                    DBSession.delete(group_defn)
                # Delete every server's link row in this pool.
                for node in group.getNodeList(auth).itervalues():
                    node_defn = DBSession.query(ServerDefLink).filter_by(
                        server_id=node.id, def_id=defn.id, def_type=defType).first()
                    if node_defn:
                        DBSession.delete(node_defn)
            # Removes VM storage links as well as vm_disks related to
            # the storage defn.
            grid_manager.remove_vm_links_to_storage(defn.id)
            # Delete entry from the storage disks table.
            def_manager.remove_storage_disk(defn.id)
            # Delete records from the storage_stats table.
            DBSession.query(Storage_Stats).filter_by(
                storage_id=defn.id).delete()
            # The DC-level definition row is deleted only once no link
            # remains in the DC or server link tables.
            group_defn = DBSession.query(DCDefLink).filter_by(
                site_id=site.id, def_id=defn.id, def_type=defType).first()
            if not group_defn:
                node_defn = DBSession.query(ServerDefLink).filter_by(
                    def_id=defn.id, def_type=defType).first()
                if not node_defn:
                    DBSession.delete(defn)
    transaction.commit()
class Purging(Task): def get_descriptions(self): short_desc = m_("Purge Historical Data") return (short_desc, (), short_desc, ()) def exec_task(self, auth, ctx): LOGGER.debug('entered in excec_task for Purging task') MetricsService().purging_for_all_nodes(auth) #purge the task results #no need to catch exception since task service will log #and rollback in case of an exception import tg from datetime import datetime, timedelta purge_interval = tg.config.get("task_results_purge_interval") cutoff_date = datetime.utcnow() + timedelta(days=-int(purge_interval)) DBSession.query(TaskResult).\ filter(TaskResult.timestamp <= cutoff_date).\ delete() #also purge the non-repeating tasks that were submitted long time #ago limit = 5000 try: limit=int(tg.config.get(constants.TASK_PURGE_COUNT)) except Exception, e: print "Exception: ", e offset = 0 while True: tasks=DBSession.query(Task).\ filter(Task.submitted_on <= cutoff_date).\ filter(Task.interval == None).\ filter(Task.calendar == None).order_by(Task.submitted_on.asc()).\ limit(limit).offset(offset).all() if len(tasks) == 0: break offset += limit for task in tasks: DBSession.delete(task) transaction.commit() #purge results entries of repeating tasks rept_purge_interval = tg.config.get("repeating_tasks_purge_interval") cutoff_date = datetime.utcnow() + timedelta(days=-int(rept_purge_interval)) rpt_tasks = ['TimeBasisRollupForNodes','EmailTask'] rpt_prvnt_tasks = ['CollectMetricsForNodes','NodeAvailTask','VMAvailTask'] rpt_task = DBSession.query(Task.task_id).filter(Task.task_type.in_(rpt_tasks)).all() rpt_prvnt_task = DBSession.query(Task.task_id).filter(Task.task_type.in_(rpt_prvnt_tasks)).all() rpt_task_ids = [x.task_id for x in rpt_task] rpt_prvnt_task_ids = [x.task_id for x in rpt_prvnt_task] print rpt_task_ids,"=========XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX=========",rpt_prvnt_task_ids task_ids = rpt_task_ids + rpt_prvnt_task_ids DBSession.query(TaskResult).\ filter(TaskResult.task_id.in_(task_ids)).\ 
filter(TaskResult.timestamp <= cutoff_date).\ delete() transaction.commit() #purge entries of child tasks of repeating tasks DBSession.query(TaskResult).\ filter(TaskResult.task_id.in_(\ DBSession.query(Task.task_id).\ filter(Task.parent_task_id.in_(rpt_prvnt_task_ids)).\ filter(Task.submitted_on <= cutoff_date))\ ).\ delete() transaction.commit() DBSession.query(Task).\ filter(Task.parent_task_id.in_(rpt_prvnt_task_ids)).\ filter(Task.submitted_on <= cutoff_date).\ delete()
def delete_defn(self, defn, site, group, node, auth, defType, def_manager, grid_manager, add_mode=False, group_list=None):
    """Purge a definition flagged is_deleted together with its links.

    Scope drives the cascade:
      SCOPE_S  - delete this server's link and the definition (only
                 when the link is IN_SYNC, or in add_mode).
      SCOPE_SP - delete the pool link, all server links in the pool,
                 and the definition.
      SCOPE_DC - for every pool in *group_list* delete DC/pool/server
                 links; delete the definition only when no link is
                 left anywhere.
    Commits at the end.
    """
    LOGGER.info("Deleting definition...")
    scope = defn.scope
    # Only definitions already marked deleted are purged.
    if defn.is_deleted == True:
        if scope == constants.SCOPE_S:  # server level
            allows_delete = False
            node_defn = DBSession.query(ServerDefLink).filter_by(server_id = node.id, def_id = defn.id, def_type = defType).first()
            if node_defn:
                if add_mode == True:
                    allows_delete = True
                else:
                    if node_defn.status == constants.IN_SYNC:
                        allows_delete = True
                    # An OUT_OF_SYNC link blocks deletion; report it.
                    if node_defn.status == constants.OUT_OF_SYNC and add_mode == False:
                        LOGGER.info("Definition " + defn.name + " is OUT_OF_SYNC on the server " + node.hostname)
                if allows_delete == True:
                    LOGGER.info("Allowing to delete definition...")
                    # Delete the defn link with the server from the
                    # serverdeflinks table.
                    DBSession.delete(node_defn)
                    # Deletes VM storage links as well as vm_disks
                    # related to the storage defn.
                    grid_manager.remove_vm_links_to_storage(defn.id)
                    # Delete entry from storage disks table.
                    def_manager.remove_storage_disk(defn.id)
                    # Delete the definition row itself.
                    DBSession.delete(defn)
        elif scope == constants.SCOPE_SP:  # server pool level
            rowGroupDef = DBSession.query(SPDefLink).filter_by(group_id = group.id, def_id = defn.id, def_type = defType).first()
            if rowGroupDef:
                # NOTE(review): this repeats the rowGroupDef query -
                # group_defn is the same record.
                group_defn = DBSession.query(SPDefLink).filter_by(group_id = group.id, def_id = defn.id, def_type = defType).first()
                if group_defn:
                    DBSession.delete(group_defn)
                # Delete every server's link in the pool.
                # NOTE(review): loop variable shadows the `node`
                # parameter.
                for node in group.getNodeList(auth).itervalues():
                    node_defn = DBSession.query(ServerDefLink).filter_by(server_id = node.id, def_id = defn.id, def_type = defType).first()
                    if node_defn:
                        DBSession.delete(node_defn)
                # Deletes VM storage links as well as vm_disks related
                # to the storage defn.
                grid_manager.remove_vm_links_to_storage(defn.id)
                # Delete entry from storage disks table.
                def_manager.remove_storage_disk(defn.id)
                # Delete the definition from the storage/network
                # definitions table.
                DBSession.delete(defn)
        elif scope == constants.SCOPE_DC:  # data center level
            #rowGroupDef = DBSession.query(DCDefLink).filter_by(site_id=site.id, def_id = defn.id, def_type = defType).first()
            #if rowGroupDef:
            # NOTE(review): loop variable shadows the `group`
            # parameter; the DC-level link is found/deleted only on the
            # first iteration since there is a single DC record.
            for group in group_list:
                site_defn = DBSession.query(DCDefLink).filter_by(site_id=site.id, def_id = defn.id, def_type = defType).first()
                if site_defn:
                    DBSession.delete(site_defn)
                # One pool-level SPDefLink row per pool.
                group_defn = DBSession.query(SPDefLink).filter_by(group_id=group.id, def_id = defn.id, def_type = defType).first()
                if group_defn:
                    DBSession.delete(group_defn)
                # Delete every server's link row in this pool.
                for node in group.getNodeList(auth).itervalues():
                    node_defn = DBSession.query(ServerDefLink).filter_by(server_id = node.id, def_id = defn.id, def_type = defType).first()
                    if node_defn:
                        DBSession.delete(node_defn)
            # Deletes VM storage links as well as vm_disks related to
            # the storage defn.
            grid_manager.remove_vm_links_to_storage(defn.id)
            # Delete entry from storage disks table.
            def_manager.remove_storage_disk(defn.id)
            # Delete records from storage_stats table.
            DBSession.query(Storage_Stats).filter_by(storage_id = defn.id).delete()
            # The DC-level definition row goes only when no link
            # remains in the DC or server link tables.
            group_defn = DBSession.query(DCDefLink).filter_by(site_id=site.id, def_id = defn.id, def_type = defType).first()
            if not group_defn:
                node_defn = DBSession.query(ServerDefLink).filter_by(def_id = defn.id, def_type = defType).first()
                if not node_defn:
                    DBSession.delete(defn)
    transaction.commit()