def disassociate_defn(self, site, group, auth, defn, defType, add_mode, grid_manager):
    """Detach definition `defn` from server pool `group` within `site`.

    When `add_mode` is False the data-center-level link must exist and be
    fully in sync (oos_count == 0) before the detach is allowed; when
    `add_mode` is True the detach is allowed unconditionally.  On success
    every per-server link, the server-pool link, and the VM-to-storage
    links for all VMs on the group's nodes are removed, then committed.
    """
    LOGGER.info('Disassociating definition...')
    allows_delete = False
    # Explicit ==False / ==True comparisons are kept deliberately: callers
    # may pass non-bool values and truthiness would change the outcome.
    if add_mode == False:
        dc_defn = DBSession.query(DCDefLink).filter_by(site_id=site.id, def_id=defn.id, def_type=defType).first()
        if dc_defn:
            if dc_defn.oos_count == 0:
                allows_delete = True
            else:
                LOGGER.info('Definition is OUT_OF_SYNC at data center level')
    elif add_mode == True:
        allows_delete = True
    if allows_delete:
        LOGGER.info('Allowing to delete definition...')
        # Hoisted: the node list was fetched twice in the original.
        nodes = group.getNodeList(auth)
        for node in nodes.itervalues():
            if node:
                node_defn = DBSession.query(ServerDefLink).filter_by(server_id=node.id, def_id=defn.id, def_type=defType).first()
                if node_defn:
                    DBSession.delete(node_defn)
        group_defn = DBSession.query(SPDefLink).filter_by(group_id=group.id, def_id=defn.id, def_type=defType).first()
        if group_defn:
            DBSession.delete(group_defn)
        # Collect every VM on every node so their storage links can be dropped.
        vm_id_list = []
        for node in nodes.itervalues():
            if node:
                for vm in grid_manager.get_node_doms(auth, node.id):
                    if vm:
                        vm_id_list.append(vm.id)
        grid_manager.remove_vm_links_to_storage(defn.id, vm_id_list)
        transaction.commit()
def delete_pool(self, pool_ids):
    """Remove the given IP pool(s) and every IP row belonging to them.

    Accepts either a single pool id or a list of ids.
    """
    if not isinstance(pool_ids, list):
        pool_ids = [pool_ids]
    # Fetch the pool rows first, then bulk-remove their member IPs,
    # then delete each pool record.
    doomed_pools = DBSession.query(IPPool).filter(IPPool.id.in_(pool_ids)).all()
    DBSession.query(IPS).filter(IPS.pool_id.in_(pool_ids)).delete()
    for doomed in doomed_pools:
        DBSession.delete(doomed)
def remove_vlan_id_pool(self, vlan_id_pool_id):
    """Delete a VLAN ID pool and all of its dependent rows.

    Removes, in order: the pool's server-pool relations, its individual
    VLAN IDs, its network-info rows, and finally the pool record itself,
    then commits the transaction.
    """
    LOGGER.info("Removing VLAN ID Pool..")
    DBSession.query(VLANIDPoolSPRelation).filter_by(vlan_id_pool_id=vlan_id_pool_id).delete()
    DBSession.query(VLANID).filter_by(vlan_id_pool_id=vlan_id_pool_id).delete()
    DBSession.query(VLANNetworkInfo).filter_by(vlan_pool_id=vlan_id_pool_id).delete()
    vlan_id_pool = DBSession.query(VLANIDPool).filter_by(id=vlan_id_pool_id).first()
    # BUG FIX: .first() returns None when the pool row is already gone and
    # Session.delete(None) raises; only delete an existing row.
    if vlan_id_pool is not None:
        DBSession.delete(vlan_id_pool)
    transaction.commit()
def delete_ip(self, exclude_ips, pool_id, cidr=None):
    """Delete every IP in pool `pool_id` except those listed in `exclude_ips`.

    Accepts a single IP or a list.  Raises if any candidate address is
    reserved (per `can_remove_ip`); validation happens before any delete
    so the operation is all-or-nothing.  `cidr` is accepted for interface
    compatibility and is currently unused.
    """
    if not isinstance(exclude_ips, list):
        exclude_ips = [exclude_ips]
    candidates = (DBSession.query(IPS)
                  .filter(~IPS.ip.in_(exclude_ips))
                  .filter(IPS.pool_id == pool_id)
                  .all())
    # First pass: refuse the whole request if any address is reserved.
    for candidate in candidates:
        if not self.can_remove_ip(candidate.id):
            LOGGER.info('Can not delete reserved IP:%s' % candidate.ip)
            raise Exception('Can not delete reserved IP:%s' % candidate.ip)
    # Second pass: all clear — remove them.
    for candidate in candidates:
        DBSession.delete(candidate)
def remove_fencing_device(self, res_id):
    """Delete the fencing resource `res_id` if no server still uses it.

    Returns dict(success=True) on success; otherwise a 3-tuple of string
    fragments forming a JS-style error payload (legacy response format).
    """
    try:
        hafr = DBSession.query(HAFenceResource).filter(HAFenceResource.id == res_id).one()
        # Refuse when any server still references this device.
        if hafr.entity_resources:
            return ("{success: false,msg: '", 'This fencing device is used by some of the servers', "'}")
        DBSession.delete(hafr)
    except Exception as ex:
        sanitized = to_str(ex).replace("'", '')
        print_traceback()
        LOGGER.error(sanitized)
        return ("{success: false,msg: '", sanitized, "'}")
    return dict(success=True)
def detach_vlan_id_pool(self, site_id, sp_ids, vlan_id_pool_id):
    """Detach VLAN ID pool `vlan_id_pool_id` from every child server pool
    of `site_id` that is NOT named in the comma-separated `sp_ids` list.

    NOTE(review): `keep_ids` holds strings produced by split(","); this
    assumes `EntityRelation.dest_id` is also a string — confirm, otherwise
    the membership test never matches.
    """
    keep_ids = sp_ids.split(",") if sp_ids else []
    children = DBSession.query(EntityRelation).filter_by(src_id=site_id, relation="Children")
    for child in children:
        sp_id = child.dest_id
        if sp_id in keep_ids:
            continue  # this server pool keeps its relation
        relation = (DBSession.query(VLANIDPoolSPRelation)
                    .filter_by(vlan_id_pool_id=vlan_id_pool_id, sp_id=sp_id)
                    .first())
        if relation:
            DBSession.delete(relation)
            LOGGER.info("VLAN ID Pool relation with SP is removed")
def on_remove_group(self, site_id, groupId, auth, def_manager):
    """Clean up definition links when server pool `groupId` is removed.

    Collects every definition linked to the group, deletes the group's
    SPDefLink rows, and deletes the definition itself when it is scoped
    to the server pool (SCOPE_SP).
    """
    op = constants.DETACH
    sync_manager = SyncDef()
    defType = def_manager.getType()
    site = DBSession.query(Site).filter_by(id=site_id).first()
    group = DBSession.query(ServerGroup).filter_by(id=groupId).first()
    defn_list = []
    sp_defns = DBSession.query(SPDefLink).filter_by(group_id=groupId)
    # (A Query object is always truthy, so the original `if sp_defns:`
    # guard was a no-op and has been dropped.)
    for eachdefn in sp_defns:
        defn = def_manager.get_defn(eachdefn.def_id)
        if defn:
            defn_list.append(defn)
    for each_defn in defn_list:
        # BUG FIX: the original lookup filtered only on def_id/def_type,
        # so it could fetch — and delete — another group's link row for a
        # definition shared across pools.  Constrain to this group.
        group_defn = DBSession.query(SPDefLink).filter_by(group_id=groupId, def_id=each_defn.id, def_type=defType).first()
        if group_defn:
            DBSession.delete(group_defn)
        if each_defn.scope == constants.SCOPE_SP:
            # Pool-scoped definitions have no meaning once the pool is gone.
            DBSession.delete(each_defn)
def on_remove_node(self, nodeId, groupId, site_id, auth, def_manager, isTransfer=False, csep_id=None):
    """Detach definitions from server `nodeId` when it leaves pool `groupId`.

    Syncs a DETACH of every linked definition to the node (best-effort;
    errors are logged), then deletes the ServerDefLink rows and, for
    server-scoped definitions not being transferred, the definition
    itself.  Returns None immediately when no group id is given.
    """
    op = constants.DETACH
    if not groupId:
        return None
    errs = []
    sync_manager = SyncDef()
    defType = def_manager.getType()
    node = DBSession.query(ManagedNode).filter_by(id=nodeId).first()
    if node:
        defn_list = []
        node_defns = DBSession.query(ServerDefLink).filter_by(server_id=nodeId, def_type=defType)
        for eachdefn in node_defns:
            defn = def_manager.get_defn(eachdefn.def_id)
            if defn:
                defn_list.append(defn)
        try:
            update_status = True
            sync_manager.sync_node(defn_list, node, groupId, site_id, auth, defType, op, def_manager, update_status, errs)
        except Exception as ex:
            LOGGER.error('Error: ' + to_str(ex))
        if errs:
            LOGGER.error('Error in syncing definition while removing node: ' + to_str(errs))
        for eachdefn in node_defns:
            defn = def_manager.get_defn(eachdefn.def_id)
            if defn:
                if defn.scope != constants.SCOPE_S:
                    if eachdefn.status == constants.OUT_OF_SYNC:
                        # BUG FIX: node.id and eachdefn.def_id may be ints;
                        # concatenating them to str raised TypeError.  Coerce
                        # with to_str as every other log line in this file does.
                        LOGGER.error('WARNING: The definition status is OUT_OF_SYNC. Still the definition linking with the server is getting deleted. server_id=' + to_str(node.id) + ', def_id=' + to_str(eachdefn.def_id) + ', def_type=' + to_str(eachdefn.def_type) + ', details=' + to_str(eachdefn.details))
                    DBSession.delete(eachdefn)
                if defn.scope == constants.SCOPE_S and isTransfer == False:
                    # Server-scoped definition dies with its server unless it
                    # is being transferred to another pool.
                    DBSession.delete(defn)
                self.update_node_defn(auth, nodeId, groupId, site_id, defn.id, defn.type, '', datetime.now(), '', defn.scope, defType, csep_id)
def delete(self, obj):
    """Mark `obj` for deletion on the shared ORM session."""
    DBSession.delete(obj)
def delete_defn(self, defn, auth, defType, def_manager, grid_manager):
    """Delete definition `defn`, honouring its scope.

    Flow:
      1. If the definition is flagged deleted and is a NETWORK definition,
         sync a DETACH to the external network-service host first; when
         that host is down, fall back to a logical (deferred) delete.
      2. Per scope (SCOPE_S / SCOPE_SP / SCOPE_DC / SCOPE_CP), refuse the
         delete while any link row is OUT_OF_SYNC; otherwise remove link
         rows at that scope and below, drop VM storage links and storage
         disks, and finally delete the definition row itself.

    NOTE(review): formatting reconstructed from collapsed source — the
    nesting of some trailing statements was ambiguous; verify against
    version history before relying on exact branch placement.
    """
    LOGGER.info('Deleting definition...')
    #from stackone.cloud.DbModel.platforms.cms.CSEP import CSEP
    from stackone.model.SyncDefinition import SyncDef
    scope = defn.scope
    # When True, the definition row is kept so the detach can be retried
    # later (the network service host was unreachable).
    logical_delete = False
    if defn.is_deleted==True:
        vm_id = None
        ext_nw_svc_host = None
        csep_id = None
        if defType==constants.NETWORK:
            # Resolve which host performs network detach: the CSEP's own
            # service host for CP-scoped definitions, else the CMS default.
            if defn.scope==constants.SCOPE_CP:
                csep_defn = DBSession.query(CSEPDefLink).filter_by(def_id=defn.id).first()
                if csep_defn:
                    csep_id = csep_defn.csep_id
                    csep = DBSession.query(CSEP).filter_by(id=csep_id).first()
                    if csep:
                        ext_nw_svc_host = csep.get_nw_service_host()
            else:
                ext_nw_svc_host = get_cms_network_service_node()
            if ext_nw_svc_host:
                if ext_nw_svc_host.is_up() and not ext_nw_svc_host.maintenance:
                    LOGGER.info('Syncing network service host - ' + to_str(ext_nw_svc_host.hostname))
                    group_id = None
                    site_id = None
                    defType = constants.NETWORK
                    op = constants.DETACH
                    update_status = True
                    errs = []
                    processor = None
                    sync_forcefully = None
                    use_auth = False
                    SyncDef().sync_node_defn(auth, ext_nw_svc_host, group_id, site_id, defn, defType, op, def_manager, update_status, errs, processor, sync_forcefully, csep_id, use_auth)
                    def_manager.remove_defn_dependencies(csep_id, defn.id, vm_id)
                else:
                    # Host unreachable: defer the physical delete.
                    LOGGER.info('Network Service Node (' + to_str(ext_nw_svc_host.hostname) + ') is down')
                    logical_delete = True
    if scope==constants.SCOPE_S:
        # Server scope: refuse while any link for this definition is OUT_OF_SYNC.
        node_defn = DBSession.query(ServerDefLink).filter_by(def_id=defn.id, def_type=defType, status=constants.OUT_OF_SYNC).first()
        if node_defn:
            node = grid_manager.getNode(auth, node_defn.server_id)
            if node:
                LOGGER.info('Definition ' + defn.name + ' is OUT_OF_SYNC on the server ' + node.hostname)
        else:
            LOGGER.info('Allowing to delete definition...')
            DBSession.query(ServerDefLink).filter_by(def_id=defn.id, def_type=defType).delete()
            grid_manager.remove_vm_links_to_storage(defn.id)
            def_manager.remove_storage_disk(defn.id)
            DBSession.delete(defn)
    elif scope==constants.SCOPE_SP:
        rowGroupDef = DBSession.query(SPDefLink).filter_by(def_id=defn.id, def_type=defType).first()
        if rowGroupDef:
            if rowGroupDef.oos_count>0:
                LOGGER.info('Definition is OUT_OF_SYNC at server pool level')
            else:
                LOGGER.info('Allowing to delete definition...')
                DBSession.query(SPDefLink).filter_by(def_id=defn.id, def_type=defType).delete()
                DBSession.query(ServerDefLink).filter_by(def_id=defn.id, def_type=defType).delete()
                grid_manager.remove_vm_links_to_storage(defn.id)
                def_manager.remove_storage_disk(defn.id)
                # NOTE(review): deletes the row only WHEN logical_delete is
                # True — the opposite of the SCOPE_DC/SCOPE_CP branches,
                # which delete only when NOT logical_delete.  Looks inverted;
                # confirm intended behavior.
                if logical_delete:
                    DBSession.delete(defn)
    elif scope==constants.SCOPE_DC:
        rowGroupDef = DBSession.query(DCDefLink).filter_by(def_id=defn.id, def_type=defType).first()
        if rowGroupDef:
            if rowGroupDef.oos_count>0:
                LOGGER.info('Definition is OUT_OF_SYNC at data center level')
            else:
                LOGGER.info('Allowing to delete definition...')
                # Remove link rows at DC level and every level below it.
                DBSession.query(DCDefLink).filter_by(def_id=defn.id, def_type=defType).delete()
                DBSession.query(SPDefLink).filter_by(def_id=defn.id, def_type=defType).delete()
                DBSession.query(ServerDefLink).filter_by(def_id=defn.id, def_type=defType).delete()
                grid_manager.remove_vm_links_to_storage(defn.id)
                def_manager.remove_storage_disk(defn.id)
                DBSession.query(Storage_Stats).filter_by(storage_id=defn.id).delete()
        # Only drop the definition row once no DC link and no server link remain.
        group_defn = DBSession.query(DCDefLink).filter_by(def_id=defn.id, def_type=defType).first()
        if not group_defn:
            node_defn = DBSession.query(ServerDefLink).filter_by(def_id=defn.id, def_type=defType).first()
            if not logical_delete and not node_defn:
                DBSession.delete(defn)
        transaction.commit()
    if scope==constants.SCOPE_CP:
        rowGroupDef = DBSession.query(CSEPDefLink).filter_by(def_id=defn.id, def_type=defType).first()
        if rowGroupDef:
            if rowGroupDef.oos_count>0:
                LOGGER.info('Definition is OUT_OF_SYNC at csep level')
            else:
                LOGGER.info('Allowing to delete definition...')
                # Physical cleanup is skipped for a logical (deferred) delete.
                if not logical_delete:
                    DBSession.query(CSEPDefLink).filter_by(def_id=defn.id, def_type=defType).delete()
                    DBSession.query(SPDefLink).filter_by(def_id=defn.id, def_type=defType).delete()
                    DBSession.query(ServerDefLink).filter_by(def_id=defn.id, def_type=defType).delete()
                    grid_manager.remove_vm_links_to_storage(defn.id)
                    def_manager.remove_storage_disk(defn.id)
                    DBSession.query(Storage_Stats).filter_by(storage_id=defn.id).delete()
        # Only drop the definition row once no CSEP link and no server link remain.
        csep_defn = DBSession.query(CSEPDefLink).filter_by(def_id=defn.id, def_type=defType).first()
        if not csep_defn:
            node_defn = DBSession.query(ServerDefLink).filter_by(def_id=defn.id, def_type=defType).first()
            if not logical_delete and not node_defn:
                DBSession.delete(defn)
        transaction.commit()
def process_ha_sp(self, auth, node_id, node_type, ha_data):
    """Apply an HA configuration payload to server pool `node_id`.

    `ha_data` is a dict with four sections (schema inferred from usage —
    confirm against the caller):
      - 'general_object':     HA enable flag, preferred/standby servers,
                              migrate_back / use_standby / failover flags
      - 'vm_priority_object': per-VM HA priority assignments
      - 'fence_object':       per-server fencing devices and parameters
      - 'advance_object':     free-form name/value entity attributes

    Returns None early when the entity for `node_id` cannot be resolved.
    `node_type` is accepted but not used in this body.
    """
    general = ha_data.get('general_object')
    pre_serve_list = general.get('preferred_servers_list')
    grp_ent = auth.get_entity(node_id)
    if grp_ent is None:
        return None
    grp_ent.set_ha(general.get('enable_ha'))
    # Partition preferred servers into all ids and the standby subset.
    mnids = []
    standby_servers = []
    for server in pre_serve_list:
        mnids.append(server.get('server_id'))
        if server.get('is_standby'):
            standby_servers.append(server.get('server_id'))
    sg = DBSession.query(ServerGroup).filter(ServerGroup.id == node_id).one()
    sg.migrate_back = general.get('migrate_back')
    sg.use_standby = general.get('use_standby')
    sg.failover = general.get('failover')
    DBSession.add(sg)
    # Toggle the standby flag on every referenced managed node.
    mnodes = DBSession.query(ManagedNode).filter(ManagedNode.id.in_(mnids)).all()
    for mnode in mnodes:
        if mnode.id in standby_servers:
            mnode.set_standby(True)
        else:
            mnode.set_standby(False)
        DBSession.add(mnode)
    # Map each VM's requested priority label onto the constants table.
    vm_priority = ha_data.get('vm_priority_object')
    vm_priority_list = vm_priority.get('vm_priority_list')
    vmids = []
    for val in vm_priority_list:
        vmids.append(val.get('vm_id'))
    vms = DBSession.query(VM).filter(VM.id.in_(vmids)).all()
    ha_priorities = constants.HA_PRIORITIES
    for vm in vms:
        for val in vm_priority_list:
            if vm.id == val.get('vm_id'):
                vm.ha_priority = ha_priorities.get(val.get('ha_priority'))
                DBSession.add(vm)
                break
    # Fencing: wipe existing entity/resource bindings for the listed
    # servers, then recreate them from the payload.
    fence_object = ha_data.get('fence_object')
    fencing_det = fence_object.get('fence_details')
    servers = []
    for val in fencing_det:
        servers.append(val.get('server_id'))
    ha_entity_res = DBSession.query(HAEntityResource).filter(HAEntityResource.entity_id.in_(servers)).all()
    if len(ha_entity_res) != 0:
        for ha_entity_r in ha_entity_res:
            DBSession.delete(ha_entity_r)
    try:
        for fen in fencing_det:
            entity_id = fen.get('server_id')
            for key in fen.get('fencing_data'):
                resource_id = key.get('id')
                # NOTE(review): order is a constant 1 for every device —
                # if device ordering matters this should increment; confirm.
                order = 1
                haer = HAEntityResource(entity_id, resource_id, order)
                param_list = key.get('params')
                for param in param_list:
                    name = param.get('attribute')
                    value = param.get('value')
                    # 'type' shadows the builtin here; local to this loop only.
                    type = param.get('type')
                    field = param.get('field')
                    field_datatype = param.get('field_datatype')
                    sequence = param.get('sequence')
                    # HACK/SECURITY: eval() on payload data — presumably the
                    # string 'True'/'False'.  eval of untrusted input is code
                    # execution; should be replaced with an explicit parse
                    # once callers are audited.
                    is_environ = eval(param.get('is_environ'))
                    haerp = HAEntityResourceParam(name, value, type, field, field_datatype, sequence, is_environ)
                    haer.params.append(haerp)
                DBSession.add(haer)
    except Exception as e:
        # NOTE(review): exception is swallowed — fencing setup failures are
        # silently ignored (extract_stack() only captures, it does not log).
        traceback.extract_stack()
        #print e
    # Advanced settings: replace ALL existing entity attributes with the
    # name/value pairs from the payload.
    adv_object = ha_data.get('advance_object')
    entity = DBSession.query(Entity).filter(Entity.entity_id == node_id).one()
    attributes = []
    for ea in entity.attributes:
        attributes.append(ea)
    # Iterate over the copied list so removal doesn't skip elements.
    for i in range(len(attributes)):
        entity.attributes.remove(attributes[i])
    for name in adv_object:
        value = adv_object[name]
        ea = EntityAttribute(name, value)
        entity.attributes.append(ea)
    DBSession.add(entity)