def add_vm_states(self, vm_ids):
    try:
        transaction.begin()
        avail_states = DBSession.query(AvailState).filter(AvailState.entity_id.in_(vm_ids)).all()
        from stackone.model.VM import VM
        (vshs, avails) = ([], [])
        for avail in avail_states:
            vsh = DBSession.query(VMStateHistory).filter(VMStateHistory.node_id == self.node_id).filter(VMStateHistory.vm_id == avail.entity_id).first()
            if vsh is None:
                vsh = VMStateHistory(self.node_id, avail.entity_id, avail.avail_state, avail.monit_state, avail.transient_state, avail.transient_state_time, avail.owner)
            else:
                vsh.avail_state = avail.avail_state
                vsh.monit_state = avail.monit_state
                vsh.transient_state = avail.transient_state
                vsh.transient_state_time = avail.transient_state_time
                vsh.owner = avail.owner
                vsh.timestamp = datetime.now()
            vshs.append(vsh)
            avails.append(avail)
        DBSession.add_all(vshs)
        DBSession.add_all(avails)
        transaction.commit()
    except Exception as e:
        LOGGER.error(to_str(e))
        DBSession.rollback()
        transaction.begin()
        traceback.print_exc()
        raise e
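# Usage sketch for add_vm_states (hypothetical caller; assumes `self` is the
# maintenance task that owns `node_id` and `vms` is a list of VM rows):
#
#     self.add_vm_states([vm.id for vm in vms])
#
# For each id the method upserts a VMStateHistory row: it reuses the existing
# (node_id, vm_id) row when present, otherwise it inserts a fresh one.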
def set_transient_state(self, defn, transient_state, op, site_id=None, group_id=None, node_id=None):
    scope = defn.scope
    if op == constants.SCOPE_LEVEL:
        # Pick the definition link that matches the definition's own scope.
        def_link = None
        if scope == constants.SCOPE_S:
            def_link = DBSession.query(ServerDefLink).filter_by(server_id=node_id, def_id=defn.id).first()
        elif scope == constants.SCOPE_SP:
            def_link = DBSession.query(SPDefLink).filter_by(group_id=group_id, def_id=defn.id).first()
        elif scope == constants.SCOPE_DC:
            def_link = DBSession.query(DCDefLink).filter_by(site_id=site_id, def_id=defn.id).first()
        if def_link:
            if transient_state:
                def_link.transient_state = to_unicode(transient_state)
            else:
                def_link.transient_state = None
            LOGGER.info('Transient state of ' + to_str(defn.name) + ' is changed to ' + to_str(transient_state) + ' at definition scope level')
    if op == constants.NODE_LEVEL:
        def_link = DBSession.query(ServerDefLink).filter_by(server_id=node_id, def_id=defn.id).first()
        if def_link:
            if transient_state:
                def_link.transient_state = to_unicode(transient_state)
            else:
                def_link.transient_state = None
            LOGGER.info('Transient state of ' + to_str(defn.name) + ' is changed to ' + to_str(transient_state) + ' at node level')
def get_vm_restore_task_result(self, auth, vm_id):
    task_backup_info = {}
    tasklist_info = []
    vmrestore_result_obj_list = (
        DBSession.query(VMRestoreResult, VMBackupResult)
        .join((VMBackupResult, VMRestoreResult.backup_result_id == VMBackupResult.id))
        .filter(VMRestoreResult.vm_id == vm_id)
        .order_by(VMRestoreResult.start_time)
        .all()
    )
    if vmrestore_result_obj_list:
        for vm_result_obj in vmrestore_result_obj_list:
            (vmrestore_result_obj, vmbackup_result_obj) = vm_result_obj
            sp_bkp_conf = DBSession.query(SPBackupConfig).filter_by(id=vmrestore_result_obj.backup_id).first()
            if sp_bkp_conf:
                detailResult_obj_list = DBSession.query(VMRestoreDetailResult).filter_by(result_id=vmrestore_result_obj.id).order_by(VMRestoreDetailResult.details).all()
                msg2 = ''
                if detailResult_obj_list:
                    for detailResult_obj in detailResult_obj_list:
                        if detailResult_obj:
                            msg2 += detailResult_obj.details + '\n'
                task_info = {}
                task_info['taskid'] = vmrestore_result_obj.id
                task_info['status'] = vmrestore_result_obj.status
                task_info['name'] = sp_bkp_conf.name
                exec_context = vmbackup_result_obj.execution_context
                task_info['location'] = exec_context['SERVER'] + ':' + exec_context['BACKUP_DESTINATION']
                task_info['backup_size'] = 0
                task_info['starttime'] = vmrestore_result_obj.start_time
                task_info['endtime'] = vmrestore_result_obj.end_time
                task_info['errmsg'] = msg2
                task_info['restore'] = vmrestore_result_obj.id
                tasklist_info.append(task_info)
    task_backup_info['rows'] = tasklist_info
    return task_backup_info
def on_add_node(self, nodeId, groupId, site_id, auth, def_manager):
    op = constants.ATTACH
    if not (nodeId or groupId):
        return None
    defn_list = []
    errs = []
    sync_manager = SyncDef()
    defType = def_manager.getType()
    sp_defns = DBSession.query(SPDefLink).filter_by(group_id=to_unicode(groupId))
    for eachdefn in sp_defns:
        defn = def_manager.get_defn(eachdefn.def_id)
        if defn:
            defn_list.append(defn)
            status = to_unicode(constants.OUT_OF_SYNC)
            details = None
            sync_manager.add_node_defn(nodeId, defn.id, defType, status, details)
    node = DBSession.query(ManagedNode).filter_by(id=nodeId).first()
    if node:
        update_status = True
        try:
            sync_manager.sync_node(defn_list, node, groupId, site_id, auth, defType, op, def_manager, update_status, errs)
        except Exception as ex:
            if errs:
                LOGGER.error('Error in syncing definition while adding node: ' + to_str(errs))
def get_storage_usage(self, auth, site_id, group_id, scope, defn):
    usage = 0
    vm_id_list = []
    if scope == constants.SCOPE_DC:
        # Data-center scope: collect VM ids from every server pool in the site.
        site_entity = auth.get_entity(site_id)
        groups = auth.get_entities(constants.SERVER_POOL, parent=site_entity)
        for eachgroup in groups:
            nodes = auth.get_entities(constants.MANAGED_NODE, parent=eachgroup)
            vm_id_list = self.get_vm_id_list(auth, nodes, vm_id_list)
    elif scope == constants.SCOPE_SP:
        # Server-pool scope: only the nodes of this group contribute.
        group_entity = auth.get_entity(group_id)
        nodes = auth.get_entities(constants.MANAGED_NODE, parent=group_entity)
        vm_id_list = self.get_vm_id_list(auth, nodes, vm_id_list)
    storage_disks = DBSession.query(StorageDisks).filter_by(storage_id=defn.id)
    for each_disk in storage_disks:
        vm_storage_link = DBSession.query(VMStorageLinks).filter_by(storage_disk_id=each_disk.id).first()
        if vm_storage_link:
            vm_disk = DBSession.query(VMDisks).filter_by(id=vm_storage_link.vm_disk_id).first()
            if vm_disk and vm_disk.vm_id in vm_id_list:
                usage += vm_disk.disk_size
    return usage
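# The traversal above follows this chain of tables (names taken from the code;
# the relationship direction is inferred):
#
#     StorageDef (defn) -> StorageDisks -> VMStorageLinks -> VMDisks -> VM
#
# Usage is the sum of VMDisks.disk_size for every disk whose owning VM falls
# inside the requested scope (data center or server pool).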
def disassociate_defn(self, site, group, auth, defn, defType, add_mode, grid_manager):
    LOGGER.info('Disassociating definition...')
    allows_delete = False
    if add_mode:
        allows_delete = True
    else:
        dc_defn = DBSession.query(DCDefLink).filter_by(site_id=site.id, def_id=defn.id, def_type=defType).first()
        if dc_defn:
            if dc_defn.oos_count == 0:
                allows_delete = True
            elif dc_defn.oos_count > 0:
                LOGGER.info('Definition is OUT_OF_SYNC at data center level')
    if allows_delete:
        LOGGER.info('Allowing to delete definition...')
        # Drop the node-level links first, then the group-level link.
        for node in group.getNodeList(auth).itervalues():
            if node:
                node_defn = DBSession.query(ServerDefLink).filter_by(server_id=node.id, def_id=defn.id, def_type=defType).first()
                if node_defn:
                    DBSession.delete(node_defn)
        group_defn = DBSession.query(SPDefLink).filter_by(group_id=group.id, def_id=defn.id, def_type=defType).first()
        if group_defn:
            DBSession.delete(group_defn)
        vm_id_list = []
        for node in group.getNodeList(auth).itervalues():
            if node:
                for vm in grid_manager.get_node_doms(auth, node.id):
                    if vm:
                        vm_id_list.append(vm.id)
        grid_manager.remove_vm_links_to_storage(defn.id, vm_id_list)
        transaction.commit()
def sync_defn(self, auth, server_ids, def_id, site_id, group_id):
    server_id_list = server_ids.split(',')
    # The definition is the same for every server, so look it up once.
    defn = DBSession.query(StorageDef).filter_by(id=def_id).first()
    for server_id in server_id_list:
        node = DBSession.query(ManagedNode).filter_by(id=server_id).first()
        self.sync_manager.sync_node_defn(auth, node, group_id, site_id, defn, constants.STORAGE, constants.ATTACH, self.storage_manager)
    return dict(success='true')
def delete_vlan_network_info(self, vlan_id_pool, vlan_ids):
    if not isinstance(vlan_ids, list):
        vlan_ids = [vlan_ids]
    DBSession.query(VLANNetworkInfo).filter(VLANNetworkInfo.vlan_id.in_(vlan_ids)).filter(
        VLANNetworkInfo.vlan_pool_id == vlan_id_pool.id
    ).delete()
    LOGGER.info("deleted vlan_network_info for vlan_id_pool:%s" % vlan_id_pool.name)
def get_next_id(self, pool_tag, context):
    available_id = None
    interface = None
    if pool_tag == constants.VLAN_ID_POOL:
        LOGGER.info("Get next vlan id from pool...")
        pool_id = context.get("pool_id")
        nw_def_id = context.get("nw_def_id")
        null_value = None  # compare with == so SQLAlchemy emits IS NULL
        LOGGER.info("Getting CMS Lock...")
        LockManager().get_lock(constants.VLAN_ID_POOL, pool_id, constants.NEXT_VLAN_ID, constants.Table_vlan_id_pools)
        interface = VLANManager().get_interface(pool_id)
        rs = (
            DBSession.query(func.min(VLANID.vlan_id).label("vlan_id"))
            .join((VLANIDPool, VLANIDPool.id == VLANID.vlan_id_pool_id))
            .filter(VLANID.used_by == null_value)
            .filter(VLANIDPool.id == pool_id)
            .first()
        )
        if isinstance(rs, tuple) and rs[0]:
            available_id = rs.vlan_id
            LOGGER.info("Available vlan id is " + to_str(available_id))
            context["vlan_id"] = available_id
            self.mark_used(pool_tag, context)
            vlan = DBSession.query(VLANIDPool).filter(VLANIDPool.id == pool_id).first()
            if vlan:
                vlan.used = int(vlan.used) + 1
        LOGGER.info("Releasing CMS Lock...")
        LockManager().release_lock()
    return (available_id, interface)
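# The min-free-id query above compiles to roughly this SQL (the table names
# are assumptions based on the mapped class names):
#
#     SELECT MIN(vlan_ids.vlan_id) AS vlan_id
#     FROM vlan_ids JOIN vlan_id_pools
#         ON vlan_id_pools.id = vlan_ids.vlan_id_pool_id
#     WHERE vlan_ids.used_by IS NULL AND vlan_id_pools.id = :pool_id
#
# i.e. the smallest id in the pool that no consumer has claimed yet; the CMS
# lock serializes concurrent allocators so two callers cannot get the same id.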
def add_firewall_for_entity(self, entity_id):
    LOGGER.info("Add firewall for CSEP/ Data Center...")
    if entity_id:
        csep = DBSession.query(CSEP).filter_by(id=entity_id).first()
        if csep:
            LOGGER.info("Got CSEP")
            nw_service_host = csep.get_nw_service_host()
            if nw_service_host:
                fw = IptablesManager(csep.name)
                self.fw_map[entity_id] = fw
                LOGGER.info("Firewall is added to CSEP")
                self.set_nw_service_host(fw, nw_service_host)
                LOGGER.info("Network service host is added to firewall")
                return fw
        dc = DBSession.query(Site).filter_by(id=entity_id).first()
        if dc:
            LOGGER.info("Got DC")
            nw_service_host = get_cms_network_service_node()
            if nw_service_host:
                fw = IptablesManager(dc.name)
                self.fw_map[entity_id] = fw
                LOGGER.info("Firewall is added to DC")
                self.set_nw_service_host(fw, nw_service_host)
                LOGGER.info("Network service host is added to firewall")
                return fw
def get_fw_info(self):
    fw_main_data = []
    LOGGER.info("Getting firewall rules info for all csep and Data Center...")
    site = DBSession.query(Site).first()
    if site:
        LOGGER.info("Got the site. Site name is " + to_str(site.name))
        nw_service_host = get_cms_network_service_node()
        if nw_service_host:
            fw = self.get_firewall(site.id)
            fw.set_chain_name(site.name)
            fw_data = self.dump(fw)
            fw_main_data.append(fw_data)
    csep_list = DBSession.query(CSEP)
    for each_csep in csep_list:
        LOGGER.info("Got the CSEP. CSEP name is " + to_str(each_csep.name))
        nw_service_host = each_csep.get_nw_service_host()
        if nw_service_host:
            fw = self.get_firewall(each_csep.id)
            fw.set_chain_name(each_csep.name)
            fw_data = self.dump(fw)
            fw_main_data.append(fw_data)
    return fw_main_data
def remove_context(cls, context_id):
    try:
        DBSession.query(cls).filter(cls.id == context_id).delete()
    except Exception as e:
        LOGGER.error(e)
        traceback.print_exc()
        raise e
def update_avail(node, new_state, monit_state, timestamp, reason, logger, update=True, auth=None):
    sv_point = transaction.savepoint()
    try:
        strt = p_task_timing_start(logger, "UpdateAvailability", node.id, log_level="DEBUG")
        node.current_state.avail_state = new_state
        node.current_state.timestamp = timestamp
        node.current_state.description = reason
        # Close out the previous availability-history record, if any.
        avh = DBSession.query(AvailHistory).filter(AvailHistory.entity_id == node.id).\
            order_by(AvailHistory.timestamp.desc()).first()
        if avh is not None:
            avh.endtime = timestamp
            time_diff = timestamp - avh.timestamp
            avh.period = time_diff.days * 24 * 60 + time_diff.seconds / 60
            DBSession.add(avh)
        # Insert the new availability-history record.
        ah = AvailHistory(node.id, new_state, monit_state, timestamp, reason)
        DBSession.add(ah)
        DBSession.add(node)
        if update:
            ev_contents = {'state': new_state}
            ev = AvailabilityEvent(node.id, ev_contents, timestamp)
            DBSession.add(ev)
        ent = DBSession.query(Entity).filter(Entity.entity_id == node.id).first()
        from stackone.model.ManagedNode import ManagedNode
        if ent.type.name == constants.MANAGED_NODE and new_state == ManagedNode.DOWN:
            if node.is_maintenance():
                logger.info('Node:%s is in Maintenance mode' % node.hostname)
            else:
                notify_node_down(ent, reason)
    except Exception as e:
        import traceback
        traceback.print_exc()
        logger.error(e)
        sv_point.rollback()
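# Worked example for the `period` arithmetic above (minutes between two
# availability samples): for a gap of 1 day and 90 minutes, timedelta gives
# days=1 and seconds=5400, so period = 1*24*60 + 5400/60 = 1440 + 90 = 1530.
# Note the integer division truncates any sub-minute remainder.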
def delete_restore_single_result(self, result_id):
    try:
        DBSession.query(VMRestoreResult).filter(VMRestoreResult.id == result_id).delete()
        LOGGER.info('VM restore single result is deleted.')
        transaction.commit()
    except Exception as ex:
        LOGGER.error(to_str(ex).replace("'", ''))
def manage_public_ip(self, vm_id, public_ip_id, public_ip, csep_id, add_flag):
    LOGGER.info('Calling network service...')
    nw_vm_rel = None
    if vm_id:
        nw_vm_rel = DBSession.query(NetworkVMRelation).filter_by(vm_id=vm_id).first()
    elif public_ip:
        nw_vm_rel = DBSession.query(NetworkVMRelation).filter_by(public_ip_id=public_ip_id).first()
    if nw_vm_rel:
        LOGGER.info('Got network VM relation')
        nw_def_id = nw_vm_rel.nw_def_id
        private_ip_id = nw_vm_rel.private_ip_id
        nw_defn = NwManager().get_defn(nw_def_id)
        if nw_defn:
            bridge_name = nw_defn.bridge_info.get('name')
            private_ip = ''
            ip_rec = self.get_ip_by_id(private_ip_id)
            if ip_rec:
                private_ip = self.remove_cidr_format_from_ip(ip_rec.ip)
            nw_service_host = NwManager().get_nw_service_host(csep_id)
            if nw_service_host:
                LOGGER.info('Got network service host')
                public_interface = self.get_public_interface(csep_id)
                ctx = {}
                ctx['public_ip'] = public_ip
                ctx['private_ip'] = private_ip
                ctx['public_interface'] = public_interface
                ctx['bridge_name'] = bridge_name
                ctx['add_flag'] = add_flag
                ctx['csep_id'] = csep_id
                LOGGER.info('context=' + to_str(ctx))
                NwManager().manage_public_ip(nw_service_host, ctx)
def delete_pool(self, pool_ids):
    if not isinstance(pool_ids, list):
        pool_ids = [pool_ids]
    pools = DBSession.query(IPPool).filter(IPPool.id.in_(pool_ids)).all()
    DBSession.query(IPS).filter(IPS.pool_id.in_(pool_ids)).delete()
    for pool in pools:
        DBSession.delete(pool)
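# Note on the two delete styles above: Query.delete() issues one bulk DELETE
# for the member IPs, while the pool rows go through DBSession.delete() one by
# one so that ORM-level cascades and events still fire for each pool object.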
def get_vm_linked_with_storage(self, storage_disk_id):
    vm = None
    if storage_disk_id:
        vm_storage_link = DBSession.query(VMStorageLinks).filter_by(storage_disk_id=storage_disk_id).first()
        if vm_storage_link:
            vm_disk = DBSession.query(VMDisks).filter_by(id=vm_storage_link.vm_disk_id).first()
            if vm_disk:
                vm = DBSession.query(VM).filter_by(id=vm_disk.vm_id).first()
    return vm
def migrate_to_servers(self, grpid, tried_sb_nodes=None, failed_doms=None, dom_ids=None):
    # Use None defaults to avoid sharing mutable lists across calls.
    tried_sb_nodes = tried_sb_nodes if tried_sb_nodes is not None else []
    failed_doms = failed_doms if failed_doms is not None else []
    dom_ids = dom_ids if dom_ids is not None else []
    msg = 'Starting Maintenance migration on Node ' + self.nodename + '. Checking for VMs.'
    self.msg += '\n' + msg
    LOGGER.info(msg)
    node = DBSession.query(ManagedNode).filter(ManagedNode.id == self.entity_id).first()
    grp = DBSession.query(ServerGroup).filter(ServerGroup.id == grpid).first()
    gretry_count = grp.getAttributeValue(constants.retry_count, 3)
    gwait_interval = grp.getAttributeValue(constants.wait_interval, 3)
    if grp.use_standby == True:
        while len(failed_doms) > 0:
            msg = 'Finding standby node.'
            self.msg += '\n' + msg
            LOGGER.info(msg)
            new_node = self.find_standby_node(self.auth, None, node, exclude_ids=tried_sb_nodes)
            if new_node is None:
                msg = 'All standby nodes are exhausted.'
                self.msg += '\n' + msg
                LOGGER.info(msg)
                break
            failed_doms = self.dom_fail_over(failed_doms, node, new_node, gretry_count, gwait_interval, self.FENCING)
            tried_sb_nodes.append(new_node.id)
    tot_failed = failed_doms
    tmp_failed_doms = [d for d in failed_doms]
    if len(failed_doms) > 0:
        tot_failed = []
        for domid in tmp_failed_doms:
            self.step = self.PAUSE_IN_STANDBY
            dom = DBSession.query(VM).filter(VM.id == domid).first()
            domname = dom.name
            msg = 'Starting initial placement for ' + domname
            self.msg += '\n' + msg
            LOGGER.info(msg)
            new_node = self.get_allocation_candidate(self.auth, dom, node)
            failed = self.dom_fail_over(domid, node, new_node, gretry_count, gwait_interval, self.PAUSE_IN_STANDBY)
            if len(failed) == 1:
                tot_failed.append(failed[0])
    if len(tot_failed) > 0:
        doms = DBSession.query(VM).filter(VM.id.in_(tot_failed)).all()
        domnames = [d.name for d in doms]
        msg = 'Failed to migrate following VMs' + to_str(domnames)
        self.msg += '\n' + msg
        LOGGER.info(msg)
    else:
        self.status = self.SUCCESS
        msg = 'Successfully migrated all VMs'
        self.msg += '\n' + msg
        LOGGER.info(msg)
    if len(tot_failed) != 0 and len(tot_failed) < len(dom_ids):
        self.status = self.PARTIAL
    vm_info_tup = self.get_maint_task_context(key='migrated_vms')
    vm_ids = [x[0] for x in vm_info_tup]
    self.add_vm_states(vm_ids)
    return True
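# Why the defaults above are None rather than []: Python evaluates default
# arguments once at definition time, so a literal [] would be shared across
# calls and accumulate state. A minimal repro of the pitfall:
#
#     >>> def f(acc=[]):
#     ...     acc.append(1)
#     ...     return acc
#     >>> f()
#     [1]
#     >>> f()
#     [1, 1]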
def get_standby_nodes(self):
    standby_nodes = []
    try:
        grp_ent = DBSession.query(Entity).filter(Entity.entity_id == self.id).first()
        node_ids = [x.entity_id for x in grp_ent.children]
        from stackone.model.ManagedNode import ManagedNode
        standby_nodes = DBSession.query(ManagedNode).filter(ManagedNode.id.in_(node_ids)).filter(ManagedNode.standby_status == ManagedNode.STANDBY).all()
    except Exception as e:
        import traceback
        traceback.print_exc()
    return standby_nodes
def get_peer_node(self, exclude_ids=None):
    exclude_ids = exclude_ids if exclude_ids is not None else []
    grp_ent = DBSession.query(Entity).filter(Entity.entity_id == self.id).first()
    node_ids = [x.entity_id for x in grp_ent.children]
    from stackone.model.ManagedNode import ManagedNode
    nodes = DBSession.query(ManagedNode).filter(ManagedNode.id.in_(node_ids)).all()
    up_nodes = []
    for node in nodes:
        if node.current_state.avail_state == ManagedNode.UP and node.id not in exclude_ids:
            up_nodes.append(node)
    if len(up_nodes) == 0:
        return None
    return up_nodes[0]
def get_vm_local_usage(self, vm_name):
    local_usage = 0.0
    if vm_name:
        vm = DBSession.query(VM).filter_by(name=vm_name).first()
        if vm:
            vm_disks = DBSession.query(VMDisks).filter_by(vm_id=vm.id)
            for each_vm_disk in vm_disks:
                vm_storage_link = DBSession.query(VMStorageLinks).filter_by(vm_disk_id=each_vm_disk.id).first()
                # Disks without a storage link live on the node's local storage.
                if not vm_storage_link:
                    local_usage += each_vm_disk.disk_size
    return local_usage
def save_email_setup_details(self, desc, servername, port, useremail, password, secure):
    site_record = DBSession.query(Site).filter(Site.name == 'Data Center').first()
    if site_record:
        site_id = site_record.id
        email_record = DBSession.query(EmailSetup).filter(EmailSetup.site_id == site_id).filter(EmailSetup.mail_server == servername).first()
        if email_record:
            return dict(success=True, msg='Duplicate record found in list')
        else:
            email_setup_obj = EmailSetup(servername, desc, port, secure, site_id, useremail, password)
            DBSession.add(email_setup_obj)
            emailsetupid = email_setup_obj.getEmailSetupId()
            EmailManager().add_entity(to_unicode(servername), emailsetupid, to_unicode(constants.EMAIL), None)
            return dict(success=True, msg='New record added successfully')
def get_vm_backups(self, sp_id, vm_id):
    result = []
    config_list = DBSession.query(SPBackupConfig).filter_by(sp_id=sp_id).all()
    for configobj in config_list:
        vm_backup_result = DBSession.query(VMBackupResult).filter_by(backup_id=configobj.id, status='Success', vm_id=vm_id).order_by(VMBackupResult.start_time.desc()).first()
        if vm_backup_result:
            last_backup = str(vm_backup_result.start_time)
            if configobj.rel_setting_conf.is_remote:
                location = str(configobj.rel_setting_conf.backup_server_details['server']) + ':'
            else:
                location = 'managednode:'
            location += configobj.rel_setting_conf.backup_destination
            # NOTE: the exact shape of this entry was lost in the original;
            # the fields below are an assumption based on the values computed above.
            result.append(dict(name=configobj.name, last_backup=last_backup, location=location, backup_type=configobj.rel_setting_conf.backup_type))
    return result
def get_piechart_data(self, image_id):
    result = []
    image_vms = DBSession.query(VM).filter(VM.image_id == image_id).all()
    total_image_vms = len(image_vms)
    all_vms = DBSession.query(VM).all()
    image_instance = DBSession.query(Image).filter(Image.id == image_id).first()
    image_name = image_instance.name
    total_vms = len(all_vms)
    if total_vms == 0:
        result.append(dict(total=to_str(0), label='No VMs provisioned'))
        return result
    result.append(dict(value=to_str(total_vms - total_image_vms), label='Other Templates'))
    result.append(dict(value=to_str(total_image_vms), label=image_name))
    return result
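# Example return value (hypothetical numbers): with 10 VMs in total, 3 of them
# built from an image named 'centos-base', the method yields
#
#     [{'value': '7', 'label': 'Other Templates'},
#      {'value': '3', 'label': 'centos-base'}]
#
# and the zero-VM case collapses to [{'total': '0', 'label': 'No VMs provisioned'}].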
def get_imagestore_details(self, auth, imagestore_id):
    result = []
    imagestore_entity = auth.get_entity(imagestore_id)
    if imagestore_entity is not None:
        for imagegroup_instance in auth.get_child_entities(imagestore_entity):
            group_name = imagegroup_instance.name
            for image_instance in auth.get_child_entities(imagegroup_instance):
                image_name = image_instance.name
                vms = DBSession.query(VM).filter(VM.image_id == image_instance.entity_id).all()
                img = DBSession.query(Image).filter(Image.id == image_instance.entity_id).one()
                desc_string = image_name + ' template creates a 10G virtual disk (vbd) for use as the primary hard drive. The VM boots from a bootable Linux CD/DVD which, in turn, should kick off the distribution specific installation routine and deploy the OS on the primary hard drive.'
                result.append(dict(tg=group_name, template=image_name, version=to_str(img.version), vm_num=to_str(len(vms)), desc=desc_string, image_id=image_instance.entity_id, node_id=image_instance.entity_id))
    return result
def dom_fail_over(self, dom_ids, node, new_node, gretry_count, gwait_interval, step, migrated=False, started=False):
    # LOGGER.debug('In dom_fail_over: %s, %s, %s, %s, %s, %s, %s, %s' % (dom_ids, node, new_node, gretry_count, gwait_interval, step, migrated, started))
    if new_node is None:
        msg = 'No suitable server. None'
        self.msg += '\n' + msg
        LOGGER.info(msg)
        return dom_ids
    failed_doms = []
    doms = DBSession.query(VM).filter(VM.id.in_(dom_ids)).order_by(VM.ha_priority.desc()).all()
    for dom in doms:
        try:
            dom = DBSession.query(VM).filter(VM.id == dom.id).first()
            domname = dom.name
            msg = 'Maintenance on Node ' + self.nodename + '. Processing VM ' + domname
            self.msg += '\n\n' + msg + '.\n'
            self.msg += '==============================\n'
            LOGGER.info(msg)
            new_nodename = new_node.hostname
            new_node = DBSession.query(ManagedNode).filter(ManagedNode.id == new_node.id).first()
            if self.has_local_storage(dom.id, new_node):
                msg = 'VM ' + domname + ' has local storage. Can not migrate to ' + new_nodename
                self.msg += '\n' + msg
                LOGGER.info(msg)
                failed_doms.append(dom.id)
                migrated = False
            else:
                msg = 'Migrating VM ' + domname + ' to the node ' + new_nodename
                self.msg += '\n' + msg
                LOGGER.info(msg)
                migrated = False
                if self.dom_migrate(self.auth, dom, node, new_node):
                    migrated = True
                    msg = 'Migrating VM ' + domname + ' to the node ' + new_nodename + ' successful.'
                    self.msg += '\n' + msg
                    LOGGER.info(msg)
                else:
                    failed_doms.append(dom.id)
                    msg = 'Migrating VM ' + domname + ' to the node ' + new_nodename + ' failed.'
                    self.msg += '\n' + msg
                    LOGGER.info(msg)
        except Exception as e:
            traceback.print_exc()
            msg = 'Failover of VM ' + domname + ' to the node ' + new_nodename + ' failed.' + '\n' + to_str(e)
            self.msg += '\n' + msg
            LOGGER.info(msg)
        self.msg += '\n\nFinished processing VM.\n'
        self.msg += '==============================\n'
    return failed_doms
def get_group(self, dom=None, node=None, grp=None):
    LOGGER.debug('In get_group')
    if grp is not None:
        return grp
    if node is None:
        # Walk up from the VM to its hosting node.
        dom_ent = DBSession.query(Entity).filter(Entity.entity_id == dom.id).first()
        node_ent = dom_ent.parents[0]
    else:
        node_ent = DBSession.query(Entity).filter(Entity.entity_id == node.id).first()
    grp_ent = node_ent.parents[0]
    group = DBSession.query(ServerGroup).filter(ServerGroup.id == grp_ent.entity_id).first()
    return group
def is_syncing(self, entity, state_owner):
    syncing = False
    def_link = None
    ent_type = entity.type.name
    if ent_type == constants.MANAGED_NODE:
        def_link = DBSession.query(ServerDefLink).filter_by(server_id=entity.id, def_type=state_owner, transient_state=constants.SYNCING)
    elif ent_type == constants.SERVER_POOL:
        def_link = DBSession.query(SPDefLink).filter_by(group_id=entity.id, def_type=state_owner, transient_state=constants.SYNCING)
    elif ent_type == constants.DATA_CENTER:
        def_link = DBSession.query(DCDefLink).filter_by(site_id=entity.id, def_type=state_owner, transient_state=constants.SYNCING)
    if def_link is not None and def_link.count() > 0:
        syncing = True
    return syncing
def delete_dwm_schedule_details(self, policy_id, group_id):
    try:
        DBSession.query(DWMPolicySchedule).filter(DWMPolicySchedule.sp_id == group_id).filter(DWMPolicySchedule.policy_id == policy_id).delete()
        return True
    except Exception as ex:
        traceback.print_exc()
        raise ex
def update_size(self, defn, scan_result):
    LOGGER.info('Updating sizes...')
    if not scan_result:
        LOGGER.info('Scan result is empty. Not updating sizes.')
        return None
    success = scan_result.get('success')
    if not success:
        LOGGER.info('Scan failed. Not updating sizes.')
        return None
    objDetailsList = scan_result['DETAILS']
    if not objDetailsList:
        LOGGER.error('DETAILS object is not found in scan result. Can not update size in storage_definitions table.')
        return None
    for each_disk in objDetailsList:
        unique_path = each_disk.get('uuid')
        if unique_path:
            used_size = each_disk.get('USED')
            if not used_size:
                used_size = 0
            if float(used_size) < 0:
                used_size = 0
            storage_disk = DBSession.query(StorageDisks).filter_by(storage_id=defn.id, unique_path=to_unicode(unique_path)).first()
            if storage_disk:
                storage_disk.disk_size = used_size
                LOGGER.info('Storage disk is updated')
    transaction.commit()