def setup(self):
    """Create and persist an instance of ``self.klass``.

    The constructor kwargs are ``self.attrs`` merged with the result of
    ``self.do_get_dependencies()`` (dependencies win on key collisions).
    The new object is added to the session, flushed, stored on
    ``self.obj`` and returned.  On any failure the session is rolled
    back and the exception re-raised.
    """
    try:
        attrs = dict(self.attrs)
        attrs.update(self.do_get_dependencies())
        self.obj = self.klass(**attrs)
        DBSession.add(self.obj)
        DBSession.flush()
        return self.obj
    except:
        # Deliberately bare: roll the session back on *any* error
        # before propagating it unchanged.
        DBSession.rollback()
        raise
def update_node_defn(self, node_id, group_id, site_id, def_id, def_type,
                     status, dt_time, details, scope, defType):
    """Update one server's definition sync status and roll the aggregate
    status up to its server-pool (SP) or data-center (DC) link row.

    :param node_id: server whose ServerDefLink row gets the new status
    :param group_id: server pool id, used when scope == SCOPE_SP
    :param site_id: data center id, used when scope == SCOPE_DC
    :param def_id: definition id
    :param def_type: unused; ``defType`` is what the queries use
        (both kept so existing callers keep working)
    :param status: new per-server sync status
    :param dt_time: unused; rows are stamped with datetime.utcnow()
    :param details: free-text details stored on the server row
    :param scope: constants.SCOPE_SP or constants.SCOPE_DC
    :param defType: definition type used to filter link rows
    """
    # Per-server status in the ServerDefLink table.
    node_defn = DBSession.query(ServerDefLink).filter_by(
        server_id=node_id, def_id=def_id).first()
    if node_defn:
        node_defn.status = status
        node_defn.dt_time = datetime.utcnow()
        node_defn.details = details

    # Count servers that are OUT_OF_SYNC with this definition; one or
    # more out-of-sync servers marks the whole group/site out of sync.
    # (The old `if rowNodeDefn:` guard tested a Query object, which is
    # always truthy — collapsed into a direct .count().)
    # NOTE(review): this count is not restricted to group_id/site_id —
    # confirm a definition-wide count is intended.
    oos_count = DBSession.query(ServerDefLink).filter_by(
        def_id=def_id, def_type=to_unicode(defType),
        status=to_unicode(constants.OUT_OF_SYNC)).count()
    if oos_count > 0:
        g_status = to_unicode(constants.OUT_OF_SYNC)
    else:
        g_status = to_unicode(constants.IN_SYNC)

    # Roll the aggregate status up to the scope-appropriate link table.
    group_sd = None
    if scope == constants.SCOPE_SP:
        group_sd = DBSession.query(SPDefLink).filter_by(
            group_id=group_id, def_id=def_id,
            def_type=to_unicode(defType)).first()
    elif scope == constants.SCOPE_DC:
        group_sd = DBSession.query(DCDefLink).filter_by(
            site_id=site_id, def_id=def_id,
            def_type=to_unicode(defType)).first()
    if group_sd:
        group_sd.status = g_status
        group_sd.dt_time = datetime.utcnow()
        group_sd.oos_count = oos_count
    # Commit must happen here so the status change is visible to others.
    DBSession.flush()
    transaction.commit()
def exec_task(self, auth, ctx, node_ids, sp_id):
    """Collect server, VM and server-pool metrics for each node in node_ids.

    Walks the node list via ``get_next_node_id``, tracking progress in
    ``self.pending_nodes`` / ``self.completed_nodes`` so the watchdog
    (``check_if_hung``) can detect a stuck collection.  Per-node errors
    are logged and the remaining nodes are still processed.

    :param auth: credentials forwarded to the grid manager calls
    :param ctx: task context (unused here)
    :param node_ids: ids of the managed nodes to collect from
    :param sp_id: server pool whose pool-level metrics are refreshed
    """
    LOGGER.debug('entered in excec task for CollectMetricsForNodes task')
    strt = p_task_timing_start(MTR_LOGGER, "CollectMetrics", node_ids)
    try:
        manager = Basic.getGridManager()
        self.completed_nodes = []
        self.pending_nodes = list(node_ids)
        self.exc_node_ids = list(node_ids)
        index = 0
        node_id = self.get_next_node_id(index)
        while node_id is not None:
            self.pending_nodes.remove(node_id)
            # .first() (not .one()): a node deleted after node_ids was
            # built must be skipped, not raise NoResultFound and abort
            # the whole collection run (the old .one() made the
            # `if m_node is None` guard below unreachable).
            m_node = DBSession.query(ManagedNode).filter(
                ManagedNode.id == node_id).first()
            index += 1
            node_id = self.get_next_node_id(index)
            if m_node is None:
                continue
            self.current_node = m_node
            self.start_time = datetime.utcnow()
            try:
                try:
                    strt1 = p_task_timing_start(MTR_LOGGER, "NodeGetMterics", m_node.id)
                    # Store the server metrics in the database.
                    node_snapshot = manager.collectServerMetrics(auth, m_node, filter=True)
                    # Store the VM metrics in the database.
                    manager.collectVMMetrics(auth, m_node.id, node_snapshot)
                    # Refresh metrics at server-pool level.
                    manager.collectServerPoolMetrics(auth, sp_id)
                    DBSession.flush()
                    transaction.commit()
                    p_task_timing_end(MTR_LOGGER, strt1)
                except Exception:
                    # Best-effort per node: log and move on.
                    LOGGER.error("Error updating metrics . Server :" + m_node.hostname)
                    traceback.print_exc()
            finally:
                self.completed_nodes.append(m_node.id)
    finally:
        self.check_if_hung()
        p_task_timing_end(MTR_LOGGER, strt)
def exec_task(self, auth, ctx, node_ids, sp_id):
    """Collect server, VM and server-pool metrics for each node in node_ids.

    Walks the node list via ``get_next_node_id``, tracking progress in
    ``self.pending_nodes`` / ``self.completed_nodes`` so the watchdog
    (``check_if_hung``) can detect a stuck collection.  Per-node errors
    are logged and the remaining nodes are still processed.

    :param auth: credentials forwarded to the grid manager calls
    :param ctx: task context (unused here)
    :param node_ids: ids of the managed nodes to collect from
    :param sp_id: server pool whose pool-level metrics are refreshed
    """
    LOGGER.debug('entered in excec task for CollectMetricsForNodes task')
    strt = p_task_timing_start(MTR_LOGGER, "CollectMetrics", node_ids)
    try:
        manager = Basic.getGridManager()
        self.completed_nodes = []
        self.pending_nodes = list(node_ids)
        self.exc_node_ids = list(node_ids)
        index = 0
        node_id = self.get_next_node_id(index)
        while node_id is not None:
            self.pending_nodes.remove(node_id)
            # .first() (not .one()): a node deleted after node_ids was
            # built must be skipped, not raise NoResultFound and abort
            # the whole collection run (the old .one() made the
            # `if m_node is None` guard below unreachable).
            m_node = DBSession.query(ManagedNode).filter(
                ManagedNode.id == node_id).first()
            index += 1
            node_id = self.get_next_node_id(index)
            if m_node is None:
                continue
            self.current_node = m_node
            self.start_time = datetime.utcnow()
            try:
                try:
                    strt1 = p_task_timing_start(MTR_LOGGER, "NodeGetMterics", m_node.id)
                    # Store the server metrics in the database.
                    node_snapshot = manager.collectServerMetrics(auth, m_node, filter=True)
                    # Store the VM metrics in the database.
                    manager.collectVMMetrics(auth, m_node.id, node_snapshot)
                    # Refresh metrics at server-pool level.
                    manager.collectServerPoolMetrics(auth, sp_id)
                    DBSession.flush()
                    transaction.commit()
                    p_task_timing_end(MTR_LOGGER, strt1)
                except Exception:
                    # Best-effort per node: log and move on.
                    LOGGER.error("Error updating metrics . Server :" + m_node.hostname)
                    traceback.print_exc()
            finally:
                self.completed_nodes.append(m_node.id)
    finally:
        self.check_if_hung()
        p_task_timing_end(MTR_LOGGER, strt)
def update_node_defn(self, node_id, group_id, site_id, def_id, def_type,
                     status, dt_time, details, scope, defType):
    """Update one server's definition sync status and roll the aggregate
    status up to its server-pool (SP) or data-center (DC) link row.

    :param node_id: server whose ServerDefLink row gets the new status
    :param group_id: server pool id, used when scope == SCOPE_SP
    :param site_id: data center id, used when scope == SCOPE_DC
    :param def_id: definition id
    :param def_type: unused; ``defType`` is what the queries use
        (both kept so existing callers keep working)
    :param status: new per-server sync status
    :param dt_time: unused; rows are stamped with datetime.utcnow()
    :param details: free-text details stored on the server row
    :param scope: constants.SCOPE_SP or constants.SCOPE_DC
    :param defType: definition type used to filter link rows
    """
    # Per-server status in the ServerDefLink table.
    node_defn = DBSession.query(ServerDefLink).filter_by(
        server_id=node_id, def_id=def_id).first()
    if node_defn:
        node_defn.status = status
        node_defn.dt_time = datetime.utcnow()
        node_defn.details = details

    # Count servers that are OUT_OF_SYNC with this definition; one or
    # more out-of-sync servers marks the whole group/site out of sync.
    # (The old `if rowNodeDefn:` guard tested a Query object, which is
    # always truthy — collapsed into a direct .count().)
    # NOTE(review): this count is not restricted to group_id/site_id —
    # confirm a definition-wide count is intended.
    oos_count = DBSession.query(ServerDefLink).filter_by(
        def_id=def_id, def_type=to_unicode(defType),
        status=to_unicode(constants.OUT_OF_SYNC)).count()
    if oos_count > 0:
        g_status = to_unicode(constants.OUT_OF_SYNC)
    else:
        g_status = to_unicode(constants.IN_SYNC)

    # Roll the aggregate status up to the scope-appropriate link table.
    group_sd = None
    if scope == constants.SCOPE_SP:
        group_sd = DBSession.query(SPDefLink).filter_by(
            group_id=group_id, def_id=def_id,
            def_type=to_unicode(defType)).first()
    elif scope == constants.SCOPE_DC:
        group_sd = DBSession.query(DCDefLink).filter_by(
            site_id=site_id, def_id=def_id,
            def_type=to_unicode(defType)).first()
    if group_sd:
        group_sd.status = g_status
        group_sd.dt_time = datetime.utcnow()
        group_sd.oos_count = oos_count
    # Commit must happen here so the status change is visible to others.
    DBSession.flush()
    transaction.commit()