Example #1
 def resume_work(self,context):
     """
     on resume setting value from task context
     """
     execution_context=context["execution_context"]
     WRK_LOGGER.debug("RESUMING WORKER for :"+self.worker )
     if execution_context:
         self.start_time=execution_context.get("start_time",datetime.utcnow())
         self.worker_ids=execution_context.get("worker_ids",[])
         self.sp_list=execution_context.get("sp_list",[])
         ##validate all the worker ids are taken care of
         ets = DBSession.query(EntityTasks).filter(EntityTasks.worker==self.worker).\
                                 filter(not_(EntityTasks.worker_id.in_(self.worker_ids))).all()
         if len(ets) > 0:
             xtra_work_ids = [et.worker_id for et in ets]
             WRK_LOGGER.error("GOT ENT Tasks different from execution_context :"+self.worker+\
             ": CONTEXT WORKERS : "+str(self.worker_ids) +": XTRA WORKERS :"+str(xtra_work_ids))
             r = DBSession.query(EntityTasks.entity_id).\
                     filter(EntityTasks.worker_id.in_(xtra_work_ids)).\
                     filter(EntityTasks.worker==self.worker).\
                     update(values=dict(worker_id=None,finished=True,end_time=datetime.utcnow()))
             transaction.commit()
             WRK_LOGGER.debug("Cleaned Up entity_tasks . worker:rows : "+self.worker+":"+str(r))
             
     WRK_LOGGER.debug("RESUMING WORKER for :"+self.worker+":"+str(self.start_time)+":"+str(self.worker_ids) )
     self.do_work()
Example #2
File: VM.py Project: RDTeam/openconvirt
    def get_storage_stats(self, vm_id=None):
        storage_stats = {}
        disk_stats = {}
        disk_detail = {}
        if not vm_id:
            vm_id = self.vm_id

        if vm_id:
            vm_disks = DBSession.query(VMDisks).filter_by(vm_id=vm_id)
            for vm_disk in vm_disks:
                disk_detail = {}
                disk_detail["DEV_TYPE"] = vm_disk.dev_type
                disk_detail["IS_LOCAL"] = self.get_remote(vm_disk.disk_name)
                disk_detail["DISK_SIZE"] = vm_disk.disk_size
                disk_detail["DISK_NAME"] = vm_disk.disk_name
                storage_disk_id = None
                vm_storage_link = DBSession.query(VMStorageLinks).filter_by(vm_disk_id=vm_disk.id).first()
                if vm_storage_link:
                    storage_disk_id = vm_storage_link.storage_disk_id

                disk_detail["STORAGE_DISK_ID"] = storage_disk_id
                disk_stats[vm_disk.disk_name] = disk_detail

            storage_stats["LOCAL_ALLOCATION"] = 0
            storage_stats["SHARED_ALLOCATION"] = 0
            storage_stats["DISK_STATS"] = disk_stats
        return storage_stats
Example #3
 def associate_defns(self, site_id, group_id, def_type, def_ids, auth, op_level=None):
     error_desc=""
     site = self.manager.getSite(site_id)
     group=self.manager.getGroup(auth,group_id)
     group_list = self.manager.getGroupList(auth, site_id)
     def_id_list = def_ids.split(",")
     for def_id in def_id_list:
         new_sd = DBSession.query(StorageDef).filter_by(id=def_id).first()
         node = None
         try:
             associate=True
             self.sync_manager.add_defn(new_sd, site, group, node, auth, to_unicode(constants.STORAGE), constants.ATTACH, "ADD_STORAGE_DEF", self.storage_manager, self.manager, op_level, associate)
             
             #matching disks on association of storage.
             vm_disks = self.manager.get_vm_disks_from_pool(auth, group_id)
             storage_disks = DBSession.query(StorageDisks).filter_by(storage_id=def_id)
             if storage_disks:
                 for eachdisk in storage_disks:
                     self.manager.matching_disk_on_discover_storage(vm_disks, eachdisk.id)
         except Exception, ex:
             error_desc = to_str(ex)
             print_traceback()
             LOGGER.error(to_str(ex).replace("'",""))
             #if we get any exception while adding/syncing the definition, we remove the definition.
             add_mode=True
             try:
                 self.sync_manager.remove_defn(new_sd, site, group, node, auth, to_unicode(constants.STORAGE), constants.DETACH, "REMOVE_STORAGE_DEF", self.storage_manager, self.manager, add_mode, group_list, op_level)
             except Exception, ex1:
                 print_traceback()
                 LOGGER.error(to_str(ex1).replace("'",""))
                 raise Exception(to_str(ex1))
             if error_desc:
                 raise Exception(error_desc)
Example #4
File: VM.py Project: gladmustang/cloudvirt
    def get_storage_stats(self, vm_id=None):
        storage_stats = {}
        disk_stats = {}
        disk_detail = {}
        if not vm_id:
            vm_id = self.vm_id

        if vm_id:
            vm_disks = DBSession.query(VMDisks).filter_by(vm_id=vm_id)
            for vm_disk in vm_disks:
                disk_detail = {}
                disk_detail["DEV_TYPE"] = vm_disk.dev_type
                disk_detail["IS_LOCAL"] = self.get_remote(vm_disk.disk_name)
                disk_detail["DISK_SIZE"] = vm_disk.disk_size
                disk_detail["DISK_NAME"] = vm_disk.disk_name
                storage_disk_id = None
                vm_storage_link = DBSession.query(VMStorageLinks).filter_by(
                    vm_disk_id=vm_disk.id).first()
                if vm_storage_link:
                    storage_disk_id = vm_storage_link.storage_disk_id

                disk_detail["STORAGE_DISK_ID"] = storage_disk_id
                disk_stats[vm_disk.disk_name] = disk_detail

            storage_stats["LOCAL_ALLOCATION"] = 0
            storage_stats["SHARED_ALLOCATION"] = 0
            storage_stats["DISK_STATS"] = disk_stats
        return storage_stats
Example #5
    def resume_work(self, context):
        """
        on resume setting value from task context
        """
        execution_context = context["execution_context"]
        WRK_LOGGER.debug("RESUMING WORKER for :" + self.worker)
        if execution_context:
            self.start_time = execution_context.get("start_time",
                                                    datetime.utcnow())
            self.worker_ids = execution_context.get("worker_ids", [])
            self.sp_list = execution_context.get("sp_list", [])
            ##validate all the worker ids are taken care of
            ets = DBSession.query(EntityTasks).filter(EntityTasks.worker==self.worker).\
                                    filter(not_(EntityTasks.worker_id.in_(self.worker_ids))).all()
            if len(ets) > 0:
                xtra_work_ids = [et.worker_id for et in ets]
                WRK_LOGGER.error("GOT ENT Tasks different from execution_context :"+self.worker+\
                ": CONTEXT WORKERS : "+str(self.worker_ids) +": XTRA WORKERS :"+str(xtra_work_ids))
                r = DBSession.query(EntityTasks.entity_id).\
                        filter(EntityTasks.worker_id.in_(xtra_work_ids)).\
                        filter(EntityTasks.worker==self.worker).\
                        update(values=dict(worker_id=None,finished=True,end_time=datetime.utcnow()))
                transaction.commit()
                WRK_LOGGER.debug("Cleaned Up entity_tasks . worker:rows : " +
                                 self.worker + ":" + str(r))

        WRK_LOGGER.debug("RESUMING WORKER for :" + self.worker + ":" +
                         str(self.start_time) + ":" + str(self.worker_ids))
        self.do_work()
Example #6
    def on_remove_group(self, site_id, groupId, auth, def_manager):
        op = constants.DETACH
        
        defType = def_manager.getType()
        site = DBSession.query(Site).filter_by(id=site_id).first()
        group = DBSession.query(ServerGroup).filter_by(id = groupId).first()
        
        defn_list=[]
        #get all the definitions from the group
        #getting pool level definitions here
        sp_defns = DBSession.query(SPDefLink).filter_by(group_id=groupId)
        if sp_defns:
            for eachdefn in sp_defns:
                defn = def_manager.get_defn(eachdefn.def_id)
                if defn:
                    defn_list.append(defn)

        for each_defn in defn_list:
            group_defn = DBSession.query(SPDefLink).filter_by(def_id = each_defn.id, def_type = defType).first()
            if group_defn:
                DBSession.delete(group_defn)

            #delete only those definitions that have server pool scope.
            #data center level definitions are not deleted here since only the server pool is being removed.
            if each_defn.scope == constants.SCOPE_SP:
                DBSession.delete(each_defn)
Example #7
 def update_disks_size(self, auth):
     sites = DBSession.query(Site)
     if sites:
         for eachsite in sites:
             #site = DBSession.query(Sites).filter_by(id=eachsite.id).first()
             site_entity = auth.get_entity(eachsite.id)
             #get all groups in the site.
             group_entities = auth.get_entities(
                 to_unicode(constants.SERVER_POOL), site_entity)
             #loop through each group in the site
             for eachgroup in group_entities:
                 group = DBSession.query(ServerGroup).filter_by(
                     id=eachgroup.entity_id).first()
                 if group:
                     group_entity = auth.get_entity(group.id)
                     #get all nodes in the group
                     node_entities = auth.get_entities(
                         to_unicode(constants.MANAGED_NODE), group_entity)
                     #loop through each node in the group
                     for eachnode in node_entities:
                         node = DBSession.query(ManagedNode).filter_by(
                             id=eachnode.entity_id).first()
                         server_def_link = DBSession.query(
                             ServerDefLink).filter_by(server_id=node.id)
                         if server_def_link:
                             for each_link in server_def_link:
                                 defn = DBSession.query(
                                     StorageDef).filter_by(
                                         id=each_link.def_id).first()
                                 if defn:
                                     self.test_storage_def(
                                         auth, node, group, eachsite, defn)
Example #8
    def disassociate_defn(self, site, group, auth, defn, defType, add_mode,
                          grid_manager):
        LOGGER.info("Disassociating definition...")
        #Go through loop here for each server in the server pool.
        #delete all the definition links for each server from SPDefLink table
        for node in group.getNodeList(auth).itervalues():
            if node:
                node_defn = DBSession.query(ServerDefLink).filter_by(
                    server_id=node.id, def_id=defn.id,
                    def_type=defType).first()
                if node_defn:
                    DBSession.delete(node_defn)

        #delete definition link from SPDefLink table. There would be only one record of the definition since it is server pool level record.
        group_defn = DBSession.query(SPDefLink).filter_by(
            group_id=group.id, def_id=defn.id, def_type=defType).first()
        if group_defn:
            DBSession.delete(group_defn)
        #This function is deleting vm storage links as well as vm_disks related to the storage defn.
        vm_id_list = []
        for node in group.getNodeList(auth).itervalues():
            if node:
                for vm in grid_manager.get_node_doms(auth, node.id):
                    if vm:
                        vm_id_list.append(vm.id)

        grid_manager.remove_vm_links_to_storage(defn.id, vm_id_list)
        transaction.commit()
Example #9
    def on_remove_group(self, site_id, groupId, auth, def_manager):
        op = constants.DETACH

        defType = def_manager.getType()
        site = DBSession.query(Site).filter_by(id=site_id).first()
        group = DBSession.query(ServerGroup).filter_by(id=groupId).first()

        defn_list = []
        #get all the definitions from the group
        #getting pool level definitions here
        sp_defns = DBSession.query(SPDefLink).filter_by(group_id=groupId)
        if sp_defns:
            for eachdefn in sp_defns:
                defn = def_manager.get_defn(eachdefn.def_id)
                if defn:
                    defn_list.append(defn)

        for each_defn in defn_list:
            group_defn = DBSession.query(SPDefLink).filter_by(
                def_id=each_defn.id, def_type=defType).first()
            if group_defn:
                DBSession.delete(group_defn)

            #delete only those definitions that have server pool scope.
            #data center level definitions are not deleted here since only the server pool is being removed.
            if each_defn.scope == constants.SCOPE_SP:
                DBSession.delete(each_defn)
Example #10
    def getDefnsFromGroupList(self, auth, site_id, group_list, defType,
                              defs_array):
        if group_list:
            #getting definitions from each group
            for group in group_list:
                resultset = DBSession.query(SPDefLink).filter_by(
                    group_id=group.id, def_type=defType)
                for row in resultset:
                    defn = self.get_defn(row.def_id)
                    if defn:
                        #set the status here to return and display in grid with definition name.
                        #node_id is None here
                        defn.status = self.get_defn_status(
                            defn, defType, site_id, group.id, None)
                        defs_array.append(defn)

                #getting definitions from each server in the group
                for node in group.getNodeList(auth).itervalues():
                    resultset = DBSession.query(ServerDefLink).filter_by(
                        server_id=node.id, def_type=defType)
                    for row in resultset:
                        defn = DBSession.query(NwDef).filter_by(
                            id=row.def_id, scope=constants.SCOPE_S).first()
                        if defn:
                            #set the status here to return and display in grid with definition name.
                            defn.status = row.status
                            defs_array.append(defn)
        return defs_array
Example #11
def update_avail(node, new_state, monit_state, timestamp, reason, logger, update=True, auth=None):
    sv_point = transaction.savepoint()
    try:
        strt = p_task_timing_start(logger, "UpdateAvailability", node.id, log_level="DEBUG")
        #there is a status change, update and send event
        #update current availability,
        #we only update avail-state, monit_state is updated
        #only by user actions
        node.current_state.avail_state = new_state
        node.current_state.timestamp = timestamp
        node.current_state.description = reason
        avh=DBSession.query(AvailHistory).filter(AvailHistory.entity_id==node.id).\
            order_by(AvailHistory.timestamp.desc()).first()
        if avh is not None:
            avh.endtime=timestamp
            time_diff=timestamp-avh.timestamp
            avh.period=time_diff.days*24*60+time_diff.seconds/60
            DBSession.add(avh)
        #insert availability history
        ah = AvailHistory(node.id, new_state, monit_state, timestamp, reason)
        DBSession.add(ah)
        if update==True:
            ent = DBSession.query(Entity).filter(Entity.entity_id==node.id).first()
            from convirt.model.ManagedNode import ManagedNode
            if ent.type.name == constants.MANAGED_NODE:
                if new_state == ManagedNode.DOWN:
                    notify_node_down(ent.name, reason)
                else:
                    node_up_action(auth, node.id)
    except Exception, e:
        #defer to next time
        import traceback
        traceback.print_exc()
        logger.error(e)
        sv_point.rollback()
Example #12
 def get_vm_linked_with_storage(self, storage_disk_id):
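     # Follow VMStorageLinks -> VMDisks -> VM to find the VM that uses the given storage disk.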
     vm=None
     if storage_disk_id:
         vm_storage_link = DBSession.query(VMStorageLinks).filter_by(storage_disk_id=storage_disk_id).first()
         if vm_storage_link:
             vm_disk = DBSession.query(VMDisks).filter_by(id=vm_storage_link.vm_disk_id).first()
             if vm_disk:
                 vm = DBSession.query(VM).filter_by(id=vm_disk.vm_id).first()
     return vm
Example #13
    def exec_task(self, auth, ctx,node_ids):
        try:
            LOGGER.debug('entered in exec task for NodesAvailability task')
            nodes=DBSession.query(ManagedNode).filter(ManagedNode.id.in_(node_ids)).all()
            node_names = ""
            port=0
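            # Track a connection port common to all nodes; port is reset to -1 when nodes report different ports, which disables the nmap ping below.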
            for node in nodes:
                node_names += node.hostname + " "
                nport=node.get_connection_port()
                if port == 0:
                    port=nport
                else:
                    if port > 0 and nport != port:
                        port=-1

            strt = p_task_timing_start(AVL_LOGGER, "NodesAvailability", node_names.strip().split(' '))
            strt1 = p_task_timing_start(AVL_LOGGER, "PreProcess", node_names.strip().split(' ')[0])
            
            self.completed_nodes = []
            self.pending_nodes = [node_id for node_id in node_ids]
            self.exc_node_ids = [node_id for node_id in node_ids]
            index = 0
            node_id = self.get_next_node_id(index)

            use_nmap = eval(tg.config.get("use_nmap_for_heartbeat", "False"))

            if use_nmap == True and port > 0:
                strt2 = p_task_timing_start(AVL_LOGGER, "NodesNmap", node_names)
                (output, exit_code) = self.do_nmap_ping(node_names=node_names, port=port)
                p_task_timing_end(AVL_LOGGER, strt2)
            else:
                (output, exit_code) = ("", -1)
            p_task_timing_end(AVL_LOGGER, strt1)

            while node_id is not None:
                self.pending_nodes.remove(node_id)
                node = DBSession.query(ManagedNode).filter(ManagedNode.id == node_id).first()
                index+=1
                node_id = self.get_next_node_id(index)

                strt1 = p_task_timing_start(AVL_LOGGER, "NodeRefreshAvail", node.hostname)
                if node:
                    self.current_node = node
                    self.start_time = datetime.utcnow()

                    try:
                        try:
                            node.refresh_avail(auth, exit_code=exit_code, isUp="(" + node.hostname + ")" in output)
                        except Exception, e:
                            LOGGER.error("Error updating Node availability . Server :"+node.hostname)
                            traceback.print_exc()
                    finally:
                        self.completed_nodes.append(node.id)
                p_task_timing_end(AVL_LOGGER, strt1)
        finally:
            self.check_if_hung()
            p_task_timing_end(AVL_LOGGER, strt)
Example #14
    def wait_for_workers_to_finish(self, task_ids):

        WRK_LOGGER.debug("wait_for_workers_to_finish for "+self.worker+" max_worker_wait_time: "+str(self.max_worker_wait_time))
        task_completed = False
        self.wait_start_time=datetime.utcnow()
        ###this is an infinite loop until we find a completed task
        ###we need to add some wait time to check on the status of child tasks
        while task_completed == False:
            time.sleep(5)
            completed_tasks = self.check_tasks_completed(task_ids)
            WRK_LOGGER.debug("wait_for_workers_to_finish for "+self.worker+" completed_tasks :"+str(completed_tasks))
            if len(completed_tasks) > 0:
                task_completed = True

                for task in completed_tasks:
                    self.worker_ids.remove(task['task_id'])
                    WRK_LOGGER.debug("child task completed, update EntityTasks "+self.worker+" completed_tasks :"+str(task['task_id']))
                    ets = DBSession.query(EntityTasks).\
                        filter(EntityTasks.worker_id==to_unicode(task['task_id'])).all()
                    for et in ets:
                        et.worker_id=None
                        et.finished=True
                        et.end_time=datetime.utcnow()
                        DBSession.merge(et)

                    transaction.commit()
                    WRK_LOGGER.debug("child tasks completed, updated EntityTasks "+self.worker)
            else :
#                if True:
#                    continue
                wait_time_sec=(datetime.utcnow()-self.wait_start_time).seconds
                WRK_LOGGER.debug("No completed child tasks for "+self.worker+". waiting for "+str(wait_time_sec))
                if wait_time_sec > self.max_worker_wait_time:
                    task_service = self.svc_central.get_service(self.task_service_id)
                    past_time = self.start_time-timedelta(minutes=1)

                    for task_id in task_ids:
                        task_obj = task_service.get_running_task_obj(task_id)
                        if task_obj:
                            (hung, completed, pending) = task_obj.get_running_status()
                            WRK_LOGGER.debug("HUNG STATUS for "+self.worker+":"+str(hung)+":"+str(task_id)+\
                                ":"+str(completed)+":"+str(pending))
                            if hung:
                                task_completed = True
                                self.worker_ids.remove(task_id)

                                WRK_LOGGER.debug("Hung task. Cleanup EntityTask for "+self.worker+". task id : "+str(task_id))
                                DBSession.query(EntityTasks).filter(EntityTasks.worker==self.worker).\
                                    filter(EntityTasks.entity_id.in_(completed)).\
                                    update(dict(worker_id=None,finished=True, end_time=datetime.utcnow()))
                                DBSession.query(EntityTasks).filter(EntityTasks.worker==self.worker).\
                                    filter(EntityTasks.entity_id.in_(pending)).\
                                    update(dict(worker_id=None,finished=True, start_time=past_time))

                                transaction.commit()
                                WRK_LOGGER.debug("Hung task. Cleaned up EntityTask for "+self.worker+". task id : "+str(task_id))
Example #15
    def get_defns(self,
                  defType,
                  site_id,
                  group_id,
                  node_id=None,
                  op_level=None,
                  auth=None,
                  group_list=None):
        sync_manager = SyncDef()
        defs_array = []

        if op_level == constants.SCOPE_DC:
            resultset = DBSession.query(DCDefLink).filter_by(site_id=site_id,
                                                             def_type=defType)
            for row in resultset:
                defn = DBSession.query(NwDef).filter_by(
                    id=row.def_id).first()  #is_deleted=False
                if defn:
                    #set the status here to return and display in grid with definition name.
                    defn.status = row.status
                    defs_array.append(defn)

            #getting definitions from each group
            #getting definitions from each server in the group
            defs_array = self.getDefnsFromGroupList(auth, site_id, group_list,
                                                    defType, defs_array)
        elif op_level == constants.SCOPE_SP:
            #getting definitions from group and each server in the group
            defs_array = self.getDefnsFromGroupList(auth, site_id, group_list,
                                                    defType, defs_array)
        elif op_level == constants.SCOPE_S:
            resultset = DBSession.query(ServerDefLink).filter_by(
                server_id=node_id, def_type=defType)
            for row in resultset:
                defn = DBSession.query(NwDef).filter_by(
                    id=row.def_id).first()  #is_deleted=False
                if defn:
                    #set the status here to return and display in grid with definition name.
                    defn.status = row.status
                    defs_array.append(defn)
        #Following condition is for NetworkService().get_available_nws() function.
        #when op_level is none then get all the networks created on the server (networks present in serverdeflinks table for that server)
        elif not op_level:
            resultset = DBSession.query(NwDef)\
                .join((ServerDefLink, ServerDefLink.def_id == NwDef.id))\
                .filter(ServerDefLink.server_id == node_id)\
                .filter(ServerDefLink.def_type == defType)

            for defn in resultset:
                if defn:
                    defs_array.append(defn)

        return defs_array
Example #16
 def getSiteDefListToAssociate(self, site_id, group_id, defType):
     sdArray=[]
     if site_id:
         dc_rs = DBSession.query(DCDefLink).filter_by(site_id=site_id, def_type=defType)
         for row in dc_rs:
             sp_def = DBSession.query(SPDefLink).filter_by(group_id=group_id, def_id=row.def_id, def_type=defType).first()
             if not sp_def:
                 defn = DBSession.query(NwDef).filter_by(id=row.def_id, scope=constants.SCOPE_DC).first()
                 if defn:
                     defn.status = row.status
                     sdArray.append(defn)
     return sdArray
Example #17
 def get_vm_linked_with_storage(self, storage_disk_id):
     vm = None
     if storage_disk_id:
         vm_storage_link = DBSession.query(VMStorageLinks).filter_by(
             storage_disk_id=storage_disk_id).first()
         if vm_storage_link:
             vm_disk = DBSession.query(VMDisks).filter_by(
                 id=vm_storage_link.vm_disk_id).first()
             if vm_disk:
                 vm = DBSession.query(VM).filter_by(
                     id=vm_disk.vm_id).first()
     return vm
Example #18
    def check_if_hung(self):
        WRK_LOGGER.debug("Check if Task, "+self.name+" is hung? ")
        marked_hung = False
        try:
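            # self.mark_hung may not be set on this task; the AttributeError handler below simply skips the cleanup in that case.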
            marked_hung = self.mark_hung

            if marked_hung :
                WRK_LOGGER.debug("Task, "+self.name+"("+str(self.task_id)+") was marked hung. updating entity_tasks")
                DBSession.query(EntityTasks).\
                            filter(EntityTasks.worker_id==to_unicode(self.task_id)).\
                            update(dict(worker_id=None,finished=True, end_time=datetime.utcnow()))
#                transaction.commit()
        except AttributeError, e:
            pass
Example #19
File: tasks.py Project: RDTeam/openconvirt
    def check_if_hung(self):
        WRK_LOGGER.debug("Check if Task, "+self.name+" is hung? ")
        marked_hung = False
        try:
            marked_hung = self.mark_hung

            if marked_hung :
                WRK_LOGGER.debug("Task, "+self.name+"("+str(self.task_id)+") was marked hung. updating entity_tasks")
                DBSession.query(EntityTasks).\
                            filter(EntityTasks.worker_id==to_unicode(self.task_id)).\
                            update(dict(worker_id=None,finished=True, end_time=datetime.utcnow()))
#                transaction.commit()
        except AttributeError, e:
            pass
Example #20
File: VM.py Project: RDTeam/openconvirt
    def get_disk_stat(self, vm_id, filename):
        disk_detail = {}

        storage_disk = DBSession.query(StorageDisks).filter_by(unique_path=filename).first()
        if storage_disk:
            vm_disk = DBSession.query(VMDisks).filter_by(vm_id=vm_id, disk_name=filename).first()
            if vm_disk:
                disk_detail = {}
                disk_detail["DEV_TYPE"] = vm_disk.dev_type
                disk_detail["IS_LOCAL"] = self.get_remote(vm_disk.disk_name)
                disk_detail["DISK_SIZE"] = vm_disk.disk_size
                disk_detail["DISK_NAME"] = vm_disk.disk_name
                disk_detail["STORAGE_DISK_ID"] = storage_disk.id
        return disk_detail
Example #21
    def associate_defns(self,
                        site_id,
                        group_id,
                        def_type,
                        def_ids,
                        auth,
                        op_level=None):
        error_desc = ""
        site = self.manager.getSite(site_id)
        group = self.manager.getGroup(auth, group_id)
        group_list = self.manager.getGroupList(auth, site_id)
        def_id_list = def_ids.split(",")
        for def_id in def_id_list:
            new_sd = DBSession.query(StorageDef).filter_by(id=def_id).first()
            node = None
            try:
                associate = True
                self.sync_manager.add_defn(new_sd, site, group, node, auth,
                                           to_unicode(constants.STORAGE),
                                           constants.ATTACH, "ADD_STORAGE_DEF",
                                           self.storage_manager, self.manager,
                                           op_level, associate)

                #matching disks on association of storage.
                vm_disks = self.manager.get_vm_disks_from_pool(auth, group_id)
                storage_disks = DBSession.query(StorageDisks).filter_by(
                    storage_id=def_id)
                if storage_disks:
                    for eachdisk in storage_disks:
                        self.manager.matching_disk_on_discover_storage(
                            vm_disks, eachdisk.id)
            except Exception, ex:
                error_desc = to_str(ex)
                print_traceback()
                LOGGER.error(to_str(ex).replace("'", ""))
                #if we get any exception while adding/syncing the definition, we remove the definition.
                add_mode = True
                try:
                    self.sync_manager.remove_defn(
                        new_sd, site, group, node, auth,
                        to_unicode(constants.STORAGE), constants.DETACH,
                        "REMOVE_STORAGE_DEF", self.storage_manager,
                        self.manager, add_mode, group_list, op_level)
                except Exception, ex1:
                    print_traceback()
                    LOGGER.error(to_str(ex1).replace("'", ""))
                    raise Exception(to_str(ex1))
                if error_desc:
                    raise Exception(error_desc)
Example #22
 def get_defn_status(self, defn, defType, site_id, group_id, node_id):
     status=None
     if defn.scope == constants.SCOPE_DC:
         dc_defn = DBSession.query(DCDefLink).filter_by(site_id=site_id, def_id = defn.id, def_type = defType).first()
         if dc_defn:
             status = dc_defn.status
     elif defn.scope == constants.SCOPE_SP:
         sp_defn = DBSession.query(SPDefLink).filter_by(group_id=group_id, def_id = defn.id, def_type = defType).first()
         if sp_defn:
             status = sp_defn.status
     elif defn.scope == constants.SCOPE_S:
         s_defn = DBSession.query(ServerDefLink).filter_by(server_id=node_id, def_id = defn.id, def_type = defType).first()
         if s_defn:
             status = s_defn.status
     return status
Example #23
    def on_remove_node(self,
                       nodeId,
                       groupId,
                       site_id,
                       auth,
                       def_manager,
                       isTransfer=False):
        op = constants.DETACH
        #If one of them is not present then return from here.
        if not groupId:
            return

        defType = def_manager.getType()
        node = DBSession.query(ManagedNode).filter_by(id=nodeId).first()

        if node:
            #Get all the definitions linked with this server
            defn_list = []
            node_defns = DBSession.query(ServerDefLink).filter_by(
                server_id=nodeId, def_type=defType)
            if node_defns:
                for eachdefn in node_defns:
                    defn = def_manager.get_defn(eachdefn.def_id)
                    if defn:
                        defn_list.append(defn)

            #delete all definition links with this server from serverdeflinks table
            if node_defns:
                for eachdefn in node_defns:
                    defn = def_manager.get_defn(eachdefn.def_id)
                    if defn:
                        #While transferring the server do not delete server definition link.
                        #while deleting node, delete all links with this server.
                        if defn.scope != constants.SCOPE_S:  #and isTransfer==False:
                            #Log the error if the definition status is out of sync. But go ahead with deleting the definition link with the server.
                            if eachdefn.status == constants.OUT_OF_SYNC:
                                LOGGER.error(
                                    "WARNING: The definition status is OUT_OF_SYNC. Still the definition linking with the server is getting deleted. server_id="
                                    + node.id + ", def_id=" + eachdefn.def_id +
                                    ", def_type=" + eachdefn.def_type +
                                    ", details=" + to_str(eachdefn.details))

                            DBSession.delete(eachdefn)

                        #While transferring the server, do not delete definition.
                        #While deleting node, delete only server level definition.
                        if defn.scope == constants.SCOPE_S and isTransfer == False:
                            DBSession.delete(defn)
Example #24
 def user_login(self,args):
     try:
         username = args.get('login')
         password = args.get('password')
         
         user = DBSession.query(User).filter(User.user_name==username).first()
         if user:
             if user.status != True:
                 msg="User: "+username+" is not Active."
                 LOGGER.info(msg)
                 return dict(success=False,user=None,msg=msg)
             sqa_sts = user.validate_password(password)
             if not sqa_sts:
                 msg="Invalid password provided for CMS authentication."
                 LOGGER.info(msg)
                 return dict(success=False,user=None,msg=msg)
             if not len(user.groups):
                 msg="User should belongs to a group"
                 LOGGER.info(msg)
                 return dict(success=False,user=None,msg=msg)
         else:
             msg="Invalid username provided for CMS authentication."
             LOGGER.info(msg)
             return dict(success=False,user=None,msg=msg)
         return dict(success=True,user=username)
     except Exception, e:
         print "Exception", e
         LOGGER.error(e)
         return dict(success=False,user=None,msg=str(e))
Example #25
    def get_server_def_list(self,site_id, group_id, def_id):
        try:
            server_def_list=[]
            node_defns = self.sync_manager.get_node_defns(def_id, to_unicode(constants.NETWORK))
            if node_defns:
                for eachdefn in node_defns:
                    temp_dic={}
                    if eachdefn:
                        node = DBSession.query(ManagedNode).filter_by(id=eachdefn.server_id).first()
                        temp_dic['id']=eachdefn.server_id
                        if node:
                            temp_dic['name']=node.hostname
                        else:
                            temp_dic['name']=None

                        temp_dic['status']=eachdefn.status
                        if eachdefn.details:
                            temp_dic['details']=eachdefn.details
                        else:
                            temp_dic['details']=None

                        server_def_list.append(temp_dic)
        except Exception, ex:
            print_traceback()
            LOGGER.error(to_str(ex).replace("'",""))
            return "{success: false,msg: '",to_str(ex).replace("'",""),"'}"
Example #26
    def get_entities(self, enttype_id):
        result = []

        entities = DBSession.query(Entity).filter(Entity.type_id == enttype_id)
        for ent in entities:
            result.append(dict(entid=ent.entity_id, entname=ent.name))
        return result
Example #27
 def delete_user(self, userid):
     userid = int(userid)
     user = DBSession.query(User).filter(User.user_id == userid).first()
     if user is not None:
         if user.user_name in constants.DEFAULT_USERS:
             raise Exception("Can not delete " + user.user_name + " user.")
         DBHelper().delete_all(User, [], [User.user_id == userid])
Example #28
File: tasks.py Project: RDTeam/openconvirt
    def exec_task(self, auth, ctx,node_ids):
        LOGGER.debug('entered in exec task for VMAvailability task')
        strt = p_task_timing_start(AVL_LOGGER, "VMAvailability", node_ids)
        try:
            self.completed_nodes = []
            self.pending_nodes = [node_id for node_id in node_ids]
            self.exc_node_ids = [node_id for node_id in node_ids]
            index = 0
            node_id = self.get_next_node_id(index)
            while node_id is not None:
                self.pending_nodes.remove(node_id)
                node = DBSession.query(ManagedNode).filter(ManagedNode.id == node_id).first()
                index+=1
                node_id = self.get_next_node_id(index)
                if node and node.is_up():
                    self.current_node = node
                    self.start_time = datetime.utcnow()

                    try:
                        try:
                            strt1 = p_task_timing_start(AVL_LOGGER, "RefreshVMAvail", node.id)
                            node.refresh_vm_avail()
                            p_task_timing_end(AVL_LOGGER, strt1)
                        except Exception, e:
                            LOGGER.error("Error updating VM availability . Server :"+node.hostname)
                            traceback.print_exc()
                    finally:
                        self.completed_nodes.append(node.id)
        finally:
            self.check_if_hung()
            p_task_timing_end(AVL_LOGGER, strt)
Example #29
File: VM.py Project: gladmustang/cloudvirt
    def get_disk_stat(self, vm_id, filename):
        disk_detail = {}

        storage_disk = DBSession.query(StorageDisks).filter_by(
            unique_path=filename).first()
        if storage_disk:
            vm_disk = DBSession.query(VMDisks).filter_by(
                vm_id=vm_id, disk_name=filename).first()
            if vm_disk:
                disk_detail = {}
                disk_detail["DEV_TYPE"] = vm_disk.dev_type
                disk_detail["IS_LOCAL"] = self.get_remote(vm_disk.disk_name)
                disk_detail["DISK_SIZE"] = vm_disk.disk_size
                disk_detail["DISK_NAME"] = vm_disk.disk_name
                disk_detail["STORAGE_DISK_ID"] = storage_disk.id
        return disk_detail
Example #30
 def check_user_updates(self,username):
     update_items = []
     dep=None
     try:
         from convirt.model import Deployment
         deps=DBSession.query(Deployment).all()
         if len(deps) > 0:
             dep=deps[0]
             user_config_filename=os.path.abspath(tg.config.get("user_config"))               
             if not os.path.exists(user_config_filename):
                 user_config_file=open(user_config_filename,"w")                   
                 user_config_file.close()                    
             user_config=PyConfig(filename=user_config_filename)
             date=user_config.get(username)               
             if date !=None:
                 p_r_date =  time.strptime(date, "%Y-%m-%d %H:%M:%S")                   
                 r_date =datetime(*p_r_date[0:5])
             else:
                 r_date=datetime.utcnow()
             edition = get_product_edition()
             (update_items,max_dt) = self.get_new_updates(dep.deployment_id,r_date, edition)
             user_config[username]=max_dt
             user_config.write()
         else:
             LOGGER.error("Deployment table is not set.Update can not proceed.")
             return
     except Exception, ex:
         traceback.print_exc()
         LOGGER.error("Error fetching updates:"+to_str(ex))
         return        
Example #31
    def on_add_node(self, nodeId, groupId, site_id, auth, def_manager):
        op = constants.ATTACH

        #If one of them is not present then return from here.
        if not (nodeId or groupId):
            return

        defn_list = []
        errs = []
        sync_manager = SyncDef()
        defType = def_manager.getType()

        #Link all the definitions in the server pool to this new server node.
        sp_defns = DBSession.query(SPDefLink).filter_by(
            group_id=to_unicode(groupId))
        if sp_defns:
            for eachdefn in sp_defns:
                defn = def_manager.get_defn(eachdefn.def_id)
                if defn:
                    defn_list.append(defn)

                    #Add these default value to this link definition. These values would get changed after sync operation.
                    status = to_unicode(constants.OUT_OF_SYNC)
                    details = None
                    sync_manager.add_node_defn(nodeId, defn.id, defType,
                                               status, details)
Example #32
File: VM.py Project: gladmustang/cloudvirt
 def get_remote(self, filename):
     isLocal = True
     vm_disk = DBSession.query(VMDisks).filter_by(
         disk_name=filename).first()
     if vm_disk:
         isLocal = vm_disk.is_shared
     return isLocal
Example #33
 def getSiteDefListToAssociate(self, site_id, group_id, defType):
     sdArray = []
     if site_id:
         dc_rs = DBSession.query(DCDefLink).filter_by(site_id=site_id,
                                                      def_type=defType)
         for row in dc_rs:
             sp_def = DBSession.query(SPDefLink).filter_by(
                 group_id=group_id, def_id=row.def_id,
                 def_type=defType).first()
             if not sp_def:
                 defn = DBSession.query(NwDef).filter_by(
                     id=row.def_id, scope=constants.SCOPE_DC).first()
                 if defn:
                     defn.status = row.status
                     sdArray.append(defn)
     return sdArray
Example #34
    def resume_task(self, auth, ctx, appliance_entry, image_store, group_id, \
                                image_name, platform, force):

        ###TODO:disk cleanup
        img = DBSession.query(Image).filter(Image.name==image_name).first()
        if img is None:
            raise Exception(constants.INCOMPLETE_TASK)
Example #35
File: VM.py Project: gladmustang/cloudvirt
    def set_storage_disk_id(self, filename, storage_disk_id):
        try:
            file_exists = False
            vm = DBSession.query(VM).filter_by(name=self.config.name).first()
            if vm:
                for de in self.getDisks(vm.id):
                    if filename == de.disk_name:
                        file_exists = True

                if file_exists == True:
                    de_stat = self.get_disk_stat(
                        vm.id, filename)  #self.disk_stats.get(filename)
                    if de_stat is not None:
                        de_stat[self.STORAGE_DISK_ID] = storage_disk_id

                    else:
                        de_stat = {
                            self.DISK_NAME: filename,
                            self.DISK_SIZE: 0,
                            self.DISK_DEV_TYPE: self.UNKNOWN,
                            self.DISK_IS_LOCAL: not is_remote,
                            self.STORAGE_DISK_ID: storage_disk_id
                        }
        except Exception, ex:
            LOGGER.error("Error in set_storage_disk_id(): " + str(ex))
Example #36
    def send_test_email(self, desc, servername, port, useremail, password, secure):
        self.sender = useremail
        Record = DBSession.query(User.email_address).filter(User.user_name == "admin").first()
        self.receivers = Record.email_address

        self.mail_server = servername
        if port:
            self.port = int(port)
        self.secure_type = int(secure)
        self.password = password
        self.subject = "Test Email"
        self.content = "\Test message Sent on " + to_str(ct_time())
        self.msg = MIMEText(self.content, self.text_subtype)
        self.msg["Subject"] = "ConVirt Test Email"

        #        SendSuccess = False
        try:
            if self.secure_type == NONSECURE:
                EmailManager().send_nonsecure(
                    servername, self.port, useremail, Record.email_address, self.msg.as_string()
                )
            elif self.secure_type == TLS:
                EmailManager().send_tls(
                    servername, self.port, useremail, password, Record.email_address, self.msg.as_string()
                )
            else:
                EmailManager().send_ssl(
                    servername, self.port, useremail, password, Record.email_address, self.msg.as_string()
                )
        except Exception, ex:
            #            traceback.print_exc()
            LOGGER.error("Error sending mails:" + to_str(ex).replace("'", ""))
            raise ex
Example #37
    def metric_cache(self, node_id, metric, metric_type, rollup_type, per_type,
                     date1, date2, period):
        """
        Setting value for cache by checking the conditions
        """
        now = datetime.utcnow()
        status = False
        ent = DBSession.query(Entity).filter(Entity.entity_id == node_id).one()
        cache_key = (node_id, ent.type.name, metric, period)
        #check whether the cache key already exists
        if self.cache.has_key(cache_key):
            #            print "FOUNDDDDDDDDDDDDDDDDDD==",(node_id[0],ent.type.name,period,metric)
            cached_time = self.cache[cache_key].get("cached_time")
            if (now > cached_time):
                status = True
        else:
            self.check_cache_limit(self.cache)
            status = True

        if status:
            #query the result and store it in the cache
            result = self.chart_service.get_metrics_specific_value(
                [node_id], metric, metric_type, rollup_type, per_type, date1,
                date2)
            cache_time = now + timedelta(
                minutes=int(tg.config.get(constants.CACHE_TIME)))
            self.cache[cache_key] = {
                "cached_time": cache_time,
                "value": result
            }

        self.cache[cache_key]["last_accessed"] = now
        return self.cache[cache_key].get("value")
Example #38
    def send_email_to_user(self, msg):
        # Query sender and password from email credential table
        # Query mail_server, port, use_secure from the email setup table for the currently logged in user
        # receiver: to be queried from users table
        self.msg = msg
        curr_user_id = session.get('userid')
        #query users table to retrieve email address of the currently logged in user
        userRecord = DBSession.query(User.email_address).filter(User.user_name == curr_user_id).first()
        if userRecord:
            self.receivers = userRecord.email_address
        emailservers = self.get_mailservers()
        for eachmailserver in emailservers:
            if eachmailserver:
                self.mail_server = eachmailserver['MailSetup'].mail_server
                self.port = int(eachmailserver['MailSetup'].port)
                self.secure_type = int(eachmailserver['MailSetup'].use_secure)
                self.cred_details = eachmailserver['Creds'].cred_details
                self.password = self.cred_details['password']
                self.sender = self.cred_details['user_email']
                result = False
                if (self.secure_type== NONSECURE):
                    result = EmailManager().send_nonsecure(self.mail_server,self.port,self.sender,self.receivers,msg)
                elif (self.secure_type== TLS):
                    result = EmailManager().send_tls(self.mail_server,self.port,self.sender,self.password,self.receivers,msg)
                else:
                    result = EmailManager().send_ssl(self.mail_server,self.port,self.sender,self.password,self.receivers,msg)

                if (result == True):
                    return "Test mail sent from " + eachmailserver['MailSetup'].mail_server
Example #39
File: VM.py Project: gladmustang/cloudvirt
    def __init__(self, config):
        self.config = config
        self.storage_stats = {}
        self.vm_id = None
        if self.config:
            vm = DBSession.query(VM).filter_by(
                id=to_unicode(config.id)).first()
            if vm:
                self.vm_id = vm.id
                self.storage_stats = self.get_storage_stats(vm.id)

        self.disk_stats = {}
        if self.storage_stats is not None:
            ds = self.storage_stats.get(self.DISK_STATS)
            if ds is None:
                self.storage_stats[
                    self.DISK_STATS] = self.disk_stats  # initial value of {}
            else:
                self.disk_stats = ds

        self.local_allocation = self.storage_stats.get(self.LOCAL_ALLOC)
        if not self.local_allocation:
            self.local_allocation = 0
        self.shared_allocation = self.storage_stats.get(self.SHARED_ALLOC)
        if not self.shared_allocation:
            self.shared_allocation = 0

        self.storage_disk_id = None
Example #40
    def send_test_email(self, desc, servername, port, useremail, password, secure):
        self.sender = useremail
        Record = DBSession.query(User.email_address).filter(User.user_name =='admin').first()
        self.receivers =Record.email_address

        self.mail_server = servername
        if port:
            self.port = int(port)
        self.secure_type = int(secure)
        self.password = password
        self.subject = "Test Email"
        self.content="\Test message Sent on " + to_str(ct_time())
        self.msg = MIMEText(self.content, self.text_subtype)
        self.msg['Subject']= "WishCloud Test Email"
        
#        SendSuccess = False
        try:
            if (self.secure_type== NONSECURE):
                EmailManager().send_nonsecure(servername,self.port,useremail,Record.email_address,self.msg.as_string())
            elif (self.secure_type== TLS):
                EmailManager().send_tls(servername,self.port,useremail,password,Record.email_address,self.msg.as_string())
            else:
                EmailManager().send_ssl(servername,self.port,useremail,password,Record.email_address,self.msg.as_string())
        except Exception, ex:
#            traceback.print_exc()
            LOGGER.error("Error sending mails:"+to_str(ex).replace("'",""))
            raise ex
Example #41
File: tasks.py Project: RDTeam/openconvirt
    def resume_task(self, auth, ctx, appliance_entry, image_store, group_id, \
                                image_name, platform, force):

        ###TODO:disk cleanup
        img = DBSession.query(Image).filter(Image.name==image_name).first()
        if img is None:
            raise Exception(constants.INCOMPLETE_TASK)
Example #42
    def get_server_def_list(self,site_id, group_id, def_id):
        try:
            server_def_list=[]
            node_defns = self.sync_manager.get_node_defns(def_id, to_unicode(constants.STORAGE))
            if node_defns:
                for eachdefn in node_defns:
                    temp_dic={}
                    if eachdefn:
                        node = DBSession.query(ManagedNode).filter_by(id=eachdefn.server_id).first()

                        temp_dic['id']=eachdefn.server_id
                        if node:
                            temp_dic['name']=node.hostname
                        else:
                            temp_dic['name']=None
                        temp_dic['status']=eachdefn.status
                        if eachdefn.details:
                            temp_dic['details']=eachdefn.details
                        else:
                            temp_dic['details']=None

                        server_def_list.append(temp_dic)
        except Exception, ex:
            LOGGER.error(to_str(ex).replace("'",""))
            return "{success: false,msg: '",to_str(ex).replace("'",""),"'}"
Example #43
File: VM.py Project: gladmustang/cloudvirt
 def get_storage_id(self, filename):
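     # Return the storage_id of the StorageDisks row whose unique_path matches the given filename, or None if no such disk exists.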
     storage_id = None
     storage_disk = DBSession.query(StorageDisks).filter_by(
         unique_path=filename).first()
     if storage_disk:
         storage_id = storage_disk.storage_id
     return storage_id
Example #44
File: VM.py Project: RDTeam/openconvirt
    def __init__(self, config):
        self.config = config
        self.storage_stats = {}
        self.vm_id = None
        if self.config:
            vm = DBSession.query(VM).filter_by(id=to_unicode(config.id)).first()
            if vm:
                self.vm_id = vm.id
                self.storage_stats = self.get_storage_stats(vm.id)

        self.disk_stats = {}
        if self.storage_stats is not None:
            ds = self.storage_stats.get(self.DISK_STATS)
            if ds is None:
                self.storage_stats[self.DISK_STATS] = self.disk_stats  # initial value of {}
            else:
                self.disk_stats = ds

        self.local_allocation = self.storage_stats.get(self.LOCAL_ALLOC)
        if not self.local_allocation:
            self.local_allocation = 0
        self.shared_allocation = self.storage_stats.get(self.SHARED_ALLOC)
        if not self.shared_allocation:
            self.shared_allocation = 0

        self.storage_disk_id = None
Example #45
File: VM.py Project: gladmustang/cloudvirt
    def get_template_info(self):
        """
        returns template name, template's current version and vm & template version match
        for imported vms template name alone is available from config file
        """
        template_info = {}
        template_info["template_name"] = self._config['image_name']
        template_info["template_version"] = '0.0'
        template_info["version_comment"] = ''

        try:
            if self.image_id is not None:
                from convirt.model.ImageStore import Image
                img = DBSession.query(Image).filter(
                    Image.id == self.image_id).one()
                template_info["template_name"] = img.name
                template_info["template_version"] = to_str(
                    self.template_version)
                template_info["version_comment"] = ''
                if self.template_version != img.version:
                    template_info["version_comment"]="*Current version of the Template is "+\
                                                    to_str(img.version)
        except Exception, e:
            LOGGER.error(e)
            pass
Example #46
    def send_email_to_user(self, msg):
        # Query sender and password from email credential table
        # Query mail_server, port, use_secure from the email setup table for the currently logged in user
        # receiver: to be queried from users table
        self.msg = msg
        curr_user_id = session.get("userid")
        # query users table to retrieve email address of the currently logged in user
        userRecord = DBSession.query(User.email_address).filter(User.user_name == curr_user_id).first()
        if userRecord:
            self.receivers = userRecord.email_address
        emailservers = self.get_mailservers()
        for eachmailserver in emailservers:
            if eachmailserver:
                self.mail_server = eachmailserver["MailSetup"].mail_server
                self.port = int(eachmailserver["MailSetup"].port)
                self.secure_type = int(eachmailserver["MailSetup"].use_secure)
                self.cred_details = eachmailserver["Creds"].cred_details
                self.password = self.cred_details["password"]
                self.sender = self.cred_details["user_email"]
                result = False
                if self.secure_type == NONSECURE:
                    result = EmailManager().send_nonsecure(
                        self.mail_server, self.port, self.sender, self.receivers, msg
                    )
                elif self.secure_type == TLS:
                    result = EmailManager().send_tls(
                        self.mail_server, self.port, self.sender, self.password, self.receivers, msg
                    )
                else:
                    result = EmailManager().send_ssl(
                        self.mail_server, self.port, self.sender, self.password, self.receivers, msg
                    )

                if result:
                    return "Test mail sent from " + eachmailserver["MailSetup"].mail_server
Example #47
0
    def metric_cache(self, node_id, metric, metric_type, rollup_type, per_type, date1, date2, period):
        """
        Setting value for cache by checking the conditions
        """
        now = datetime.utcnow()
        status = False
        ent = DBSession.query(Entity).filter(Entity.entity_id == node_id).one()
        cache_key = (node_id, ent.type.name, metric, period)
        # refresh when the cached entry for this key is missing or has expired
        if cache_key in self.cache:
            cached_time = self.cache[cache_key].get("cached_time")
            if now > cached_time:
                status = True
        else:
            self.check_cache_limit(self.cache)
            status = True

        if status:
            # query the result and store it in the cache with a new expiry time
            result = self.chart_service.get_metrics_specific_value(
                [node_id], metric, metric_type, rollup_type, per_type, date1, date2
            )
            cache_time = now + timedelta(minutes=int(tg.config.get(constants.CACHE_TIME)))
            self.cache[cache_key] = {"cached_time": cache_time, "value": result}

        self.cache[cache_key]["last_accessed"] = now
        return self.cache[cache_key].get("value")
Example #48
0
    def exec_task(self, auth, ctx, node_ids):
        LOGGER.debug('entering exec_task for the VMAvailability task')
        strt = p_task_timing_start(AVL_LOGGER, "VMAvailability", node_ids)
        try:
            self.completed_nodes = []
            self.pending_nodes = [node_id for node_id in node_ids]
            self.exc_node_ids = [node_id for node_id in node_ids]
            index = 0
            node_id = self.get_next_node_id(index)
            while node_id is not None:
                self.pending_nodes.remove(node_id)
                node = DBSession.query(ManagedNode).filter(ManagedNode.id == node_id).first()
                index+=1
                node_id = self.get_next_node_id(index)
                if node and node.is_up():
                    self.current_node = node
                    self.start_time = datetime.utcnow()

                    try:
                        try:
                            strt1 = p_task_timing_start(AVL_LOGGER, "RefreshVMAvail", node.id)
                            node.refresh_vm_avail()
                            p_task_timing_end(AVL_LOGGER, strt1)
                        except Exception, e:
                            LOGGER.error("Error updating VM availability . Server :"+node.hostname)
                            traceback.print_exc()
                    finally:
                        self.completed_nodes.append(node.id)
        finally:
            self.check_if_hung()
            p_task_timing_end(AVL_LOGGER, strt)
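
The task drains pending_nodes into completed_nodes so that check_if_hung (not shown here) can tell how far a run got before stalling. A stripped-down sketch of that bookkeeping, ignoring the node lookup and the get_next_node_id ordering; run_over_nodes and process_node are illustrative names, not ConVirt APIs.

def run_over_nodes(node_ids, process_node):
    completed = []
    pending = list(node_ids)
    for node_id in list(pending):
        pending.remove(node_id)
        try:
            process_node(node_id)    # stands in for node.refresh_vm_avail()
        except Exception:
            pass                     # a failing node must not stop the sweep
        finally:
            completed.append(node_id)
    # anything left in pending means the sweep stopped early
    return completed, pending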
Example #49
0
 def get_imagegrp_summary_info(self,grp_id):
     result= []
     grp=DBSession.query(Entity).filter(Entity.entity_id==grp_id).one()
     count = len(grp.children)
     result.append(dict(name='Group Name', value=grp.name))
     result.append(dict(name='Total Templates', value=count))
     return result
Example #50
0
 def get_node_defns(self, def_id, defType):
     defns=[]
     # filter_by returns a Query object (always truthy), so there is nothing to
     # test here; the SELECT runs when the loop iterates the query
     node_defns = DBSession.query(ServerDefLink).filter_by(def_id = def_id, def_type = to_unicode(defType))
     for eachdefn in node_defns:
         defns.append(eachdefn)
     return defns
Example #51
0
    def get_entities(self,enttype_id):
        result= []

        entities=DBSession.query(Entity).filter(Entity.type_id==enttype_id)
        for ent in entities:
            result.append(dict(entid=ent.entity_id, entname=ent.name))
        return result
Example #52
0
 def delete_user(self,userid):
     userid=int(userid)
     user=DBSession.query(User).filter(User.user_id==userid).first()
     if user is not None:
         if user.user_name in constants.DEFAULT_USERS:
             raise Exception("Can not delete "+user.user_name+" user.")
         DBHelper().delete_all(User,[],[User.user_id==userid])
Example #53
0
    def save_user_det(self, login, userid, username, fname, lname, displayname, password, email, phone, status):

        user1 = DBSession.query(User).filter(User.user_name == username).first()

        if user1 is None:
            if not self.check_email(email):
                return 'Email_exist'
            result = []
            user = User()
            user.password = password
            user.firstname = fname
            user.lastname = lname
            user.display_name = displayname
            user.user_name = username
            user.phone_number = phone
            user.email_address = email
            user.created_by = login
            user.modified_by = login
            user.created_date = datetime.now()

            if status == "InActive":
                user.status = False

            # L=(groupids).split(',')
            # if groupids !="":
            #     for i in L:
            #         group=DBSession.query(Group).filter(Group.group_id==int(i)).first()
            #         user.groups.append(group)

            DBHelper().add(user)
            return result
        else:
            result = 'False'
            return result
Example #54
0
 def delete_group(self,groupid):
     groupid=int(groupid)
     group=DBSession.query(Group).filter(Group.group_id==groupid).first()
     if group is not None:
         if group.group_name in constants.DEFAULT_GROUPS:
             raise Exception("Can not delete "+group.group_name+" group.")
         DBHelper().delete_all(Group,[],[Group.group_id==groupid])
Example #55
0
 def set_entity_details(self, ent_id):
     ent = DBSession.query(Entity).filter(
         Entity.entity_id == ent_id).first()
     if ent is not None:
         self.entity_id = ent.entity_id
         self.entity_type = ent.type_id
         self.entity_name = ent.name
Example #56
0
    def edit_nw_defn(self, nw_id, nw_name, nw_desc):
        try:
            errmsgs=[]
            common_desc = { "Network name":nw_name,
                            "Network description":nw_desc}
            for key in common_desc:
                v = common_desc.get(key)
                if not v:
                    errmsgs.append("%s is required." % (key,))
            if errmsgs:
                return {'success':False,'msg':to_str(errmsgs).replace("'","")}
            
            # Identify the definition scope. Since we do not have the node here, check the
            # SPDefLink table: if the definition is present there it is at pool level,
            # otherwise it is at server level. Going forward we could store the scope in the
            # definition tables so it can be read directly from the definition.
            row = DBSession.query(SPDefLink).filter_by(def_id = nw_id).first()
            if row:
                scope = constants.SCOPE_SP
            else:
                scope = constants.SCOPE_S
            
            # Validate against duplicate names
            alldefns=None
            if scope == constants.SCOPE_S:
                node_defn = DBSession.query(ServerDefLink).filter_by(def_id = nw_id).first()
                if node_defn:
                    alldefns = DBSession.query(ServerDefLink).filter_by(server_id = node_defn.server_id, def_type = to_unicode(constants.NETWORK))
            elif scope == constants.SCOPE_SP:
                group_defn = DBSession.query(SPDefLink).filter_by(def_id = nw_id).first()
                if group_defn:
                    alldefns = DBSession.query(SPDefLink).filter_by(group_id = group_defn.group_id, def_type = to_unicode(constants.NETWORK))
            elif scope == constants.SCOPE_DC:
                group_defn = DBSession.query(DCDefLink).filter_by(def_id = nw_id).first()
                if group_defn:
                    alldefns = DBSession.query(DCDefLink).filter_by(site_id = group_defn.site_id, def_type = to_unicode(constants.NETWORK))
            
            if alldefns:
                for eachdefn in alldefns:
                    defnTemp = DBSession.query(NwDef).filter_by(id=eachdefn.def_id, name=nw_name).first()
                    if defnTemp and defnTemp.id != nw_id:
                        raise Exception("Network definition with the same name already exists")   

            defn = DBSession.query(NwDef).filter_by(id=nw_id).first()
            group = None
            auth = None
            self.sync_manager.update_defn(defn, nw_name, nw_desc, None, group, auth, constants.NETWORK, constants.ATTACH, self.nw_manager, 'UPDATE_NETWORK_DEF')
        except Exception, ex:
            print_traceback()
            LOGGER.error(to_str(ex).replace("'",""))
            return {'success':False,'msg':to_str(ex).replace("'","")}
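
The required-field check at the top of edit_nw_defn (build common_desc, collect one message per empty value, bail out early) is a reusable pattern. A minimal, self-contained sketch, with validate_required as an illustrative name rather than a ConVirt helper:

def validate_required(fields):
    # fields maps a human-readable label to the submitted value
    return ["%s is required." % label for label, value in fields.items() if not value]

errors = validate_required({"Network name": "", "Network description": "production VLAN"})
# errors -> ['Network name is required.']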
Example #57
0
 def get_imagestore_summary_info(self,imagestore_id):
     result= []
     imagestore_entity=DBSession.query(Entity).filter(Entity.entity_id==imagestore_id).one()
     location=self.image_store.get_store_location()
     count = len(imagestore_entity.children)
     result.append(dict(name='Template Groups', value=count))
     result.append(dict(name='Location', value=location))
     return result
Example #58
0
    def get_boot_info(self,image_id):
        result= []
        image_instance= DBSession.query(Image).filter(Image.id==image_id).first()
        platform = image_instance.platform
        configs = image_instance.get_configs()
        vm_config = configs[0]

        ent=DBSession.query(Entity).filter(Entity.entity_id==image_id).one()
        result.append(dict(name='Boot Loader', value=vm_config['bootloader']))
        result.append(dict(name='Kernel', value= vm_config['kernel']))
        result.append(dict(name='RAMDisk', value=vm_config['ramdisk']))
        result.append(dict(name='Root Device', value=vm_config['root']))
        result.append(dict(name='Kernel Arguments', value=vm_config['extra']))
        result.append(dict(name='On Power Off', value=vm_config['on_shutdown']))
        result.append(dict(name='On Reboot', value=vm_config['on_reboot']))
        result.append(dict(name='On Crash', value=vm_config['on_crash']))
        return result
Example #59
0
    def getSystemTasks(self, type, user):
        date2 = datetime.utcnow()
        date1 = date2 + timedelta(days=-1)
        if type == "COUNT":
            # count failed system task results from the last 24 hours
            total = 0
            tasks = DBSession.query(Task).filter(Task.entity_id == None).\
                filter(Task.submitted_on > date1).filter(Task.submitted_on < date2).all()

            for t in tasks:
                for tr in t.result:
                    if tr.status == 2:  # status 2 marks a failed result
                        total += 1
            return total

        elif type == "DETAILS":
            result = []
            tasks = DBSession.query(Task).filter(Task.entity_id == None).\
                filter(Task.submitted_on > date1).filter(Task.submitted_on < date2).\
                order_by(Task.submitted_on.desc()).all()
            for t in tasks:
                desc_tuple = t.get_short_desc()
                if desc_tuple is not None:
                    (short_desc, short_desc_params) = desc_tuple
                    tname = _(short_desc) % short_desc_params
                else:
                    tname = t.name
                username = t.user_name
                starttime = ''
                endtime = ''
                for tr in t.result:
                    # drop the microseconds from the timestamps for display
                    starttime = to_str(tr.timestamp).split('.')[0]
                    if tr.status == 2:
                        status = "Failed"
                        err = tr.results
                        endtime = to_str(tr.endtime).split('.')[0]
                        result.append(dict(tname=tname, status=status,
                                           st=starttime, errmsg=err, user=username))
            return result
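
getSystemTasks uses a rolling 24-hour window and treats result status 2 as a failure. A small, ORM-free sketch of the same counting logic over in-memory records; the task dicts and count_recent_failures are illustrative assumptions, not part of the codebase.

from datetime import datetime, timedelta

FAILED = 2  # status code treated as a failure above

def count_recent_failures(tasks, now=None):
    # tasks: iterable of dicts with 'submitted_on' (datetime) and 'results' (list of status codes)
    now = now or datetime.utcnow()
    window_start = now - timedelta(days=1)
    total = 0
    for task in tasks:
        if window_start < task["submitted_on"] < now:
            total += sum(1 for status in task["results"] if status == FAILED)
    return total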