Example #1
File: Maintenance.py  Project: smarkm/ovm
    def add_vm_states(self, vm_ids):
        try:
            transaction.begin()
            avail_states = DBSession.query(AvailState).filter(AvailState.entity_id.in_(vm_ids)).all()
            from stackone.model.VM import VM
            (vshs, avails) = ([], [])
            for avail in avail_states:
                vsh = DBSession.query(VMStateHistory).\
                    filter(VMStateHistory.node_id == self.node_id).\
                    filter(VMStateHistory.vm_id == avail.entity_id).first()
                if vsh is None:
                    # No history row for this (node, VM) pair yet: create one.
                    vsh = VMStateHistory(self.node_id, avail.entity_id,
                                         avail.avail_state, avail.monit_state,
                                         avail.transient_state,
                                         avail.transient_state_time, avail.owner)
                else:
                    # Refresh the existing row with the VM's current state.
                    vsh.avail_state = avail.avail_state
                    vsh.monit_state = avail.monit_state
                    vsh.transient_state = avail.transient_state
                    vsh.transient_state_time = avail.transient_state_time
                    vsh.owner = avail.owner
                    vsh.timestamp = datetime.now()
                vshs.append(vsh)
                avails.append(avail)
            DBSession.add_all(vshs)
            DBSession.add_all(avails)
            transaction.commit()
        except Exception as e:
            LOGGER.error(to_str(e))
            # Roll back the failed transaction and open a fresh one before re-raising.
            DBSession.rollback()
            transaction.begin()
            traceback.print_exc()
            raise
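The snippet above is a transactional upsert: query for an existing row, create or refresh it, and commit or roll back as a unit. A minimal self-contained sketch of the same pattern, assuming the Zope transaction package with a SQLAlchemy session registered via zope.sqlalchemy (the DBSession, Record, and upsert_state names here are illustrative, not from the project):

# Sketch of the transactional upsert pattern; not project code.
import transaction
from sqlalchemy import create_engine, Column, Integer, String
from sqlalchemy.orm import scoped_session, sessionmaker, declarative_base
from zope.sqlalchemy import register

engine = create_engine('sqlite:///:memory:')
DBSession = scoped_session(sessionmaker(bind=engine))
register(DBSession)  # tie the session's lifecycle to the transaction manager
Base = declarative_base()

class Record(Base):
    __tablename__ = 'records'
    id = Column(Integer, primary_key=True)
    state = Column(String)

Base.metadata.create_all(engine)

def upsert_state(record_id, state):
    try:
        transaction.begin()
        rec = DBSession.query(Record).filter(Record.id == record_id).first()
        if rec is None:
            rec = Record(id=record_id, state=state)  # first sighting: insert
        else:
            rec.state = state                        # already tracked: update
        DBSession.add(rec)
        transaction.commit()
    except Exception:
        transaction.abort()  # undo everything done in this transaction
        raise

upsert_state(1, 'RUNNING')  # inserts a new row
upsert_state(1, 'PAUSED')   # updates the same row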
Example #2
File: Maintenance.py  Project: smarkm/ovm
    def migrate_back_and_start_vms(self):
        LOGGER.debug('In migrate_back')
        vshs = VMStateHistory.get_vm_states(self.node_id)
        (s_flag, f_flag) = (False, False)
        for vsh in vshs:
            try:
                vm = DBSession.query(VM).filter(VM.id == vsh.vm_id).options(eagerload('current_state')).first()
                if vm is None:
                    # The VM was removed while the node was in maintenance:
                    # drop its stale history rows and move on.
                    msg = 'Removing entries for VM ' + str(vsh.vm_id) + ' from VM State History'
                    LOGGER.info(msg)
                    VMStateHistory.remove_vm_states(self.entity_id, vsh.vm_id, True)
                    continue
                domname = vm.name
                msg = 'Processing VM ' + domname + ' for the Node ' + self.nodename + '. '
                self.msg += '\n\n' + msg + '\n'
                self.msg += '==============================\n'
                LOGGER.info(msg)
                vm_ent = DBSession.query(Entity).filter(Entity.entity_id == vm.id).one()
                src_node = DBSession.query(ManagedNode).filter(ManagedNode.id == vm_ent.parents[0].entity_id).options(eagerload('current_state')).one()
        
                # Assumption: the VM counts as already back on this node when
                # its parent entity is this node; then only its run state needs
                # restoring and no migration is required.
                if vm_ent.parents[0].entity_id == self.entity_id:
                    msg = 'VM ' + domname + ' is already under the Node ' + self.nodename
                    self.msg += '\n' + msg
                    LOGGER.info(msg)
                    (was_running, was_paused) = (False, False)
                    if vsh.monit_state == AvailState.MONITORING:
                        was_running = True
                        if vsh.avail_state == VM.PAUSED:
                            was_paused = True
                    if not was_running:
                        # A stopped VM may still need to come up if auto-start is set.
                        config = vm.get_config()
                        if config and config.get('auto_start_vm') == 1:
                            was_running = True

                    if was_running:
                        retry_count = vm.get_attribute_value(constants.retry_count, 3)
                        wait_interval = vm.get_attribute_value(constants.wait_interval, 3)
                        msg = 'Trying to start the VM ' + domname
                        self.msg += '\n' + msg
                        LOGGER.info(msg)
                        if self.dom_start(self.auth, vm, self.node, retry_count, wait_interval):
                            msg = 'Successfully started VM ' + domname
                            self.msg += '\n' + msg
                            LOGGER.info(msg)
                            s_flag = True
                            if was_paused:
                                # Put the VM back into the paused state it held before maintenance.
                                msg = 'Trying to pause VM ' + domname
                                self.msg += '\n' + msg
                                LOGGER.info(msg)
                                if self.dom_pause(self.auth, vm, self.node):
                                    s_flag = True
                                    msg = 'Successfully paused VM ' + domname
                                    self.msg += '\n' + msg
                                    LOGGER.info(msg)
                                else:
                                    f_flag = True
                                    msg = 'Failed to pause VM ' + domname
                                    self.msg += '\n' + msg
                                    LOGGER.info(msg)
                        else:
                            f_flag = True
                            msg = 'Failed to start VM ' + domname
                            self.msg += '\n' + msg
                            LOGGER.info(msg)
                    else:
                        s_flag = True
                    msg = 'Removing entries for VM ' + domname + ' from VM State History'
                    LOGGER.info(msg)
                    VMStateHistory.remove_vm_states(self.entity_id, vsh.vm_id)
                    continue
                if self.is_down(vm):
                    msg = 'Cold Migration of VM ' + domname + ' to the Node ' + self.nodename + '. '
                    self.msg += '\n' + msg
                    LOGGER.info(msg)
                    if self.dom_migrate(self.auth, vm, src_node, self.node):
                        s_flag = True
                        msg = 'Migrating VM ' + domname + ' back to the node ' + self.nodename + ' Complete.'
                        self.msg += '\n' + msg
                        LOGGER.info(msg)
                        # A cold-migrated VM stays down; honor the auto-start setting.
                        config = vm.get_config()
                        if config and config.get('auto_start_vm') == 1:
                            LOGGER.info('auto_start_vm is 1 for Down VM %s' % domname)
                            retry_count = vm.get_attribute_value(constants.retry_count, 3)
                            wait_interval = vm.get_attribute_value(constants.wait_interval, 3)
                            if self.dom_start(self.auth, vm, self.node, retry_count, wait_interval):
                                s_flag = True
                                msg = 'Successfully started Down VM ' + domname
                                self.msg += '\n' + msg
                                LOGGER.info(msg)
                            else:
                                f_flag = True
                                msg = 'Failed to start Down VM ' + domname
                                self.msg += '\n' + msg
                                LOGGER.info(msg)
                    else:
                        f_flag = True
                        msg = 'Migrating VM ' + domname + ' back to the node ' + self.nodename + ' failed.'
                        self.msg += '\n' + msg
                        LOGGER.info(msg)
                else:
                    msg = 'Live Migration of VM ' + domname + ' to the Node ' + self.nodename + '. '
                    self.msg += '\n' + msg
                    LOGGER.info(msg)
                    if self.dom_migrate(self.auth, vm, src_node, self.node, 'true'):
                        s_flag = True
                        msg = 'Migrating VM ' + domname + ' back to the node ' + self.nodename + ' Complete.'
                        self.msg += '\n' + msg
                        LOGGER.info(msg)
                    else:
                        f_flag = True
                        msg = 'Migrating VM ' + domname + ' back to the node ' + self.nodename + ' Failed.'
                        self.msg += '\n' + msg
                        LOGGER.info(msg)
                msg = 'Removing entries for VM ' + domname + ' from VM State History'
                LOGGER.info(msg)
                VMStateHistory.remove_vm_states(self.entity_id, vsh.vm_id)
            except Exception as e:
                f_flag = True
                LOGGER.error(to_str(e))
                traceback.print_exc()

            self.msg += '\n\nFinished processing VM\n'
            self.msg += '==============================\n'
        # Fold per-VM outcomes into one status: both flags set means PARTIAL,
        # and an empty history list counts as SUCCESS.
        if s_flag:
            self.status = self.SUCCESS
        if f_flag:
            self.status = self.FAILURE
        if s_flag and f_flag:
            self.status = self.PARTIAL
        if len(vshs) == 0:
            self.status = self.SUCCESS
        msg = 'Finished processing VMs with the Node ' + self.nodename + '. '
        self.msg += '\n' + msg
        LOGGER.info(msg)
        return True
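Example #2 folds the per-VM outcomes into a single job status through the s_flag/f_flag pair. The same rule, stated compactly as a sketch (the SUCCESS/FAILURE/PARTIAL constants below stand in for the class attributes referenced by the example and are illustrative):

# Sketch of the status-aggregation rule used above; not project code.
SUCCESS, FAILURE, PARTIAL = 'SUCCESS', 'FAILURE', 'PARTIAL'

def aggregate_status(outcomes):
    """outcomes: one boolean per processed VM (True = handled successfully)."""
    outcomes = list(outcomes)
    if not outcomes:
        return SUCCESS            # nothing to migrate back counts as success
    s_flag = any(outcomes)        # at least one VM succeeded
    f_flag = not all(outcomes)    # at least one VM failed
    if s_flag and f_flag:
        return PARTIAL
    return FAILURE if f_flag else SUCCESS

assert aggregate_status([]) == SUCCESS
assert aggregate_status([True, True]) == SUCCESS
assert aggregate_status([True, False]) == PARTIAL
assert aggregate_status([False, False]) == FAILURE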