Example #1
def grant_vnc_access(vm_id):
    active_vnc = db((db.vnc_access.vm_id == vm_id) & (db.vnc_access.status == VNC_ACCESS_STATUS_ACTIVE)).count()
    if active_vnc > 0:
        msg = 'VNC access already granted. Please check your mail for further details.'
    else:
        vnc_count = db((db.vnc_access.vm_id == vm_id) & (db.vnc_access.time_requested > (get_datetime() - timedelta(days=1)))).count()
        if vnc_count >= MAX_VNC_ALLOWED_IN_A_DAY :
            msg = 'VNC request has exceeded limit.'
        else:
            try:
                create_vnc_mapping_in_nat(vm_id)
                
                vnc_info = db((db.vnc_access.vm_id == vm_id) & (db.vnc_access.status == VNC_ACCESS_STATUS_ACTIVE)).select()
                if vnc_info:
                    vm_users = []
                    for user in db(db.user_vm_map.vm_id == vm_id).select(db.user_vm_map.user_id):
                        vm_users.append(user['user_id'])
    
                    send_email_vnc_access_granted(vm_users, 
                                                  vnc_info[0].vnc_server_ip, 
                                                  vnc_info[0].vnc_source_port, 
                                                  vnc_info[0].vm_id.vm_name, 
                                                  vnc_info[0].time_requested)
                else:
                    # No active VNC record was found after creating the mapping; treat it as a failure.
                    raise Exception('VNC access record not found after creating the NAT mapping')
                msg = 'VNC access granted. Please check your mail for further details.'
            except:
                msg = 'Some error occurred. Please try again later.'
                log_exception()
    return msg
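
Every example on this page calls a shared log_exception() helper whose definition is not shown here. For orientation, the following is a plausible minimal sketch, not the project's actual implementation: it only assumes that the helper logs the current traceback through the module logger and returns the formatted text, which matches how the examples use it (for its side effect, and for its return value in calls like log_exception('Exception: ') and in the task-failure return tuples).

import logging
import traceback

logger = logging.getLogger(__name__)

def log_exception(prefix=''):
    # Hypothetical sketch: format the exception currently being handled,
    # log it, and return the text so callers can reuse it in messages.
    msg = prefix + traceback.format_exc()
    logger.error(msg)
    return msg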
Example #2
def host_power_operation():
    logger.debug("\nIn host power operation function\n-----------------------------------\n")
    livehosts = current.db(current.db.host.status == HOST_STATUS_UP).select()
    freehosts=[]
    try:
        
        for host_data in livehosts:
            if not has_running_vm(host_data.host_ip.private_ip):
                # Keep the whole host record (not just its IP) so the shutdown
                # branch below can read host_ip and call update_record on it.
                freehosts.append(host_data)
        freehostscount = len(freehosts)
        if(freehostscount == 2):
            logger.debug("Everything is Balanced. Green Cloud :)")
        elif(freehostscount < 2):
            logger.debug("Urgently needed "+str(2-freehostscount)+" more live hosts.")
            newhosts = current.db(current.db.host.status == HOST_STATUS_DOWN).select()[0:(2-freehostscount)] #Select only Shutoff hosts
            for host_data in newhosts:
                logger.debug("Sending magic packet to "+host_data.host_name)
                host_power_up(host_data)
        elif(freehostscount > 2):
            logger.debug("Sending shutdown signal to total "+str(freehostscount-2)+" no. of host(s)")
            extrahosts=freehosts[2:]
            for host_data in extrahosts:
                logger.debug("Moving any dead vms to first running host")
                migrate_all_vms_from_host(host_data.host_ip.private_ip)
                logger.debug("Sending kill signal to " + host_data.host_ip.private_ip)
                commands.getstatusoutput("ssh root@" + host_data.host_ip.private_ip + " shutdown -h now")
                host_data.update_record(status=HOST_STATUS_DOWN)
    except:
        log_exception()
    return
def process_purge_shutdownvm():

    logger.info("ENTERING PURGE SHUTDOWN VM ........") 
    vmShutDownDays = config.get("GENERAL_CONF", "shutdown_vm_days")

    try:
        # Fetch all the VMs which are locked and whose delete warning date is not null.
        # Note: DAL conditions must be combined with '&'; Python's 'and' would silently drop the first condition.
        for vm_data in db((db.vm_data.locked == True) & (db.vm_data.delete_warning_date != None)).select(db.vm_data.ALL):
            daysDiff = (get_datetime() - vm_data.delete_warning_date).days
            if(daysDiff >= 0):
                for vm_details in db(db.vm_event_log.vm_id==vm_data.id).select(db.vm_event_log.ALL,orderby = ~db.vm_event_log.id,limitby=(0,1)):
                    daysDiff=(get_datetime()-vm_details.timestamp).days
                    if(vm_details.new_value == "Shutdown" and int(daysDiff)>=int(vmShutDownDays)):
                        logger.info("Need to delete the VM ID:"+str(vm_data.id)) 
                        add_vm_task_to_queue(vm_data.id,VM_TASK_DELETE)
                        # make an entry in task queue so that scheduler can pick up and delete the VM.
                    else:
                        logger.info("No need to delete the VM ID:"+str(vm_data.id)+" as it is in use now. ")
                        db(db.vm_data.id == vm_details.vm_id).update(locked=False, delete_warning_date=None)
            else:
                logger.info("No need to process shutdown VM :"+str(vm_data.id))
    except:
        log_exception()
        pass
    finally:
        db.commit()
        logger.debug("EXITING PURGE SHUTDOWN VM ........")
Example #4
def install_cont(parameters):
    try:
        cont_id = parameters['cont_id']
        logger.debug("In install_container() function...")
        cont_details = current.db.container_data[cont_id]
        logger.debug(cont_details)
    #     user_name = current.auth.user.username
        mount_hostvolumes = '/root/user/'+ cont_details.owner_id.username +'/'+cont_details.name
        memory = str(cont_details.RAM)+'M'
        
        ret = install_container(
                                cont_details.name,
                                cont_details.image_id,
                                mount_hostvolumes,
                                cont_details.env_vars,
                                cont_details.vCPU,
                                memory,
                                True)
        logger.debug(ret)
        message = ("Container %s created successfully." % cont_details.name)
        cont_details.update_record(UUID=ret['Id'], status=current.VM_STATUS_RUNNING)
        return (current.TASK_QUEUE_STATUS_SUCCESS, message)                    
    except:
        logger.debug("Task Status: FAILED Error: %s " % log_exception())
        return (current.TASK_QUEUE_STATUS_FAILED, log_exception())
def process_snapshot_vm(snapshot_type, vm_id = None, frequency=None):
    """
    Handles snapshot task
    Invoked when scheduler runs task of type 'snapshot_vm'"""
    
    logger.debug("ENTERING SNAPSHOT VM TASK........Snapshot Type: %s"% snapshot_type)
    try:
        if snapshot_type == SNAPSHOT_SYSTEM:
            params={'snapshot_type' : frequency, 'vm_id' : vm_id}
            task[VM_TASK_SNAPSHOT](params)

        else:    
            vms = db(db.vm_data.status.belongs(VM_STATUS_RUNNING, VM_STATUS_SUSPENDED, VM_STATUS_SHUTDOWN)).select()
            for vm_data in vms:
                flag = vm_data.snapshot_flag

                if(snapshot_type & flag):
                    logger.debug("snapshot_type" + str(snapshot_type))
                    vm_scheduler.queue_task(TASK_SNAPSHOT, 
                                            group_name = 'snapshot_task', 
                                            pvars = {'snapshot_type' : SNAPSHOT_SYSTEM, 'vm_id' : vm_data.id, 'frequency' : snapshot_type}, 
                                            start_time = request.now, 
                                            timeout = 60 * MINUTES)
    except:
        log_exception()
        pass
    finally:
        db.commit()
        logger.debug("EXITING SNAPSHOT VM TASK........")
def revert_and_resume(vm_id, vm_identity):

    try:
        sys_snapshot = current.db.snapshot(vm_id=vm_id, type=SNAPSHOT_SYSTEM)
        if sys_snapshot:
            revert({'vm_id': vm_id, 'snapshot_id': sys_snapshot.id})
            logger.debug('Snapshot of %s reverted from %s successfully' %
                         (vm_identity, sys_snapshot.snapshot_name))
            delete_snapshot({'vm_id': vm_id, 'snapshot_id': sys_snapshot.id})
            logger.debug('Snapshot %s deleted successfully' %
                         (sys_snapshot.snapshot_name))

        event_data = current.db.vm_event_log(vm_id=vm_id,
                                             new_value='System Shutdown')
        #         event_data = current.db((current.db.vm_event_log.vm_id == vm_id) & (current.db.vm_event_log.new_value == 'System Shutdown')).select(current.db.vm_event_log.ALL)
        logger.debug(event_data)
        if event_data:
            start({'vm_id': vm_id})
            if int(event_data.old_value) == current.VM_STATUS_SUSPENDED:
                suspend({'vm_id': vm_id})

            del current.db.vm_event_log[event_data.id]
        current.db.commit()
    except:
        log_exception()
        pass
Example #8
def host_power_down(host_data):

    try:
        host_ip = host_data.host_ip.private_ip
        if host_data.host_type == HOST_TYPE_VIRTUAL:
            output = execute_remote_cmd(host_ip, 'root', 'virsh destroy ' + get_host_name[host_ip])
        else:
            setup_type = config.get("GENERAL_CONF","setup_type")
            if setup_type == "nic":
                ucs_management_ip = config.get("UCS_MANAGER_DETAILS","ucs_management_ip")
                logger.debug(ucs_management_ip)
                logger.debug(type(ucs_management_ip))  
                ucs_user = config.get("UCS_MANAGER_DETAILS","ucs_user")
                logger.debug(ucs_user)
                logger.debug(type(ucs_user))
                ucs_password = config.get("UCS_MANAGER_DETAILS","ucs_password")
                logger.debug(ucs_password) 
                host_ip=str(host_ip)
                server_num=host_ip.split('.')
                ucs_server_num=str(int(server_num[3])-20)
                logger.debug("ucs server number is :"+ucs_server_num)
                ssh = paramiko.SSHClient()
                ssh.load_system_host_keys()
                ssh.connect(ucs_management_ip,username=ucs_user,password=ucs_password)                
                stdin, stdout,stderr=ssh.exec_command("scope org / ;  scope org IIT-Delhi ; scope service-profile Badal-Host"+ str(ucs_server_num) + " ; power down ; commit-buffer")  # @UnusedVariable
                output=stdout.readlines()
                if len(output)!= 0:
                    logger.debug("Host not powered up . Command not run properly ")
            else:                        
                output = execute_remote_cmd(host_ip, 'root', 'init 0')
        logger.debug(str(output) + ' ,Host shut down successfully !!!')
    except:
        log_exception()
Example #9
def install_cont(parameters):
    try:
        cont_id = parameters['cont_id']
        logger.debug("In install_container() function...")
        cont_details = current.db.container_data[cont_id]
        logger.debug(cont_details)
#         user_name = current.auth.user.username
#         mount_hostvolumes = '/root/user/'+ cont_details.owner_id.username +'/'+cont_details.name
        memory = str(cont_details.RAM)+'M'
        
        ret = install_container(cont_details.name,
                                cont_details.image_id,
                                cont_details.env_vars,
                                cont_details.vCPU,
                                memory,
                                True)
        logger.debug(ret)
        message = ("Container %s created successfully." % cont_details.name)
        cont_details.update_record(UUID=ret['Id'], status=current.VM_STATUS_RUNNING)
        return (current.TASK_QUEUE_STATUS_SUCCESS, message)                    
    except:
        logger.debug("Task Status: FAILED Error: %s " % log_exception())
        return (current.TASK_QUEUE_STATUS_FAILED, log_exception())
Example #11
def check_vm_snapshot_sanity(vm_id):
    """
    Checks if the snapshot information of VM is in sync with actual snapshots of the VM.
    """
    vm_data = db.vm_data[vm_id]
    snapshot_check = []
    try:
        conn = libvirt.openReadOnly('qemu+ssh://root@' +
                                    vm_data.host_id.host_ip.private_ip +
                                    '/system')
        domain = conn.lookupByName(vm_data.vm_identity)

        dom_snapshot_names = domain.snapshotListNames(0)
        logger.debug(dom_snapshot_names)
        conn.close()

        snapshots = db(db.snapshot.vm_id == vm_id).select()
        for snapshot in snapshots:
            if snapshot.snapshot_name in dom_snapshot_names:
                snapshot_check.append({'snapshot_name': snapshot.snapshot_name,
                                       'snapshot_type': get_snapshot_type(snapshot.type),
                                       'message': 'Snapshot present',
                                       'operation': 'None'})
                dom_snapshot_names.remove(snapshot.snapshot_name)
            else:
                snapshot_check.append({'snapshot_id': snapshot.id,
                                       'snapshot_name': snapshot.snapshot_name,
                                       'snapshot_type': get_snapshot_type(snapshot.type),
                                       'message': 'Snapshot not present',
                                       'operation': 'Undefined'})

        for dom_snapshot_name in dom_snapshot_names:
            snapshot_check.append({
                'vm_name': vm_data.vm_identity,
                'snapshot_name': dom_snapshot_name,
                'snapshot_type': 'Unknown',
                'message': 'Orphan Snapshot',
                'operation': 'Orphan'
            })

    except Exception:
        log_exception()
    logger.debug(snapshot_check)
    return (vm_data.id, vm_data.vm_name, snapshot_check)
Example #12
def create_public_ip_mapping_in_nat(vm_id):
    
    vm_data = current.db.vm_data[vm_id]
    try:
        create_mapping(vm_data.public_ip, vm_data.private_ip)
        
        logger.debug("Updating DB")
        current.db(current.db.public_ip_pool.public_ip == vm_data.public_ip).update(vm_id = vm_id)
    except:
        log_exception()
def process_loadbalancer():
    logger.info("ENTERING PROCESS LOADBALANCER VM ........")
    try:
        (host_list,vm_list)=find_host_and_guest_list()
        loadbalance_vm(host_list,vm_list) 
    except:
        log_exception()
        pass
    finally:
        logger.debug("EXITING PROCESS LOADBALANCER VM......")
Example #14
def process_loadbalancer():
    logger.info("ENTERING PROCESS LOADBALANCER VM ........")
    try:
        (host_list, vm_list) = find_host_and_guest_list()
        loadbalance_vm(host_list, vm_list)
    except:
        log_exception()
        pass
    finally:
        logger.debug("EXITING PROCESS LOADBALANCER VM......")
def remove_public_ip_mapping_from_nat(vm_id):

    vm_data = current.db.vm_data[vm_id]
    try:
        remove_mapping(vm_data.public_ip, vm_data.private_ip)

        # Update DB
        logger.debug("Updating DB")
        vm_data.update_record(public_ip=None)
    except:
        log_exception()
def process_unusedvm():
    logger.info("ENTERING PROCESS UNUSED VM ........")
    try:
        process_shutdown_unusedvm()
        process_purge_shutdownvm()
    except:
        log_exception()
        pass
    finally:
        db.commit()
        logger.debug("EXITING PROCESS UNUSED VM......")
Example #17
def remove_public_ip_mapping_from_nat(vm_id):
    
    vm_data = current.db.vm_data[vm_id]
    try:
        remove_mapping(vm_data.public_ip, vm_data.private_ip)
        
        # Update DB 
        logger.debug("Updating DB")
        vm_data.update_record(public_ip = None)
    except:
        log_exception()
def create_public_ip_mapping_in_nat(vm_id):

    vm_data = current.db.vm_data[vm_id]
    try:
        create_mapping(vm_data.public_ip, vm_data.private_ip)

        logger.debug("Updating DB")
        current.db(
            current.db.public_ip_pool.public_ip == vm_data.public_ip).update(
                vm_id=vm_id)
    except:
        log_exception()
Example #19
def snapshot_and_destroy(vm_id, vm_identity, vm_status):
    try:
        logger.debug('Inside snapshot_and_destroy for %s' %(vm_identity))
        if vm_status == current.VM_STATUS_RUNNING:
            snapshot({'vm_id':vm_id, 'snapshot_type':SNAPSHOT_SYSTEM})
            logger.debug('Snapshot of %s completed successfully' %(vm_identity))

        destroy({'vm_id':vm_id})
        logger.debug('%s destroyed successfully' %(vm_identity))
    except:
        log_exception()
        pass
Example #20
def backup_cont(parameters):
    try:
        cont_id = parameters['cont_id']
        logger.debug("In backup_container() function...")
        cont_details = current.db.container_data[cont_id]
        container = Container(cont_details.UUID)
        container.backup(cont_details.owner_id.username)
        message = "Container commited successfully."
        return (current.TASK_QUEUE_STATUS_SUCCESS, message)                    
    except:
        logger.debug("Task Status: FAILED Error: %s " % log_exception())
        return (current.TASK_QUEUE_STATUS_FAILED, log_exception())
Example #21
def remove_vnc_mapping_from_nat(vm_id):
    vm_data = current.db.vm_data[vm_id]
    vnc_host_ip = config.get("GENERAL_CONF", "vnc_ip")
    host_ip = vm_data.host_id.host_ip.private_ip
    vnc_port = vm_data.vnc_port

    try:
        remove_mapping(vnc_host_ip, host_ip, vnc_port, vnc_port)
        logger.debug("Updating DB")
        current.db(current.db.vnc_access.vm_id == vm_id).update(status = VNC_ACCESS_STATUS_INACTIVE)
    except:
        log_exception()
def snapshot_and_destroy(vm_id, vm_identity, vm_status):
    try:
        logger.debug('Inside snapshot_and_destroy for %s' % (vm_identity))
        if vm_status == current.VM_STATUS_RUNNING:
            snapshot({'vm_id': vm_id, 'snapshot_type': SNAPSHOT_SYSTEM})
            logger.debug('Snapshot of %s completed successfully' %
                         (vm_identity))

        destroy({'vm_id': vm_id})
        logger.debug('%s destroyed successfully' % (vm_identity))
    except:
        log_exception()
        pass
def host_sanity_check():
    """
    Handles periodic Host sanity check
    Invoked when scheduler runs task of type 'host_sanity'"""
    
    logger.info("ENTERNING HOST SANITY CHECK........")
    try:
        host_status_sanity_check()
    except:
        log_exception()
        pass
    finally:
        logger.debug("EXITING HOST SANITY CHECK........")
def check_vnc_access():
    """
    Clears all timed out VNC Mappings
    Invoked when scheduler runs task of type 'vnc_access'"""
    
    logger.info("ENTERNING CLEAR ALL TIMEDOUT VNC MAPPINGS")
    try:
        clear_all_timedout_vnc_mappings()
    except:
        log_exception()
        pass
    finally: 
        logger.debug("EXITING CLEAR ALL TIMEDOUT VNC MAPPINGS........")
Example #26
def delete_cont(parameters):
    try:
        cont_id = parameters['cont_id']
        logger.debug("In delete_container() function...")
        cont_details = current.db.container_data[cont_id]
        container = Container(cont_details.UUID)
        container.remove()
        del current.db.container_data[cont_id]
        message = "Container deleted successfully."
        return (current.TASK_QUEUE_STATUS_SUCCESS, message)
    except:
        logger.debug("Task Status: FAILED Error: %s " % log_exception())
        return (current.TASK_QUEUE_STATUS_FAILED, log_exception())
Example #27
def suspend_cont(parameters):
    try:
        cont_id = parameters['cont_id']
        logger.debug("In suspend_container() function...")
        cont_details = current.db.container_data[cont_id]
        container = Container(cont_details.UUID)
        container.pause()
        cont_details.update_record(status=current.VM_STATUS_SUSPENDED)
        message = "Container suspended successfully."
        return (current.TASK_QUEUE_STATUS_SUCCESS, message)
    except:
        logger.debug("Task Status: FAILED Error: %s " % log_exception())
        return (current.TASK_QUEUE_STATUS_FAILED, log_exception())
Example #28
def resume_cont(parameters):
    try:
        cont_id = parameters['cont_id']
        logger.debug("In resume_container() function...")
        cont_details = current.db.container_data[cont_id]
        container = Container(cont_details.UUID,setProp=False)
        container.resume()
        cont_details.update_record(status=current.VM_STATUS_RUNNING)
        message = "Container resumed successfully."
        return (current.TASK_QUEUE_STATUS_SUCCESS, message)                    
    except:
        logger.debug("Task Status: FAILED Error: %s " % log_exception())
        return (current.TASK_QUEUE_STATUS_FAILED, log_exception())
Example #29
def delete_cont(parameters):
    try:
        cont_id = parameters['cont_id']
        logger.debug("In delete_container() function...")
        cont_details = current.db.container_data[cont_id]
        container = Container(cont_details.UUID)
        container.remove()
        del current.db.container_data[cont_id]
        message = "Container deleted successfully."
        return (current.TASK_QUEUE_STATUS_SUCCESS, message)                    
    except:
        logger.debug("Task Status: FAILED Error: %s " % log_exception())
        return (current.TASK_QUEUE_STATUS_FAILED, log_exception())
def vm_sanity_check():
    """
    Handles periodic VM sanity check
    Invoked when scheduler runs task of type 'vm_sanity'"""
    
    logger.info("ENTERNING VM SANITY CHECK........")
    try:
        check_vm_sanity()
    except:
        log_exception()
        pass
    finally:
        logger.debug("EXITING VM SANITY CHECK........")
Example #31
def resume_cont(parameters):
    try:
        cont_id = parameters['cont_id']
        logger.debug("In resume_container() function...")
        cont_details = current.db.container_data[cont_id]
        container = Container(cont_details.UUID)
        container.resume()
        cont_details.update_record(status=current.VM_STATUS_RUNNING)
        message = "Container resumed successfully."
        return (current.TASK_QUEUE_STATUS_SUCCESS, message)                    
    except:
        logger.debug("Task Status: FAILED Error: %s " % log_exception())
        return (current.TASK_QUEUE_STATUS_FAILED, log_exception())
Example #35
def has_running_vm(host_ip):
    found=False
    if not check_host_status(host_ip):
        logger.debug("Host %s is down" %(host_ip))
        return False
    try:
        domains = get_host_domains(host_ip)
        for dom in domains:
            logger.debug("Checking "+str(dom.name()))
            if(dom.info()[0] != VIR_DOMAIN_SHUTOFF):
                found=True
    except:
        log_exception()
    return found
Example #36
def process_unusedvm():
    """
    Purge/shutdown the unused VM's
    """
    logger.info("ENTERING PROCESS UNUSED VM ........")
    try:
        process_shutdown_unusedvm()
        process_purge_shutdownvm()
    except:
        log_exception()
        pass
    finally:
        db.commit()
        logger.debug("EXITING PROCESS UNUSED VM......")
Example #37
def grant_novnc_access(vm_id):

    msg = ""
    active_vnc = db((db.vnc_access.vm_id == vm_id) & (
        db.vnc_access.status == VNC_ACCESS_STATUS_ACTIVE)).count()

    if active_vnc > 0:
        vm_data = db(db.vnc_access.vm_id == vm_id).select().first()
        token = vm_data.token
        msg = 'VNC access already granted. Please check your mail for further details.'
    else:
        vnc_count = db((db.vnc_access.vm_id == vm_id)
                       & (db.vnc_access.time_requested >
                          (get_datetime() - timedelta(days=1)))).count()
        if vnc_count >= MAX_VNC_ALLOWED_IN_A_DAY:
            msg = 'VNC request has exceeded limit.'
        else:
            try:
                f = os.popen('openssl rand -hex 10')
                token = f.read()
                token = token.split("\n")
                token = token[0]
                create_novnc_mapping(vm_id, token)
                vm_data = db(db.vm_data.id == vm_id).select().first()
                host_ip = vm_data.host_id.host_ip.private_ip
                vnc_port = vm_data.vnc_port
                vnc = str(vnc_port)
                file_token = str(token) + ":" + " " + str(host_ip) + ":" + str(
                    vnc) + "\n"
                myfile = get_file_append_mode("/home/www-data/token.list")
                myfile.write(file_token)
                command = "ps -ef | grep websockify|awk '{print $2}'"
                port = config.get("NOVNC_CONF", "port")
                server_ip = config.get("NOVNC_CONF", "server_ip")
                return_value = execute_remote_cmd(server_ip, 'root', command)
                return_value = return_value.split()
                if len(return_value) <= 2:
                    command = "./noVNC/utils/websockify/run --web /root/noVNC --target-config /home/www-data/token.list " + str(
                        server_ip) + ":" + str(port) + " > /dev/null 2>&1 &"
                    return_value = execute_remote_cmd(server_ip, 'root',
                                                      command)
                msg = 'VNC access granted. Please check your mail for further details.'
            except:
                logger.debug('Some Error Occurred. Please try later')
                log_exception()
                pass

    logger.debug(msg)
    return token
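
grant_novnc_access shells out to openssl via os.popen to obtain a random hex token. As an alternative sketch (not what the project does), the same 10-byte token can be generated with the standard library, which avoids the extra process and the manual newline handling:

import binascii
import os

# Equivalent of the shell call "openssl rand -hex 10":
# 10 random bytes rendered as a 20-character hex string.
token = binascii.hexlify(os.urandom(10)).decode('ascii')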
Example #39
def process_vmdaily_checks():
    """
    Check for the shutdown VM's and unused VM's and sends warning email to the user
    """
    logger.info("Entering VM's Daily Checks........")

    try:
        process_sendwarning_unusedvm()
        process_sendwarning_shutdownvm()
    except:
        log_exception()
        pass
    finally:
        db.commit()
        logger.debug("EXITING VM DAILY CHECKS........")
Example #40
def exception_handler():
    msg = log_exception('Exception: ') 
    if is_moderator():
        error = msg
    else:
        error = 'Some error has occurred'
    redirect(URL(c='default', f='error',vars={'error':error}))    
Example #42
    def upload_file(self, uploadedFile):
        '''
        Upload a file via AJAX request; the file will be created in a temporary directory and the full path will
        be sent back as a JSON response.

        @param self: the class instance itself
        @param uploadedFile: the uploaded file
        @return: the response as a dictionary, will be serialized to JSON by CherryPy.
        '''
        signature = __name__ + '.WebController.upload_file()'
        helper.log_entrance(self.logger, signature, {'uploadedFile' : uploadedFile})
        
        try:
            temp_dir = os.path.join(TEMP_DATA_DIRECTORY, uuid.uuid1().hex)
            os.mkdir(temp_dir)
            filepath = os.path.join(temp_dir, uploadedFile.filename)
            
            # We must use 'wb' mode here in case the uploaded file is not ascii format.
            with open(filepath, 'wb') as output:
                while True:
                    data = uploadedFile.file.read(1024)
                    if data:
                        output.write(data)
                    else:
                        break
            try:
                with open(filepath, 'r') as input:
                    points, panels = self.determine_points_panels(input)
                    ret = {
                        'filepath' : filepath,
                        'points' : points,
                        'panels' : panels
                    }
                    helper.log_exit(self.logger, signature, [ret])
                    return ret
            except Exception as e:
                helper.log_exception(self.logger, signature, e)
                ret = { 'filepath' : filepath }
                helper.log_exit(self.logger, signature, [ret])
                return ret
        except Exception as e:
            helper.log_exception(self.logger, signature, e)
            # Server internal error, respond with 500
            cherrypy.response.status = 500
            ret = { 'error' : str(e) }
            helper.log_exit(self.logger, signature, [ret])
            return ret
def process_sendwarning_unusedvm():

    logger.info("Entering send warning to unused VM........")

    try:
        # Performing daily checks for network usage
        vmCPUThreshold  = config.get("GENERAL_CONF", "cpu_threshold_limit")
        vmreadThreshold = config.get("GENERAL_CONF", "nwRead_threshold_limit")
        vmwriteThreshold = config.get("GENERAL_CONF", "nwWrite_threshold_limit")

        thresholdcontext = dict(CPUThreshold=vmCPUThreshold,
                                ReadThreshold=vmreadThreshold,
                                WriteThreshold=vmwriteThreshold)

        logger.info("checking network usage with threshold values as CPUThreshold is:"+str(thresholdcontext['CPUThreshold'])+" WriteThreshold is :"+str(thresholdcontext['WriteThreshold'])+" ReadThreshold is :"+ str(thresholdcontext['ReadThreshold']))

        vms = db(db.vm_data.status.belongs(VM_STATUS_RUNNING, VM_STATUS_SUSPENDED) & (db.vm_data.shutdown_warning_date == None) & (db.vm_data.start_time < (get_datetime() - timedelta(days=20)))).select()
        # Only VMs created at least 20 days back are considered

        for vm in vms:
            logger.info("comparing threshold for the vm "+ str(vm.vm_identity))
            send_email=0
            retVal=compare_rrd_data_with_threshold(vm.vm_identity,thresholdcontext)
            if(retVal == True): 
                vm_users = []
                vm_name  = ""
                for user in db((db.user_vm_map.vm_id == vm.id) & (db.user_vm_map.vm_id == db.vm_data.id) & (db.vm_data.shutdown_warning_date == None )).select(db.user_vm_map.user_id,db.vm_data.vm_name):
                    send_email=1
                    vm_users.append(user.user_vm_map.user_id)
                    vm_name=user.vm_data.vm_name

                if (send_email == 1):
                    vm_shutdown_time=send_email_vm_warning(VM_TASK_WARNING_SHUTDOWN,vm_users,vm_name,'')
                    logger.debug("Mail sent for vm_name:"+str(vm_name)+"|shutdown time returned from the function:"+ str(vm_shutdown_time))
                    db(db.vm_data.id == vm.id).update(shutdown_warning_date=vm_shutdown_time)
                    db.commit()
                else:
                    logger.debug("Warning Email to use the VM has already been sent to VM_ID:"+str(vm.id))
            else:
                logger.info("VM:"+str(vm.id)+" is in use.. no need to send shutdown warning mail ...")
    except:
        log_exception()
        pass
    finally:
        db.commit()
        logger.debug("EXITING send warning to unused VM........")
Example #45
def migrate_all_vms_from_host(host_ip):

    try:
        domains = get_host_domains(host_ip)
        for dom in domains:
            vm_details = current.db.vm_data(vm_identity=dom.name())
            if vm_details:
                if dom.info()[0] == VIR_DOMAIN_SHUTOFF:    # If the VM is in Off state, move it to another host
                    logger.debug("Moving "+str(dom.name())+" to another host")
                    add_migrate_task_to_queue(vm_details['id'])
                elif dom.info()[0] in (VIR_DOMAIN_PAUSED, VIR_DOMAIN_RUNNING):
                    logger.debug("Moving running vm "+str(dom.name())+" to appropriate host in queue")
                    add_migrate_task_to_queue(vm_details['id'], live_migration="on")
        
    except:
        log_exception()
    return
Example #46
    def postprocess(self, json_str):
        '''
        Run post-processing.

        @param self: the class instance itself
        @param json_str: the json string posted by client
        @return: the response as a dictionary, will be serialized to JSON by CherryPy.
        '''
        signature = __name__ + '.WebController.postprocess()'
        helper.log_entrance(self.logger, signature, {'json_str': json_str})
        # Set session variable postprocess_done to False by default.
        cherrypy.session['postprocess_done'] = False
        
        try:
            if not cherrypy.session.has_key('simulation_done') or not cherrypy.session['simulation_done']:
                # simulation must be run first
                cherrypy.response.status = 400
                ret = { 'error' : 'Simulation must be run first.' }
                helper.log_exit(self.logger, signature, [ret])
                return ret
            else:
                # Call post-processing service
                ret = {
                    'log' : services.postprocess(cherrypy.session['simulation_dir'],
                                                 self.construct_postprocess_parameters(json_str), self.queue)
                }
                cherrypy.session['postprocess_done'] = True
                helper.log_exit(self.logger, signature, [ret])
                return ret
        except (TypeError, ValueError) as e:
            helper.log_exception(self.logger, signature, e)
            # Error with input, respond with 400
            cherrypy.response.status = 400
            ret = { 'error' : str(e) }
            helper.log_exit(self.logger, signature, [ret])
            return ret
        except Exception as e:
            helper.log_exception(self.logger, signature, e)
            # Server internal error, respond with 500
            cherrypy.response.status = 500
            ret = { 'error' : str(e) }
            helper.log_exit(self.logger, signature, [ret])
            return ret
Example #47
    def postprocess(self, json_str):
        '''
        Run post-processing.

        @param self: the class instance itself
        @param json_str: the json string posted by client
        @return: the response as a dictionary, will be serialized to JSON by CherryPy.
        '''
        signature = __name__ + '.WebController.postprocess()'
        helper.log_entrance(self.logger, signature, {'json_str': json_str})
        # Set session variable postprocess_done to False by default.
        cherrypy.session['postprocess_done'] = False
        
        try:
            if not cherrypy.session.has_key('simulation_done') or not cherrypy.session['simulation_done']:
                # simulation must be run first
                cherrypy.response.status = 400
                ret = { 'error' : 'Simulation must be run first.' }
                helper.log_exit(self.logger, signature, [ret])
                return ret
            else:
                # Call post-processing service
                ret = {
                    'log' : services.postprocess(cherrypy.session['simulation_dir'],
                                                 self.construct_postprocess_parameters(json_str))
                }
                cherrypy.session['postprocess_done'] = True
                helper.log_exit(self.logger, signature, [ret])
                return ret
        except (TypeError, ValueError) as e:
            helper.log_exception(self.logger, signature, e)
            # Error with input, respond with 400
            cherrypy.response.status = 400
            ret = { 'error' : str(e) }
            helper.log_exit(self.logger, signature, [ret])
            return ret
        except Exception as e:
            helper.log_exception(self.logger, signature, e)
            # Server internal error, respond with 500
            cherrypy.response.status = 500
            ret = { 'error' : str(e) }
            helper.log_exit(self.logger, signature, [ret])
            return ret
Example #48
def process_unusedvm_purge():

    logger.info("ENTERING PURGE UNUSED VM ........") 

    try:
        # Fetch all the VMs which are locked; delete those whose delete warning date has passed.
        for vm_data in db(db.vm_data.locked == True).select(db.vm_data.ALL):
            for vm_details in db(db.vm_event_log.vm_id==vm_data.id).select(db.vm_event_log.ALL,orderby = ~db.vm_event_log.id,limitby=(0,1)):
                daysDiff=(get_datetime()-vm_data.delete_warning_date).days
                if(vm_details.new_value == "Shutdown" and daysDiff >= 0):
                    logger.info("Need to delete the VM ID:"+str(vm_data.id)) 
                    add_vm_task_to_queue(vm_data.id,VM_TASK_DELETE)
                    # make an entry in task queue so that scheduler can pick up and delete the VM.
    except:
        log_exception()
        pass
    finally:
        db.commit()
        logger.debug("EXITING PURGE UNUSED VM ........")
Example #50
def process_container_queue(task_event_id):
    """
    Invoked when scheduler runs task of type 'Container_Task'
    For every task, function calls the corresponding handler
    and updates the database on the basis of the response 
    """
    logger.info("\n ENTERING Container_Task........")

    task_event_data = db.task_queue_event[task_event_id]
    task_queue_data = db.task_queue[task_event_data.task_id]
    container_data = db.container_data[
        task_event_data.cont_id] if task_event_data.cont_id != None else None
    try:
        #Update attention_time for task in the event table
        task_event_data.update_record(attention_time=get_datetime(),
                                      status=TASK_QUEUE_STATUS_PROCESSING)
        #Call the corresponding function from vm_helper
        logger.debug("Starting Container_Task processing...")
        ret = task[task_queue_data.task_type](task_queue_data.parameters)
        logger.debug("Completed Container_Task processing...")

        #On return, update the status and end time in task event table
        task_event_data.update_record(status=ret[0],
                                      message=ret[1],
                                      end_time=get_datetime())

        if ret[0] == TASK_QUEUE_STATUS_FAILED:

            logger.debug("Container_Task FAILED")
            logger.debug("Container_Task Error Message: %s" % ret[1])
            task_queue_data.update_record(status=TASK_QUEUE_STATUS_FAILED)

        elif ret[0] == TASK_QUEUE_STATUS_SUCCESS:
            # Create log event for the task
            logger.debug("Container_Task SUCCESSFUL")
            if container_data:
                _log_vm_event(container_data, task_queue_data)
            # For successful task, delete the task from queue
            if db.task_queue[task_queue_data.id]:
                del db.task_queue[task_queue_data.id]
            if 'request_id' in task_queue_data.parameters:
                del db.request_queue[task_queue_data.parameters['request_id']]

            if task_event_data.task_type not in (VM_TASK_MIGRATE_HOST,
                                                 VM_TASK_MIGRATE_DS):
                _send_cont_task_complete_mail(task_event_data)

    except:
        msg = log_exception()
        task_event_data.update_record(status=TASK_QUEUE_STATUS_FAILED,
                                      message=msg)

    finally:
        db.commit()
        logger.info("EXITING Container_Task........\n")
Example #51
    def visualize(self):
        '''
        Launch ParaView to visualize simulation results.

        @param self: the class instance itself
        @return: the response as a dictionary, will be serialized to JSON by CherryPy.
        '''
        signature = __name__ + '.WebController.visualize()'
        helper.log_entrance(self.logger, signature, None)
        
        try:
            if not cherrypy.session.has_key('simulation_done') or not cherrypy.session['simulation_done']:
                # simulation must be run first
                cherrypy.response.status = 400
                ret = { 'error' : 'Simulation must be run first.' }
                helper.log_exit(self.logger, signature, [ret])
                return ret
            elif not cherrypy.session.has_key('postprocess_done') or not cherrypy.session['postprocess_done']:
                # postprocess must be run first
                cherrypy.response.status = 400
                ret = { 'error' : '"SAVE AS TECPLOT" must be run right after a successful simulation.' }
                helper.log_exit(self.logger, signature, [ret])
                return ret
            else:
                # Call visualize service
                services.visualize(cherrypy.session['simulation_dir'])
                helper.log_exit(self.logger, signature, None)
                return {}
        except (TypeError, ValueError) as e:
            helper.log_exception(self.logger, signature, e)
            # Error with input, respond with 400
            cherrypy.response.status = 400
            ret = { 'error' : str(e) }
            helper.log_exit(self.logger, signature, [ret])
            return ret
        except Exception as e:
            helper.log_exception(self.logger, signature, e)
            # Server internal error, respond with 500
            cherrypy.response.status = 500
            ret = { 'error' : str(e) }
            helper.log_exit(self.logger, signature, [ret])
            return ret
def process_sendwarning_shutdownvm():

    logger.info("Entering Process send warning mail to shutdown vm........")

    try:
        vmShutDownDays = config.get("GENERAL_CONF", "shutdown_vm_days")
        send_email=0

        for vm_id in db().select(db.vm_event_log.vm_id, distinct=True):
            for vm_details in db(db.vm_event_log.vm_id==vm_id['vm_id']).select(db.vm_event_log.ALL,orderby = ~db.vm_event_log.id,limitby=(0,1)):
                daysDiff=(get_datetime()-vm_details.timestamp).days
                vm_shutdown_time=vm_details.timestamp

                logger.info("VM details are VM_ID:" + str(vm_details['vm_id'])+ "|ID:"+str(vm_details['id'])+"|new_values is:"+str(vm_details['new_value'])+"|daysDiff:" + str(daysDiff)+"|vmShutDownDays:"+vmShutDownDays+"|vm_shutdown_time :"+str(vm_shutdown_time))

                if (vm_details.new_value == "Shutdown" and int(daysDiff)>=int(vmShutDownDays)):
                    vm_users = []
                    vm_name  = ""

                    for user in db((db.user_vm_map.vm_id == vm_details.vm_id) & (db.user_vm_map.vm_id == db.vm_data.id) & (db.vm_data.locked != True) & (db.vm_data.delete_warning_date == None )).select(db.user_vm_map.user_id,db.vm_data.vm_name):
                        send_email=1
                        vm_users.append(user.user_vm_map.user_id)
                        vm_name=user.vm_data.vm_name

                    if (send_email == 1):
                        vm_delete_time=send_email_vm_warning(VM_TASK_WARNING_DELETE,vm_users,vm_name,vm_shutdown_time)
                        logger.debug("Mail sent for vm_id:"+str(vm_details.vm_id)+"|vm_name:"+str(vm_name)+"|delete time:"+ str(vm_delete_time))
                        db(db.vm_data.id == vm_details.vm_id).update(locked=True, delete_warning_date=vm_delete_time) 
                        send_email=0
                    else:
                        logger.debug("Email has already been sent to VM_ID:"+str(vm_details.vm_id))

                else:
                    logger.info("VM:"+str(vm_details.vm_id)+" is not shutdown for: "+str(vmShutDownDays)+"(configured) days")


    except:
        log_exception()
        pass
    finally:
        db.commit()
        logger.debug("EXITING Send warning to shutdown vm........")
def get_my_object_store():
    """get list of object store"""
    
    try:
        object_store = db((~db.object_store_data.status.belongs(VM_STATUS_IN_QUEUE, VM_STATUS_UNKNOWN))
                          & (db.object_store_data.id==db.user_object_map.ob_id)
                          & (db.user_object_map.user_id==auth.user.id)).select(db.object_store_data.ALL)
        return get_my_object_store_list(object_store)
    except:
        logger.debug("Exception occurred: %s " % log_exception())
        l = []
        return l    
Example #55
def create_vnc_mapping_in_nat(vm_id):

    vm_data = current.db.vm_data[vm_id]
    vnc_host_ip = config.get("GENERAL_CONF", "vnc_ip")
    duration = 30 * 60 #30 minutes
    host_ip = vm_data.host_id.host_ip.private_ip
    vnc_port = vm_data.vnc_port
    
    vnc_id = current.db.vnc_access.insert(vm_id = vm_id,
                                    host_id = vm_data.host_id, 
                                    vnc_server_ip = vnc_host_ip, 
                                    vnc_source_port = vnc_port, 
                                    vnc_destination_port = vnc_port, 
                                    duration = duration, 
                                    status = VNC_ACCESS_STATUS_INACTIVE)
    
    try:
        create_mapping(vnc_host_ip, host_ip, vnc_port, vnc_port, duration)
        current.db.vnc_access[vnc_id] = dict(status = VNC_ACCESS_STATUS_ACTIVE)
    except:
        log_exception()
Example #56
def check_vm_snapshot_sanity(vm_id):
    """
    Checks if the snapshot information of VM is in sync with actual snapshots of the VM.
    """
    vm_data = db.vm_data[vm_id]
    snapshot_check = []
    try:
        conn = libvirt.openReadOnly('qemu+ssh://root@'+vm_data.host_id.host_ip.private_ip+'/system')
        domain = conn.lookupByName(vm_data.vm_identity)
        
        dom_snapshot_names = domain.snapshotListNames(0)
        logger.debug(dom_snapshot_names)
        conn.close()
            
        snapshots = db(db.snapshot.vm_id == vm_id).select()
        for snapshot in snapshots:
            if snapshot.snapshot_name in dom_snapshot_names:
                snapshot_check.append({'snapshot_name' : snapshot.snapshot_name,
                                       'snapshot_type' : get_snapshot_type(snapshot.type),
                                       'message' : 'Snapshot present',
                                       'operation' : 'None'})
                dom_snapshot_names.remove(snapshot.snapshot_name)
            else:
                snapshot_check.append({'snapshot_id' : snapshot.id,
                                       'snapshot_name' : snapshot.snapshot_name,
                                       'snapshot_type' : get_snapshot_type(snapshot.type),
                                       'message' : 'Snapshot not present',
                                       'operation' : 'Undefined'})

        for dom_snapshot_name in dom_snapshot_names:
            snapshot_check.append({'vm_name' : vm_data.vm_identity,
                                   'snapshot_name' : dom_snapshot_name,
                                   'snapshot_type' : 'Unknown',
                                   'message' : 'Orphan Snapshot',
                                   'operation' : 'Orphan'})
                        
    except Exception:
        log_exception()
    logger.debug(snapshot_check)
    return (vm_data.id, vm_data.vm_name, snapshot_check)