Example #1
def migrate_vm():

    vm_id = request.args[0]
    vm_details = get_migrate_vm_details(vm_id)
    if len(request.args) > 1:
        params = {}
        if request.args[1] == 'migrate_vm_hosts':

            params['destination_host'] = request.vars['selected_host']
            params['live_migration'] = request.vars['live_migration']
            add_vm_task_to_queue(vm_id, VM_TASK_MIGRATE_HOST, params)

        elif request.args[1] == 'migrate_vm_datastores':

            params['destination_ds'] = request.vars['selected_datastore']
            params['live_migration'] = request.vars['live_migration']
            add_vm_task_to_queue(vm_id, VM_TASK_MIGRATE_DS, params)

        session.flash = 'Your task has been queued. Please check your task list for status.'
        redirect(URL(c='admin', f='hosts_vms'))
    logger.debug("vm_details[affinity flag] :  " + str(vm_details['affinity_flag']))
    if vm_details['affinity_flag'] != 0:
        host_details = get_host_details(vm_details['vm_name'])
        logger.debug("available_hosts : " + str(host_details['available_hosts']))
        vm_details['available_hosts'] = host_details['available_hosts']
    return dict(vm_details=vm_details)
Example #2
def paste_remote_file(machine_ip,
                      user_name,
                      remotepath,
                      localpath,
                      password=None,
                      ret_list=False):
    """
    Executes command on remote machine using paramiko SSHClient
    """
    #logger.debug("executing remote command %s on %s with %s:"  %(command, machine_ip, user_name))
    if machine_ip == 'localhost':
        logger.debug("TODO")
    else:
        #logger.debug("executing remote command %s on %s with %s:"  %(command, machine_ip, user_name))
        try:
            sftp = None
            ssh = paramiko.SSHClient()
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            ssh.connect(machine_ip, username=user_name, password=password)
            #logger.debug("Connected to host %s " % machine_ip)
            sftp = ssh.open_sftp()
            if not os.path.exists(localpath):
                raise paramiko.SSHException('filepath: ' + localpath +
                                            ' not found')
            sftp.put(localpath, remotepath)

        except paramiko.SSHException as e:
            #log_exception()
            print(e)
        finally:

            if sftp:
                sftp.close()
            if ssh:
                ssh.close()
Example #3
def vm_utilization_rrd(host_ip, m_type=None):
    """
    Handles periodic collection of VM and host utilization data and the
    update of the respective RRD file.
    """
    logger.info("ENTERING RRD UPDATION/CREATION........on host: %s" % host_ip)
    try:

        rrd_logger.debug("Starting RRD Processing for Host: %s" % host_ip)
        rrd_logger.debug(host_ip)

        if is_pingable(host_ip):
            if m_type is None:
                update_rrd(host_ip)
            else:
                update_rrd(host_ip, m_type)
 
        else:
            rrd_logger.error("UNABLE TO UPDATE RRDs for host : %s" % host_ip)

    except Exception as e:

        rrd_logger.debug("ERROR OCCURED: %s" % e)
 
    finally:
        rrd_logger.debug("Completing RRD Processing for Host: %s" % host_ip)
        logger.debug("EXITING RRD UPDATION/CREATION........on host: %s" % host_ip)
Example #4
def get_host_hdd(host_ip):
    
    command = "fdisk -l | egrep 'Disk.*bytes' | awk '{ sub(/,/,\"\"); sum +=$3;} END {print sum}'"
    ret = execute_remote_cmd(host_ip, 'root', command)  # e.g. '500.1' (sum of disk sizes reported by fdisk, in GB)
    logger.debug("Host HDD is %s" % ret)
    hdd_in_gb = int(math.ceil(float(ret)))
    return hdd_in_gb
Example #5
def migrate_vm():

    vm_id = request.args[0]
    vm_details = get_migrate_vm_details(vm_id)
    if len(request.args) > 1:
        params = {}
        if request.args[1] == 'migrate_vm_hosts':

            params['destination_host'] = request.vars['selected_host']
            params['live_migration'] = request.vars['live_migration']
            add_vm_task_to_queue(vm_id, VM_TASK_MIGRATE_HOST, params)

        elif request.args[1] == 'migrate_vm_datastores':

            params['destination_ds'] = request.vars['selected_datastore']
            params['live_migration'] = request.vars['live_migration']
            add_vm_task_to_queue(vm_id, VM_TASK_MIGRATE_DS, params)

        session.flash = 'Your task has been queued. Please check your task list for status.'
        redirect(URL(c='admin', f='hosts_vms'))
    logger.debug("vm_details[affinity flag] :  " +
                 str(vm_details['affinity_flag']))
    if vm_details['affinity_flag'] != 0:
        host_details = get_host_details(vm_details['vm_name'])
        logger.debug("available_hosts : " +
                     str(host_details['available_hosts']))
        vm_details['available_hosts'] = host_details['available_hosts']
    return dict(vm_details=vm_details)
Example #6
def vm_utilization_rrd(host_ip, m_type=None):
    """
    Handles periodic collection of VM and host utilization data and updates of
    the respective RRD file.
    """
    logger.info("ENTERING RRD UPDATION/CREATION........on host: %s" % host_ip)
    try:

        rrd_logger.debug("Starting RRD Processing for Host: %s" % host_ip)
        rrd_logger.debug(host_ip)

        if is_pingable(host_ip):
            update_rrd(host_ip, m_type)

        else:
            rrd_logger.error("UNABLE TO UPDATE RRDs for host : %s" % host_ip)

    except Exception as e:

        rrd_logger.debug("ERROR OCCURED: %s" % e)

    finally:
        rrd_logger.debug("Completing RRD Processing for Host: %s" % host_ip)
        logger.debug("EXITING RRD UPDATION/CREATION........on host: %s" %
                     host_ip)
Example #7
def find_host_and_guest_list():
    """
    Returns host list in descending order of available memory and guest list.
    """
    logger.debug("Entering into find_host_and_guest_list")

    host_usage = {}
    host_ram_usage_map = {}

    hosts = current.db((current.db.host.id > 0)
                       & (current.db.host.status == HOST_STATUS_UP)).select(
                           current.db.host.ALL)
    for host in hosts:
        logger.debug("host_private_ip is " + str(host.host_ip.private_ip))
        host_usage = get_host_resources_usage(host.host_ip.private_ip)
        memhog_usage = get_memhog_usage(host.host_ip.private_ip)
        logger.debug("memhog_usage and host_usage are:" + str(memhog_usage) +
                     "::" + str(host_usage))
        host_ram_usage_map[host] = ((
            (host['RAM'] * 1024 * 1024 * 1024) - host_usage['ram']) +
                                    memhog_usage)

    sorted_host_list = sorted(host_ram_usage_map.items(),
                              key=itemgetter(1),
                              reverse=True)

    guest_list = current.db(current.db.vm_data.id > 0).select(
        current.db.vm_data.ALL)
    logger.debug("Guest_list is :" + str(guest_list))
    logger.debug("sorted list is :" + str(sorted_host_list))

    return (map(itemgetter(0), sorted_host_list), guest_list)
Example #8
def execute_remote_cmd(machine_ip, user_name, command, password=None, ret_list=False):
    """
    Executes a command on a remote machine using paramiko SSHClient.
    """
    logger.debug("executing remote command %s on %s with %s:" % (command, machine_ip, user_name))
    output = None
    if machine_ip == 'localhost':
        output = os.popen(command).readline()
        #logger.debug(output)
    else:
        #logger.debug("executing remote command %s on %s with %s:"  %(command, machine_ip, user_name))

        ssh = None
        try:
            ssh = paramiko.SSHClient()
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            ssh.connect(machine_ip, username=user_name, password=password)
            #logger.debug("Connected to host %s " % machine_ip)
            stdin, stdout, stderr = ssh.exec_command(command)  # @UnusedVariable

            output = stdout.readlines() if ret_list else "".join(stdout.readlines())

            error = "".join(stderr.readlines())
            if stdout.channel.recv_exit_status() != 0:
                raise Exception("Exception while executing remote command %s on %s: %s" % (command, machine_ip, error))
        except paramiko.SSHException:
            #log_exception()
            raise
        finally:
            if ssh:
                ssh.close()

    return output
Example #9
def configure_host_by_mac(mac_addr):
    
    avl_private_ip = None
    ip_info = db.private_ip_pool(mac_addr=mac_addr)
    if ip_info:
        avl_private_ip = ip_info.private_ip
    else:
        avl_ip = db((~db.private_ip_pool.id.belongs(db()._select(db.host.host_ip)))
                    & (db.private_ip_pool.vlan == HOST_VLAN_ID)).select(
                        db.private_ip_pool.id, db.private_ip_pool.private_ip)
        if avl_ip.first():
            ip_info = avl_ip.first()
            avl_private_ip = ip_info['private_ip']

    if avl_private_ip:
        logger.debug('Available IP for mac address %s is %s' % (mac_addr, avl_private_ip))
        host_name = 'host' + str(avl_private_ip.split('.')[3])
        create_dhcp_entry(host_name, mac_addr, avl_private_ip)
        db.host[0] = dict(host_ip=ip_info['id'], 
                          host_name=host_name, 
                          mac_addr=mac_addr, 
                          status=HOST_STATUS_DOWN)
        return 'Host configured. Proceed for PXE boot.'
    else:
        logger.error('Available Private IPs for host are exhausted.')
        return 'Available Private IPs for host are exhausted.'
Example #10
def execute_remote_bulk_cmd(machine_ip, user_name, command, password=None):
    """
    Executes multiple commands on a remote machine using paramiko SSHClient.
    """
    logger.debug("executing remote command %s on %s:" % (command, machine_ip))
    output = None
    ssh = None
    try:
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(machine_ip, username=user_name)
        channel = ssh.invoke_shell()
        stdin = channel.makefile('wb')
        stdout = channel.makefile('rb')
        
        stdin.write(command)

        if stdout.channel.recv_exit_status() != 0:
            raise Exception("Exception while executing remote command %s on %s" % (command, machine_ip))
        output = stdout.read()
        stdout.close()
        stdin.close()
    except paramiko.SSHException:
        log_exception()
    finally:
        if ssh:
            ssh.close()
    
    return output
Example #11
def execute_remote_cmd(machine_ip,
                       user_name,
                       command,
                       password=None,
                       ret_list=False):

    logger.debug("executing remote command %s on %s with %s:" %
                 (command, machine_ip, user_name))

    output = None
    ssh = None
    try:
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(machine_ip, username=user_name, password=password)
        logger.debug("Connected to host %s " % machine_ip)
        stdin, stdout, stderr = ssh.exec_command(command)  # @UnusedVariable

        output = stdout.readlines() if ret_list else "".join(
            stdout.readlines())
        #logger.debug("Output : %s " % output)

        error = "".join(stderr.readlines())
        if (stdout.channel.recv_exit_status()) != 0:
            raise Exception(
                "Exception while executing remote command %s on %s: %s" %
                (command, machine_ip, error))
    except paramiko.SSHException:
        log_exception()
        raise
    finally:
        if ssh:
            ssh.close()

    return output
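A minimal usage sketch for the function above; the host address, credentials, and commands are illustrative only, and it assumes paramiko can reach the target with the given password or an already-loaded key:

# Hypothetical calls: fetch uptime as a single string, and disk usage as a
# list of lines via ret_list=True.
uptime_text = execute_remote_cmd('10.0.0.5', 'root', 'uptime', password='secret')
disk_lines = execute_remote_cmd('10.0.0.5', 'root', 'df -h', password='secret',
                                ret_list=True)
for line in disk_lines:
    logger.debug(line.rstrip())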
Example #12
def update_host_status(host_id, status):
    host_data = db.host[host_id]
    host_ip = host_data.host_ip.private_ip
    logger.debug(host_ip)
    host_info = host_data.host_type
    logger.debug(host_info)
    if status == HOST_STATUS_UP:
        if is_host_available(host_ip):
            if host_data.CPUs == 0:
                cpu_num = get_host_cpu(host_ip)
                ram_gb = get_host_ram(host_ip)
                hdd_gb = get_host_hdd(host_ip)
                host_data.update_record(CPUs=cpu_num, RAM=ram_gb, HDD=hdd_gb)
        else:
            host_power_up(host_data)

        host_data.update_record(status=HOST_STATUS_UP)

    elif status == HOST_STATUS_MAINTENANCE:
        migrate_all_vms_from_host(host_ip)
        host_data.update_record(status=HOST_STATUS_MAINTENANCE)

    elif status == HOST_STATUS_DOWN:
        host_power_down(host_data)
        host_data.update_record(status=HOST_STATUS_DOWN)

    return True
Example #13
    def save_template(self, message, author, changes, tag, repository):
        # docker commit is used. Data in volumes will not be saved; only
        # changes in applications will be reflected.
        logger.debug(repository)
        if not repository:
            repository = self.properties['ImageName']
        response = Container.client.commit(self.id, tag=tag, message=message,
                                           author=author, changes=changes,
                                           repository=repository)
        return response
Example #14
def revert_and_resume(vm_id, vm_identity):

    try:
        sys_snapshot = current.db.snapshot(vm_id=vm_id, type=SNAPSHOT_SYSTEM)
        if sys_snapshot:
            revert({'vm_id': vm_id, 'snapshot_id': sys_snapshot.id})
            logger.debug('Snapshot of %s reverted from %s successfully' %
                         (vm_identity, sys_snapshot.snapshot_name))
            delete_snapshot({'vm_id': vm_id, 'snapshot_id': sys_snapshot.id})
            logger.debug('Snapshot %s deleted successfully' %
                         (sys_snapshot.snapshot_name))

        event_data = current.db.vm_event_log(vm_id=vm_id,
                                             new_value='System Shutdown')
        #         event_data = current.db((current.db.vm_event_log.vm_id == vm_id) & (current.db.vm_event_log.new_value == 'System Shutdown')).select(current.db.vm_event_log.ALL)
        print(event_data)
        if event_data:
            start({'vm_id': vm_id})
            if int(event_data.old_value) == current.VM_STATUS_SUSPENDED:
                suspend({'vm_id': vm_id})

            del current.db.vm_event_log[event_data.id]
        current.db.commit()
    except:
        log_exception()
        pass
Example #15
    def addipbyconf(self, updatetc=False):
        ipaddress = self.properties['IPAddress']

        name = self.properties['Name']
        domainname = proxieddomain(name)
        nginx_server = get_nginx_server_address()
        fulladdress = self.properties.get('ConnectionPath')
        if (not fulladdress):
            return
        logger.debug(fulladdress)
        filepath = "/etc/nginx/conf.d/" + self.properties['Name'] + ".conf"
        reverser = manageproxy('')
        reverser.add(reverser.render({
            'x': domainname,
            'y': fulladdress
        }))
        print(reverser.filedit)
        reverser.filedit = reverser.filedit.replace('$', '\\$')
        cmd = 'echo -e "' + reverser.filedit + '" > ' + filepath
        out2 = remote_machine.execute_remote_cmd(nginx_server[0],
                                                 nginx_server[1], cmd,
                                                 nginx_server[2])
        print("Output : " + out2)
        cmd = '/etc/init.d/nginx reload'
        output = remote_machine.execute_remote_cmd(nginx_server[0],
                                                   nginx_server[1], cmd,
                                                   nginx_server[2])
        print(output)
Example #16
def check_affinity(vm_details, host):
    try:
        logger.debug("Entering into check_affinity")
        return True
    except:
        logger.exception('Exception in check_affinity')
        return False
Example #17
def check_snapshot_limit(vm_id):
    snapshots = db((db.snapshot.vm_id == vm_id) & (db.snapshot.type == SNAPSHOT_USER)).count()
    logger.debug("No of snapshots are " + str(snapshots))
    if snapshots < SNAPSHOTTING_LIMIT:
        return True
    else:
        return False
Example #18
def process_purge_shutdownvm():

    logger.info("ENTERING PURGE SHUTDOWN VM ........") 
    vmShutDownDays = config.get("GENERAL_CONF", "shutdown_vm_days")

    try:
        # Fetch all the VMs which are locked and whose delete warning date is not null.
        for vm_data in db((db.vm_data.locked == True)
                          & (db.vm_data.delete_warning_date != None)).select(db.vm_data.ALL):
            daysDiff = (get_datetime() - vm_data.delete_warning_date).days
            if daysDiff >= 0:
                for vm_details in db(db.vm_event_log.vm_id == vm_data.id).select(
                        db.vm_event_log.ALL, orderby=~db.vm_event_log.id, limitby=(0, 1)):
                    daysDiff = (get_datetime() - vm_details.timestamp).days
                    if vm_details.new_value == "Shutdown" and int(daysDiff) >= int(vmShutDownDays):
                        logger.info("Need to delete the VM ID:" + str(vm_data.id))
                        # Make an entry in the task queue so that the scheduler can pick it up and delete the VM.
                        add_vm_task_to_queue(vm_data.id, VM_TASK_DELETE)
                    else:
                        logger.info("No need to delete the VM ID:" + str(vm_data.id) + " as it is in use now.")
                        db(db.vm_data.id == vm_details.vm_id).update(locked='F', delete_warning_date=None)
            else:
                logger.info("No need to process shutdown VM :" + str(vm_data.id))
    except:
        log_exception()
        pass
    finally:
        db.commit()
        logger.debug("EXITING PURGE SHUTDOWN VM ........")
Example #19
def schedule_vm(vm_details, host, live_migration):
    """
    Migrate a vm on host if affinity is correct and migration is possible.
    """
    try:
        logger.debug("Entering into scheduleVM")
        retVal = True

        retVal = is_same_host(vm_details, host)
        if (retVal == False):
            logger.debug("VM is on the same host")
            return False

        if (live_migration == 1):
            retVal = is_migration_possible(vm_details, host)

        if (retVal == True):
            logger.debug("Going to migrate VM:" + vm_details.vm_name +
                         " on host: " + str(host['host_ip'].private_ip))
            ret = migrate_domain(vm_details.id, host.id, live_migration)
            #ret=(TASK_QUEUE_STATUS_SUCCESS,'msg')  # when uncommenting this, comment the line above
            logger.debug("Value returned from migrate_domain is:" +
                         str(ret[0]))
            if (ret[0] == TASK_QUEUE_STATUS_SUCCESS):
                return True
            else:
                return False
        else:
            logger.debug("VM:" + vm_details.vm_name +
                         " cannot be migrated to host:" +
                         str(host['host_ip'].private_ip))
            return False
    except:
        logger.exception('Exception in scheduleVM')
        return False
Example #20
def update_host_status(host_id, status):
    host_data = db.host[host_id]
    host_ip = host_data.host_ip.private_ip
    logger.debug(host_ip)
    host_info = host_data.host_type
    logger.debug(host_info)
    if status == HOST_STATUS_UP:
        if is_host_available(host_ip):
            if host_data.CPUs == 0:
                cpu_num = get_host_cpu(host_ip)
                ram_gb = get_host_ram(host_ip)
                hdd_gb = get_host_hdd(host_ip)
                host_data.update_record(CPUs=cpu_num, RAM=ram_gb, HDD=hdd_gb)
        else:
            host_power_up(host_data)

        host_data.update_record(status=HOST_STATUS_UP)

    elif status == HOST_STATUS_MAINTENANCE:
        migrate_all_vms_from_host(host_ip)
        host_data.update_record(status=HOST_STATUS_MAINTENANCE)

    elif status == HOST_STATUS_DOWN:
        host_power_down(host_data)
        host_data.update_record(status=HOST_STATUS_DOWN)

    return True
Example #21
def schedule_vm(vm_details, host, live_migration):
    try:
        logger.debug("Entering into scheduleVM")
        retVal = True

        retVal = is_same_host(vm_details, host)
        if retVal == False:
            logger.debug("VM is on the same host")
            return False

        if live_migration == 1:
            retVal = is_migration_possible(vm_details, host)

        if retVal == True:
            logger.debug("Going to migrate VM:" + vm_details.vm_name +
                         " on host: " + str(host['host_ip'].private_ip))
            ret = migrate_domain(vm_details.id, host.id, live_migration)
            #ret=(TASK_QUEUE_STATUS_SUCCESS,'msg')  # when uncommenting this, comment the line above
            logger.debug("Value returned from migrate_domain is:" + str(ret[0]))
            if ret[0] == TASK_QUEUE_STATUS_SUCCESS:
                return True
            else:
                return False
        else:
            logger.debug("VM:" + vm_details.vm_name +
                         " cannot be migrated to host:" +
                         str(host['host_ip'].private_ip))
            return False
    except:
        logger.exception('Exception in scheduleVM')
        return False
Example #22
def configure_host_by_mac(mac_addr):
    
    avl_private_ip = None
    ip_info = db.private_ip_pool(mac_addr=mac_addr)
    if ip_info:
        avl_private_ip = ip_info.private_ip
    else:
        avl_ip = db((~db.private_ip_pool.id.belongs(db()._select(db.host.host_ip)))
                    & (db.private_ip_pool.vlan == HOST_VLAN_ID)).select(
                        db.private_ip_pool.id, db.private_ip_pool.private_ip)
        if avl_ip.first():
            ip_info = avl_ip.first()
            avl_private_ip = ip_info['private_ip']

    if avl_private_ip:
        logger.debug('Available IP for mac address %s is %s' % (mac_addr, avl_private_ip))
        host_name = 'host' + str(avl_private_ip.split('.')[3])
        create_dhcp_entry(host_name, mac_addr, avl_private_ip)
        db.host[0] = dict(host_ip=ip_info['id'], 
                          host_name=host_name, 
                          mac_addr=mac_addr, 
                          status=HOST_STATUS_DOWN)
        return 'Host configured. Proceed for PXE boot.'
    else:
        logger.error('Available Private IPs for host are exhausted.')
        return 'Available Private IPs for host are exhausted.'
Example #23
def request_vpn():
    user_info = get_vpn_user_details()
    logger.debug(type(user_info))
    user_name = user_info['username']
    cmd = "./vpn_client_creation.sh " + str(user_name)
    #vpn_ip=""
    vpn_ip = config.get("VPN_CONF", "vpn_server_ip")
    vpn_key_path = config.get("VPN_CONF", "vpn_key_path")

    #passwd=""
    #     password=config.get("VPN_CONF","passwd")
    try:
        var = execute_remote_cmd(vpn_ip, 'root', cmd, ret_list=True)
        filepath = vpn_key_path + str(user_name) + "_baadalVPN.tar"
        localpath = os.path.join(
            get_context_path(),
            'private/VPN/' + str(user_name) + "_baadalVPN.tar")
        sftp_files(vpn_ip, 'root', filepath, localpath)

        if "false" in str(var):
            return 1
        elif "true" in str(var):
            return 3
    #transfer_vpn_files(user_name,vpn_ip,password)
    except Exception:
        return 2
Example #24
def paste_remote_file(machine_ip, user_name, remotepath, localpath, password=None, ret_list=False):
    """
    Copies a local file to a remote path on a machine over SFTP using
    paramiko SSHClient.
    """
    if machine_ip == 'localhost':
        logger.debug("TODO")
    else:
        ssh = None
        sftp = None
        try:
            ssh = paramiko.SSHClient()
            ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
            ssh.connect(machine_ip, username=user_name, password=password)
            #logger.debug("Connected to host %s " % machine_ip)
            sftp = ssh.open_sftp()
            if not os.path.exists(localpath):
                raise paramiko.SSHException('filepath: ' + localpath + ' not found')
            sftp.put(localpath, remotepath)

        except paramiko.SSHException as e:
            #log_exception()
            print(e)
        finally:
            if sftp:
                sftp.close()
            if ssh:
                ssh.close()
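A usage sketch for paste_remote_file; the host and paths are illustrative, and it assumes root SSH access and that the local file exists (otherwise the function prints the SSHException it raises internally):

# Hypothetical example: push a locally generated VPN bundle to a remote host.
local_tar = '/tmp/user1_baadalVPN.tar'
paste_remote_file('10.0.0.7', 'root', '/root/user1_baadalVPN.tar', local_tar,
                  password='secret')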
Example #25
def check_snapshot_limit(vm_id):
    snapshots = db((db.snapshot.vm_id == vm_id) & (db.snapshot.type == SNAPSHOT_USER)).count()
    logger.debug("No of snapshots are " + str(snapshots))
    if snapshots < SNAPSHOTTING_LIMIT:
        return True
    else:
        return False
Example #26
def execute_remote_bulk_cmd(machine_ip, user_name, command, password=None):
    """
    Executes multiple commands on remote machine using paramiko SSHClient
    """
    logger.debug("executing remote command %s on %s:"  %(command, machine_ip))
    output = None
    try:
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(machine_ip, username=user_name)
        channel = ssh.invoke_shell()
        stdin = channel.makefile('wb')
        stdout = channel.makefile('rb')
        
        stdin.write(command)

        if (stdout.channel.recv_exit_status()) != 0:
            raise Exception("Exception while executing remote command %s on %s" %(command, machine_ip))
        output = stdout.read()
        stdout.close()
        stdin.close()
    except paramiko.SSHException:
        log_exception()
    finally:
        if ssh:
            ssh.close()
    
    return output
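Because this function writes into an interactive shell and then waits on recv_exit_status(), the command string has to close the remote shell itself; a hedged usage sketch (host and commands are illustrative):

# Hypothetical bulk command: several statements joined with newlines, ending
# with 'exit' so the shell terminates and an exit status is returned.
cmd = "apt-get update\napt-get install -y smem\nexit\n"
result = execute_remote_bulk_cmd('10.0.0.9', 'root', cmd)
logger.debug(result)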
Example #27
def delete_snapshot_info(snapshot_id):
    """
    Deletes undefined snapshot info of the VM from the database.
    These are checkpoints of the VM that are present in the database but not on the VM.
    """
    logger.debug('Deleting snapshot info for ' + str(snapshot_id))
    del db.snapshot[snapshot_id]
    logger.debug('Snapshot info deleted')
Example #28
def delete_snapshot_info(snapshot_id):
    """
    Deletes undefined snapshot info of the VM from the database.
    These are checkpoints of the VM that are present in the database but not on the VM.
    """
    logger.debug('Deleting snapshot info for ' + str(snapshot_id))
    del db.snapshot[snapshot_id]
    logger.debug('Snapshot info deleted')
Example #29
def get_vpn_user_details():
    user_info = db(db.user.id == auth.user.id).select(db.user.ALL)
    logger.debug("userinfo" + str(user_info))
    for info in user_info:
        user_details = {'username': info['username'],
                        'first_name': info['first_name'],
                        'last_name': info['last_name']}
        return user_details
Example #30
def get_vpn_user_details():
    user_details = None
    user_info = db(db.user.id == auth.user.id).select(db.user.ALL)
    logger.debug("userinfo" + str(user_info))
    for info in user_info:
        user_details = {'username': info['username'],
                        'first_name': info['first_name'],
                        'last_name': info['last_name']}
    return user_details
Example #31
    def backup(self, user):
        logger.debug(user)
        self.save_template(message='backup_at',
                           author=user,
                           changes={},
                           tag="backup",
                           repository=self.properties['Name'][1:])
        return None
Example #32
def getExecResize_cont():
    cont_id = request.args[0]
    cont_uuid = get_container_uuid(cont_id)
    cont_session = request.vars['execid']
    height = request.vars.get('height', '80')
    width = request.vars.get('width', '100')
    logger.debug(" In container Resize" + height + width)
    get_container_execresize(cont_uuid, cont_session, height, width)
    return response.json(dict(cont_id=cont_id))
Example #33
def get_memhog_usage(host_ip):
    logger.debug("Entering into getmemhog_usage")
    cmd = "output=`ps -ef --sort=start_time | grep 'memhog' | grep -v grep | awk '{print $2}'`;smem -c 'pid pss'| grep $output | awk '{print $2}'"
    output = execute_remote_cmd(host_ip, "root", cmd, None, True)
    if not output:
        return 0
    else:
        #logger.debug("For host:"+str(host_ip)+" PSS Value from memhog="+str(output))
        return int(output[0]) * 1024
Example #34
def get_host_details(vm_name):
    host_data = db(db.host_affinity.vm_name == vm_name).select().first()
    host_details = {}
    logger.debug("host data is : " + str(host_data))
    if host_data is not None:
        host_details['current_host'] = "%s" % (host_data.current_host)
        host_details['available_hosts'] = dict(
            (host.id, "%s" % (host.affinity_host))
            for host in db(db.host_affinity.vm_name == host_data.vm_name).select())
    return host_details
Example #35
def check_vm_snapshot_sanity(vm_id):
    """
    Checks if the snapshot information of VM is in sync with actual snapshots of the VM.
    """
    vm_data = db.vm_data[vm_id]
    snapshot_check = []
    try:
        conn = libvirt.openReadOnly('qemu+ssh://root@' +
                                    vm_data.host_id.host_ip.private_ip +
                                    '/system')
        domain = conn.lookupByName(vm_data.vm_identity)

        dom_snapshot_names = domain.snapshotListNames(0)
        logger.debug(dom_snapshot_names)
        conn.close()

        snapshots = db(db.snapshot.vm_id == vm_id).select()
        for snapshot in snapshots:
            if snapshot.snapshot_name in dom_snapshot_names:
                snapshot_check.append({
                    'snapshot_name': snapshot.snapshot_name,
                    'snapshot_type': get_snapshot_type(snapshot.type),
                    'message': 'Snapshot present',
                    'operation': 'None'
                })
                dom_snapshot_names.remove(snapshot.snapshot_name)
            else:
                snapshot_check.append({
                    'snapshot_id': snapshot.id,
                    'snapshot_name': snapshot.snapshot_name,
                    'snapshot_type': get_snapshot_type(snapshot.type),
                    'message': 'Snapshot not present',
                    'operation': 'Undefined'
                })

        for dom_snapshot_name in dom_snapshot_names:
            snapshot_check.append({
                'vm_name': vm_data.vm_identity,
                'snapshot_name': dom_snapshot_name,
                'snapshot_type': 'Unknown',
                'message': 'Orphan Snapshot',
                'operation': 'Orphan'
            })

    except Exception:
        log_exception()
    logger.debug(snapshot_check)
    return (vm_data.id, vm_data.vm_name, snapshot_check)
Example #36
def check_vnc(vm_id):
    logger.debug("checking vnc port")
    details = db((db.vnc_access.vm_id == vm_id)
                 & (db.private_ip_pool.id == db.host.host_ip)
                 & (db.vnc_access.host_id == db.host.id)).select(
                     db.private_ip_pool.private_ip,
                     db.vnc_access.vnc_source_port)
    logger.debug(details)
    return details
Example #37
def launch_vm_image():
    form = get_launch_vm_image_form()
    
    if form.accepts(request.vars, session, onvalidation=launch_vm_image_validation):
        
        exec_launch_vm_image(form.vars.id, form.vars.collaborators, form.vars.extra_disk_list)
        
        logger.debug('VM image launched successfully')
        redirect(URL(c='default', f='index'))
    return dict(form=form)
Example #38
def host_status_sanity_check():
    for host in current.db().select(current.db.host.ALL):
        if host.status != HOST_STATUS_MAINTENANCE:
            host_status = check_host_status(host.host_ip.private_ip)
            if host_status != host.status:
                logger.debug("Changing status of " + host.host_name + " to " + str(host_status))
                host.update_record(status=host_status)
                current.db.commit()
                if host_status == HOST_STATUS_DOWN:
                    respawn_dangling_vms(host.id)
Example #39
def check_host_status(host_ip):
    out = commands.getstatusoutput("ping -c 2 -W 1 " + host_ip)[0]
    logger.debug("Host Check command response for %s: %s" % (host_ip, str(out)))
    if out == 0:
        if check_host_service_status(host_ip):
            return HOST_STATUS_UP
        else:
            return HOST_STATUS_DOWN
    else:
        return HOST_STATUS_DOWN
Example #40
def check_affinity(vm_details, host):
    try:
        logger.debug("Entering into check_affinity for vm:" + str(vm_details.vm_name))
        if vm_details.vm_name in ('superopt', 'largevm', 'NeuroImaging2', 'sniper-big',
                                  'csl788-1', 'NeuroImaging', 'sniper-large', 'mooc_6'):
            return False
        else:
            return True
    except:
        logger.exception('Exception in check_affinity')
        return False
Example #41
def process_loadbalancer():
    logger.info("ENTERING PROCESS LOADBALANCER VM ........")
    try:
        (host_list, vm_list) = find_host_and_guest_list()
        loadbalance_vm(host_list, vm_list)
    except:
        log_exception()
        pass
    finally:
        logger.debug("EXITING PROCESS LOADBALANCER VM......")
Example #42
def launch_vm_image():
    form = get_launch_vm_image_form()
    
    if form.accepts(request.vars, session, onvalidation=launch_vm_image_validation):
        
        exec_launch_vm_image(form.vars.id, form.vars.collaborators, form.vars.extra_disk_list)
        
        logger.debug('VM image launched successfully')
        redirect(URL(c='default', f='index'))
    return dict(form=form)
Example #43
def get_vm_host_details(vm_id):
    vm_data = db.vm_data[vm_id]
    vm_details = {}
    vm_details['vm_id'] = vm_id
    vm_details['vm_name'] = vm_data.vm_identity
    vm_details['current_host'] = "%s (%s)" % (vm_data.host_id.host_name, vm_data.host_id.host_ip.private_ip)
    vm_details['available_hosts'] = dict((host.id, "%s (%s)" % (host.host_name, host.host_ip.private_ip))
                                         for host in db(db.host.status == 1).select())
    logger.debug("vm_details is : " + str(vm_details))
    return vm_details
Example #44
def process_loadbalancer():
    logger.info("ENTERING PROCESS LOADBALANCER VM ........")
    try:
        (host_list, vm_list) = find_host_and_guest_list()
        loadbalance_vm(host_list, vm_list)
    except:
        log_exception()
        pass
    finally:
        logger.debug("EXITING PROCESS LOADBALANCER VM......")
Example #45
def get_vm_host_details(vm_id):
    vm_data = db.vm_data[vm_id]
    vm_details = {}
    vm_details['vm_id'] = vm_id
    vm_details['vm_name'] = vm_data.vm_identity
    vm_details['current_host'] = "%s (%s)" % (vm_data.host_id.host_name, vm_data.host_id.host_ip.private_ip)
    vm_details['available_hosts'] = dict((host.id, "%s (%s)" % (host.host_name, host.host_ip.private_ip))
                                         for host in db(db.host.status == 1).select())
    logger.debug("vm_details is : " + str(vm_details))
    return vm_details
Example #46
def process_container_queue(task_event_id):
    """
    Invoked when scheduler runs task of type 'Container_Task'
    For every task, function calls the corresponding handler
    and updates the database on the basis of the response 
    """
    logger.info("\n ENTERING Container_Task........")

    task_event_data = db.task_queue_event[task_event_id]
    task_queue_data = db.task_queue[task_event_data.task_id]
    container_data = db.container_data[
        task_event_data.cont_id] if task_event_data.cont_id is not None else None
    try:
        #Update attention_time for task in the event table
        task_event_data.update_record(attention_time=get_datetime(),
                                      status=TASK_QUEUE_STATUS_PROCESSING)
        #Call the corresponding function from vm_helper
        logger.debug("Starting Container_Task processing...")
        ret = task[task_queue_data.task_type](task_queue_data.parameters)
        logger.debug("Completed Container_Task processing...")

        #On return, update the status and end time in task event table
        task_event_data.update_record(status=ret[0],
                                      message=ret[1],
                                      end_time=get_datetime())

        if ret[0] == TASK_QUEUE_STATUS_FAILED:

            logger.debug("Container_Task FAILED")
            logger.debug("Container_Task Error Message: %s" % ret[1])
            task_queue_data.update_record(status=TASK_QUEUE_STATUS_FAILED)

        elif ret[0] == TASK_QUEUE_STATUS_SUCCESS:
            # Create log event for the task
            logger.debug("Container_Task SUCCESSFUL")
            if container_data:
                _log_vm_event(container_data, task_queue_data)
            # For successful task, delete the task from queue
            if db.task_queue[task_queue_data.id]:
                del db.task_queue[task_queue_data.id]
            if 'request_id' in task_queue_data.parameters:
                del db.request_queue[task_queue_data.parameters['request_id']]

            if task_event_data.task_type not in (VM_TASK_MIGRATE_HOST,
                                                 VM_TASK_MIGRATE_DS):
                _send_cont_task_complete_mail(task_event_data)

    except:
        msg = log_exception()
        task_event_data.update_record(status=TASK_QUEUE_STATUS_FAILED,
                                      message=msg)

    finally:
        db.commit()
        logger.info("EXITING Container_Task........\n")
Example #47
def download_sample_obect_program():
    file_name = 's3_object_key.txt'
    file_path = os.path.join(get_context_path(), 'private/Object_keys/' + file_name)
    logger.debug(file_path + "\n")
    response.headers['Content-Type'] = "text"
    response.headers['Content-Disposition'] = "attachment; filename=" + file_name
    try:
        return response.stream(get_file_stream(file_path), chunk_size=4096)
    except Exception:
        session.flash = "Unable to download your Keys."
    redirect(URL(r=request, c='user', f='list_my_object_store'))
Example #48
def process_unusedvm():
    logger.info("ENTERING PROCESS UNUSED VM ........")
    try:
        process_shutdown_unusedvm()
        process_purge_shutdownvm()
    except:
        log_exception()
        pass
    finally:
        db.commit()
        logger.debug("EXITING PROCESS UNUSED VM......")
Example #49
def request_user_vpn():
    var = request_vpn()
    logger.debug("request user vpn var value "+str(var))
    if var== 1 :
        session.flash = "Get your baadalVPN key's  tar file from the Download link given below "

    elif var == 2 :
        session.flash = "Unable to process  your Request. Please contact Baadal Team"
    else :
        session.flash = "You already have VPN files  you can  download it from  given link "
    redirect(URL(r = request, c = 'user', f = 'vpn'))
Example #50
def download_sample_obect_program():
    file_name = 's3_object_key.txt'
    file_path = os.path.join(get_context_path(), 'private/Object_keys/' + file_name)
    logger.debug(file_path + "\n")
    response.headers['Content-Type'] = "text"
    response.headers['Content-Disposition'] = "attachment; filename=" + file_name
    try:
        return response.stream(get_file_stream(file_path), chunk_size=4096)
    except Exception:
        session.flash = "Unable to download your Keys."
    redirect(URL(r=request, c='user', f='list_my_object_store'))
Example #51
def request_user_vpn():
    var = request_vpn()
    logger.debug("request user vpn var value "+str(var))
    if var== 1 :
        session.flash = "Get your baadalVPN key's  tar file from the Download link given below "

    elif var == 2 :
        session.flash = "Unable to process  your Request. Please contact Baadal Team"
    else :
        session.flash = "You already have VPN files  you can  download it from  given link "
    redirect(URL(r = request, c = 'user', f = 'vpn'))
Example #52
def show_host_performance():

    host_id = request.args(0)
    host_info = get_host_config(host_id)
    host_identity = str(host_info.host_ip.private_ip).replace('.', '_')
    host_cpu = host_info.CPUs

    host_ram = host_info.RAM
    logger.debug(host_cpu)
    logger.debug(type(host_cpu))
    m_type = "host"
    return dict(host_id=host_id, host_identity=host_identity, host_ram=host_ram,
                m_type=m_type, host_cpu=host_cpu)
Example #53
def sync_snapshot():
    task = request.vars['action_type']
    vm_id = request.vars['vm_id']
    logger.debug(request.args)
    if task == 'Delete_Orphan':
        vm_name = request.vars['vm_name']
        snapshot_name = request.vars['snapshot_name']
        delete_orphan_snapshot(vm_name, snapshot_name)
    elif task == 'Delete_Snapshot_Info':
        snapshot_id = request.vars['snapshot_id']
        delete_snapshot_info(snapshot_id)
    redirect(URL(r=request, c='admin', f='snapshot_sanity_check', args=vm_id))
Example #54
def get_vm_groupby_organisations():
    organisations = get_all_organisations()
    orgvmlist = []
    # For each organisation, get all the VMs that run on it and add them to the list.
    for org in organisations:
        vmlist = get_all_vm_oforg(org['id'])
        logger.debug("organisations info............" + str(org))
        orgvms = {'org_id': org['id'],
                  'org_name': org['name'],
                  'details': vmlist}
        orgvmlist.append(orgvms)
    logger.debug("organisations list" + str(orgvmlist))
    return orgvmlist
Example #55
def sync_snapshot():
    task = request.vars['action_type']
    vm_id = request.vars['vm_id']
    logger.debug(request.args)
    if task == 'Delete_Orphan':
        vm_name = request.vars['vm_name']
        snapshot_name = request.vars['snapshot_name']
        delete_orphan_snapshot(vm_name, snapshot_name)
    elif task == 'Delete_Snapshot_Info':
        snapshot_id = request.vars['snapshot_id']
        delete_snapshot_info(snapshot_id)
    redirect(URL(r=request, c='admin', f='snapshot_sanity_check', args=vm_id))
Example #56
def get_memhog_usage(host_ip):
    """
    Returns memory used by memhog process so that it can be added to available memory.
    """
    logger.debug("Entering into getmemhog_usage")
    cmd = "output=`ps -ef --sort=start_time | grep 'memhog' | grep -v grep | awk '{print $2}'`;smem -c 'pid pss'| grep $output | awk '{print $2}'"
    output = execute_remote_cmd(host_ip, "root", cmd, None, True)
    if not output:
        return 0
    else:
        #logger.debug("For host:"+str(host_ip)+" PSS Value from memhog="+str(output))
        return (int(output[0]) * 1024)
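The docstring above notes that this value is added back to available memory; a sketch of that use, modeled on the find_host_and_guest_list example earlier in this listing (the host row and helper names come from that example, the scenario itself is illustrative):

# Hypothetical: free RAM on a host, counting memhog's PSS as reclaimable,
# the same way find_host_and_guest_list combines these values above.
host_ip = host.host_ip.private_ip              # 'host' is a db.host row
host_usage = get_host_resources_usage(host_ip)
memhog_usage = get_memhog_usage(host_ip)
free_ram = ((host['RAM'] * 1024 * 1024 * 1024) - host_usage['ram']) + memhog_usage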