def process_purge_shutdownvm():
    """Queue deletion of locked VMs whose delete-warning grace period expired.

    Scans locked VMs carrying a delete_warning_date; if the warning date has
    passed and the VM's most recent event shows it still 'Shutdown' for at
    least the configured number of days, a delete task is queued. Otherwise
    the VM is unlocked and its warning date cleared.
    """
    logger.info("ENTERING PURGE SHUTDOWN VM ........")
    vmShutDownDays = config.get("GENERAL_CONF", "shutdown_vm_days")

    try:
        # Fetch all the VMs which are locked and whose delete warning date is not null.
        # BUG FIX: the original combined the two conditions with Python 'and',
        # which evaluates to only the second Query object — the
        # 'locked == True' condition was silently discarded. pyDAL queries
        # must be combined with '&'.
        for vm_data in db((db.vm_data.locked == True) &
                          (db.vm_data.delete_warning_date != None)).select(db.vm_data.ALL):
            daysDiff = (get_datetime() - vm_data.delete_warning_date).days
            if daysDiff >= 0:
                # Inspect only the most recent event for this VM.
                for vm_details in db(db.vm_event_log.vm_id == vm_data.id).select(
                        db.vm_event_log.ALL, orderby=~db.vm_event_log.id, limitby=(0, 1)):
                    daysDiff = (get_datetime() - vm_details.timestamp).days
                    if vm_details.new_value == "Shutdown" and int(daysDiff) >= int(vmShutDownDays):
                        logger.info("Need to delete the VM ID:" + str(vm_data.id))
                        # Enqueue so the scheduler can pick up and delete the VM.
                        add_vm_task_to_queue(vm_data.id, VM_TASK_DELETE)
                    else:
                        logger.info("No need to delete the VM ID:" + str(vm_data.id) + " as it is in use now. ")
                        # VM became active again: unlock it and clear the warning.
                        db(db.vm_data.id == vm_details.vm_id).update(locked='F', delete_warning_date=None)
            else:
                logger.info("No need to process shutdown VM :" + str(vm_data.id))
    except Exception:
        log_exception()
    finally:
        db.commit()
        logger.debug("EXITING PURGE SHUTDOWN VM ........")
# Example #2 (0 votes) — snippet separator from the scraped source
def processTaskQueue(task_id):
    """Run the queued task identified by *task_id* and record its outcome.

    Looks up the task row, dispatches to the handler registered in the
    ``task`` map, then updates the task-event rows with the handler's
    (status, message) result. Successful tasks are removed from the queue;
    failed ones are marked TASK_QUEUE_STATUS_FAILED for later admin retry.
    """
    try:
        process = db.task_queue[task_id]

        task_queue_query = db(db.task_queue.id == task_id)
        task_event_query = db((db.task_queue_event.task_id == task_id) & (
            db.task_queue_event.status != TASK_QUEUE_STATUS_IGNORE))
        # Update attention_time for task in the event table
        task_event_query.update(attention_time=get_datetime())
        # Call the corresponding function from vm_helper
        ret = task[process['task_type']](process['parameters'])
        # On return, update the status and end time in task event table
        task_event_query.update(status=ret[0], end_time=get_datetime())
        if ret[0] == TASK_QUEUE_STATUS_FAILED:
            # For failed task, change task status to Failed; admin may mark it for retry later
            task_queue_query.update(status=TASK_QUEUE_STATUS_FAILED)
            # Update task event with the error message
            task_event_query.update(error=ret[1],
                                    status=TASK_QUEUE_STATUS_FAILED)
        elif ret[0] == TASK_QUEUE_STATUS_SUCCESS:
            # For successful task, delete the task from queue
            task_queue_query.delete()
        db.commit()
        logger.debug("Task done")
    except Exception as e:
        logger.error(e)
        etype, value, tb = sys.exc_info()
        msg = ''.join(traceback.format_exception(etype, value, tb, 10))
        db(db.task_queue.id == task_id).update(status=-1)
        # BUG FIX: the formatted traceback was computed but never persisted,
        # and the failure update was never committed.
        db(db.task_queue_event.task_id == task_id).update(error=msg)
        db.commit()
def processTaskQueue(task_id):
    """Execute the queued task *task_id* and persist the handler's result."""
    try:
        entry = db.task_queue[task_id]

        queue_rows = db(db.task_queue.id == task_id)
        event_rows = db((db.task_queue_event.task_id == task_id) &
                        (db.task_queue_event.status != TASK_QUEUE_STATUS_IGNORE))
        # Record that the scheduler has picked up this task.
        event_rows.update(attention_time=get_datetime())
        # Dispatch to the handler registered for this task type.
        outcome = task[entry['task_type']](entry['parameters'])
        status = outcome[0]
        # Store the handler's status and completion time on the event rows.
        event_rows.update(status=status, end_time=get_datetime())
        if status == TASK_QUEUE_STATUS_FAILED:
            # Leave the task in the queue marked Failed so an admin can retry it.
            queue_rows.update(status=TASK_QUEUE_STATUS_FAILED)
            event_rows.update(error=outcome[1], status=TASK_QUEUE_STATUS_FAILED)
        elif status == TASK_QUEUE_STATUS_SUCCESS:
            # Completed tasks are simply removed from the queue.
            queue_rows.delete()
        db.commit()
        logger.debug("Task done")
    except Exception as e:
        logger.error(e)
        etype, value, tb = sys.exc_info()
        msg = ''.join(traceback.format_exception(etype, value, tb, 10))
        db(db.task_queue.id == task_id).update(status=-1)
# Example #4 (0 votes) — snippet separator from the scraped source
def process_container_queue(task_event_id):
    """
    Invoked when scheduler runs task of type 'Container_Task'
    For every task, function calls the corresponding handler
    and updates the database on the basis of the response.

    task_event_id: id of the task_queue_event row to process.
    On success the task is removed from task_queue (and request_queue, if a
    request_id parameter is present) and a completion mail is sent unless
    the task is a host/datastore migration.
    """
    logger.info("\n ENTERING Container_Task........")

    task_event_data = db.task_queue_event[task_event_id]
    task_queue_data = db.task_queue[task_event_data.task_id]
    # Container row, if this event is tied to a container.
    container_data = db.container_data[
        task_event_data.cont_id] if task_event_data.cont_id != None else None
    try:
        #Update attention_time for task in the event table
        task_event_data.update_record(attention_time=get_datetime(),
                                      status=TASK_QUEUE_STATUS_PROCESSING)
        #Call the corresponding function from vm_helper
        logger.debug("Starting Container_Task processing...")
        # Handlers are expected to return a (status, message) pair.
        ret = task[task_queue_data.task_type](task_queue_data.parameters)
        logger.debug("Completed Container_Task processing...")

        #On return, update the status and end time in task event table
        task_event_data.update_record(status=ret[0],
                                      message=ret[1],
                                      end_time=get_datetime())

        if ret[0] == TASK_QUEUE_STATUS_FAILED:

            logger.debug("Container_Task FAILED")
            logger.debug("Container_Task Error Message: %s" % ret[1])
            task_queue_data.update_record(status=TASK_QUEUE_STATUS_FAILED)

        elif ret[0] == TASK_QUEUE_STATUS_SUCCESS:
            # Create log event for the task
            logger.debug("Container_Task SUCCESSFUL")
            if container_data:
                _log_vm_event(container_data, task_queue_data)
            # For successful task, delete the task from queue
            if db.task_queue[task_queue_data.id]:
                del db.task_queue[task_queue_data.id]
            if 'request_id' in task_queue_data.parameters:
                del db.request_queue[task_queue_data.parameters['request_id']]

            # Migration tasks skip the completion mail.
            if task_event_data.task_type not in (VM_TASK_MIGRATE_HOST,
                                                 VM_TASK_MIGRATE_DS):
                _send_cont_task_complete_mail(task_event_data)

    except:
        # log_exception() presumably returns the formatted traceback text.
        msg = log_exception()
        task_event_data.update_record(status=TASK_QUEUE_STATUS_FAILED,
                                      message=msg)

    finally:
        db.commit()
        logger.info("EXITING Container_Task........\n")
# Example #5 (0 votes) — snippet separator from the scraped source
def clear_all_timedout_vnc_mappings():
    """
    Deletes all timed-out VNC mappings from NAT.

    For software NAT the iptables DNAT/FORWARD rules are removed on the NAT
    box in one bulk SSH session; for the mapping-only NAT type just the DB
    rows are deactivated. Hardware NAT is not implemented.
    """
    nat_type, nat_ip, nat_user = _get_nat_details()

    if nat_type == NAT_TYPE_SOFTWARE:

        logger.debug("Clearing all timed out VNC mappings from NAT box %s" % (nat_ip))

        # Get all active VNC mappings from DB that have passed their expiry time.
        vnc_mappings = current.db((current.db.vnc_access.status == VNC_ACCESS_STATUS_ACTIVE) &
                                  (current.db.vnc_access.expiry_time < get_datetime())).select()
        # BUG FIX: the original tested '(vnc_mappings != None) & (len(...) != 0)';
        # bitwise '&' does not short-circuit, so len() would still run on a
        # None operand. A Rows object's truthiness covers both cases.
        if vnc_mappings:
            # Build one command string deleting every expired rule, then run it once.
            command = ''
            for mapping in vnc_mappings:
                logger.debug('Removing VNC mapping for vm id: %s, host: %s, source IP: %s, source port: %s, destination port: %s' % (mapping.vm_id, mapping.host_id, mapping.vnc_server_ip, mapping.vnc_source_port, mapping.vnc_destination_port))
                host_ip = mapping.host_id.host_ip.private_ip
                # Delete rules from iptables on NAT box
                command += '''
                iptables -D PREROUTING -t nat -i %s -p tcp -d %s --dport %s -j DNAT --to %s:%s
                iptables -D FORWARD -p tcp -d %s --dport %s -j ACCEPT''' % (NAT_PUBLIC_INTERFACE, mapping.vnc_server_ip, mapping.vnc_source_port, host_ip, mapping.vnc_destination_port, host_ip, mapping.vnc_destination_port)

                # Update DB for each VNC access
                current.db(current.db.vnc_access.id == mapping.id).update(status=VNC_ACCESS_STATUS_INACTIVE)

            command += '''
                /etc/init.d/iptables-persistent save
                /etc/init.d/iptables-persistent reload
                exit
            '''

            current.db.commit()
            execute_remote_bulk_cmd(nat_ip, nat_user, command)
        logger.debug("Done clearing vnc mappings")
    elif nat_type == NAT_TYPE_HARDWARE:
        # This function is to be implemented
        raise Exception("No implementation for NAT type hardware")
    elif nat_type == NAT_TYPE_MAPPING:
        logger.debug('Clearing all timed out VNC mappings')

        # Get all active VNC mappings from DB
        vnc_mappings = current.db((current.db.vnc_access.status == VNC_ACCESS_STATUS_ACTIVE) &
                                  (current.db.vnc_access.expiry_time < get_datetime())).select()
        if vnc_mappings:
            for mapping in vnc_mappings:
                # Deactivate each expired VNC access row; no NAT rules to remove.
                current.db(current.db.vnc_access.id == mapping.id).update(status=VNC_ACCESS_STATUS_INACTIVE)
            current.db.commit()
        logger.debug("Done clearing vnc mappings")
    else:
        raise Exception("NAT type is not supported")
# Example #6 (0 votes) — snippet separator from the scraped source
def process_task_queue(task_event_id):
    """
    Invoked when scheduler runs task of type 'vm_task'
    For every task, function calls the corresponding handler
    and updates the database on the basis of the response.

    task_event_id: id of the task_queue_event row to process.
    On success the task is removed from task_queue (and request_queue, if a
    request_id parameter is present) and a completion mail is sent unless
    the task is a host/datastore migration.
    """
    logger.info("\n ENTERING VM_TASK........")
    
    task_event_data = db.task_queue_event[task_event_id]
    task_queue_data = db.task_queue[task_event_data.task_id]
    # VM row, if this event is tied to a VM.
    vm_data = db.vm_data[task_event_data.vm_id] if task_event_data.vm_id != None else None
    try:
        #Update attention_time for task in the event table
        task_event_data.update_record(attention_time=get_datetime(), status=TASK_QUEUE_STATUS_PROCESSING)
        #Call the corresponding function from vm_helper
        logger.debug("Starting VM_TASK processing...")
        # Handlers are expected to return a (status, message) pair.
        ret = task[task_queue_data.task_type](task_queue_data.parameters)
        logger.debug("Completed VM_TASK processing...")

        #On return, update the status and end time in task event table
        task_event_data.update_record(status=ret[0], message=ret[1], end_time=get_datetime())
        
        if ret[0] == TASK_QUEUE_STATUS_FAILED:

            logger.debug("VM_TASK FAILED")
            logger.debug("VM_TASK Error Message: %s" % ret[1])
            task_queue_data.update_record(status=TASK_QUEUE_STATUS_FAILED)

        elif ret[0] == TASK_QUEUE_STATUS_SUCCESS:
            # Create log event for the task
            logger.debug("VM_TASK SUCCESSFUL")
            if vm_data:
                _log_vm_event(vm_data, task_queue_data)
            # For successful task, delete the task from queue 
            if db.task_queue[task_queue_data.id]:
                del db.task_queue[task_queue_data.id]
            if 'request_id' in task_queue_data.parameters:
                del db.request_queue[task_queue_data.parameters['request_id']]
            
            # Migration tasks skip the completion mail.
            if task_event_data.task_type not in (VM_TASK_MIGRATE_HOST, VM_TASK_MIGRATE_DS):
                _send_task_complete_mail(task_event_data)
        
    except:
        # log_exception() presumably returns the formatted traceback text.
        msg = log_exception()
        task_event_data.update_record(status=TASK_QUEUE_STATUS_FAILED, message=msg)
        
    finally:
        db.commit()
        logger.info("EXITING VM_TASK........\n")
# Example #7 (0 votes) — snippet separator from the scraped source
def process_object_task(task_event_id):
    """Invoked when scheduler runs task of type 'object_task'
    For every task, function calls the corresponding handler
    and updates the database on the basis of the response.

    task_event_id: id of the task_queue_event row to process.
    NOTE(review): the success path indexes object_data['object_store_name']
    even though object_data can be None when parameters['vm_id'] is None —
    verify that object tasks always carry a vm_id parameter.
    """

    logger.info("\n ENTERING OBJECT_TASK	........")
    task_event_data = db.task_queue_event[task_event_id]
    task_queue_data = db.task_queue[task_event_data.task_id]
    # Object-store row, if a vm_id parameter is present.
    object_data = db.object_store_data[task_event_data.parameters[
        'vm_id']] if task_event_data.parameters['vm_id'] != None else None
    try:
        #Update attention_time for task in the event table
        task_event_data.update_record(attention_time=get_datetime(),
                                      status=TASK_QUEUE_STATUS_PROCESSING)
        #Call the corresponding function from vm_helper
        logger.debug("Starting OBJECT_TASK processing...")
        # The handler is expected to return a (status, message) pair.
        ret = create_object_store(task_event_id, object_data)
        logger.debug("Completed OBJECT_TASK processing...")
        #On return, update the status and end time in task event table
        task_event_data.update_record(status=ret[0],
                                      message=ret[1],
                                      end_time=get_datetime())

        if ret[0] == TASK_QUEUE_STATUS_FAILED:
            logger.debug("OBJECT_TASK FAILED")
            logger.debug("OBJECT_TASK Error Message: %s" % ret[1])
            task_queue_data.update_record(status=TASK_QUEUE_STATUS_FAILED)

        elif ret[0] == TASK_QUEUE_STATUS_SUCCESS:
            # Create log event for the task
            logger.debug("OBJECT_TASK SUCCESSFUL")
            if object_data:
                logger.info("\n object_data: %s" % object_data)
            # For successful task, delete the task from queue
            if db.task_queue[task_queue_data.id]:
                del db.task_queue[task_queue_data.id]
            if 'request_id' in task_queue_data.parameters:
                del db.request_queue[task_queue_data.parameters['request_id']]

            _send_object_task_complete_mail(task_event_data,
                                            object_data['object_store_name'])
    except:
        # log_exception() presumably returns the formatted traceback text.
        msg = log_exception()
        task_event_data.update_record(status=TASK_QUEUE_STATUS_FAILED,
                                      message=msg)

    finally:
        db.commit()
        logger.info("EXITING OBJECT_TASK........\n")
def clear_all_timedout_vnc_mappings():
    """Remove expired noVNC mappings: strip their tokens from the websockify
    token file and delete the corresponding vnc_access rows."""
    # Get all active VNC mappings from DB that have passed their expiry time.
    current.db("FLUSH QUERY CACHE")
    vnc_mappings = current.db(
        (current.db.vnc_access.status == VNC_ACCESS_STATUS_ACTIVE)
        & (current.db.vnc_access.expiry_time < get_datetime())).select()
    # BUG FIX: '(vnc_mappings != None) & (len(...) != 0)' does not
    # short-circuit; Rows truthiness covers both cases.
    if vnc_mappings:
        for mapping in vnc_mappings:
            logger.debug(
                'Removing VNC mapping for vm id: %s, host: %s, source IP: %s, source port: %s, destination port: %s'
                % (mapping.vm_id, mapping.host_id, mapping.token,
                   mapping.vnc_source_port, mapping.vnc_destination_port))
            token = mapping.token
            logger.debug("token is : " + str(token))
            # Rewrite the token file without this mapping's token; context
            # managers guarantee the handles are closed even on error.
            with open("/home/www-data/token.list", "r") as f:
                lines = f.readlines()
            with open("/home/www-data/token.list", "w") as f:
                for line in lines:
                    if token not in line:
                        f.write(line)
            current.db(current.db.vnc_access.id == mapping.id).delete()
            current.db.commit()
            logger.debug("Done clearing novnc mappings")
    else:
        # BUG FIX: the original raised "NAT type is not supported" here —
        # a copy-paste error — even though this branch just means there are
        # no expired mappings to clear.
        logger.debug("No timed-out VNC mappings to clear")
# Example #9 (0 votes) — snippet separator from the scraped source
def send_email_vm_warning(task_type, vm_users, vm_name, vm_shutdown_time):
    """Send a shutdown/delete warning e-mail to every user of a VM.

    Returns the action date (20 days from now) quoted in the mail.
    """
    vm_action_time = get_datetime() + timedelta(days=20)
    cc_user_list = []
    # NOTE(review): this CC address was redacted in the scraped source —
    # restore the real admin alias before deploying.
    cc_user_list.append("*****@*****.**")

    for vm_user in vm_users:
        user_info = get_user_details(vm_user)
        if user_info[1] != None:
            context = dict(entityName=vm_name,
                           userName=user_info[0],
                           vmShutdownDate=vm_shutdown_time,
                           vmActionDate=vm_action_time)
            # BUG FIX: the original log line contained a scraper-censored
            # fragment ('"userName:"******"') that was not valid Python;
            # restored from the parallel code in send_email_delete_vm_warning.
            logger.debug("Inside send warning e-mail for vm:" + vm_name +
                         ", userName:" + str(user_info[0]) +
                         ", vmShutdownDate:" + str(vm_shutdown_time) +
                         ", vmDeleteDate:" + str(vm_action_time))

            if task_type == VM_TASK_WARNING_SHUTDOWN:
                send_email(user_info[1], SHUTDOWN_WARNING_SUBJECT,
                           SHUTDOWN_WARNING_BODY, context, cc_user_list)
            elif task_type == VM_TASK_WARNING_DELETE:
                send_email(user_info[1], DELETE_WARNING_SUBJECT,
                           DELETE_WARNING_BODY, context, cc_user_list)
            else:
                logger.debug("Not a valid task type")

    return vm_action_time
def clear_all_timedout_vnc_mappings():
    """Remove expired noVNC mappings: strip their tokens from the websockify
    token file and delete the corresponding vnc_access rows."""
    # Get all active VNC mappings from DB that have passed their expiry time.
    current.db("FLUSH QUERY CACHE")
    vnc_mappings = current.db((current.db.vnc_access.status == VNC_ACCESS_STATUS_ACTIVE) &
                              (current.db.vnc_access.expiry_time < get_datetime())).select()
    # BUG FIX: '(vnc_mappings != None) & (len(...) != 0)' does not
    # short-circuit; Rows truthiness covers both cases.
    if vnc_mappings:
        for mapping in vnc_mappings:
            logger.debug('Removing VNC mapping for vm id: %s, host: %s, source IP: %s, source port: %s, destination port: %s' % (mapping.vm_id, mapping.host_id, mapping.token, mapping.vnc_source_port, mapping.vnc_destination_port))
            token = mapping.token
            logger.debug("token is : " + str(token))
            # Rewrite the token file without this mapping's token; context
            # managers guarantee the handles are closed even on error.
            with open("/home/www-data/token.list", "r") as f:
                lines = f.readlines()
            with open("/home/www-data/token.list", "w") as f:
                for line in lines:
                    if token not in line:
                        f.write(line)
            current.db(current.db.vnc_access.id == mapping.id).delete()
            current.db.commit()
            logger.debug("Done clearing novnc mappings")
    else:
        # BUG FIX: the original raised "NAT type is not supported" here —
        # a copy-paste error — even though this branch just means there are
        # no expired mappings to clear.
        logger.debug("No timed-out VNC mappings to clear")
 def __init__(self, system):
     """Capture a Digital3DSystem node's fields as typed attributes via the
     get_* coercion helpers."""
     self.active = get_boolean(system.IsActive)
     self.configuration = get_string(system.Digital3DConfiguration)
     self.install_date = get_datetime(system.InstallDate)
     self.screen_color = get_string(system.ScreenColor) # enum
     self.screen_luminance = get_uint(system.ScreenLuminance) # 1 to 29
     self.ghostbusting = get_boolean(system.Ghostbusting)
     self.ghostbusting_configuration = get_string(system.GhostbustingConfiguration)
 def __init__(self, software):
     """Capture a Software node's fields as typed attributes via the
     get_* coercion helpers."""
     self.kind = get_string(software.SoftwareKind)  # enum
     self.producer = get_string(software.SoftwareProducer)
     self.description = get_string(software.Description)
     self.version = get_string(software.Version)
     self.filename = get_string(software.FileName)
     self.file_size = get_uint(software.FileSize)
     self.file_time = get_datetime(software.FileDateTime)
 def __init__(self, software):
     """Capture a Software node's fields as typed attributes via the
     get_* coercion helpers."""
     self.kind = get_string(software.SoftwareKind) # enum
     self.producer = get_string(software.SoftwareProducer)
     self.description = get_string(software.Description)
     self.version = get_string(software.Version)
     self.filename = get_string(software.FileName)
     self.file_size = get_uint(software.FileSize)
     self.file_time = get_datetime(software.FileDateTime)
 def __init__(self, system):
     """Capture a Digital3DSystem node's fields as typed attributes via the
     get_* coercion helpers."""
     self.active = get_boolean(system.IsActive)
     self.configuration = get_string(system.Digital3DConfiguration)
     self.install_date = get_datetime(system.InstallDate)
     self.screen_color = get_string(system.ScreenColor)  # enum
     self.screen_luminance = get_uint(system.ScreenLuminance)  # 1 to 29
     self.ghostbusting = get_boolean(system.Ghostbusting)
     self.ghostbusting_configuration = get_string(
         system.GhostbustingConfiguration)
# Example #15 (0 votes) — snippet separator from the scraped source
def add_to_cost(vm_id):
    """Accrue the running cost of a VM since its last billing checkpoint.

    Computes the hours elapsed since vm.start_time, scales the CPU/RAM rates
    by the VM's current run level, adds the amount to vm.total_cost and
    resets start_time to now. Returns the new rounded total cost.
    """
    vm = db.vm_data[vm_id]

    oldtime = vm.start_time
    newtime = get_datetime()

    if oldtime is None:
        # First checkpoint: no elapsed time to bill.
        oldtime = newtime
    # Hour difference between start_time and current time.
    hours = (newtime - oldtime).total_seconds() / 3600

    # Billing scale per run level. BUG FIX: the original if/elif chain left
    # 'scale' unbound (NameError) for any run level outside 0-3; unknown
    # levels now bill at scale 0.
    run_level_scale = {0: 0, 1: 1, 2: .5, 3: .25}
    scale = run_level_scale.get(vm.current_run_level, 0)

    totalcost = float(hours * (vm.vCPU * float(COST_CPU) + vm.RAM * float(COST_RAM) / 1024)
                      * float(COST_SCALE) * float(scale)) + float(vm.total_cost)
    totalcost = round(totalcost, 2)
    db(db.vm_data.id == vm_id).update(start_time=get_datetime(), total_cost=totalcost)
    return totalcost
    def __init__(self, device):
        """Capture a Device node's fields as typed attributes via the get_*
        coercion helpers, including its nested IP addresses, software,
        certificates, watermarking entries and KDM/DCP delivery methods."""
        self.type = get_string(device.DeviceTypeID)
        self.id = get_string(device.DeviceIdentifier)
        self.serial = get_string(device.DeviceSerial)

        self.manufacturer_id = None
        if device.ManufacturerID:
            # ManufacturerID text looks like 'scheme:subscheme:value';
            # keep only the value part.
            self.manufacturer_id = device.ManufacturerID.get_text().split(
                u":", 2)[2]
        self.manufacturer_name = get_string(device.ManufacturerName)

        self.model_number = get_string(device.ModelNumber)
        self.install_date = get_datetime(device.InstallDate)
        self.resolution = get_string(device.Resolution)
        self.active = get_boolean(device.IsActive)

        self.integrator = get_string(device.Integrator)
        self.vpf_finance_entity = get_string(device.VPFFinanceEntity)
        self.vpf_start_date = None
        if device.VPFStartDate:
            self.vpf_start_date = get_date(device.VPFStartDate)

        # Each optional child list is parsed into its own wrapper objects.
        self.ip_addresses = []
        if device.IPAddressList:
            self.ip_addresses = [
                IPAddress(ip_address)
                for ip_address in device.IPAddressList(u"IPAddress")
            ]

        self.software = []
        if device.SoftwareList:
            self.software = [
                Software(program)
                for program in device.SoftwareList(u"Software")
            ]

        self.certificates = []
        if device.KeyInfoList:
            self.certificates = [
                Certificate(certificate)
                for certificate in device.KeyInfoList(u"X509Data")
            ]

        self.watermarking = []
        if device.WatermarkingList:
            self.watermarking = [
                Watermarking(watermark)
                for watermark in device.WatermarkingList(u"Watermarking")
            ]

        self.kdm_deliveries = deliveries(device.KDMDeliveryMethodList)
        self.dcp_deliveries = deliveries(device.DCPDeliveryMethodList)
    def add(self):
        """Prompt for data files, register each under its timestamp, and
        refresh the listbox in chronological order."""
        chosen = filedialog.askopenfilenames(title='Choose file/s to load')
        for path in chosen:
            stamp = get_datetime(path)
            # Default arena index range is the full 1..16 span.
            self.data[stamp] = {"filename": path, "range": [1, 16]}

        # Rebuild the listbox sorted by timestamp key.
        self.lb.delete(0, END)
        for stamp in sorted(self.data):
            self.lb.insert(END, stamp)
        self.update()
# Example #18 (0 votes) — snippet separator from the scraped source
    def add(self):
        """Prompt for data files, register each under its timestamp together
        with its recording length, and refresh the sorted listbox."""
        files = filedialog.askopenfilenames(title='Choose file/s to load')
        for _file in files:
            dtime = get_datetime(_file)
            self.data[dtime] = {}
            self.data[dtime]["filename"] = _file
            # 10 ms per sample — get_data_len presumably returns a sample count.
            self.data[dtime]["length"] = 10. * get_data_len(
                _file)  # in millisecs

        # Rebuild the listbox sorted by timestamp key.
        self.lb.delete(0, END)
        sortkeys = sorted(self.data.keys())
        for key in sortkeys:
            self.lb.insert(END, key)
        self.update()
# Example #19 (0 votes) — snippet separator from the scraped source
def grant_novnc_access(vm_id):
    """Grant (or re-report) noVNC access for a VM and return the access token.

    Enforces a per-day request limit, appends a token->host:port mapping to
    the websockify token file and starts websockify on the noVNC server if
    it is not already running. Returns the token, or an empty string when
    access could not be granted.
    """
    import secrets  # stdlib; replaces shelling out to 'openssl rand -hex'

    msg = ""
    # BUG FIX: 'token' was previously unbound when the daily limit was hit,
    # so the final 'return token' raised NameError.
    token = ""
    active_vnc = db((db.vnc_access.vm_id == vm_id) & (
        db.vnc_access.status == VNC_ACCESS_STATUS_ACTIVE)).count()

    if active_vnc > 0:
        vm_data = db(db.vnc_access.vm_id == vm_id).select().first()
        token = vm_data.token
        msg = 'VNC access already granted. Please check your mail for further details.'
    else:
        vnc_count = db((db.vnc_access.vm_id == vm_id)
                       & (db.vnc_access.time_requested >
                          (get_datetime() - timedelta(days=1)))).count()
        if vnc_count >= MAX_VNC_ALLOWED_IN_A_DAY:
            msg = 'VNC request has exceeded limit.'
        else:
            try:
                # 10 random bytes, hex-encoded — same format the previous
                # 'openssl rand -hex 10' subprocess produced, without the
                # unclosed os.popen handle.
                token = secrets.token_hex(10)
                create_novnc_mapping(vm_id, token)
                vm_data = db(db.vm_data.id == vm_id).select().first()
                host_ip = vm_data.host_id.host_ip.private_ip
                vnc_port = vm_data.vnc_port
                # websockify token-file line: "<token>: <host>:<port>"
                file_token = str(token) + ":" + " " + str(host_ip) + ":" + str(vnc_port) + "\n"
                myfile = get_file_append_mode("/home/www-data/token.list")
                myfile.write(file_token)
                command = "ps -ef | grep websockify|awk '{print $2}'"
                port = config.get("NOVNC_CONF", "port")
                server_ip = config.get("NOVNC_CONF", "server_ip")
                return_value = execute_remote_cmd(server_ip, 'root', command)
                return_value = return_value.split()
                if len(return_value) <= 2:
                    # websockify not running yet; launch it against the token file.
                    command = "./noVNC/utils/websockify/run --web /root/noVNC --target-config /home/www-data/token.list " + str(
                        server_ip) + ":" + str(port) + " > /dev/null 2>&1 &"
                    return_value = execute_remote_cmd(server_ip, 'root',
                                                      command)
                msg = 'VNC access granted. Please check your mail for further details.'
            except Exception:
                logger.debug('Some Error Occurred. Please try later')
                log_exception()

    logger.debug(msg)
    return token
# Example #20 (0 votes) — snippet separator from the scraped source
def send_email_delete_vm_warning(vm_users, vm_name, vm_shutdown_time):
    """Mail a delete warning to every user of a VM; returns the delete date.

    BUG FIXES vs. the scraped original: the censored/invalid log fragment
    ('"userName:"******') is restored; send_email now runs only for users
    with an e-mail address ('context' was otherwise unbound and
    user_info[1] could be None); and the function no longer returns from
    inside the loop after the first user.
    """
    vm_delete_time = get_datetime() + timedelta(days=15)
    for vm_user in vm_users:
        user_info = get_user_details(vm_user)
        if user_info[1] != None:
            context = dict(vmName=vm_name,
                           userName=user_info[0],
                           vmShutdownDate=vm_shutdown_time,
                           vmDeleteDate=vm_delete_time)
            logger.debug("Inside send mail delete vm warning function:" +
                         vm_name + ", userName:" + str(user_info[0]) +
                         ", vmShutdownDate:" + str(vm_shutdown_time) +
                         ", vmDeleteDate:" + str(vm_delete_time))
            send_email(user_info[1], DELETE_WARNING_SUBJECT,
                       DELETE_WARNING_BODY, context)
    return vm_delete_time
    def __init__(self, auditorium):
        """Capture an Auditorium node's fields as typed attributes via the
        get_* coercion helpers, plus its optional 3D system and devices."""
        self.number = get_uint(auditorium.AuditoriumNumber)
        self.name = get_string(auditorium.AuditoriumName)

        self.supports_35mm = get_boolean(auditorium.Supports35MM)
        self.screen_aspect_ratio = get_string(auditorium.ScreenAspectRatio) # enum
        self.adjustable_screen_mask = get_string(auditorium.AdjustableScreenMask) # enum
        self.audio_format = get_string(auditorium.AudioFormat)
        self.install_date = get_datetime(auditorium.AuditoriumInstallDate)
        self.large_format_type = get_string(auditorium.LargeFormatType)

        # Optional nested 3D system element.
        self.digital_3d_system = None
        if auditorium.Digital3DSystem:
            self.digital_3d_system = Digital3DSystem(auditorium.Digital3DSystem)

        self.devices = [Device(device) for device in auditorium.DeviceGroupList(u"Device")]
# Example #22 (0 votes) — snippet separator from the scraped source
def process_unusedvm_purge():
    """Queue deletion tasks for locked VMs whose delete-warning date has
    passed and whose most recent event shows them still shut down."""
    logger.info("ENTERING PURGE UNUSED VM ........")

    try:
        # Fetch all the VMs which are locked.
        for vm_data in db(db.vm_data.locked == True).select(db.vm_data.ALL):
            # BUG FIX: a locked VM without a delete_warning_date previously
            # raised a TypeError on the date subtraction, which the bare
            # except silently swallowed — aborting the whole scan.
            if vm_data.delete_warning_date is None:
                continue
            # Inspect only the most recent event for this VM.
            for vm_details in db(db.vm_event_log.vm_id == vm_data.id).select(
                    db.vm_event_log.ALL, orderby=~db.vm_event_log.id, limitby=(0, 1)):
                daysDiff = (get_datetime() - vm_data.delete_warning_date).days
                if vm_details.new_value == "Shutdown" and daysDiff >= 0:
                    logger.info("Need to delete the VM ID:" + str(vm_data.id))
                    # Enqueue so the scheduler can pick up and delete the VM.
                    add_vm_task_to_queue(vm_data.id, VM_TASK_DELETE)
    except Exception:
        log_exception()
    finally:
        db.commit()
        logger.debug("EXITING PURGE UNUSED VM ........")
def process_sendwarning_shutdownvm():
    """Send delete-warning mails for VMs shut down longer than the configured
    'shutdown_vm_days'; each warned VM is locked and stamped with a
    delete_warning_date so it is not warned twice."""
    logger.info("Entering Process send warning mail to shutdown vm........")

    try:
        vmShutDownDays = config.get("GENERAL_CONF", "shutdown_vm_days")
        send_email=0

        # Examine only the most recent event of every VM that has events.
        for vm_id in db().select(db.vm_event_log.vm_id, distinct=True):
            for vm_details in db(db.vm_event_log.vm_id==vm_id['vm_id']).select(db.vm_event_log.ALL,orderby = ~db.vm_event_log.id,limitby=(0,1)):
                daysDiff=(get_datetime()-vm_details.timestamp).days
                vm_shutdown_time=vm_details.timestamp

                logger.info("VM details are VM_ID:" + str(vm_details['vm_id'])+ "|ID:"+str(vm_details['id'])+"|new_values is:"+str(vm_details['new_value'])+"|daysDiff:" + str(daysDiff)+"|vmShutDownDays:"+vmShutDownDays+"|vm_shutdown_time :"+str(vm_shutdown_time))

                if (vm_details.new_value == "Shutdown" and int(daysDiff)>=int(vmShutDownDays)):
                    vm_users = []
                    vm_name  = ""

                    # Collect users of this VM that have not already been
                    # warned (VM not locked, no delete_warning_date yet).
                    for user in db((db.user_vm_map.vm_id == vm_details.vm_id) & (db.user_vm_map.vm_id == db.vm_data.id) & (db.vm_data.locked != True) & (db.vm_data.delete_warning_date == None )).select(db.user_vm_map.user_id,db.vm_data.vm_name):
                        send_email=1
                        vm_users.append(user.user_vm_map.user_id)
                        vm_name=user.vm_data.vm_name

                    if (send_email == 1):
                        vm_delete_time=send_email_vm_warning(VM_TASK_WARNING_DELETE,vm_users,vm_name,vm_shutdown_time)
                        logger.debug("Mail sent for vm_id:"+str(vm_details.vm_id)+"|vm_name:"+str(vm_name)+"|delete time:"+ str(vm_delete_time))
                        # Lock the VM and record when it may be deleted.
                        db(db.vm_data.id == vm_details.vm_id).update(locked=True, delete_warning_date=vm_delete_time) 
                        send_email=0
                    else:
                        logger.debug("Email has already been sent to VM_ID:"+str(vm_details.vm_id))

                else:
                    logger.info("VM:"+str(vm_details.vm_id)+" is not shutdown for: "+str(vmShutDownDays)+"(configured) days")


    except:
        log_exception()
        pass
    finally:
        db.commit()
        logger.debug("EXITING Send warning to shutdown vm........")
    def __init__(self, auditorium):
        """Capture an Auditorium node's fields as typed attributes via the
        get_* coercion helpers, plus its optional 3D system and devices."""
        self.number = get_uint(auditorium.AuditoriumNumber)
        self.name = get_string(auditorium.AuditoriumName)

        self.supports_35mm = get_boolean(auditorium.Supports35MM)
        self.screen_aspect_ratio = get_string(
            auditorium.ScreenAspectRatio)  # enum
        self.adjustable_screen_mask = get_string(
            auditorium.AdjustableScreenMask)  # enum
        self.audio_format = get_string(auditorium.AudioFormat)
        self.install_date = get_datetime(auditorium.AuditoriumInstallDate)
        self.large_format_type = get_string(auditorium.LargeFormatType)

        # Optional nested 3D system element.
        self.digital_3d_system = None
        if auditorium.Digital3DSystem:
            self.digital_3d_system = Digital3DSystem(
                auditorium.Digital3DSystem)

        self.devices = [
            Device(device) for device in auditorium.DeviceGroupList(u"Device")
        ]
# Example #25 (0 votes) — snippet separator from the scraped source
def task_timeout_cleanup(task_event_id, scheduler_row):
    """Mark a TIMEOUT/FAILED scheduler task as failed in the queue tables.

    :param task_event_id: id of the row in ``task_queue_event`` to close out.
    :param scheduler_row: scheduler task row whose ``status`` drives the
        failure message ("TIMEOUT" or "FAILED").
    """
    logger.debug("cleaning up for " + scheduler_row.status + " task: " +
                 str(task_event_id))
    event_row = db.task_queue_event[task_event_id]
    queue_row = db.task_queue[event_row.task_id]

    # Only a task still marked pending needs its status reconciled.
    if queue_row.status != TASK_QUEUE_STATUS_PENDING:
        return

    msg = ""
    if scheduler_row.status == 'TIMEOUT':
        msg = "Task Timeout "  # + task_event_data['message']
    elif scheduler_row.status == 'FAILED':
        # The most recent scheduler run carries the failure traceback.
        runs = db(db.scheduler_run.task_id == scheduler_row.id).select()
        runs.sort(lambda row: row.stop_time, reverse=True)
        msg = runs.first().traceback

    # Record status and end time in the event table, then the queue table.
    event_row.update_record(status=TASK_QUEUE_STATUS_FAILED,
                            message=msg,
                            end_time=get_datetime())
    queue_row.update_record(status=TASK_QUEUE_STATUS_FAILED)
Exemple #26
0
def process_vmdaily_checks():
    """Daily check: find VMs whose latest event is a shutdown older than the
    configured ``shutdown_vm_days`` and send their users a delete warning.

    Each warned VM's ``vm_data`` row is locked and its ``delete_warning_date``
    recorded so the purge job can act on it later. Exceptions are logged and
    swallowed (best-effort batch job); the DB is always committed on exit.
    """
    logger.info("Entering VM's Daily Checks........")

    try:
        vmShutDownDays = config.get("GENERAL_CONF", "shutdown_vm_days")

        for vm_id in db().select(db.vm_event_log.vm_id, distinct=True):
            # Only the most recent event for this VM is of interest.
            for vm_details in db(db.vm_event_log.vm_id==vm_id['vm_id']).select(db.vm_event_log.ALL,orderby = ~db.vm_event_log.id,limitby=(0,1)):
                # BUG FIX: reset the flag for every VM. It used to be set once
                # before the loop, so after the first mail was sent every later
                # shutdown VM with no matching users still took the send-email
                # branch with an empty user list.
                send_email=0
                daysDiff=(get_datetime()-vm_details.timestamp).days
                vm_shutdown_time=vm_details.timestamp

                logger.info("VM details are VM_ID:" + str(vm_details['vm_id'])+ "|ID:"+str(vm_details['id'])+"|new_values is:"+str(vm_details['new_value'])+"|daysDiff:" + str(daysDiff)+"|vmShutDownDays:"+vmShutDownDays+"|vm_shutdown_time :"+str(vm_shutdown_time))

                if (vm_details.new_value == "Shutdown" and int(daysDiff)>=int(vmShutDownDays)):
                    vm_users = []
                    vm_name  = ""

                    # Users of this VM whose vm_data row is not locked and has
                    # no pending delete warning yet.
                    for user in db((db.user_vm_map.vm_id == vm_details.vm_id) & (db.user_vm_map.vm_id == db.vm_data.id) & (db.vm_data.locked !='T') & (db.vm_data.delete_warning_date == None )).select(db.user_vm_map.user_id,db.vm_data.vm_name):
                        send_email=1
                        vm_users.append(user.user_vm_map.user_id)
                        vm_name=user.vm_data.vm_name

                    if (send_email == 1):
                        vm_delete_time = send_email_delete_vm_warning(vm_users,vm_name,vm_shutdown_time)
                        logger.debug("Mail sent for vm_name:"+str(vm_name)+"|delete time returned from the function:"+ str(vm_delete_time))
                        # Lock the VM and remember when it was warned.
                        db(db.vm_data.id == vm_details.vm_id).update(locked=True, delete_warning_date=vm_delete_time)
                    else:
                        logger.debug("Email has already been sent to VM_ID:"+str(vm_details.vm_id))
                else:
                    logger.info("VM:"+str(vm_details.vm_id)+" is not shutdown ..")
    except:
        log_exception()
        pass
    finally:
        db.commit()
        logger.debug("EXITING VM DAILY CHECKS........")
    def __init__(self, device):
        """Build a device record from a parsed FLM-x ``Device`` XML node.

        Optional child elements (ManufacturerID, VPFStartDate, the various
        ``*List`` containers) default to ``None`` / empty lists when absent.
        """
        self.type = get_string(device.DeviceTypeID)
        self.id = get_string(device.DeviceIdentifier)
        self.serial = get_string(device.DeviceSerial)

        self.manufacturer_id = None
        if device.ManufacturerID:
            # ManufacturerID is a colon-separated URN; keep only the value
            # part after the second colon.
            self.manufacturer_id = device.ManufacturerID.get_text().split(u":", 2)[2]
        self.manufacturer_name = get_string(device.ManufacturerName)

        self.model_number = get_string(device.ModelNumber)
        self.install_date = get_datetime(device.InstallDate)
        self.resolution = get_string(device.Resolution)
        self.active = get_boolean(device.IsActive)

        self.integrator = get_string(device.Integrator)
        self.vpf_finance_entity = get_string(device.VPFFinanceEntity)
        self.vpf_start_date = None
        if device.VPFStartDate:
            self.vpf_start_date = get_date(device.VPFStartDate)

        self.ip_addresses = []
        if device.IPAddressList:
            self.ip_addresses = [IPAddress(ip_address) for ip_address in device.IPAddressList(u"IPAddress")]

        self.software = []
        if device.SoftwareList:
            self.software = [Software(program) for program in device.SoftwareList(u"Software")]

        self.certificates = []
        if device.KeyInfoList:
            self.certificates = [Certificate(certificate) for certificate in device.KeyInfoList(u"X509Data")]

        self.watermarking = []
        if device.WatermarkingList:
            self.watermarking = [Watermarking(watermark) for watermark in device.WatermarkingList(u"Watermarking")]

        # Delivery methods for KDMs and DCPs, parsed by the shared helper.
        self.kdm_deliveries = deliveries(device.KDMDeliveryMethodList)
        self.dcp_deliveries = deliveries(device.DCPDeliveryMethodList)
    def __init__(self, xml):
        """Parse an XML sitelist and construct a container for its data.

        :param xml: Either the contents of an XML sitelist as a string, or a
            file handle to one. (The previous docstring mentioned a
            ``validate`` parameter that does not exist: validation always
            runs, against the sitelist XSD bundled next to this module.)
        :raises FlmxCriticalError: if a file handle cannot be read.
        """

        #If it's a file, we call .read() on it so that it can be consumed twice - once by XMLValidator, and once by
        #beautiful soup
        if not (isinstance(xml, str) or isinstance(xml, unicode)):
            try:
                xml = xml.read()
            except AttributeError as e:
                _logger.critical(repr(e))
                raise FlmxCriticalError(repr(e))

        # Validate against the schema shipped in ./schema/ before parsing.
        validate_XML(
            xml,
            os.path.join(os.path.dirname(__file__), u'schema',
                         u'schema_sitelist.xsd'))

        soup = BeautifulSoup(xml, u"xml")

        self.originator = soup.SiteList.Originator.string
        self.system_name = soup.SiteList.SystemName.string
        facilities = []
        for facility in soup.find_all(u'Facility'):
            facLink = FacilityLink()
            facLink.id_code = facility[u'id']
            # strip the timezone from the ISO timecode
            facLink.last_modified = get_datetime(facility[u'modified'])
            facLink.xlink_href = facility[u'xlink:href']
            facLink.xlink_type = facility[u'xlink:type']

            facilities.append(facLink)
        # Facilities sorted oldest-to-newest by last modification time.
        self.facilities = sorted(facilities, key=attrgetter(u'last_modified'))
Exemple #29
0
def send_email_vm_warning(task_type,vm_users,vm_name,vm_shutdown_time,action_delay_days=20):
    """Send a shutdown/delete warning e-mail to every user of a VM.

    :param task_type: VM_TASK_WARNING_SHUTDOWN or VM_TASK_WARNING_DELETE;
        selects which subject/body template is mailed.
    :param vm_users: iterable of user ids to notify.
    :param vm_name: name of the VM (used in the mail context).
    :param vm_shutdown_time: timestamp the VM was shut down (mail context).
    :param action_delay_days: grace period, in days, before the warned action
        is taken. Defaults to 20, the previously hard-coded value.
    :returns: datetime at which the warned action is scheduled.
    """
    vm_action_time = get_datetime() + timedelta(days=action_delay_days)
    cc_user_list = []
    cc_user_list.append("*****@*****.**")

    for vm_user in vm_users:
        user_info = get_user_details(vm_user)
        # user_info is (name, email); skip users with no e-mail address.
        if user_info[1] != None:
            context = dict(vmName = vm_name,
                           userName = user_info[0],
                           vmShutdownDate=vm_shutdown_time,
                           vmActionDate=vm_action_time)
            # BUG FIX: the original log statement here was syntactically
            # broken (mangled string concatenation); rebuilt with the same
            # fields in the same order.
            logger.debug("Inside send warning e-mail for vm:" + vm_name + ", userName:" + str(user_info[0]) + ", vmShutdownDate:" + str(vm_shutdown_time) + ", vmDeleteDate:" + str(vm_action_time))

            if task_type == VM_TASK_WARNING_SHUTDOWN:
                send_email(user_info[1],SHUTDOWN_WARNING_SUBJECT,SHUTDOWN_WARNING_BODY,context,cc_user_list)
            elif task_type == VM_TASK_WARNING_DELETE:
                send_email(user_info[1],DELETE_WARNING_SUBJECT,DELETE_WARNING_BODY,context,cc_user_list)
            else:
                logger.debug("Not a valid task type")

    return vm_action_time
def process_shutdown_unusedvm():
    """Shut down VMs whose shutdown-warning grace period has elapsed and
    whose resource usage (per the rrd logs) is still below the configured
    thresholds. Best-effort batch job: exceptions are logged, DB committed."""
    logger.info("ENTERING SHUTDOWN UNUSED VM ........")

    try:
        # CPU and network read/write threshold limits from configuration.
        thresholdcontext = dict(
            CPUThreshold=config.get("GENERAL_CONF", "cpu_threshold_limit"),
            ReadThreshold=config.get("GENERAL_CONF", "nwRead_threshold_limit"),
            WriteThreshold=config.get("GENERAL_CONF", "nwWrite_threshold_limit"))

        # Every VM that has been sent a shutdown warning.
        for vm_row in db(db.vm_data.shutdown_warning_date!=None).select(db.vm_data.ALL):
            elapsed_days = (get_datetime() - vm_row.shutdown_warning_date).days
            if elapsed_days < 0:
                # Warning date still in the future; nothing to do yet.
                logger.info("No need to process purge for the VM:"+str(vm_row.id))
                continue

            # Re-check the rrd data against the thresholds before acting.
            still_idle = compare_rrd_data_with_threshold(vm_row.vm_identity, thresholdcontext)
            logger.info(" DaysDiff are "+str(elapsed_days)+" return value is "+str(still_idle))
            if still_idle == True:
                logger.info("Need to shutdown the VM ID:"+str(vm_row.id))
                # Queue the task so the scheduler can pick it up and shut the VM down.
                add_vm_task_to_queue(vm_row.id, VM_TASK_DESTROY)
            else:
                logger.info("No Need to shutdown the VM ID:"+str(vm_row.id)+" as VM is in use now. ")

            # Either way, clear the shutdown warning date.
            db(db.vm_data.id == vm_row.id).update(shutdown_warning_date=None)
    except:
        log_exception()
        pass
    finally:
        db.commit()
        logger.debug("EXITING SHUTDOWN UNUSED VM ........")
Exemple #31
0
def grant_vnc_access(vm_id):
    """Grant VNC access to a VM and mail the connection details to its users.

    :param vm_id: id of the VM in ``vm_data``.
    :returns: human-readable status message; errors are logged, not raised.
    """
    active_vnc = db((db.vnc_access.vm_id == vm_id) & (
        db.vnc_access.status == VNC_ACCESS_STATUS_ACTIVE)).count()
    if active_vnc > 0:
        msg = 'VNC access already granted. Please check your mail for further details.'
    else:
        # Rate-limit: count requests made in the last 24 hours.
        vnc_count = db((db.vnc_access.vm_id == vm_id)
                       & (db.vnc_access.time_requested >
                          (get_datetime() - timedelta(days=1)))).count()
        if vnc_count >= MAX_VNC_ALLOWED_IN_A_DAY:
            msg = 'VNC request has exceeded limit.'
        else:
            try:
                create_vnc_mapping_in_nat(vm_id)

                vnc_info = db((db.vnc_access.vm_id == vm_id)
                              & (db.vnc_access.status ==
                                 VNC_ACCESS_STATUS_ACTIVE)).select()
                if vnc_info:
                    vm_users = []
                    for user in db(db.user_vm_map.vm_id == vm_id).select(
                            db.user_vm_map.user_id):
                        vm_users.append(user['user_id'])

                    send_email_vnc_access_granted(vm_users,
                                                  vnc_info[0].vnc_server_ip,
                                                  vnc_info[0].vnc_source_port,
                                                  vnc_info[0].vm_id.vm_name,
                                                  vnc_info[0].time_requested)
                else:
                    # BUG FIX: was a bare `raise` with no active exception,
                    # which itself raises an obscure RuntimeError. Raise an
                    # explicit exception instead so the logged error is
                    # meaningful; it is caught by the except below, so the
                    # returned message is unchanged.
                    raise Exception('No active VNC mapping found after creation')
                msg = 'VNC access granted. Please check your mail for further details.'
            except:
                msg = 'Some Error Occurred. Please try later'
                log_exception()
                pass
    return msg
Exemple #32
0
    def __init__(self, xml):
        """Parse an XML sitelist and construct a container for its data.

        :param xml: Either the contents of an XML sitelist as a string, or a
            file handle to one. (The previous docstring mentioned a
            ``validate`` parameter that does not exist: validation always
            runs, against the sitelist XSD in the package's schema/flmx
            directory.)
        :raises FlmxCriticalError: if a file handle cannot be read.
        """

        #If it's a file, we call .read() on it so that it can be consumed twice - once by XMLValidator, and once by
        #beautiful soup
        if not (isinstance(xml, str) or isinstance(xml, unicode)):
            try:
                xml = xml.read()
            except AttributeError as e:
                _logger.critical(repr(e))
                raise FlmxCriticalError(repr(e))

        # Validate against the packaged schema before parsing.
        validate_XML(xml, os.path.join(os.path.dirname(__file__), os.pardir, u'schema', u'flmx', u'schema_sitelist.xsd'))

        soup = BeautifulSoup(xml, u"xml")

        self.originator = soup.SiteList.Originator.string
        self.system_name = soup.SiteList.SystemName.string
        facilities = []
        for facility in soup.find_all(u'Facility'):
            facLink = FacilityLink()
            facLink.id_code = facility[u'id']
            # strip the timezone from the ISO timecode
            facLink.last_modified = get_datetime(facility[u'modified'])
            facLink.xlink_href = facility[u'xlink:href']
            facLink.xlink_type = facility[u'xlink:type']

            facilities.append(facLink)
        # Facilities sorted oldest-to-newest by last modification time.
        self.facilities = sorted(facilities, key=attrgetter(u'last_modified'))
Exemple #33
0
    Field('vm_identity', 'string', length = 100, notnull = True, unique = True),
    Field('host_id', db.host),
    Field('RAM', 'integer', label='RAM(MB)'),
    Field('HDD', 'integer', label='HDD(GB)'),
    Field('extra_HDD', 'integer', label='Extra HDD(GB)'),
    Field('vCPU', 'integer', label='CPUs'),
    Field('template_id', db.template),
    Field('requester_id',db.user, label='Requester'),
    Field('owner_id', db.user, label='Owner'),
    Field('private_ip', db.private_ip_pool, label='Private IP'),
    Field('public_ip', db.public_ip_pool, label='Public IP'),
    Field('vnc_port', 'integer'),
    Field('datastore_id', db.datastore),
    Field('purpose', 'string', length = 512),
    Field('expiry_date', 'date'),
    Field('start_time', 'datetime', default = get_datetime()),
    Field('parent_id', 'reference vm_data'),
    Field('locked', 'boolean', default = False),
    Field('security_domain', db.security_domain),
    Field('status', 'integer', represent=lambda x, row: get_vm_status(x)),
    Field('snapshot_flag', 'integer', default = 0),
    Field('saved_template', db.template),
    Field('delete_warning_date', 'datetime'))

db.vm_data.purpose.widget=SQLFORM.widgets.text.widget
db.vm_data.public_ip.requires = IS_EMPTY_OR(IS_IN_DB(db, 'public_ip_pool.id', '%(public_ip)s', zero=None))
db.vm_data.private_ip.requires = IS_EMPTY_OR(IS_IN_DB(db, 'private_ip_pool.id', '%(private_ip)s', zero=None))

db.define_table('request_queue',
    Field('vm_name', 'string', length = 100, notnull = True, label='VM Name'),
    Field('parent_id', 'reference vm_data'),
Exemple #34
0
    Field('extra_HDD', 'integer'),
    Field('vCPU', 'integer'),
    Field('template_id', db.template),
    Field('requester_id',db.user),
    Field('owner_id', db.user),
    Field('vm_ip', 'string',length = 15),
    Field('vnc_port', 'integer'),
    Field('mac_addr', 'string',length = 100),
    Field('datastore_id', db.datastore),
    Field('purpose', 'text'),
    Field('expiry_date', 'date'),
    Field('total_cost', 'float', default = 0),
    Field('current_run_level', 'integer', default = 0),
    Field('last_run_level', 'integer'),
    Field('next_run_level', 'integer'),
    Field('start_time', 'datetime', default = get_datetime()),
    Field('parent_name', 'string'),
    Field('locked', 'boolean', default = False),
    Field('status', 'integer'))

db.define_table('user_vm_map',
    Field('user_id', db.user),
    Field('vm_id', db.vm_data),
    primarykey = ['user_id', 'vm_id'])

db.define_table('vm_data_event',
    Field('vm_id', 'integer'),
    Field('vm_name', 'string',length = 512,notnull = True),
    Field('host_id', db.host),
    Field('RAM', 'integer'),
    Field('HDD', 'integer'),
def process_clone_task(task_event_id, vm_id):
    """
    Invoked when the scheduler runs a task of type 'clone_task'.

    When multiple clones of a VM are requested, multiple scheduler tasks are
    created so they can run concurrently. This function performs the clone
    for one VM, then reconciles the status of the whole clone request on the
    basis of all of its asynchronous sibling tasks: each task removes its
    vm_id from the shared ``clone_vm_id`` parameter list, and whichever task
    empties the list writes the final combined status.

    :param task_event_id: id of the row in ``task_queue_event``.
    :param vm_id: id of the ``vm_data`` row to clone.
    """

    vm_data = db.vm_data[vm_id]
    logger.debug("ENTERING CLONE_TASK.......")
    logger.debug("Task Id: %s" % task_event_id)
    logger.debug("VM to be Cloned: %s" % vm_data.vm_name)
    task_event = db.task_queue_event[task_event_id]
    task_queue = db.task_queue[task_event.task_id]
    message = task_event.message if task_event.message != None else ''
    try:
        # Update attention time for first clone task
        if task_event.attention_time == None:
            task_event.update_record(attention_time=get_datetime())
        logger.debug("Starting VM Cloning...")
        ret = task[VM_TASK_CLONE](vm_id)  # ret is (status, failure_message)
        logger.debug("Completed VM Cloning...")

        if ret[0] == TASK_QUEUE_STATUS_FAILED:
            logger.debug("VM Cloning Failed")
            logger.debug("Failure Message: %s" % ret[1])
            message = message + '\n' + vm_data.vm_name + ': ' + ret[1]
            vm_data.update_record(status = VM_STATUS_UNKNOWN)
        elif ret[0] == TASK_QUEUE_STATUS_SUCCESS:
            logger.debug("VM Cloning Successful")
            params = task_queue.parameters
            # Delete successful vms from list. So, that in case of retry, only failed requests are retried.
            params['clone_vm_id'].remove(vm_id)
            task_queue.update_record(parameters=params)
        
        clone_vm_list = task_event.parameters['clone_vm_id']
        # Remove VM id from the list. This is to check if all the clones for the task are processed.
        clone_vm_list.remove(vm_id)
        
        # Find the status of all clone tasks combined: any mix of outcomes
        # downgrades the combined status to PARTIAL_SUCCESS.
        current_status = ret[0]
        if task_event.status != TASK_QUEUE_STATUS_PENDING and task_event.status != current_status:
            current_status = TASK_QUEUE_STATUS_PARTIAL_SUCCESS
        
        if not clone_vm_list: #All Clones are processed
            if current_status == TASK_QUEUE_STATUS_SUCCESS:
                # Everything succeeded: the request and the queue entry are done.
                del db.request_queue[task_queue.parameters['request_id']]
                del db.task_queue[task_queue.id]
            else:
                if 'request_id' in task_queue.parameters:
                    db.request_queue[task_queue.parameters['request_id']] = dict(status = REQ_STATUS_FAILED)
                task_queue.update_record(status=current_status)
            task_event.update_record(status=current_status, message=message, end_time=get_datetime())
        else:
            # Clones still outstanding: persist the shrunken list and interim status.
            task_event.update_record(parameters={'clone_vm_id' : clone_vm_list}, status=current_status, message=message)

    except:
        msg = log_exception()
        vm_data = db.vm_data[vm_id]
        message = message + '\n' + vm_data.vm_name + ': ' + msg
        task_event.update_record(status=TASK_QUEUE_STATUS_FAILED, message=message)

    finally:
        db.commit()
        logger.debug("EXITING CLONE_TASK........")
def grant_novnc_access(vm_id):
    """Grant browser-based (noVNC) console access for a VM.

    Generates a random token, records the token -> host:port mapping in
    /home/www-data/token.list, and ensures a websockify instance is running
    on the configured noVNC server.

    :param vm_id: id of the VM in ``vm_data``.
    :returns: the access token, or an empty string when access could not be
        granted (rate-limited or an error occurred before token creation).
    """
    msg = ""
    # BUG FIX: token must be initialised up front. It was previously unbound
    # on the rate-limited path (and on early failure inside the try block),
    # so the final `return token` raised a NameError.
    token = ""

    active_vnc = db((db.vnc_access.vm_id == vm_id) & (db.vnc_access.status == VNC_ACCESS_STATUS_ACTIVE)).count()

    if active_vnc > 0:
        # Re-use the token of the already-active mapping.
        vm_data = db(db.vnc_access.vm_id == vm_id).select().first()
        token = vm_data.token
        msg = 'VNC access already granted. Please check your mail for further details.'
    else:
        # Rate-limit: count requests made in the last 24 hours.
        vnc_count = db((db.vnc_access.vm_id == vm_id) & (db.vnc_access.time_requested > (get_datetime() - timedelta(days=1)))).count()
        if vnc_count >= MAX_VNC_ALLOWED_IN_A_DAY :
            msg = 'VNC request has exceeded limit.'
        else:
            try:
                # 10 random bytes, hex encoded, from the openssl CLI.
                f = os.popen('openssl rand -hex 10')
                token = f.read()
                f.close()  # Fix: close the pipe handle.
                token = token.split("\n")
                token = token[0]
                create_novnc_mapping(vm_id,token)
                vm_data = db(db.vm_data.id == vm_id).select().first()
                host_ip = vm_data.host_id.host_ip.private_ip
                vnc_port = vm_data.vnc_port
                vnc = str(vnc_port)
                file_token =str(token) +":" + " "  + str(host_ip)+ ":" + str(vnc) + "\n"
                myfile=get_file_append_mode("/home/www-data/token.list")
                myfile.write(file_token)
                # Fix: close the token file so the entry is flushed before
                # websockify reads the file.
                myfile.close()
                command = "ps -ef | grep websockify|awk '{print $2}'"
                port = config.get("NOVNC_CONF","port")
                server_ip = config.get("NOVNC_CONF","server_ip")
                return_value = execute_remote_cmd(server_ip, 'root',command)
                return_value=return_value.split()
                # Start websockify only when it is not already running.
                if len(return_value) <=2:
                    command = "./noVNC/utils/websockify/run --web /root/noVNC --target-config /home/www-data/token.list " +str(server_ip)+ ":"+str(port) + " > /dev/null 2>&1 &" 
                    return_value = execute_remote_cmd(server_ip, 'root',command)
                msg = 'VNC access granted. Please check your mail for further details.'
            except:
                logger.debug('Some Error Occurred. Please try later')
                log_exception()
                pass

    logger.debug(msg)
    return token
Exemple #37
0
def task_timeout_cleanup(task_event_id, scheduler_row):
    """Mark a timed-out or failed scheduler task as FAILED in the queue tables.

    :param task_event_id: id of the row in ``task_queue_event`` to close out.
    :param scheduler_row: scheduler task row; its ``status`` is expected to
        be "TIMEOUT" or "FAILED" here.
    """
    logger.debug("cleaning up for " + scheduler_row.status + " task: " + str(task_event_id))
    task_event_data = db.task_queue_event[task_event_id]
    task_queue_data = db.task_queue[task_event_data.task_id]

    # Only reconcile tasks that are still marked pending.
    if task_queue_data.status == TASK_QUEUE_STATUS_PENDING:
        # On return, update the status and end time in task event table
        msg = ""
        if scheduler_row.status == "TIMEOUT":
            msg = "Task Timeout "  # + task_event_data['message']
        elif scheduler_row.status == "FAILED":
            # The most recent scheduler run holds the failure traceback.
            rows = db(db.scheduler_run.task_id == scheduler_row.id).select()
            rows.sort(lambda row: row.stop_time, reverse=True)
            msg = rows.first().traceback

        task_event_data.update_record(status=TASK_QUEUE_STATUS_FAILED, message=msg, end_time=get_datetime())
        task_queue_data.update_record(status=TASK_QUEUE_STATUS_FAILED)
def clear_all_timedout_vnc_mappings():
    """Deactivate every VNC mapping whose expiry time has passed.

    For software NAT the corresponding iptables DNAT/FORWARD rules are also
    removed on the NAT box; for mapping-type NAT only the DB rows are
    deactivated. Hardware NAT is not implemented.

    :raises Exception: for NAT type hardware (unimplemented) or an unknown
        NAT type.
    """
    nat_type, nat_ip, nat_user = get_nat_details()

    if nat_type == NAT_TYPE_SOFTWARE:

        logger.debug("Clearing all timed out VNC mappings from NAT box %s" %
                     (nat_ip))

        # Get all active VNC mappings from DB
        vnc_mappings = current.db(
            (current.db.vnc_access.status == VNC_ACCESS_STATUS_ACTIVE)
            & (current.db.vnc_access.expiry_time < get_datetime())).select()
        # Idiom fix: replaces `(vnc_mappings != None) & (len(vnc_mappings) != 0)`
        # — select() never returns None and an empty Rows object is falsy.
        if vnc_mappings:
            # Delete the VNC mapping from NAT if the duration of access has past its requested time duration
            command = ''
            for mapping in vnc_mappings:
                logger.debug(
                    'Removing VNC mapping for vm id: %s, host: %s, source IP: %s, source port: %s, destination port: %s'
                    % (mapping.vm_id, mapping.host_id, mapping.vnc_server_ip,
                       mapping.vnc_source_port, mapping.vnc_destination_port))
                host_ip = mapping.host_id.host_ip.private_ip
                # Delete rules from iptables on NAT box
                command += '''
                iptables -D PREROUTING -t nat -i %s -p tcp -d %s --dport %s -j DNAT --to %s:%s
                iptables -D FORWARD -p tcp -d %s --dport %s -j ACCEPT''' % (
                    NAT_PUBLIC_INTERFACE, mapping.vnc_server_ip,
                    mapping.vnc_source_port, host_ip,
                    mapping.vnc_destination_port, host_ip,
                    mapping.vnc_destination_port)

                # Update DB for each VNC access
                current.db(current.db.vnc_access.id == mapping.id).update(
                    status=VNC_ACCESS_STATUS_INACTIVE)

            command += '''
                /etc/init.d/iptables-persistent save
                /etc/init.d/iptables-persistent reload
                exit
            '''

            # Commit the status updates before touching the NAT box so the DB
            # is consistent even if the remote command fails.
            current.db.commit()
            execute_remote_bulk_cmd(nat_ip, nat_user, command)
        logger.debug("Done clearing vnc mappings")
    elif nat_type == NAT_TYPE_HARDWARE:
        # This function is to be implemented
        raise Exception("No implementation for NAT type hardware")
    elif nat_type == NAT_TYPE_MAPPING:
        # This function is to be implemented
        logger.debug('Clearing all timed out VNC mappings')

        # Get all active VNC mappings from DB
        vnc_mappings = current.db(
            (current.db.vnc_access.status == VNC_ACCESS_STATUS_ACTIVE)
            & (current.db.vnc_access.expiry_time < get_datetime())).select()
        if vnc_mappings:
            # No NAT rules to tear down for mapping type; just flip the status.
            for mapping in vnc_mappings:
                # Update DB for each VNC access
                current.db(current.db.vnc_access.id == mapping.id).update(
                    status=VNC_ACCESS_STATUS_INACTIVE)
            current.db.commit()
        logger.debug("Done clearing vnc mappings")
    else:
        raise Exception("NAT type is not supported")
                                     icon='warning')
    deffile = "E:/Dennis/Google Drive/PhD Project/Data/Jan-Feb/fulllist.txt"
    if askload == 'yes':
        deffile = filedialog.askopenfilename(title='Choose defaults to load')
        print(deffile)
    with open(deffile) as f:
        for line in f:
            symb = "'"
            st = line.find(symb) + 1
            en = line.rfind(symb) - 4  ## removes 8dD
            deflist.append(line[st:en])

    files = filedialog.askopenfilenames(title='Choose file/s to load')
    counter = 0
    for _file in files:
        dtime = get_datetime(_file)
        strtime = dtime.strftime("%y-%m-%dT%H:%M:%S")
        conditions[strtime] = {}
        dictf = conditions[strtime]
        dictf["filename"] = _file
        done = False
        conds = []
        print("Enter conditions for " + strtime + ":")
        while done == False:
            if len(conds) > 0:
                print("Conditions:", conds)
            click.echo('Next line: ' + deflist[counter] +
                       ' Correct? [y/n/d=done]',
                       nl=True)
            c = click.getchar().decode("utf-8")
            print(c)
def process_sendwarning_unusedvm():
    """Mail a shutdown warning to users of running/suspended VMs whose CPU
    and network usage (per the rrd logs) is below the configured thresholds.
    Best-effort batch job: exceptions are logged, DB committed on exit."""
    logger.info("Entering send warning to unused VM........")

    try:
        # Daily CPU / network usage thresholds from configuration.
        thresholdcontext = dict(
            CPUThreshold=config.get("GENERAL_CONF", "cpu_threshold_limit"),
            ReadThreshold=config.get("GENERAL_CONF", "nwRead_threshold_limit"),
            WriteThreshold=config.get("GENERAL_CONF", "nwWrite_threshold_limit"))

        logger.info("checking network usage with threshold values as CPUThreshold is:"+str(thresholdcontext['CPUThreshold'])+" WriteThreshold is :"+str(thresholdcontext['WriteThreshold'])+" ReadThreshold is :"+ str(thresholdcontext['ReadThreshold']))

        # Candidates: running/suspended VMs, not yet warned, created more
        # than 20 days ago.
        candidates = db(db.vm_data.status.belongs(VM_STATUS_RUNNING, VM_STATUS_SUSPENDED) & (db.vm_data.shutdown_warning_date == None) & (db.vm_data.start_time < (get_datetime() - timedelta(days=20)))).select()

        for vm in candidates:
            logger.info("comparing threshold for the vm "+ str(vm.vm_identity))
            if compare_rrd_data_with_threshold(vm.vm_identity, thresholdcontext) != True:
                logger.info("VM:"+str(vm.id)+" is in use.. no need to send shutdown warning mail ...")
                continue

            # Collect the users mapped to this VM (skipping rows already
            # carrying a warning date).
            recipients = []
            vm_name = ""
            for row in db((db.user_vm_map.vm_id == vm.id) & (db.user_vm_map.vm_id == db.vm_data.id) & (db.vm_data.shutdown_warning_date == None )).select(db.user_vm_map.user_id, db.vm_data.vm_name):
                recipients.append(row.user_vm_map.user_id)
                vm_name = row.vm_data.vm_name

            if recipients:
                vm_shutdown_time = send_email_vm_warning(VM_TASK_WARNING_SHUTDOWN, recipients, vm_name, '')
                logger.debug("Mail sent for vm_name:"+str(vm_name)+"|shutdown time returned from the function:"+ str(vm_shutdown_time))
                db(db.vm_data.id == vm.id).update(shutdown_warning_date=vm_shutdown_time)
                db.commit()
            else:
                logger.debug("Warning Email to use the VM has already been sent to VM_ID:"+str(vm.id))
    except:
        log_exception()
        pass
    finally:
        db.commit()
        logger.debug("EXITING send warning to unused VM........")
Exemple #41
0
    Field('datastore_id', db.datastore, notnull = True, label='Datastore'),
    Field('owner', 'list:reference user', readable=False, writable=False),
    Field('is_active', 'boolean', notnull = True, default = True),
    format = lambda r: 
            '%s %s %s %s %sGB'%(r.os_name, r.os_version, r.os_type, r.arch, r.hdd) if r.tag == None else 
            '%s %s %s %s %sGB (%s)'%(r.os_name, r.os_version, r.os_type, r.arch, r.hdd, r.tag))
db.template.hdd.requires=IS_INT_IN_RANGE(1,1025)

db.define_table('object_store_data',
    Field('object_store_name', 'string', length = 100, notnull = True, label='Name'),
    Field('object_store_size', 'integer', notnull = True, label='Size(GB)'),
    Field('object_store_type', 'string', notnull = True, label='Type'), 
    Field('requester_id',db.user, label='Requester'),
    Field('owner_id', db.user, label='Owner'),
    Field('purpose', 'string', length = 512),
    Field('start_time', 'datetime', default = get_datetime()),
    Field('parent_id', 'reference object_store_data'),
    Field('locked', 'boolean', default = False),
    Field('status', 'integer', represent=lambda x, row: get_vm_status(x)),
    Field('s3_secret_key', 'string'),
    Field('s3_access_key', 'string'),
    Field('swift_access_key', 'string'))

db.define_table('container_data',
    Field('name', 'string', length = 100, notnull = True, unique = True, label='Name'),
    Field('RAM', 'integer', notnull=True, label='RAM(MB)'),
    Field('vCPU', 'integer', notnull=True, label='CPUs'),
    Field('UUID', 'string',length = 100),
    Field('image_id', 'integer'),
    Field('image_profile', 'string',length = 100),
    Field('restart_policy', default = "Restart Until Stop", requires = IS_IN_SET(('Restart Until Stop','Restart Once','Never')), label='Restart Policy'),
Exemple #42
0
def grant_vnc_access(vm_id):
    """Grant VNC access for a VM and mail the connection details to its users.

    :param vm_id: id of the VM in ``vm_data``.
    :returns: human-readable status message; errors are logged, not raised.
    """
    active_vnc = db((db.vnc_access.vm_id == vm_id) & (db.vnc_access.status == VNC_ACCESS_STATUS_ACTIVE)).count()
    if active_vnc > 0:
        msg = 'VNC access already granted. Please check your mail for further details.'
    else:
        # Rate-limit: count requests made in the last 24 hours.
        vnc_count = db((db.vnc_access.vm_id == vm_id) & (db.vnc_access.time_requested > (get_datetime() - timedelta(days=1)))).count()
        if vnc_count >= MAX_VNC_ALLOWED_IN_A_DAY :
            msg = 'VNC request has exceeded limit.'
        else:
            try:
                create_vnc_mapping_in_nat(vm_id)
                
                vnc_info = db((db.vnc_access.vm_id == vm_id) & (db.vnc_access.status == VNC_ACCESS_STATUS_ACTIVE)).select()
                if vnc_info:
                    vm_users = []
                    for user in db(db.user_vm_map.vm_id == vm_id).select(db.user_vm_map.user_id):
                        vm_users.append(user['user_id'])
    
                    send_email_vnc_access_granted(vm_users, 
                                                  vnc_info[0].vnc_server_ip, 
                                                  vnc_info[0].vnc_source_port, 
                                                  vnc_info[0].vm_id.vm_name, 
                                                  vnc_info[0].time_requested)
                else: 
                    # NOTE(review): bare `raise` with no active exception
                    # itself raises a RuntimeError, which the except below
                    # catches; an explicit exception would be clearer.
                    raise
                msg = 'VNC access granted. Please check your mail for further details.'
            except:
                msg = 'Some Error Occurred. Please try later'
                log_exception()
                pass
    return msg
Exemple #43
0
def process_clone_task(task_event_id, vm_id):
    """
    Invoked when scheduler runs task of type 'clone_task'.

    When multiple clones of a VM are requested, multiple scheduler tasks are
    created so they can run concurrently. Each invocation clones one VM and
    then reconciles the shared task/event rows:

      * task_queue.parameters['clone_vm_id'] keeps only the still-FAILED vms,
        so a retry re-runs just those.
      * task_queue_event.parameters['clone_vm_id'] tracks vms not yet
        processed; when it empties, the overall status is finalized
        (SUCCESS deletes the request + task rows, anything else marks
        the request FAILED).

    Args:
        task_event_id: id in db.task_queue_event for this clone batch.
        vm_id: id in db.vm_data of the VM to clone in this invocation.
    """
    vm_data = db.vm_data[vm_id]
    logger.debug("ENTERING CLONE_TASK.......")
    logger.debug("Task Id: %s" % task_event_id)
    logger.debug("VM to be Cloned: %s" % vm_data.vm_name)
    task_event = db.task_queue_event[task_event_id]
    task_queue = db.task_queue[task_event.task_id]
    # Accumulate per-VM failure messages across concurrent clone tasks.
    message = task_event.message if task_event.message is not None else ''
    try:
        # Update attention time for first clone task
        if task_event.attention_time is None:
            task_event.update_record(attention_time=get_datetime())
        logger.debug("Starting VM Cloning...")
        ret = task[VM_TASK_CLONE](vm_id)
        logger.debug("Completed VM Cloning...")

        if ret[0] == TASK_QUEUE_STATUS_FAILED:
            logger.debug("VM Cloning Failed")
            logger.debug("Failure Message: %s" % ret[1])
            message = message + '\n' + vm_data.vm_name + ': ' + ret[1]
            vm_data.update_record(status = VM_STATUS_UNKNOWN)
        elif ret[0] == TASK_QUEUE_STATUS_SUCCESS:
            logger.debug("VM Cloning Successful")
            params = task_queue.parameters
            # Delete successful vms from list. So, that in case of retry, only failed requests are retried.
            params['clone_vm_id'].remove(vm_id)
            task_queue.update_record(parameters=params)

        clone_vm_list = task_event.parameters['clone_vm_id']
        # Remove VM id from the list. This is to check if all the clones for the task are processed.
        clone_vm_list.remove(vm_id)

        # Find the status of all clone tasks combined: if previous tasks ended
        # with a different status than this one, the batch is PARTIAL_SUCCESS.
        current_status = ret[0]
        if task_event.status != TASK_QUEUE_STATUS_PENDING and task_event.status != current_status:
            current_status = TASK_QUEUE_STATUS_PARTIAL_SUCCESS

        if not clone_vm_list: #All Clones are processed
            if current_status == TASK_QUEUE_STATUS_SUCCESS:
                del db.request_queue[task_queue.parameters['request_id']]
                del db.task_queue[task_queue.id]
            else:
                if 'request_id' in task_queue.parameters:
                    db.request_queue[task_queue.parameters['request_id']] = dict(status = REQ_STATUS_FAILED)
                task_queue.update_record(status=current_status)
            task_event.update_record(status=current_status, message=message, end_time=get_datetime())
        else:
            task_event.update_record(parameters={'clone_vm_id' : clone_vm_list}, status=current_status, message=message)

    # BUG FIX: was a bare `except:` which also swallowed SystemExit /
    # KeyboardInterrupt; Exception still covers all clone failures.
    except Exception:
        msg = log_exception()
        vm_data = db.vm_data[vm_id]
        message = message + '\n' + vm_data.vm_name + ': ' + msg
        task_event.update_record(status=TASK_QUEUE_STATUS_FAILED, message=message)

    finally:
        db.commit()
        logger.debug("EXITING CLONE_TASK........")