Example #1
    def __init__(self):
        ResourcePoolInfo.__init__(self)
        self.logger = logging.getLogger("ENACT.ONE.INFO")

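        # Shared XML-RPC client for talking to OpenNebula (provided by the singleton)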
        self.rpc = OpenNebulaXMLRPCClientSingleton().client

        # Get information about nodes from OpenNebula
        self.max_nod_id = 0
        self.nodes = {}

        self.resource_types = []
        self.resource_types.append((constants.RES_CPU, 1))
        self.resource_types.append((constants.RES_MEM, 1))
        self.resource_types.append((constants.RES_DISK, 1))

        self.logger.info("Fetching nodes from OpenNebula")
        self.__fetch_nodes()
        self.logger.info("Fetched %i nodes from OpenNebula" % len(self.nodes))
Example #2
class OpenNebulaVMEnactment(VMEnactment):
    def __init__(self):
        VMEnactment.__init__(self)
        self.logger = logging.getLogger("ENACT.ONE.VM")
        self.rpc = OpenNebulaXMLRPCClientSingleton().client

    def start(self, action):
        for vnode in action.vnodes:
            # Unpack action
            vid = action.vnodes[vnode].enactment_info
            hid = action.vnodes[vnode].pnode
            
            self.logger.debug("Sending request to start VM for L%iV%i (ONE: vid=%i, hid=%i)"
                         % (action.lease_haizea_id, vnode, vid, hid))

            try:
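                # Ask OpenNebula to deploy VM 'vid' on physical host 'hid'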
                self.rpc.vm_deploy(vid, hid)
                self.logger.debug("Request succesful.")
            except Exception, msg:
                raise OpenNebulaEnactmentError("vm.deploy", msg)
Example #3
class OpenNebulaResourcePoolInfo(ResourcePoolInfo):
    
    def __init__(self):
        ResourcePoolInfo.__init__(self)
        self.logger = logging.getLogger("ENACT.ONE.INFO")

        self.rpc = OpenNebulaXMLRPCClientSingleton().client

        # Get information about nodes from OpenNebula
        self.max_nod_id = 0
        self.nodes = {}

        self.resource_types = []
        self.resource_types.append((constants.RES_CPU, 1))
        self.resource_types.append((constants.RES_MEM, 1))
        self.resource_types.append((constants.RES_DISK, 1))
                    
        self.logger.info("Fetching nodes from OpenNebula")            
        self.__fetch_nodes()
        self.logger.info("Fetched %i nodes from OpenNebula" % len(self.nodes))            
        
    def refresh(self):
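        # Re-query OpenNebula; __fetch_nodes returns only the nodes added since the last call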
        return self.__fetch_nodes()
        
    def get_nodes(self):
        return self.nodes
    
    def get_resource_types(self):
        return self.resource_types

    def get_bandwidth(self):
        return 0
    
    def __fetch_nodes(self):
        new_nodes = []
        hosts = self.rpc.hostpool_info()
        hostnames = set([n.hostname for n in self.nodes.values()])
        for host in hosts:
            # CPU
            # OpenNebula reports each CPU as "100"
            # (so, a 4-core machine is reported as "400")
            # We need to convert this to a multi-instance
            # resource type in Haizea            
            cpu = host.max_cpu
            ncpu = cpu / 100
            enact_id = host.id                
            hostname = host.name
            
            # We want to skip nodes we're already aware of ...
            if hostname in hostnames:
                continue

            # ... and those in an error or disabled state ...
            if host.state in (OpenNebulaHost.STATE_ERROR, OpenNebulaHost.STATE_DISABLED):
                continue
            
            # ... and those where monitoring information is not yet available.
            if cpu == 0:
                self.logger.debug("Skipping node '%s' (monitoring information not yet available)" % hostname)
                continue
            
            self.max_nod_id += 1
            
            nod_id = self.max_nod_id
            capacity = Capacity([constants.RES_CPU, constants.RES_MEM, constants.RES_DISK])
            
            capacity.set_ninstances(constants.RES_CPU, ncpu)
            for i in range(ncpu):
                capacity.set_quantity_instance(constants.RES_CPU, i+1, 100)            
            
            # Memory. Must divide by 1024 to obtain quantity in MB
            capacity.set_quantity(constants.RES_MEM, host.max_mem / 1024.0)
            
            # Disk
            # OpenNebula doesn't report this correctly yet.
            # We set it to an arbitrarily high value.
            capacity.set_quantity(constants.RES_DISK, 80000)

            node = ResourcePoolNode(nod_id, hostname, capacity)
            node.enactment_info = enact_id
            self.nodes[nod_id] = node
            new_nodes.append(node)
            self.logger.debug("Fetched node %i %s %s" % (node.id, node.hostname, node.capacity))
        return new_nodes
Example #4
    def __init__(self):
        VMEnactment.__init__(self)
        self.logger = logging.getLogger("ENACT.ONE.VM")
        self.rpc = OpenNebulaXMLRPCClientSingleton().client
Example #5
    def __init__(self):
        self.processed = []
        self.logger = logging.getLogger("ONEREQ")
        self.rpc = OpenNebulaXMLRPCClientSingleton().client
Example #6
class OpenNebulaFrontend(RequestFrontend):    
    
    def __init__(self):
        self.processed = []
        self.logger = logging.getLogger("ONEREQ")
        self.rpc = OpenNebulaXMLRPCClientSingleton().client

    def load(self, manager):
        pass
        
    def get_accumulated_requests(self):
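        # Poll the OpenNebula VM pool and turn pending VMs into Haizea lease requests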
        vms = self.rpc.vmpool_info()

        # Extract the pending OpenNebula VMs
        pending_vms = [] 
        for vm in vms:
            if vm.id not in self.processed and vm.state == OpenNebulaVM.STATE_PENDING:
                vm_detailed = self.rpc.vm_info(vm.id)        
                pending_vms.append(OpenNebulaHaizeaVM(vm_detailed))
                self.processed.append(vm.id)
            
        grouped = [vm for vm in pending_vms if vm.group is not None]
        not_grouped = [vm for vm in pending_vms if vm.group is None]
        
        # Extract VM groups
        group_ids = set([vm.group for vm in grouped])
        groups = {}
        for group_id in group_ids:
            groups[group_id] = [vm for vm in grouped if vm.group == group_id]
            
        lease_requests = []
        for group_id, opennebula_vms in groups.items():
            lease_requests.append(self.__ONEreqs_to_lease(opennebula_vms, group_id))

        for opennebula_vm in not_grouped:
            lease_requests.append(self.__ONEreqs_to_lease([opennebula_vm]))
        
        lease_requests.sort(key=operator.attrgetter("submit_time"))
        return lease_requests

    def exists_more_requests(self):
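        # New requests can always arrive from OpenNebula, so the frontend is never exhausted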
        return True

    
    def __ONEreqs_to_lease(self, opennebula_vms, group_id=None):
        # The vm_with_params is used to extract the HAIZEA parameters.
        # (i.e., lease-wide attributes)
        vm_with_params = opennebula_vms[0]

        # Per-lease attributes
        start = vm_with_params.start
        duration = vm_with_params.duration
        preemptible = vm_with_params.preemptible
        submit_time = vm_with_params.submit_time

        # Per-vnode attributes
        requested_resources = dict([(i+1,vm.capacity) for i, vm in enumerate(opennebula_vms)])

        lease = Lease.create_new(submit_time = submit_time, 
                                 requested_resources = requested_resources, 
                                 start = start, 
                                 duration = duration, 
                                 deadline = None,
                                 preemptible = preemptible, 
                                 software = UnmanagedSoftwareEnvironment())
     
        lease.enactment_info = group_id
        lease.vnode_enactment_info = dict([(i+1,vm.one_id) for i, vm in enumerate(opennebula_vms)])
        return lease
Example #7
    def __init__(self, config, daemon=False, pidfile=None, logging_handler=None, site=None):
        """Initializes the manager.
        
        Arguments:
        config -- a populated instance of haizea.common.config.RMConfig
        daemon -- True if Haizea must run as a daemon, False if it must
                  run in the foreground
        pidfile -- When running as a daemon, file to save pid to
        """
        self.config = config
        self.logging_handler = logging_handler
        # Create the RM components
        
        mode = config.get("mode")
        
        self.daemon = daemon
        self.pidfile = pidfile

        if mode == "simulated":
            # Simulated-time simulations always run in the foreground
            clock = self.config.get("clock")
            if clock == constants.CLOCK_SIMULATED:
                self.daemon = False
        elif mode == "opennebula":
            clock = constants.CLOCK_REAL        
        
        self.init_logging()
                
        if clock == constants.CLOCK_SIMULATED:
            starttime = self.config.get("starttime")
            self.clock = SimulatedClock(self, starttime)
            self.rpc_server = None
        elif clock == constants.CLOCK_REAL:
            wakeup_interval = self.config.get("wakeup-interval")
            non_sched = self.config.get("non-schedulable-interval")
            if mode == "opennebula":
                fastforward = self.config.get("dry-run")
            else:
                fastforward = False
            self.clock = RealClock(self, wakeup_interval, non_sched, fastforward)
            if fastforward:
                # No need for an RPC server when doing a dry run
                self.rpc_server = None
            else:
                self.rpc_server = RPCServer(self)
                    
        # Create the RPC singleton client for OpenNebula mode
        if mode == "opennebula":
            host = self.config.get("one.host")
            port = self.config.get("one.port")
            rv = OpenNebulaXMLRPCClient.get_userpass_from_env()
            if rv is None:
                print "ONE_AUTH environment variable is not set"
                exit(1)
            else:
                user, passw = rv[0], rv[1]
                try:
                    OpenNebulaXMLRPCClientSingleton(host, port, user, passw)
                except socket.error, e:
                    print "Unable to connect to OpenNebula"
                    print "Reason: %s" % e
                    exit(1)