def test_ratingbased(self):
    """The scheduler must pick the in-use KVM hypervisor with the top rating."""
    kvm = CmdbCloudConfig.TECH_HV_KVM
    top_rated = Server.objects.create(
        name='CN1', rating=15, role='hypervisor',
        hypervisor_driver=kvm, status=Resource.STATUS_INUSE)
    # Locked node — the count assertion below shows it is excluded.
    Server.objects.create(
        name='CN2', role='hypervisor',
        hypervisor_driver=kvm, status=Resource.STATUS_LOCKED)
    Server.objects.create(
        name='CN3', role='hypervisor',
        hypervisor_driver=kvm, status=Resource.STATUS_INUSE)
    Server.objects.create(
        name='CN4', rating=10, role='hypervisor',
        hypervisor_driver=kvm, status=Resource.STATUS_INUSE)
    # Non-hypervisor and non-KVM servers must not appear among candidates.
    Server.objects.create(name='Some server', status=Resource.STATUS_INUSE)
    Server.objects.create(
        name='CN6', role='hypervisor',
        hypervisor_driver=CmdbCloudConfig.TECH_HV_OPENVZ,
        status=Resource.STATUS_INUSE)

    candidates = self.cloud.get_hypervisors(hypervisor_driver=kvm)
    self.assertEqual(3, len(candidates))

    best = RatingBasedScheduler().get_best_node(candidates)
    self.assertEqual(top_rated.id, best.id)
def test_ratingbased(self):
    """RatingBasedScheduler.get_best_node returns the highest-rated KVM node."""

    def make_hypervisor(name, driver, status, **extra):
        # Shorthand for creating a hypervisor-role server fixture.
        return Server.objects.create(
            name=name, role='hypervisor', hypervisor_driver=driver,
            status=status, **extra)

    winner = make_hypervisor('CN1', CmdbCloudConfig.TECH_HV_KVM,
                             Resource.STATUS_INUSE, rating=15)
    make_hypervisor('CN2', CmdbCloudConfig.TECH_HV_KVM,
                    Resource.STATUS_LOCKED)
    make_hypervisor('CN3', CmdbCloudConfig.TECH_HV_KVM,
                    Resource.STATUS_INUSE)
    make_hypervisor('CN4', CmdbCloudConfig.TECH_HV_KVM,
                    Resource.STATUS_INUSE, rating=10)
    # Plain server (no hypervisor role) and an OpenVZ node: both must be
    # filtered out of the KVM candidate list.
    Server.objects.create(name='Some server', status=Resource.STATUS_INUSE)
    make_hypervisor('CN6', CmdbCloudConfig.TECH_HV_OPENVZ,
                    Resource.STATUS_INUSE)

    hvisors = self.cloud.get_hypervisors(
        hypervisor_driver=CmdbCloudConfig.TECH_HV_KVM)
    self.assertEqual(3, len(hvisors))

    node = RatingBasedScheduler().get_best_node(hvisors)
    self.assertEqual(winner.id, node.id)
class ProxMoxJBONServiceBackend(HypervisorBackend):
    """
    Just a bunch of ProxMox nodes. Every node has a pytin-agentd-hv running.
    When a task is submitted to this backend it is actually submitted to run
    remotely on the specific agent. The node used to create a VPS is selected
    by the scheduler.

    Every hypervisor node in CMDB must have the options:
        role: 'hypervisor'.
        hypervisor_driver: kvm, openvz, etc.
        agentd_taskqueue: task queue used to feed the specific agent.
        agentd_heartbeat: last heartbeat time.
    """
    TASK_CREATE = VpsCreateTask
    TASK_START = VpsStartTask
    TASK_STOP = VpsStopTask

    # Default resolvers written into every created VPS network config.
    DEFAULT_DNS1 = '46.17.46.200'
    DEFAULT_DNS2 = '46.17.40.200'

    def __init__(self, cloud):
        super(ProxMoxJBONServiceBackend, self).__init__(cloud)
        # Default node-selection policy for create_vps without explicit node.
        self.scheduler = RatingBasedScheduler()

    def start_vps(self, **options):
        """
        Start an existing VPS on a specific node.

        :param options: Must contain 'vmid' and a positive 'node' id.
        :return: TaskTracker for the submitted start task.
        """
        assert 'vmid' in options
        assert 'node' in options and options['node'] > 0

        target_node = Server.active.get(pk=options['node'])

        return self.internal_send_task(self.TASK_START, target_node, **options)

    def stop_vps(self, **options):
        """
        Stop a running VPS on a specific node.

        :param options: Must contain 'vmid' and a positive 'node' id.
        :return: TaskTracker for the submitted stop task.
        """
        assert 'vmid' in options
        assert 'node' in options and options['node'] > 0

        target_node = Server.active.get(pk=options['node'])

        return self.internal_send_task(self.TASK_STOP, target_node, **options)

    def create_vps(self, **options):
        """
        Create VPS using options.

        Parameters:
            vmid: ID of the VPS.
            cpu: number of CPU cores.
            ram: amount of RAM in Mb.
            hdd: amount of HDD space in Gb.
            user: user name.
            template: template name used to create the VPS, in the form
                      <driver>.param1.param2..paramN
                      (e.g. kvm.centos.6.x86_64.directadmin).
                      driver: method of provisioning. Different drivers support
                      different templates and provisioning depth; drivers can
                      work with different virtualization technologies.

        Optional:
            node: ID of the hypervisor node. If not specified, the scheduler
                  is used to select the best node, based on the template.

        Populated parameters:
            hostname: hostname of the virtual machine.
            driver: driver used to control the VPS (taken from the template).
            ip, gateway, netmask, dns1, dns2: network interface config.

        :param options: Options used to create the VPS.
        :return: TaskTracker instance to check progress.
        """
        assert 'vmid' in options
        assert 'cpu' in options
        assert 'ram' in options
        assert 'hdd' in options
        assert 'user' in options
        assert 'template' in options

        logger.debug(options)

        if 'node' in options and options['node'] > 0:
            # Explicit node given: take the driver from the node definition.
            target_node = Server.active.get(pk=options['node'])
            hyper_driver = target_node.get_option_value('hypervisor_driver', default='unknown')
        else:
            # Driver is the first dot-separated component of the template;
            # the remainder of the template is not needed here.
            hyper_driver, _ = options['template'].split('.', 1)
            target_node = self.scheduler.get_best_node(
                self.cloud.get_hypervisors(hypervisor_driver=hyper_driver))

        ip, gateway, netmask = self.lease_ip(target_node.id)

        # Populate derived identity/network options before dispatch.
        options['driver'] = hyper_driver
        options['hostname'] = "v%s.%s.pytin" % (options['vmid'], hyper_driver)
        options['dns1'] = self.DEFAULT_DNS1
        options['dns2'] = self.DEFAULT_DNS2
        options['ip'] = ip
        options['gateway'] = gateway
        options['netmask'] = netmask

        return self.internal_send_task(self.TASK_CREATE, target_node, **options)

    def internal_send_task(self, task_class, target_node, **task_options):
        """
        Run a specific task remotely.

        Used to collect and send the task with options to the remote node
        (worker) via the node's agentd task queue.

        :param task_class: Task to run.
        :param target_node: Node that is used to run the task.
        :param task_options: Task options (context).
        :return: TaskTracker
        :raise ValueError: If the node has no 'agentd_taskqueue' option.
        """
        assert target_node

        node_queue = target_node.get_option_value('agentd_taskqueue', default=None)
        if not node_queue:
            raise ValueError("Missing agentd_taskqueue in node %s" % target_node.id)

        logger.info("Send task %s to queue %s for node %s" % (task_class, node_queue, target_node.id))

        task_options['node'] = target_node.id
        task_options['driver'] = target_node.get_option_value('hypervisor_driver', default='unknown')

        return self.send_task(task_class, cmdb_node_id=target_node.id, queue=node_queue, options=task_options)
def __init__(self, cloud):
    """
    :param cloud: Cloud configuration/manager forwarded to HypervisorBackend.
    """
    super(ProxMoxJBONServiceBackend, self).__init__(cloud)
    # Default node-selection policy: pick the best-rated hypervisor node.
    self.scheduler = RatingBasedScheduler()
class ProxMoxJBONServiceBackend(HypervisorBackend):
    """
    Just a bunch of ProxMox nodes. Every node has a pytin-agentd-hv running.
    When a task is submitted to this backend it is actually submitted to run
    remotely on the specific agent. The node used to create a VPS is selected
    by the scheduler.

    Every hypervisor node in CMDB must have the options:
        role: 'hypervisor'.
        hypervisor_driver: kvm, openvz, etc.
        agentd_taskqueue: task queue used to feed the specific agent.
        agentd_heartbeat: last heartbeat time.
    """
    TASK_CREATE = VpsCreateTask
    TASK_START = VpsStartTask
    TASK_STOP = VpsStopTask

    def __init__(self, cloud):
        super(ProxMoxJBONServiceBackend, self).__init__(cloud)
        # Default node-selection policy for create_vps without explicit node.
        self.scheduler = RatingBasedScheduler()

    def start_vps(self, **options):
        """
        Start an existing VPS on a specific node.

        :param options: Must contain 'vmid' and a positive 'node' id.
        :return: TaskTracker for the submitted start task.
        """
        assert 'vmid' in options
        assert 'node' in options and options['node'] > 0

        target_node = Server.active.get(pk=options['node'])

        return self.internal_send_task(self.TASK_START, target_node, **options)

    def stop_vps(self, **options):
        """
        Stop a running VPS on a specific node.

        :param options: Must contain 'vmid' and a positive 'node' id.
        :return: TaskTracker for the submitted stop task.
        """
        assert 'vmid' in options
        assert 'node' in options and options['node'] > 0

        target_node = Server.active.get(pk=options['node'])

        return self.internal_send_task(self.TASK_STOP, target_node, **options)

    def create_vps(self, **options):
        """
        Create VPS using options.

        Parameters:
            vmid: ID of the VPS.
            cpu: number of CPU cores.
            ram: amount of RAM in Mb.
            hdd: amount of HDD space in Gb.
            user: user name.
            template: template name used to create the VPS, in the form
                      <driver>.param1.param2..paramN
                      (e.g. kvm.centos.6.x86_64.directadmin).
                      driver: method of provisioning. Different drivers support
                      different templates and provisioning depth; drivers can
                      work with different virtualization technologies.

        Optional:
            node: ID of the hypervisor node. If not specified, the scheduler
                  is used to select the best node, based on the template.
            rootpass: root password for the KVM. Generated when absent/empty.
            ip: specific IP to use; otherwise one is leased for the node.

        Populated parameters:
            hostname: hostname of the virtual machine.
            driver: driver used to control the VPS (taken from the template).
            ip, gateway, netmask, dns1, dns2: network interface config.

        :param options: Options used to create the VPS.
        :return: TaskTracker instance to check progress.
        """
        assert 'vmid' in options
        assert 'cpu' in options
        assert 'ram' in options
        assert 'hdd' in options
        assert 'user' in options
        assert 'template' in options

        logger.debug(options)

        if 'node' in options and options['node'] > 0:
            # Explicit node given: take the driver from the node definition.
            target_node = Server.active.get(pk=options['node'])
            hyper_driver = target_node.get_option_value('hypervisor_driver', default='unknown')
        else:
            # Driver is the first dot-separated component of the template;
            # the remainder of the template is not needed here.
            hyper_driver, _ = options['template'].split('.', 1)
            target_node = self.scheduler.get_best_node(
                self.cloud.get_hypervisors(hypervisor_driver=hyper_driver))

        if 'ip' in options and options['ip']:
            # Caller supplied an IP: resolve its network config.
            ip, gateway, netmask, dns1, dns2 = self.find_ip_info(options['ip'])
        else:
            # Lease a fresh IP for the chosen node.
            ip, gateway, netmask, dns1, dns2 = self.lease_ip(target_node.id)

        if not options.get('rootpass'):
            # Falsy check (not just missing key): never provision a VPS with
            # an empty root password.
            options['rootpass'] = generate_password()

        # Populate derived identity/network options before dispatch.
        options['driver'] = hyper_driver
        options['hostname'] = "v%s.%s.%s" % (options['vmid'], hyper_driver, options['user'])
        options['dns1'] = dns1
        options['dns2'] = dns2
        options['ip'] = ip
        options['gateway'] = gateway
        options['netmask'] = netmask

        return self.internal_send_task(self.TASK_CREATE, target_node, **options)

    def internal_send_task(self, task_class, target_node, **task_options):
        """
        Run a specific task remotely.

        Used to collect and send the task with options to the remote node
        (worker) via the node's agentd task queue.

        :param task_class: Task to run.
        :param target_node: Node that is used to run the task.
        :param task_options: Task options (context).
        :return: TaskTracker
        :raise ValueError: If the node has no 'agentd_taskqueue' option.
        """
        assert target_node

        node_queue = target_node.get_option_value('agentd_taskqueue', default=None)
        if not node_queue:
            raise ValueError("Missing agentd_taskqueue in node %s" % target_node.id)

        logger.info("Send task %s to queue %s for node %s" % (task_class, node_queue, target_node.id))

        task_options['node'] = target_node.id
        task_options['driver'] = target_node.get_option_value(
            'hypervisor_driver', default='unknown')

        return self.send_task(task_class, cmdb_node_id=target_node.id, queue=node_queue, options=task_options)