class ProxMoxSchedulersTest(TestCase):
    """Exercises the VPS node schedulers against hypervisor fixtures."""

    def setUp(self):
        # Every test starts from an empty resource tree.
        Resource.objects.all().delete()
        self.cloud = CmdbCloudConfig()

    def _make_hypervisor(self, name, driver, status, rating=None):
        """Create a hypervisor Server fixture; rating is optional."""
        extra = {} if rating is None else {'rating': rating}
        return Server.objects.create(name=name,
                                     role='hypervisor',
                                     hypervisor_driver=driver,
                                     status=status,
                                     **extra)

    def test_ratingbased(self):
        kvm = CmdbCloudConfig.TECH_HV_KVM
        # Only CN1, CN3 and CN4 qualify: CN2 is locked, CN6 runs OpenVZ
        # and 'Some server' has no hypervisor role.
        s1 = self._make_hypervisor('CN1', kvm, Resource.STATUS_INUSE,
                                   rating=15)
        self._make_hypervisor('CN2', kvm, Resource.STATUS_LOCKED)
        self._make_hypervisor('CN3', kvm, Resource.STATUS_INUSE)
        self._make_hypervisor('CN4', kvm, Resource.STATUS_INUSE, rating=10)
        Server.objects.create(name='Some server',
                              status=Resource.STATUS_INUSE)
        self._make_hypervisor('CN6', CmdbCloudConfig.TECH_HV_OPENVZ,
                              Resource.STATUS_INUSE)

        hvisors = self.cloud.get_hypervisors(hypervisor_driver=kvm)
        self.assertEqual(3, len(hvisors))

        # The highest-rated node must win.
        scheduler = RatingBasedScheduler()
        node = scheduler.get_best_node(hvisors)
        self.assertEqual(s1.id, node.id)

    def test_roundrobin(self):
        kvm = CmdbCloudConfig.TECH_HV_KVM
        s1 = self._make_hypervisor('CN1', kvm, Resource.STATUS_INUSE)
        self._make_hypervisor('CN2', kvm, Resource.STATUS_LOCKED)
        s3 = self._make_hypervisor('CN3', kvm, Resource.STATUS_INUSE)
        s4 = self._make_hypervisor('CN4', kvm, Resource.STATUS_INUSE)
        Server.objects.create(name='Some server',
                              status=Resource.STATUS_INUSE)
        self._make_hypervisor('CN6', CmdbCloudConfig.TECH_HV_OPENVZ,
                              Resource.STATUS_INUSE)

        hvisors = self.cloud.get_hypervisors(hypervisor_driver=kvm)
        self.assertEqual(3, len(hvisors))

        scheduler = RoundRobinScheduler()
        scheduler.reset()
        # The scheduler cycles through the eligible nodes and wraps around.
        for expected in (s1, s3, s4, s1):
            node = scheduler.get_best_node(hvisors)
            self.assertEqual(expected.id, node.id)
def test_controller_create_success(self):
    """GET on the cloud_tasks endpoint reflects tracker state; PATCH is rejected."""
    cloud = CmdbCloudConfig()
    tracker_class = cloud.task_tracker
    full_cloud_task_class_name = "%s.%s" % (MockVpsControlTask.__module__,
                                            MockVpsControlTask.__name__)

    # One tracker left as created, one explicitly marked failed.
    tracker_ok = tracker_class(task_class=full_cloud_task_class_name)
    tracker_ok.context = {'some': 'value'}
    tracker_ok.save()

    tracker_bad = tracker_class(task_class=full_cloud_task_class_name)
    tracker_bad.context = {'some': 'value'}
    tracker_bad.failed('some error')
    tracker_bad.save()

    # PATCH is not an allowed method on this endpoint.
    denied = self.client.patch('/v1/cloud_tasks/%s/' % tracker_ok.id)
    self.assertEqual(405, denied.status_code)

    response_ok = self.client.get('/v1/cloud_tasks/%s/' % tracker_ok.id)
    self.assertEqual(200, response_ok.status_code)
    self.assertEqual(tracker_ok.id, response_ok.data['id'])
    self.assertEqual('success', response_ok.data['status'])

    response_bad = self.client.get('/v1/cloud_tasks/%s/' % tracker_bad.id)
    self.assertEqual(200, response_bad.status_code)
    self.assertEqual(tracker_bad.id, response_bad.data['id'])
    self.assertEqual('failed', response_bad.data['status'])
    self.assertEqual('some error', response_bad.data['error'])
def test_controller_create_success(self):
    """Tracker persistence: get() round-trips ids and find() filters by status."""
    cloud = CmdbCloudConfig()
    tracker_class = cloud.task_tracker

    tracker_new = tracker_class(task_class='SomeClassName')
    tracker_new.context = {'some': 'value'}
    tracker_new.save()

    tracker_failed = tracker_class(task_class='SomeClassName')
    tracker_failed.context = {'some': 'value'}
    tracker_failed.failed('some error')
    tracker_failed.save()

    # Lookup by id returns the same tracker.
    self.assertEqual(tracker_new.id, tracker_class.get(tracker_new.id).id)
    self.assertEqual(tracker_failed.id,
                     tracker_class.get(tracker_failed.id).id)

    # find() with no filter returns both; with a status filter it narrows.
    self.assertEqual(2, len(tracker_class.find()))
    self.assertEqual(
        1, len(tracker_class.find(status=TaskTrackerStatus.STATUS_NEW)))
    self.assertEqual(
        1, len(tracker_class.find(status=TaskTrackerStatus.STATUS_FAILED)))
    self.assertEqual(
        0, len(tracker_class.find(status=TaskTrackerStatus.STATUS_SUCCESS)))
def setUp(self):
    """Authenticated API client plus a region/DC/rack/hypervisor fixture tree."""
    super(ResourcesAPITests, self).setUp()

    # Authenticate the test client with a staff user's token.
    user_name = 'admin'
    user, _ = User.objects.get_or_create(username=user_name,
                                         password=user_name,
                                         email='*****@*****.**',
                                         is_staff=True)
    token, _ = Token.objects.get_or_create(user=user)
    self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key)

    Resource.objects.all().delete()

    # Physical layout: region -> datacenter -> rack -> hypervisor server.
    self.moscow = RegionResource.objects.create(name='Moscow')
    self.dc1 = Datacenter.objects.create(name='Test DC 1',
                                         parent=self.moscow)
    self.rack1 = Rack.objects.create(name='Test Rack 1', parent=self.dc1)
    self.srv1 = Server.objects.create(name='Test hypervisor 1',
                                      role='hypervisor',
                                      parent=self.rack1)

    # Two IP pools under one group; the second is marked in-use.
    self.pools_group1 = RegionResource.objects.create(
        name='Test DC 1 IP Pools', parent=self.dc1)
    self.pool1 = IPAddressPoolFactory.from_network(
        '192.168.0.0/23',
        parent=self.pools_group1,
        dns1='46.17.46.200',
        dns2='46.17.40.200')
    GlobalIPManager.get_ip('192.168.0.1').lock()
    self.pool11 = IPAddressPoolFactory.from_network(
        '192.169.0.0/23',
        parent=self.pools_group1,
        dns1='46.17.46.200',
        dns2='46.17.40.200')
    self.pool11.use()
    GlobalIPManager.get_ip('192.169.0.1').lock()

    self.srv1.set_option('agentd_taskqueue', 'test_task_queue')
    MockVpsControlTask.REMOTE_WORKER.sent_tasks = []

    # Route every backend task class to the mock worker task.
    self.cloud = CmdbCloudConfig()
    self.backend = ProxMoxJBONServiceBackend(self.cloud)
    for attr in ('TASK_CREATE', 'TASK_START', 'TASK_STOP'):
        setattr(ProxMoxJBONServiceBackend, attr, MockVpsControlTask)
def stop(self, request, pk=None):
    """Validate the payload, stop the VPS via the backend, return its tracker."""
    indata = StartStopSerializer(data=request.data)
    if not indata.is_valid():
        return Response(indata.errors, status=status.HTTP_400_BAD_REQUEST)

    logger.info("Stopping VPS: %s" % indata.data)

    # hardcoded backend
    backend = ProxMoxJBONServiceBackend(CmdbCloudConfig())
    tracker = backend.stop_vps(**indata.data)
    return Response(CloudTaskTrackerSerializer(tracker).data)
def create(self, request, *args, **kwargs):
    """Validate the payload, create the VPS via the backend, return its tracker."""
    indata = CreateVpsSerializer(data=request.data)
    if not indata.is_valid():
        return Response(indata.errors, status=status.HTTP_400_BAD_REQUEST)

    logger.info("Creating VPS: %s" % indata.data)

    # hardcoded backend
    backend = ProxMoxJBONServiceBackend(CmdbCloudConfig())
    tracker = backend.create_vps(**indata.data)
    return Response(CloudTaskTrackerSerializer(tracker).data)
def setUp(self):
    """Fixture tree with one free and one in-use IPNetworkPool."""
    Resource.objects.all().delete()

    # Physical layout: region -> datacenter -> rack -> hypervisor server.
    self.moscow = RegionResource.objects.create(name='Moscow')
    self.dc1 = Datacenter.objects.create(name='Test DC 1',
                                         parent=self.moscow)
    self.rack1 = Rack.objects.create(name='Test Rack 1', parent=self.dc1)
    self.srv1 = Server.objects.create(name='Test hypervisor 1',
                                      role='hypervisor',
                                      parent=self.rack1)

    # Two network pools under one group, with different statuses.
    self.pools_group1 = RegionResource.objects.create(
        name='Test DC 1 IP Pools', parent=self.dc1)
    self.pool1 = IPNetworkPool.objects.create(network='192.168.0.0/23',
                                              parent=self.pools_group1,
                                              status=Resource.STATUS_FREE)
    self.pool11 = IPNetworkPool.objects.create(network='192.169.0.0/23',
                                               parent=self.pools_group1,
                                               status=Resource.STATUS_INUSE)

    self.srv1.set_option('agentd_taskqueue', 'test_task_queue')
    MockVpsControlTask.REMOTE_WORKER.sent_tasks = []

    # Route every backend task class to the mock worker task.
    self.cloud = CmdbCloudConfig()
    self.backend = ProxMoxJBONServiceBackend(self.cloud)
    for attr in ('TASK_CREATE', 'TASK_START', 'TASK_STOP'):
        setattr(ProxMoxJBONServiceBackend, attr, MockVpsControlTask)
def setUp(self):
    """Fixture tree with two factory-built IP pools; the second marked in-use."""
    Resource.objects.all().delete()

    # Region -> datacenter -> rack -> hypervisor server.
    self.moscow = RegionResource.objects.create(name='Moscow')
    self.dc1 = Datacenter.objects.create(name='Test DC 1',
                                         parent=self.moscow)
    self.rack1 = Rack.objects.create(name='Test Rack 1', parent=self.dc1)
    self.srv1 = Server.objects.create(name='Test hypervisor 1',
                                      role='hypervisor',
                                      parent=self.rack1)

    self.pools_group1 = RegionResource.objects.create(
        name='Test DC 1 IP Pools', parent=self.dc1)
    self.pool1 = IPAddressPoolFactory.from_network(
        '192.168.0.0/23',
        parent=self.pools_group1,
        dns1='46.17.46.200',
        dns2='46.17.40.200')
    GlobalIPManager.get_ip('192.168.0.1').lock()
    self.pool11 = IPAddressPoolFactory.from_network(
        '192.169.0.0/23',
        parent=self.pools_group1,
        dns1='46.17.46.200',
        dns2='46.17.40.200')
    self.pool11.use()
    GlobalIPManager.get_ip('192.169.0.1').lock()

    self.srv1.set_option('agentd_taskqueue', 'test_task_queue')
    MockVpsControlTask.REMOTE_WORKER.sent_tasks = []

    # Route every backend task class to the mock worker task.
    self.cloud = CmdbCloudConfig()
    self.backend = ProxMoxJBONServiceBackend(self.cloud)
    for attr in ('TASK_CREATE', 'TASK_START', 'TASK_STOP'):
        setattr(ProxMoxJBONServiceBackend, attr, MockVpsControlTask)
def __init__(self, stdout=None, stderr=None, no_color=False):
    """Wire up the cloud config, its task tracker and the ProxMox backend."""
    super(Command, self).__init__(stdout, stderr, no_color)
    cloud = CmdbCloudConfig()
    self.cloud = cloud
    self.task_tracker = cloud.task_tracker
    self.backend = ProxMoxJBONServiceBackend(cloud)
class Command(BaseCommand):
    """Cloud management command: task trackers, VPS services, hypervisors."""

    # Maps subcommand name -> handler method, filled by register_handler().
    registered_handlers = {}

    def __init__(self, stdout=None, stderr=None, no_color=False):
        """Wire up the cloud config, its task tracker and the ProxMox backend."""
        super(Command, self).__init__(stdout, stderr, no_color)
        self.cloud = CmdbCloudConfig()
        self.task_tracker = self.cloud.task_tracker
        self.backend = ProxMoxJBONServiceBackend(self.cloud)

    def add_arguments(self, parser):
        """
        Add custom arguments and subcommands
        """
        subparsers = parser.add_subparsers(title="Cloud management commands",
                                           help="Commands help",
                                           dest='manager_name',
                                           parser_class=ArgumentParser)

        # Tasks
        tracker_cmd_parser = subparsers.add_parser('tracker', help='Manage task trackers.')
        tracker_cmd_parser.add_argument('--new', action="store_true", help="Show submitted tasks.")
        tracker_cmd_parser.add_argument('--failed', action="store_true", help="Show failed tasks.")
        tracker_cmd_parser.add_argument('--success', action="store_true", help="Show success tasks.")
        tracker_cmd_parser.add_argument('--progress', action="store_true", help="Show running tasks.")
        tracker_cmd_parser.add_argument('--limit', type=int, default=10, help="Limit the output.")
        tracker_cmd_parser.add_argument('--attach', type=int, metavar='TRACKER-ID',
                                        help="Attach to the task console.")
        tracker_cmd_parser.add_argument('--cancel', type=int, metavar='TRACKER-ID', help="Cancel task.")
        self.register_handler('tracker', self._handle_trackers)

        # Cloud Services
        vps_cmd_parser = subparsers.add_parser('vps', help='Manage VPS services.')
        vps_cmd_parser.add_argument('--create', action="store_true", help="Create VPS server.")
        vps_cmd_parser.add_argument('--start', action="store_true", help="Start VPS server.")
        vps_cmd_parser.add_argument('--stop', action="store_true", help="Stop VPS server.")
        vps_cmd_parser.add_argument('--driver', default='kvm',
                                    help="Hypervisor driver, used to manage VPS (must be supported by backend).")
        vps_cmd_parser.add_argument('--template', help="VPS template.", default='centos.6.64bit')
        vps_cmd_parser.add_argument('--node', type=int, default=0,
                                    help="CMDB node ID. Scheduling is used if not specified.")
        vps_cmd_parser.add_argument('--vmid', type=int, help="Set ID of the VM..", required=True)
        vps_cmd_parser.add_argument('--user', help="Specify user name for the VM.")
        vps_cmd_parser.add_argument('--ip', help="Specify IP address for the VM.")
        vps_cmd_parser.add_argument('--ram', type=int, help="Set RAM amount (Mb).", default=512)
        vps_cmd_parser.add_argument('--hdd', type=int, help="Set HDD amount (Gb).", default=5)
        vps_cmd_parser.add_argument('--cpu', type=int, help="Number of vCPU cores.", default=1)
        self.register_handler('vps', self._handle_vps)

        hv_cmd_parser = subparsers.add_parser('hypervisors', help='Manage hypervisors.')
        hv_cmd_parser.add_argument('--list', action="store_true", help="List known hypervisors.")
        self.register_handler('hypervisors', self._handle_hypervisors)

    def _handle_hypervisors(self, *args, **options):
        """Print a rating-sorted table of known hypervisor nodes."""
        if options['list']:
            table = PrettyTable(['node', 'group', 'label', 'hypervisor_driver',
                                 'rating', 'agentd_heartbeat'])
            table.padding_width = 1
            table.sortby = 'rating'
            for hypervisor in self.cloud.get_hypervisors():
                hyper_driver = hypervisor.get_option_value('hypervisor_driver', default=None)
                if hyper_driver:
                    current_time_stamp = int(time.time())
                    agentd_heartbeat = hypervisor.get_option_value('agentd_heartbeat', default=0)
                    # Flag heartbeats older than 90 seconds with "(!)".
                    agentd_heartbeat_value = agentd_heartbeat if (
                        current_time_stamp - int(agentd_heartbeat)) < 90 else "%s (!)" % agentd_heartbeat
                    table.add_row([hypervisor.id,
                                   hypervisor.get_option_value('group'),
                                   hypervisor.get_option_value('label'),
                                   hypervisor.get_option_value('hypervisor_driver'),
                                   hypervisor.get_option_value('rating', default=0),
                                   agentd_heartbeat_value,
                                   ])
            logger.info(table.get_string(reversesort=True))

    def _handle_vps(self, *args, **options):
        """Dispatch --create/--start/--stop to the backend; wait on the tracker."""
        tracker = None
        vmid = int(options['vmid'])
        user_name = options['user']
        node_id = int(options['node'])

        if options['create']:
            ram = int(options['ram'])
            hdd = int(options['hdd'])
            cpu = int(options['cpu'])
            template = options['template']
            ip_addr = options['ip']
            tracker = self.backend.create_vps(node=node_id, vmid=vmid, template=template,
                                              user=user_name, ram=ram, hdd=hdd, cpu=cpu,
                                              ip=ip_addr)
        elif options['stop']:
            hyper_driver = options['driver']
            tracker = self.backend.stop_vps(node=node_id, vmid=vmid,
                                            user=user_name, driver=hyper_driver)
        elif options['start']:
            hyper_driver = options['driver']
            tracker = self.backend.start_vps(node=node_id, vmid=vmid,
                                             user=user_name, driver=hyper_driver)

        if tracker:
            logger.info("Attached to the task tracker %s. Ctrl-C to exit." % tracker.id)
            try:
                result_data = tracker.wait()
                logger.info(result_data)
            # FIX: `except Exception, ex` is Python-2-only syntax and
            # `ex.message` was removed in Python 3; log the exception itself.
            except Exception as ex:
                logger.error(ex)
def setUp(self):
    # Start each test from an empty resource tree with a fresh cloud config.
    Resource.objects.all().delete()
    self.cloud = CmdbCloudConfig()
class ProxMoxSchedulersTest(TestCase):
    # Exercises the VPS node schedulers against hypervisor Server fixtures.

    def setUp(self):
        # Each test starts from an empty resource tree.
        Resource.objects.all().delete()
        self.cloud = CmdbCloudConfig()

    def test_ratingbased(self):
        # Only CN1, CN3 and CN4 are eligible KVM hypervisors: CN2 is
        # locked, CN6 runs OpenVZ, 'Some server' has no hypervisor role.
        s1 = Server.objects.create(
            name='CN1',
            rating=15,
            role='hypervisor',
            hypervisor_driver=CmdbCloudConfig.TECH_HV_KVM,
            status=Resource.STATUS_INUSE)
        s2 = Server.objects.create(
            name='CN2',
            role='hypervisor',
            hypervisor_driver=CmdbCloudConfig.TECH_HV_KVM,
            status=Resource.STATUS_LOCKED)
        s3 = Server.objects.create(
            name='CN3',
            role='hypervisor',
            hypervisor_driver=CmdbCloudConfig.TECH_HV_KVM,
            status=Resource.STATUS_INUSE)
        s4 = Server.objects.create(
            name='CN4',
            rating=10,
            role='hypervisor',
            hypervisor_driver=CmdbCloudConfig.TECH_HV_KVM,
            status=Resource.STATUS_INUSE)
        s5 = Server.objects.create(name='Some server',
                                   status=Resource.STATUS_INUSE)
        s6 = Server.objects.create(
            name='CN6',
            role='hypervisor',
            hypervisor_driver=CmdbCloudConfig.TECH_HV_OPENVZ,
            status=Resource.STATUS_INUSE)
        hvisors = self.cloud.get_hypervisors(
            hypervisor_driver=CmdbCloudConfig.TECH_HV_KVM)
        self.assertEqual(3, len(hvisors))
        # The highest-rated node (CN1, rating=15) must be chosen.
        scheduler = RatingBasedScheduler()
        node = scheduler.get_best_node(hvisors)
        self.assertEqual(s1.id, node.id)

    def test_roundrobin(self):
        # Same fixture layout as above, but no ratings: the round-robin
        # scheduler should rotate over the three eligible nodes.
        s1 = Server.objects.create(
            name='CN1',
            role='hypervisor',
            hypervisor_driver=CmdbCloudConfig.TECH_HV_KVM,
            status=Resource.STATUS_INUSE)
        s2 = Server.objects.create(
            name='CN2',
            role='hypervisor',
            hypervisor_driver=CmdbCloudConfig.TECH_HV_KVM,
            status=Resource.STATUS_LOCKED)
        s3 = Server.objects.create(
            name='CN3',
            role='hypervisor',
            hypervisor_driver=CmdbCloudConfig.TECH_HV_KVM,
            status=Resource.STATUS_INUSE)
        s4 = Server.objects.create(
            name='CN4',
            role='hypervisor',
            hypervisor_driver=CmdbCloudConfig.TECH_HV_KVM,
            status=Resource.STATUS_INUSE)
        s5 = Server.objects.create(name='Some server',
                                   status=Resource.STATUS_INUSE)
        s6 = Server.objects.create(
            name='CN6',
            role='hypervisor',
            hypervisor_driver=CmdbCloudConfig.TECH_HV_OPENVZ,
            status=Resource.STATUS_INUSE)
        hvisors = self.cloud.get_hypervisors(
            hypervisor_driver=CmdbCloudConfig.TECH_HV_KVM)
        self.assertEqual(3, len(hvisors))
        scheduler = RoundRobinScheduler()
        scheduler.reset()
        # The scheduler must cycle CN1 -> CN3 -> CN4 and wrap back to CN1.
        node = scheduler.get_best_node(hvisors)
        self.assertEqual(s1.id, node.id)
        node = scheduler.get_best_node(hvisors)
        self.assertEqual(s3.id, node.id)
        node = scheduler.get_best_node(hvisors)
        self.assertEqual(s4.id, node.id)
        node = scheduler.get_best_node(hvisors)
        self.assertEqual(s1.id, node.id)
class Command(BaseCommand):
    """Cloud management command: task trackers, VPS services, hypervisors."""

    # Maps subcommand name -> handler method, filled by register_handler().
    registered_handlers = {}

    def __init__(self, stdout=None, stderr=None, no_color=False):
        """Wire up the cloud config, its task tracker and the ProxMox backend."""
        super(Command, self).__init__(stdout, stderr, no_color)
        self.cloud = CmdbCloudConfig()
        self.task_tracker = self.cloud.task_tracker
        self.backend = ProxMoxJBONServiceBackend(self.cloud)

    def add_arguments(self, parser):
        """
        Add custom arguments and subcommands
        """
        subparsers = parser.add_subparsers(title="Cloud management commands",
                                           help="Commands help",
                                           dest='manager_name',
                                           parser_class=ArgumentParser)

        # Tasks
        tracker_cmd_parser = subparsers.add_parser(
            'tracker', help='Manage task trackers.')
        tracker_cmd_parser.add_argument('--new', action="store_true",
                                        help="Show submitted tasks.")
        tracker_cmd_parser.add_argument('--failed', action="store_true",
                                        help="Show failed tasks.")
        tracker_cmd_parser.add_argument('--success', action="store_true",
                                        help="Show success tasks.")
        tracker_cmd_parser.add_argument('--progress', action="store_true",
                                        help="Show running tasks.")
        tracker_cmd_parser.add_argument('--limit', type=int, default=10,
                                        help="Limit the output.")
        tracker_cmd_parser.add_argument('--attach', type=int, metavar='TRACKER-ID',
                                        help="Attach to the task console.")
        tracker_cmd_parser.add_argument('--cancel', type=int, metavar='TRACKER-ID',
                                        help="Cancel task.")
        self.register_handler('tracker', self._handle_trackers)

        # Cloud Services
        vps_cmd_parser = subparsers.add_parser('vps', help='Manage VPS services.')
        vps_cmd_parser.add_argument('--create', action="store_true",
                                    help="Create VPS server.")
        vps_cmd_parser.add_argument('--start', action="store_true",
                                    help="Start VPS server.")
        vps_cmd_parser.add_argument('--stop', action="store_true",
                                    help="Stop VPS server.")
        vps_cmd_parser.add_argument(
            '--driver',
            default='kvm',
            help=
            "Hypervisor driver, used to manage VPS (must be supported by backend)."
        )
        vps_cmd_parser.add_argument('--template', help="VPS template.",
                                    default='centos.6.64bit')
        vps_cmd_parser.add_argument(
            '--node',
            type=int,
            default=0,
            help="CMDB node ID. Scheduling is used if not specified.")
        vps_cmd_parser.add_argument('--vmid', type=int, help="Set ID of the VM..",
                                    required=True)
        vps_cmd_parser.add_argument('--user', help="Specify user name for the VM.")
        vps_cmd_parser.add_argument('--ip', help="Specify IP address for the VM.")
        vps_cmd_parser.add_argument('--ram', type=int, help="Set RAM amount (Mb).",
                                    default=512)
        vps_cmd_parser.add_argument('--hdd', type=int, help="Set HDD amount (Gb).",
                                    default=5)
        vps_cmd_parser.add_argument('--cpu', type=int, help="Number of vCPU cores.",
                                    default=1)
        self.register_handler('vps', self._handle_vps)

        hv_cmd_parser = subparsers.add_parser('hypervisors',
                                              help='Manage hypervisors.')
        hv_cmd_parser.add_argument('--list', action="store_true",
                                   help="List known hypervisors.")
        self.register_handler('hypervisors', self._handle_hypervisors)

    def _handle_hypervisors(self, *args, **options):
        """Print a rating-sorted table of known hypervisor nodes."""
        if options['list']:
            table = PrettyTable([
                'node', 'group', 'label', 'hypervisor_driver', 'rating',
                'agentd_heartbeat'
            ])
            table.padding_width = 1
            table.sortby = 'rating'
            for hypervisor in self.cloud.get_hypervisors():
                hyper_driver = hypervisor.get_option_value('hypervisor_driver',
                                                           default=None)
                if hyper_driver:
                    current_time_stamp = int(time.time())
                    agentd_heartbeat = hypervisor.get_option_value(
                        'agentd_heartbeat', default=0)
                    # Flag heartbeats older than 90 seconds with "(!)".
                    agentd_heartbeat_value = agentd_heartbeat if (
                        current_time_stamp - int(agentd_heartbeat)
                    ) < 90 else "%s (!)" % agentd_heartbeat
                    table.add_row([
                        hypervisor.id,
                        hypervisor.get_option_value('group'),
                        hypervisor.get_option_value('label'),
                        hypervisor.get_option_value('hypervisor_driver'),
                        hypervisor.get_option_value('rating', default=0),
                        agentd_heartbeat_value,
                    ])
            logger.info(table.get_string(reversesort=True))

    def _handle_vps(self, *args, **options):
        """Dispatch --create/--start/--stop to the backend; wait on the tracker."""
        tracker = None
        vmid = int(options['vmid'])
        user_name = options['user']
        node_id = int(options['node'])

        if options['create']:
            ram = int(options['ram'])
            hdd = int(options['hdd'])
            cpu = int(options['cpu'])
            template = options['template']
            ip_addr = options['ip']
            tracker = self.backend.create_vps(node=node_id,
                                              vmid=vmid,
                                              template=template,
                                              user=user_name,
                                              ram=ram,
                                              hdd=hdd,
                                              cpu=cpu,
                                              ip=ip_addr)
        elif options['stop']:
            hyper_driver = options['driver']
            tracker = self.backend.stop_vps(node=node_id,
                                            vmid=vmid,
                                            user=user_name,
                                            driver=hyper_driver)
        elif options['start']:
            hyper_driver = options['driver']
            tracker = self.backend.start_vps(node=node_id,
                                             vmid=vmid,
                                             user=user_name,
                                             driver=hyper_driver)

        if tracker:
            logger.info("Attached to the task tracker %s. Ctrl-C to exit." %
                        tracker.id)
            try:
                result_data = tracker.wait()
                logger.info(result_data)
            # FIX: `except Exception, ex` is Python-2-only syntax and
            # `ex.message` was removed in Python 3; log the exception itself.
            except Exception as ex:
                logger.error(ex)