def pull_issue_types(self, project):
    """Synchronize a project's issue types with its backend counterpart.

    New backend issue types are linked (reusing a matching global issue
    type for these settings when one exists), stale ones are deleted, and
    common ones have their pulled fields refreshed.
    """
    remote_project = self.get_project(project.backend_id)
    remote_map = {t.id: t for t in remote_project.issueTypes}
    local_map = {t.backend_id: t for t in project.issue_types.all()}
    global_map = {
        t.backend_id: t
        for t in models.IssueType.objects.filter(settings=self.settings)
    }

    remote_ids = set(remote_map)
    local_ids = set(local_map)

    # Link issue types that appeared on the backend, preferring an already
    # imported global issue type over a fresh import.
    for backend_id in remote_ids - local_ids:
        issue_type = global_map.get(backend_id)
        if issue_type is None:
            issue_type = self.import_issue_type(remote_map[backend_id])
        issue_type.save()
        project.issue_types.add(issue_type)

    # Remove issue types that disappeared on the backend.
    project.issue_types.filter(backend_id__in=local_ids - remote_ids).delete()

    # Refresh fields of issue types present on both sides.
    for backend_id in remote_ids & local_ids:
        imported = self.import_issue_type(remote_map[backend_id])
        update_pulled_fields(
            local_map[backend_id],
            imported,
            ('name', 'description', 'icon_url', 'subtask'),
        )
def update_instance_fields(self, instance, backend_instance):
    """Update instance from backend data.

    Flavor-related fields are preserved in the Waldur database when the
    flavor was deleted in Rijkscloud (backend reports an empty flavor name).
    """
    candidate_fields = set(models.Instance.get_backend_fields())
    if not backend_instance.flavor_name:
        candidate_fields -= {'flavor_name', 'ram', 'cores'}
    update_pulled_fields(instance, backend_instance, list(candidate_fields))
def pull_instance(self, instance, update_fields=None):
    """Refresh instance fields from the backend.

    The update is skipped if the local record was modified after the
    import started, to avoid clobbering newer local data.

    :param instance: instance database object
    :param update_fields: iterable of fields to be updated;
        defaults to all backend fields
    """
    started_at = timezone.now()
    remote_copy = self.import_instance(instance.backend_id, save=False)
    instance.refresh_from_db()
    if instance.modified >= started_at:
        return
    if update_fields is None:
        update_fields = models.Instance.get_backend_fields()
    update_pulled_fields(instance, remote_copy, update_fields)
def pull_templates(self):
    """
    Pull VMware templates for virtual machine provisioning from content
    library using VMware REST API to the local database.
    """
    try:
        remote_templates = self.client.list_all_templates()
    except VMwareError as e:
        raise VMwareBackendError(e)

    if is_basic_mode():
        # Basic mode only supports templates with exactly one NIC.
        remote_templates = [
            item for item in remote_templates
            if len(item['template']['nics']) == 1
        ]

    remote_map = {item['library_item']['id']: item for item in remote_templates}
    local_map = {
        t.backend_id: t
        for t in models.Template.objects.filter(settings=self.settings)
    }
    remote_ids = set(remote_map)
    local_ids = set(local_map)

    # Create templates that appeared on the backend.
    for library_item_id in remote_ids - local_ids:
        self._backend_template_to_template(remote_map[library_item_id]).save()

    # Refresh pulled fields of templates present on both sides.
    pulled = (
        'cores',
        'cores_per_socket',
        'ram',
        'disk',
        'guest_os',
        'modified',
        'description',
    )
    for library_item_id in remote_ids & local_ids:
        converted = self._backend_template_to_template(remote_map[library_item_id])
        update_pulled_fields(local_map[library_item_id], converted, pulled)

    # Drop templates that disappeared on the backend.
    models.Template.objects.filter(
        settings=self.settings, backend_id__in=local_ids - remote_ids
    ).delete()
def pull_volume(self, volume, update_fields=None):
    """Refresh volume fields from the backend.

    Skipped when the local record was modified after the import started.

    :param volume: volume database object
    :param update_fields: iterable of fields to be updated;
        falls back to all backend fields when empty or None
    """
    started_at = timezone.now()
    remote_copy = self.import_volume(volume.backend_id, save=False)
    volume.refresh_from_db()
    if volume.modified >= started_at:
        return
    update_pulled_fields(
        volume,
        remote_copy,
        update_fields or models.Volume.get_backend_fields(),
    )
def pull_host(self, host):
    """Refresh host fields and host-template links from the backend."""
    # Timestamp taken before the import so a concurrent local edit can be
    # detected afterwards.
    import_time = timezone.now()
    imported_host = self.import_host(host.backend_id, save=False)
    imported_host_templates = set(self.get_host_templates(host))
    host.refresh_from_db()
    # Only overwrite fields if the local record did not change while the
    # backend data was being fetched.
    if host.modified < import_time:
        update_fields = ('name', 'visible_name', 'description', 'error',
                         'status', 'host_group_name')
        update_pulled_fields(host, imported_host, update_fields)
    # Template links are synchronised regardless of the timestamp check.
    # NOTE(review): the collapsed source is ambiguous about whether this
    # sync belongs inside the `if` above; kept at function level — confirm.
    host_templates = set(host.templates.all())
    host.templates.remove(*(host_templates - imported_host_templates))
    host.templates.add(*(imported_host_templates - host_templates))
def pull_volumes(self):
    """Pull all OK/ERRED volumes for these settings from the backend.

    Volumes missing on the backend are handed to the not-found handler;
    the rest get their backend fields refreshed and are marked updated.
    """
    remote_volumes = {v.backend_id: v for v in self.get_volumes()}
    local_volumes = models.Volume.objects.filter(
        service_project_link__service__settings=self.settings,
        state__in=[models.Volume.States.OK, models.Volume.States.ERRED],
    )
    for volume in local_volumes:
        remote_volume = remote_volumes.get(volume.backend_id)
        if remote_volume is None:
            handle_resource_not_found(volume)
        else:
            update_pulled_fields(
                volume, remote_volume, models.Volume.get_backend_fields())
            handle_resource_update_success(volume)
def pull_virtual_machine(self, vm, update_fields=None):
    """
    Pull virtual machine from REST API and update its information in local database.

    Skipped when the local record was modified after the import started.

    :param vm: Virtual machine database object.
    :type vm: :class:`waldur_vmware.models.VirtualMachine`
    :param update_fields: iterable of fields to be updated;
        falls back to all backend fields when empty or None
    """
    started_at = timezone.now()
    remote_vm = self.import_virtual_machine(vm.backend_id, save=False)
    vm.refresh_from_db()
    if vm.modified >= started_at:
        return
    update_pulled_fields(
        vm,
        remote_vm,
        update_fields or models.VirtualMachine.get_backend_fields(),
    )
def pull_catalogs_for_scope(self, remote_catalogs, scope):
    """Synchronize catalogs attached to a scope with the backend listing.

    Existing catalogs get pulled fields refreshed, new ones are bulk
    created, and stale ones are deleted.
    """
    content_type = ContentType.objects.get_for_model(scope)
    local_catalogs = models.Catalog.objects.filter(
        content_type=content_type,
        object_id=scope.id,
    )
    remote_map = {
        item['id']: self.remote_catalog_to_local(item, content_type, scope.id)
        for item in remote_catalogs
    }
    local_map = {catalog.backend_id: catalog for catalog in local_catalogs}
    remote_ids = set(remote_map)
    local_ids = set(local_map)

    pulled = {
        'name',
        'description',
        'catalog_url',
        'branch',
        'commit',
        'username',
        'password',
        'runtime_state',
    }
    for catalog_id in remote_ids & local_ids:
        update_pulled_fields(local_map[catalog_id], remote_map[catalog_id], pulled)

    models.Catalog.objects.bulk_create(
        remote_map[catalog_id] for catalog_id in remote_ids - local_ids
    )
    local_catalogs.filter(backend_id__in=local_ids - remote_ids).delete()
def pull_vm_ports(self, vm):
    """Synchronize NIC ports of a virtual machine with the backend."""
    try:
        backend_ports = self.client.list_nics(vm.backend_id)
    except VMwareError as e:
        raise VMwareBackendError(e)
    # Backend NICs are keyed by their 'nic' identifier.
    backend_ports_map = {item['nic']: item for item in backend_ports}
    frontend_ports_map = {
        p.backend_id: p for p in models.Port.objects.filter(vm=vm)
    }
    # Lookup table to resolve the backing network of each new port.
    networks_map = {
        p.backend_id: p
        for p in models.Network.objects.filter(settings=vm.service_settings)
    }
    stale_ids = set(frontend_ports_map.keys()) - set(backend_ports_map.keys())
    new_ids = set(backend_ports_map.keys()) - set(frontend_ports_map.keys())
    common_ids = set(backend_ports_map.keys()) & set(frontend_ports_map.keys())
    # Create local ports for NICs that appeared on the backend.
    for item_id in new_ids:
        backend_port = backend_ports_map[item_id]
        port = self._backend_port_to_port(backend_port)
        port.service_settings = vm.service_settings
        # NOTE(review): 'port.port = vm.port' looks suspicious — possibly
        # meant 'port.project = vm.project'; verify against the Port model.
        port.port = vm.port
        network_id = backend_port['backing']['network']
        # May be None if the backing network is unknown locally.
        port.network = networks_map.get(network_id)
        port.vm = vm
        port.save()
    # Refresh pulled fields of ports present on both sides.
    for item_id in common_ids:
        backend_port = self._backend_port_to_port(backend_ports_map[item_id])
        frontend_port = frontend_ports_map[item_id]
        fields = ('mac_address', 'runtime_state')
        update_pulled_fields(frontend_port, backend_port, fields)
    # Drop ports that disappeared on the backend.
    models.Port.objects.filter(vm=vm, backend_id__in=stale_ids).delete()
def pull_project_workloads(self, project):
    """Synchronize workloads of a project with the backend listing."""
    backend_workloads = self.client.list_workloads(project.backend_id)
    local_workloads = models.Workload.objects.filter(project=project)
    namespaces_by_id = {
        ns.backend_id: ns
        for ns in models.Namespace.objects.filter(project=project)
    }
    remote_map = {
        item['id']: self.remote_workload_to_local(item, project, namespaces_by_id)
        for item in backend_workloads
    }
    local_map = {w.backend_id: w for w in local_workloads}
    remote_ids = set(remote_map)
    local_ids = set(local_map)

    pulled = {'name', 'runtime_state', 'scale'}
    for workload_id in remote_ids & local_ids:
        update_pulled_fields(local_map[workload_id], remote_map[workload_id], pulled)

    models.Workload.objects.bulk_create(
        remote_map[workload_id] for workload_id in remote_ids - local_ids
    )
    local_workloads.filter(backend_id__in=local_ids - remote_ids).delete()
def pull_namespaces_for_cluster(self, cluster: models.Cluster):
    """Synchronize namespaces of a cluster's projects with the backend."""
    backend_namespaces = self.client.list_namespaces(cluster.backend_id)
    local_namespaces = models.Namespace.objects.filter(project__cluster=cluster)
    projects_by_id = {
        p.backend_id: p
        for p in models.Project.objects.filter(cluster=cluster)
    }
    remote_map = {
        item['id']: self.remote_namespace_to_local(item, projects_by_id)
        for item in backend_namespaces
    }
    local_map = {ns.backend_id: ns for ns in local_namespaces}
    remote_ids = set(remote_map)
    local_ids = set(local_map)

    pulled = {'name', 'runtime_state', 'project'}
    for namespace_id in remote_ids & local_ids:
        update_pulled_fields(
            local_map[namespace_id], remote_map[namespace_id], pulled)

    models.Namespace.objects.bulk_create(
        remote_map[namespace_id] for namespace_id in remote_ids - local_ids
    )
    local_namespaces.filter(backend_id__in=local_ids - remote_ids).delete()
def pull_project_hpas(self, project):
    """Synchronize horizontal pod autoscalers of a project with the backend."""
    workloads_by_id = {
        w.backend_id: w
        for w in models.Workload.objects.filter(project=project)
    }
    local_hpas = models.HPA.objects.filter(project=project)
    local_map = {hpa.backend_id: hpa for hpa in local_hpas}
    remote_map = {
        item['id']: self.remote_hpa_to_local(item, workloads_by_id)
        for item in self.client.list_hpas(project.backend_id)
    }
    remote_ids = set(remote_map)
    local_ids = set(local_map)

    pulled = {
        'name',
        'runtime_state',
        'current_replicas',
        'desired_replicas',
        'min_replicas',
        'max_replicas',
        'metrics',
    }
    for hpa_id in remote_ids & local_ids:
        update_pulled_fields(local_map[hpa_id], remote_map[hpa_id], pulled)

    models.HPA.objects.bulk_create(
        remote_map[hpa_id] for hpa_id in remote_ids - local_ids
    )
    local_hpas.filter(backend_id__in=local_ids - remote_ids).delete()
def pull_port(self, port, update_fields=None):
    """
    Pull Ethernet port from REST API and update its information in local database.

    Skipped when the local record was modified after the import started.

    :param port: Port to be updated.
    :type port: :class:`waldur_vmware.models.Port`
    :param update_fields: iterable of fields to be updated;
        falls back to all backend fields when empty or None
    :return: None
    """
    started_at = timezone.now()
    remote_port = self.import_port(port.vm.backend_id, port.backend_id, save=False)
    port.refresh_from_db()
    if port.modified >= started_at:
        return
    update_pulled_fields(
        port,
        remote_port,
        update_fields or models.Port.get_backend_fields(),
    )
def pull_disk(self, disk, update_fields=None):
    """
    Pull virtual disk from REST API and update its information in local database.

    Skipped when the local record was modified after the import started.

    :param disk: Virtual disk database object.
    :type disk: :class:`waldur_vmware.models.Disk`
    :param update_fields: iterable of fields to be updated;
        falls back to all backend fields when empty or None
    :return: None
    """
    started_at = timezone.now()
    remote_disk = self.import_disk(disk.vm.backend_id, disk.backend_id, save=False)
    disk.refresh_from_db()
    if disk.modified >= started_at:
        return
    update_pulled_fields(
        disk,
        remote_disk,
        update_fields or models.Disk.get_backend_fields(),
    )
def _pull_projects(self, local_clusters, local_projects, remote_projects):
    """
    This private method pulls projects for given clusters and projects.
    """
    clusters_by_id = {c.backend_id: c for c in local_clusters}
    remote_map = {
        item['id']: self.remote_project_to_local(item, clusters_by_id)
        for item in remote_projects
    }
    local_map = {p.backend_id: p for p in local_projects}
    remote_ids = set(remote_map)
    local_ids = set(local_map)

    pulled = {'name', 'description', 'runtime_state', 'cluster'}
    for project_id in remote_ids & local_ids:
        update_pulled_fields(local_map[project_id], remote_map[project_id], pulled)

    models.Project.objects.bulk_create(
        remote_map[project_id] for project_id in remote_ids - local_ids
    )
    local_projects.filter(backend_id__in=local_ids - remote_ids).delete()
def pull_datastores(self):
    """Synchronize VMware datastores with the local database."""
    try:
        remote_datastores = self.client.list_datastores()
    except VMwareError as e:
        raise VMwareBackendError(e)

    remote_map = {item['datastore']: item for item in remote_datastores}
    local_map = {
        d.backend_id: d
        for d in models.Datastore.objects.filter(settings=self.settings)
    }
    remote_ids = set(remote_map)
    local_ids = set(local_map)

    # Create datastores that appeared on the backend.
    for item_id in remote_ids - local_ids:
        self._backend_datastore_to_datastore(remote_map[item_id]).save()

    # Refresh pulled fields of datastores present on both sides.
    for item_id in remote_ids & local_ids:
        converted = self._backend_datastore_to_datastore(remote_map[item_id])
        update_pulled_fields(
            local_map[item_id], converted, ('name', 'capacity', 'free_space'))

    # Drop datastores that disappeared on the backend.
    models.Datastore.objects.filter(
        settings=self.settings, backend_id__in=local_ids - remote_ids
    ).delete()
def test_model_is_saved_if_pulled_fields_are_different(self):
    """save() is invoked exactly once when a tracked field differs."""
    local = InstanceMock()
    remote = InstanceMock(runtime_state='ERRED')
    update_pulled_fields(local, remote, ('name', 'runtime_state'))
    self.assertEqual(local.save.call_count, 1)
def _pull_templates(
    self,
    local_templates,
    local_catalogs,
    local_clusters,
    local_projects,
    remote_templates,
):
    """Synchronize templates with the backend listing: refresh common ones,
    bulk-create new ones, and delete stale ones."""
    catalogs_by_id = {c.backend_id: c for c in local_catalogs}
    clusters_by_id = {c.backend_id: c for c in local_clusters}
    projects_by_id = {p.backend_id: p for p in local_projects}
    local_map = {t.backend_id: t for t in local_templates}
    remote_map = {
        item['id']: self.remote_template_to_local(
            item, catalogs_by_id, clusters_by_id, projects_by_id)
        for item in remote_templates
    }
    remote_ids = set(remote_map)
    local_ids = set(local_map)

    pulled = {
        'name',
        'description',
        'runtime_state',
        'project_url',
        'icon_url',
        'default_version',
        'versions',
        'catalog',
        'cluster',
        'project',
    }
    for template_id in remote_ids & local_ids:
        update_pulled_fields(local_map[template_id], remote_map[template_id], pulled)

    models.Template.objects.bulk_create(
        remote_map[template_id] for template_id in remote_ids - local_ids
    )
    local_templates.filter(backend_id__in=local_ids - remote_ids).delete()
def test_model_is_not_saved_if_pulled_fields_are_the_same(self):
    """No save() call occurs when source and target are identical."""
    instance = InstanceMock()
    update_pulled_fields(instance, instance, ('name', 'runtime_state'))
    self.assertEqual(instance.save.call_count, 0)
def test_model_is_not_saved_if_changed_fields_are_ignored(self):
    """Differences in fields outside the pulled set do not trigger save()."""
    local = InstanceMock()
    remote = InstanceMock(runtime_state='ERRED')
    update_pulled_fields(local, remote, ('name',))
    self.assertEqual(local.save.call_count, 0)
def test_error_message_saved_if_it_changed(self):
    """A changed error_message triggers save() even when it is not listed
    in the pulled fields."""
    local = InstanceMock()
    remote = InstanceMock(error_message='Server does not respond.')
    update_pulled_fields(local, remote, ('name',))
    self.assertEqual(local.save.call_count, 1)