def mocked_k8s_CoreV1Api(mocker):
    """Patch kubernetes.client.CoreV1Api and return a mock instance with
    canned responses for the API calls exercised by the tests."""
    core_api_cls = mocker.patch('kubernetes.client.CoreV1Api')
    mocker.patch('kubernetes.client.ApiClient')
    core_api = core_api_cls.return_value

    # Three generic pods / services for the "all namespaces" listings.
    all_pods = MagicMock()
    all_pods.items = [MagicMock(spec=V1Pod) for _ in range(3)]
    core_api.list_pod_for_all_namespaces.return_value = all_pods

    all_services = MagicMock()
    all_services.items = [MagicMock(spec=V1Service) for _ in range(3)]
    core_api.list_service_for_all_namespaces.return_value = all_services

    # An Active namespace named after the module-level test_namespace.
    namespace = V1Namespace()
    namespace.metadata = V1ObjectMeta(name=test_namespace)
    namespace.status = V1NamespaceStatus(phase=NamespaceStatus.ACTIVE.value)
    core_api.read_namespace.return_value = namespace

    core_api.delete_namespace.return_value = V1Status(
        status="{'phase': 'Terminating'}")

    core_api.read_namespaced_config_map.return_value = V1ConfigMap(
        data=test_config_map_data())

    # A single default-token secret carrying the test token.
    token_secret = V1Secret(metadata=V1ObjectMeta(name="default-token"),
                            data={"token": TEST_TOKEN})
    core_api.list_namespaced_secret.return_value = V1SecretList(
        items=[token_secret])

    # One running pod for the namespaced pod listing.
    running_pod = V1Pod(status=V1PodStatus(phase=K8S_RUNNING_POD_STATUS))
    core_api.list_namespaced_pod.return_value = V1PodList(items=[running_pod])

    # A single "Insufficient cpu" event attached to a pod.
    cpu_event = V1Event(message="Insufficient cpu",
                        involved_object=V1ObjectReference(name="pod_name"),
                        metadata=V1ObjectMeta(name="default-name"))
    core_api.list_namespaced_event.return_value = V1EventList(items=[cpu_event])

    return core_api
def test_extract_pipelineparams_from_dict(self):
    """Verify extract_pipelineparams_from_any finds params in a V1ConfigMap."""
    op_param = PipelineParam(name='param1', op_name='op1')
    plain_param = PipelineParam(name='param2')
    # Both the key and the value of the config map data embed a param.
    configmap = V1ConfigMap(data={str(op_param): str(plain_param)})
    extracted = extract_pipelineparams_from_any(configmap)
    self.assertListEqual(sorted([op_param, plain_param]), sorted(extracted))
def mocked_k8s_CoreV1Api(mocker):
    """Patch kubernetes.client.CoreV1Api and return a mock whose namespaced
    config-map read yields the NAUTA platform config with known values."""
    core_api_cls = mocker.patch('kubernetes.client.CoreV1Api')
    mocker.patch('kubernetes.client.ApiClient')
    core_api = core_api_cls.return_value

    nauta_config = V1ConfigMap(data={
        NAUTAConfigMap.PLATFORM_VERSION: PLATFORM_VERSION,
        NAUTAConfigMap.IMAGE_TILLER_FIELD: "",
        NAUTAConfigMap.EXTERNAL_IP_FIELD: "",
        NAUTAConfigMap.IMAGE_TENSORBOARD_SERVICE_FIELD: "",
        NAUTAConfigMap.REGISTRY_FIELD: "",
    })
    core_api.read_namespaced_config_map.return_value = nauta_config
    return core_api
def migrate(self, backupList: BackupList, manifestList: ManifestList,
            **kwargs: Any) -> ManifestList:
    """Emit a ConfigMap manifest that carries DC/OS cluster identification.

    Reads the plugin's 'default' cluster backup and packages its cluster id,
    name, backup date (as annotations) and the base64-encoded Mesos master
    state summary (as data) into a single manifest. Returns an empty
    ManifestList when no cluster backup is available.
    """
    result = ManifestList()
    cluster_backup = backupList.backup(pluginName=self.plugin_name,
                                       backupName='default')
    if not cluster_backup:
        logging.critical(
            "Cluster backup not found. Cannot provide DC/OS annotations")
        return result

    meta = V1ObjectMeta(
        name="dcos-{}".format(cluster_backup.data['CLUSTER_ID']))
    meta.annotations = {
        utils.namespace_path("cluster-id"): cluster_backup.data['CLUSTER_ID'],
        utils.namespace_path("cluster-name"): cluster_backup.data['CLUSTER'],
        utils.namespace_path("backup-date"): cluster_backup.data['BACKUP_DATE'],
    }

    config_map = V1ConfigMap(metadata=meta)
    # models do not set defaults -.-
    config_map.kind = "ConfigMap"
    config_map.api_version = "v1"
    # NOTE(review): b64encode returns bytes, while ConfigMap data values are
    # normally str — presumably serialization downstream handles this; confirm.
    config_map.data = {
        'MESOS_MASTER_STATE_SUMMARY_BASE64': b64encode(
            json.dumps(
                cluster_backup.data['MESOS_MASTER_STATE-SUMMARY']).encode(
                    'ascii'))
    }

    manifest = Manifest(pluginName=self.plugin_name,
                        manifestName="dcos-cluster")
    manifest.append(config_map)
    result.append(manifest)
    return result
def create_manifest_list_cluster() -> ManifestList:
    """Build a fixture ManifestList holding one cluster ConfigMap manifest
    and one Secret manifest, both annotated with fake migration metadata."""
    cluster_id = "test-1234-test-test"
    ml = ManifestList()

    annotations = {
        "migration.dcos.d2iq.com/cluster-id": cluster_id,
        "migration.dcos.d2iq.com/cluster-name": "testcluster",
        "migration.dcos.d2iq.com/backup-date": "2021-01-25",
    }
    meta = V1ObjectMeta(name="dcos-{}".format(cluster_id))
    meta.annotations = annotations

    cfgmap = V1ConfigMap(metadata=meta)
    # models do not set defaults -.-
    cfgmap.kind = "ConfigMap"
    cfgmap.api_version = "v1"
    cfgmap.data = {
        "MESOS_MASTER_STATE_SUMMARY_BASE64": b64encode(
            json.dumps({"foo": "bar"}).encode("ascii"))
    }
    cluster_manifest = Manifest(pluginName="cluster",
                                manifestName="dcos-cluster")
    cluster_manifest.append(cfgmap)
    ml.append(cluster_manifest)

    sec = V1Secret(metadata=V1ObjectMeta(name="hello-world.secret",
                                         annotations=annotations))
    sec.api_version = 'v1'
    sec.kind = 'Secret'
    sec.data = {"hello-world.secret": "Zm9vYmFy"}
    secret_manifest = Manifest(pluginName="secret",
                               manifestName="hello-world.secret")
    secret_manifest.append(sec)
    ml.append(secret_manifest)

    return ml
def backup_details(url, api_token, interval, backups, config_map):
    """Snapshot the hub's admin/whitelist user sets to disk and a ConfigMap.

    Fetches ``url + '/users'`` with a token auth header, splits users into
    admins and non-admins, and — whenever either set changed since the last
    run (module-level caches) — writes a timestamped backup file under
    *backups* plus a ``*-latest.txt`` symlink. If *config_map* is truthy,
    also upserts both lists into a Kubernetes ConfigMap of that name.

    NOTE(review): this is a generator (``yield client.fetch(req)``) —
    presumably decorated as a coroutine where it is defined; confirm.
    NOTE(review): *interval* is unused in this body — likely consumed by a
    scheduling wrapper; verify against the caller.
    """
    # Fetch the list of users.
    global cached_admin_users
    global cached_user_whitelist
    auth_header = {'Authorization': 'token %s' % api_token}
    req = HTTPRequest(url=url + '/users', headers=auth_header)
    client = AsyncHTTPClient()
    resp = yield client.fetch(req)
    users = json.loads(resp.body.decode('utf8', 'replace'))

    # Partition users by their 'admin' flag.
    admin_users = set()
    user_whitelist = set()
    for user in users:
        if user['admin']:
            admin_users.add(user['name'])
        else:
            user_whitelist.add(user['name'])

    timestamp = time.strftime('%Y-%m-%d-%H-%M-%S', time.gmtime())
    os.makedirs(backups, exist_ok=True)

    # Write a new admin-users backup only when the set actually changed.
    if admin_users != cached_admin_users:
        name = 'admin_users-%s.txt' % timestamp
        path = os.path.join(backups, name)
        print('creating backup: %s' % path)
        with open(path, 'w') as fp:
            fp.write('\n'.join(admin_users))
            fp.write('\n')
        cached_admin_users = admin_users
        # Best-effort: repoint the 'latest' symlink at the new backup.
        try:
            latest = os.path.join(backups, 'admin_users-latest.txt')
            if os.path.exists(latest):
                os.unlink(latest)
            os.symlink(name, latest)
        except OSError:
            print('ERROR: could not update: admin_users-latest.txt')
            pass

    # Same change-detection + backup + symlink dance for the whitelist.
    if user_whitelist != cached_user_whitelist:
        name = 'user_whitelist-%s.txt' % timestamp
        path = os.path.join(backups, name)
        print('creating backup: %s' % path)
        with open(path, 'w') as fp:
            fp.write('\n'.join(user_whitelist))
            fp.write('\n')
        cached_user_whitelist = user_whitelist
        try:
            latest = os.path.join(backups, 'user_whitelist-latest.txt')
            if os.path.exists(latest):
                os.unlink(latest)
            os.symlink(name, latest)
        except OSError:
            print('ERROR: could not update: user_whitelist-latest.txt')
            pass

    # Mirror both lists into a ConfigMap when one was requested.
    if config_map:
        config_map_object = V1ConfigMap()
        config_map_object.kind = "ConfigMap"
        config_map_object.api_version = "v1"
        config_map_object.metadata = V1ObjectMeta(
            name=config_map,
            labels={'app': service_name})
        config_map_object.data = {
            'admin_users.txt': '\n'.join(admin_users) + '\n',
            'user_whitelist.txt': '\n'.join(user_whitelist) + '\n'
        }
        # Try replace first; fall back to create when the map is missing (404).
        try:
            corev1api.replace_namespaced_config_map(
                config_map,
                namespace,
                config_map_object)
        except ApiException as e:
            if e.status == 404:
                try:
                    corev1api.create_namespaced_config_map(
                        namespace,
                        config_map_object)
                except Exception as e:
                    print('cannot update config map %s: %s' % (config_map, e))
            else:
                print('cannot update config map %s: %s' % (config_map, e))
        except Exception as e:
            print('cannot update config map %s: %s' % (config_map, e))
def make_configmap(configname, data):
    """Return a V1ConfigMap holding *data*, named *configname* in metadata."""
    meta = V1ObjectMeta(name=configname)
    return V1ConfigMap(data=data, metadata=meta)