def test_get_or_create_not_exist(self, mock_uuid):
    """A fresh uuid is generated when no worker uuid is stored yet."""
    db_session = settings.Session()
    # Blank out any stored worker uuid so the "not exist" branch is taken.
    db_session.query(KubeWorkerIdentifier).update(
        {KubeWorkerIdentifier.worker_uuid: ''}
    )
    # The mocked uuid factory controls what the new identifier will be.
    mock_uuid.return_value = 'abcde'
    result = KubeWorkerIdentifier.get_or_create_current_kube_worker_uuid(
        db_session)
    self.assertEqual(result, 'abcde')
def test_get_or_create_not_exist(self, mock_uuid):
    """When the stored worker uuid is empty, a new one is created from uuid."""
    session = settings.Session()
    # Force the empty-uuid state so get_or_create must mint a new value.
    blank = {KubeWorkerIdentifier.worker_uuid: ''}
    session.query(KubeWorkerIdentifier).update(blank)
    mock_uuid.return_value = 'abcde'
    created = KubeWorkerIdentifier.get_or_create_current_kube_worker_uuid(session)
    self.assertEqual(created, 'abcde')
def start(self):
    """Start the Kubernetes executor.

    Resolves this executor's worker uuid, resets the stored Kubernetes
    resource version, creates the task/result queues, and builds the
    scheduler before clearing stale queued tasks.
    """
    self.log.info('Start Kubernetes executor')
    # Identifier for this executor instance; presumably persisted via the
    # metadata DB (see the get_or_create tests) — TODO confirm with the
    # KubeWorkerIdentifier model.
    self.worker_uuid = KubeWorkerIdentifier.get_or_create_current_kube_worker_uuid()
    self.log.debug('Start with worker_uuid: %s', self.worker_uuid)
    # always need to reset resource version since we don't know
    # when we last started, note for behavior below
    # https://github.com/kubernetes-client/python/blob/master/kubernetes/docs
    # /CoreV1Api.md#list_namespaced_pod
    KubeResourceVersion.reset_resource_version()
    # Manager-backed queues: usable across the processes this executor spawns.
    self.task_queue = self._manager.Queue()
    self.result_queue = self._manager.Queue()
    self.kube_client = get_kube_client()
    self.kube_scheduler = AirflowKubernetesScheduler(
        self.kube_config, self.task_queue, self.result_queue,
        self.kube_client, self.worker_uuid)
    self._inject_secrets()
    # Re-handle tasks that were marked queued but never launched as pods.
    self.clear_not_launched_queued_tasks()
def start(self):
    """Start the Kubernetes executor.

    Resolves the worker uuid, resets the stored Kubernetes resource
    version, creates the task/result queues, builds the scheduler, and
    clears queued-but-unlaunched tasks.
    """
    self.log.info('Start Kubernetes executor')
    # Identifier for this executor instance; presumably stored via the
    # metadata DB — TODO confirm against KubeWorkerIdentifier.
    self.worker_uuid = KubeWorkerIdentifier.get_or_create_current_kube_worker_uuid()
    self.log.debug('Start with worker_uuid: %s', self.worker_uuid)
    # always need to reset resource version since we don't know
    # when we last started, note for behavior below
    # https://github.com/kubernetes-client/python/blob/master/kubernetes/docs
    # /CoreV1Api.md#list_namespaced_pod
    KubeResourceVersion.reset_resource_version()
    # NOTE(review): `Queue` is imported elsewhere in this file — cannot tell
    # from here whether it is queue.Queue or multiprocessing.Queue; verify if
    # the scheduler consumes these from another process.
    self.task_queue = Queue()
    self.result_queue = Queue()
    self.kube_client = get_kube_client()
    self.kube_scheduler = AirflowKubernetesScheduler(
        self.kube_config, self.task_queue, self.result_queue,
        self.kube_client, self.worker_uuid
    )
    self._inject_secrets()
    # Re-handle tasks that were marked queued but never launched as pods.
    self.clear_not_launched_queued_tasks()
def start(self):
    """Start the dbnd-flavored Kubernetes executor.

    Starts the multiprocessing manager, picks a worker uuid (preferring the
    active databand run's uid), builds the queues and the dbnd scheduler,
    optionally raises log verbosity, then clears stale queued tasks and
    flushes the result queue.
    """
    logger.info("Starting Kubernetes executor..")
    # Start the multiprocessing manager with a custom initializer.
    self._manager.start(mgr_init)
    # Prefer the databand run uid as this worker's identity when a run is
    # active; fall back to the Airflow-style persisted worker uuid.
    dbnd_run = try_get_databand_run()
    if dbnd_run:
        self.worker_uuid = str(dbnd_run.run_uid)
    else:
        self.worker_uuid = (
            KubeWorkerIdentifier.get_or_create_current_kube_worker_uuid()
        )
    self.log.debug("Start with worker_uuid: %s", self.worker_uuid)
    # always need to reset resource version since we don't know
    # when we last started, note for behavior below
    # https://github.com/kubernetes-client/python/blob/master/kubernetes/docs
    # /CoreV1Api.md#list_namespaced_pod
    # NOTE(review): the reset is deliberately disabled here, unlike the stock
    # Airflow executor — confirm this is intentional before re-enabling.
    # KubeResourceVersion.reset_resource_version()
    # Manager-backed queues, shared with the scheduler's subprocesses.
    self.task_queue = self._manager.Queue()
    self.result_queue = self._manager.Queue()
    # Reuse the kube client owned by the dbnd integration object.
    self.kube_client = self.kube_dbnd.kube_client
    self.kube_scheduler = DbndKubernetesScheduler(
        self.kube_config,
        self.task_queue,
        self.result_queue,
        self.kube_client,
        self.worker_uuid,
        kube_dbnd=self.kube_dbnd,
    )
    # Raise verbosity for both the executor and scheduler loggers when the
    # engine config asks for debug output.
    if self.kube_dbnd.engine_config.debug:
        self.log.setLevel(logging.DEBUG)
        self.kube_scheduler.log.setLevel(logging.DEBUG)
    self._inject_secrets()
    self.clear_not_launched_queued_tasks()
    self._flush_result_queue()
def test_get_or_create_exist(self):
    """An already-checkpointed worker uuid is returned unchanged."""
    db_session = settings.Session()
    # Seed a known uuid so get_or_create takes the "already exists" path.
    KubeWorkerIdentifier.checkpoint_kube_worker_uuid('fghij', db_session)
    result = KubeWorkerIdentifier.get_or_create_current_kube_worker_uuid(
        db_session)
    self.assertEqual(result, 'fghij')
def test_get_or_create_exist(self):
    """When a uuid was checkpointed, get_or_create returns that same uuid."""
    session = settings.Session()
    expected = 'fghij'
    KubeWorkerIdentifier.checkpoint_kube_worker_uuid(expected, session)
    fetched = KubeWorkerIdentifier.get_or_create_current_kube_worker_uuid(session)
    self.assertEqual(fetched, expected)