def restart_job(self, cluster, role, environment, jobname, jobspec=None, instances=None):
        """Restart an Aurora job through the client API.

        Args:
            cluster/role/environment/jobname: components of the Aurora job key.
            jobspec: optional job configuration text used to build the config object.
            instances: optional list of instance ids to restart; None/empty means all.

        Returns:
            Tuple (job_path, errors) where errors is None on success or a list
            of error strings on failure.
        """
        # Avoid the mutable-default-argument pitfall: default to None and
        # substitute a fresh list, which preserves the original behaviour
        # (pack_instance_list([]) for the default call).
        if instances is None:
            instances = []

        job_key = AuroraJobKey(cluster, role, environment, jobname)
        logger.info("request to restart => %s", job_key.to_path())

        instances = self.pack_instance_list(instances)
        try:
            config = self.make_job_config(job_key, jobspec)
        except Exception as e:
            return (job_key.to_path(), ["Failed to restart Aurora job",
                                        "Can not create job configuration object because", str(e)])

        # these are the default values from apache.aurora.client.commands.core.restart()
        updater_config = UpdaterConfig(
            1,          # options.batch_size
            60,         # options.restart_threshold
            30,         # options.watch_secs
            0,          # options.max_per_shard_failures
            0           # options.max_total_failures
        )

        api = make_client(job_key.cluster)
        # instances = all shards, health check = 3 sec
        resp = api.restart(job_key, instances, updater_config, 3, config=config)
        if resp.responseCode != ResponseCode.OK:
            logger.warning("aurora -- restart job failed")
            responseStr = self.response_string(resp)
            logger.warning(responseStr)
            return (job_key.to_path(), ["Error reported by aurora client:", responseStr])

        logger.info("aurora -- restart job successful")
        return (job_key.to_path(), None)
    def cancel_update_job(self, cluster, role, environment, jobname, jobspec=None):
        """Cancel an in-flight update of an Aurora job via the aurora CLI.

        Args:
            cluster/role/environment/jobname: components of the Aurora job key.
            jobspec: optional job configuration text, written to a temp file
                because the aurora client cannot read it from STDIN.

        Returns:
            Tuple (job_path, errors) where errors is None on success or a list
            of error strings on failure.
        """
        job_key = AuroraJobKey(cluster, role, environment, jobname)
        logger.info("request to cancel update of => %s", job_key.to_path())

        cmd_output = ""
        # Bind before the try block: otherwise, if make_jobspec_file() raises
        # anything other than CalledProcessError, the finally clause would hit
        # an UnboundLocalError instead of propagating the real exception.
        jobspec_file = None
        try:
            cmd_args = [job_key.to_path(), ]

            # aurora client requires jobspec be passed as file, no reading from STDIN
            jobspec_file = self.make_jobspec_file(jobspec)
            if jobspec_file is not None:
                cmd_args.append(jobspec_file.name)

            cmd_output = subprocess.check_output(
                [self.aurora_cmd, "cancel_update"] + cmd_args, stderr=subprocess.STDOUT)

        except subprocess.CalledProcessError as e:
            logger.warning("aurora client exit status: %d, details follow" % e.returncode)
            for s in e.output.splitlines():
                logger.warning("> %s" % s)
            logger.warning("----------------------------------------")

            return (job_key.to_path(), ["Error reported by aurora client:"] + e.output.splitlines())

        finally:
            if jobspec_file is not None:
                jobspec_file.close()

        if self.is_aurora_command_successful(cmd_output):
            logger.info("aurora -- cancel update successful")
            return (job_key.to_path(), None)
        else:
            # Message completed to match sibling methods ("... failed").
            logger.warning("aurora -- cancel update job failed")
            return (job_key.to_path(), ["Error reported by aurora client:"] + cmd_output.splitlines())
    def cancel_update_job(self, cluster, role, environment, jobname, jobspec=None):
        """Cancel an in-progress update of an Aurora job through the client API.

        Args:
            cluster/role/environment/jobname: components of the Aurora job key.
            jobspec: optional job configuration text used to build the config object.

        Returns:
            Tuple (job_path, errors) where errors is None on success or a list
            of error strings on failure.
        """
        job_key = AuroraJobKey(cluster, role, environment, jobname)
        logger.info("request to cancel update of => %s", job_key.to_path())

        try:
            job_config = self.make_job_config(job_key, jobspec)
        except Exception as exc:
            return (job_key.to_path(),
                    ["Failed to cancel update of Aurora job",
                     "Can not create job configuration object because", str(exc)])

        client = make_client(cluster)
        response = client.cancel_update(job_key, config=job_config)
        if response.responseCode == ResponseCode.OK:
            logger.info("aurora -- cancel of update job successful")
            return (job_key.to_path(), None)

        logger.warning("aurora -- cancel the update of job failed")
        details = self.response_string(response)
        logger.warning(details)
        return (job_key.to_path(), ["Error reported by aurora client:", details])
    def delete_job(self, cluster, role, environment, jobname, jobspec=None, instances=None):
        """Delete (kill) an Aurora job through the client API.

        Args:
            cluster/role/environment/jobname: components of the Aurora job key.
            jobspec: optional job configuration text used to build the config object.
            instances: optional list of instance ids to kill; None/empty means all.

        Returns:
            Tuple (job_path, deleted_paths, errors): on success errors is None
            and deleted_paths holds the job path; on failure deleted_paths is
            empty and errors lists the problems.
        """
        # Avoid the mutable-default-argument pitfall; None maps to the
        # original default of an empty list before packing.
        if instances is None:
            instances = []

        job_key = AuroraJobKey(cluster, role, environment, jobname)
        logger.info("request to delete => %s", job_key.to_path())

        instances = self.pack_instance_list(instances)
        try:
            config = self.make_job_config(job_key, jobspec)
        except Exception as e:
            # Bug fix: every other return in this method (and the CLI-based
            # delete_job) is a 3-tuple; the original returned a 2-tuple here.
            return (job_key.to_path(), [], ["Failed to delete Aurora job",
                                            "Can not create job configuration object because", str(e)])

        api = make_client(job_key.cluster)
        resp = api.kill_job(job_key, config=config, instances=instances)
        if resp.responseCode != ResponseCode.OK:
            logger.warning("aurora -- kill job failed")
            responseStr = self.response_string(resp)
            logger.warning(responseStr)
            return (job_key.to_path(), [], ["Error reported by aurora client:", responseStr])

        logger.info("aurora -- kill job successful")
        return (job_key.to_path(), [job_key.to_path()], None)
# Ejemplo n.º 5 (example #5 — scrape/separator artifact, not executable Python)
# 0
  def test_successful_schedule(self):
    """Happy path for `aurora cron schedule`: exactly one schedule_cron call."""
    mock_context = FakeAuroraCommandContext()
    key = AuroraJobKey("west", "bozo", "test", "hello")
    # contextlib.nested() is Python-2-only (removed in Python 3) and is
    # unnecessary for a single context manager; use the patch directly.
    with patch('apache.aurora.client.cli.cron.CronNoun.create_context',
               return_value=mock_context):

      api = mock_context.get_api('west')
      api.schedule_cron.return_value = self.create_simple_success_response()
      with temporary_file() as fp:
        fp.write(self.get_valid_cron_config())
        fp.flush()
        cmd = AuroraCommandLine()
        cmd.execute(['cron', 'schedule', key.to_path(), fp.name])

      # Now check that the right API calls got made.
      # Check that schedule_cron was called exactly once, with an AuroraConfig parameter.
      assert api.schedule_cron.call_count == 1
      assert isinstance(api.schedule_cron.call_args[0][0], AuroraConfig)

      # The last text printed out to the user should contain a url to the job
      assert mock_context.get_job_page(api, key) in mock_context.out[-1]
    def delete_job(self, cluster, role, environment, jobname, jobspec=None, instances=None):
        """Delete an Aurora job via the aurora CLI (`kill` or `killall`).

        Args:
            cluster/role/environment/jobname: components of the Aurora job key.
            jobspec: optional job configuration text, written to a temp file
                because the aurora client cannot read it from STDIN.
            instances: optional list of instance ids; None/empty selects all
                shards (after pack_instance_list, None picks `killall`).

        Returns:
            Tuple (job_path, deleted_paths, errors): on success errors is None
            and deleted_paths holds the job path; on failure deleted_paths is
            empty and errors lists the problems.
        """
        # Avoid the mutable-default-argument pitfall; None maps to the
        # original default of an empty list before packing.
        if instances is None:
            instances = []

        job_key = AuroraJobKey(cluster, role, environment, jobname)
        logger.info("request to delete => %s", job_key.to_path())

        instances = self.pack_instance_list(instances)
        cmd_output = ""
        # Bind before the try block: otherwise, if make_jobspec_file() raises
        # anything other than CalledProcessError, the finally clause would hit
        # an UnboundLocalError instead of propagating the real exception.
        jobspec_file = None
        try:
            cmd_args = [job_key.to_path(), ]

            # aurora client requires jobspec be passed as file, no reading from STDIN
            jobspec_file = self.make_jobspec_file(jobspec)
            if jobspec_file is not None:
                cmd_args.append(jobspec_file.name)

            if instances is not None:
                cmd = "kill"
                cmd_args = ["--shards=" + instances] + cmd_args
            else:
                cmd = "killall"

            cmd_output = subprocess.check_output(
                [self.aurora_cmd, cmd] + cmd_args, stderr=subprocess.STDOUT)

        except subprocess.CalledProcessError as e:
            logger.warning("aurora client exit status: %d, details follow" % e.returncode)
            for s in e.output.splitlines():
                logger.warning("> %s" % s)
            logger.warning("----------------------------------------")

            return (job_key.to_path(), [], ["Error reported by aurora client"] + e.output.splitlines())

        finally:
            if jobspec_file is not None:
                jobspec_file.close()

        if self.is_aurora_command_successful(cmd_output):
            logger.info("aurora -- delete job successful")
            return (job_key.to_path(), [job_key.to_path()], None)
        else:
            logger.warning("aurora -- delete job failed")
            return (job_key.to_path(), [], ["Error reported by aurora client"] + cmd_output.splitlines())
    def update_job(self, cluster, role, environment, jobname, jobspec, instances=None):
        """Update an Aurora job via the aurora CLI (`update`).

        Args:
            cluster/role/environment/jobname: components of the Aurora job key.
            jobspec: job configuration text (required); written to a temp file
                because the aurora client cannot read it from STDIN.
            instances: optional list of instance ids to update; None/empty
                means all shards (after pack_instance_list).

        Returns:
            Tuple (job_path, errors) where errors is None on success or a list
            of error strings on failure.
        """
        # Avoid the mutable-default-argument pitfall; None maps to the
        # original default of an empty list before packing.
        if instances is None:
            instances = []

        job_key = AuroraJobKey(cluster, role, environment, jobname)
        logger.info("request to update = %s", job_key.to_path())

        instances = self.pack_instance_list(instances)
        cmd_output = ""
        # Bind before the try block: otherwise, if make_jobspec_file() raises
        # anything other than CalledProcessError, the finally clause would hit
        # an UnboundLocalError instead of propagating the real exception.
        jobspec_file = None
        try:
            # aurora client requires jobspec be passed as file, no reading from STDIN
            jobspec_file = self.make_jobspec_file(jobspec)
            if jobspec_file is None:
                logger.warning("can not proceed with request, job configuration is missing")
                return (job_key.to_path(), ["Failed to update Aurora job",
                                            "Can not create job configuration object because",
                                            "Job configuration is missing (not provided)!"])

            cmd_args = [job_key.to_path(), jobspec_file.name]
            if instances is not None:
                cmd_args = ["--shards=" + instances] + cmd_args

            cmd_output = subprocess.check_output(
                [self.aurora_cmd, "update"] + cmd_args, stderr=subprocess.STDOUT)

        except subprocess.CalledProcessError as e:
            logger.warning("aurora client exit status: %d, details follow" % e.returncode)
            for s in e.output.splitlines():
                logger.warning("> %s" % s)
            logger.warning("----------------------------------------")

            return (job_key.to_path(), ["Error reported by aurora client:"] + e.output.splitlines())

        finally:
            if jobspec_file is not None:
                jobspec_file.close()

        if self.is_aurora_command_successful(cmd_output):
            logger.info("aurora -- update job successful")
            return (job_key.to_path(), None)
        else:
            logger.warning("aurora -- update job failed")
            return (job_key.to_path(), ["Error reported by aurora client:"] + cmd_output.splitlines())