Example #1
    def cancel(self, pr_id, pr_sha1):
        force_update = True

        # Stop pending jobs (iterate over a copy, since matching entries are
        # removed from the queue)
        for elem in list(self.q):
            job_in_queue = self.job_dict[elem]
            if (job_in_queue.pr_id() == pr_id and
                    job_in_queue.pr_sha1() == pr_sha1):
                log.debug("Got a stop from web {}/{}".format(pr_id, pr_sha1))
                self.q.remove(elem)
                db.update_job(job_in_queue.pr_id(), job_in_queue.pr_sha1(),
                              status.d[status.CANCEL], "N/A")
                force_update = False

        # Stop the running job
        if self.jt is not None:
            if (self.jt.job.pr_id() == pr_id and
                    self.jt.job.pr_sha1() == pr_sha1):
                log.debug("Got a stop from web {}/{}".format(pr_id, pr_sha1))
                self.jt.stop()
                force_update = False

        # If it wasn't in the queue nor running, then just update the status
        if force_update:
            db.update_job(pr_id, pr_sha1, status.d[status.CANCEL], "N/A")
            payload = db.get_payload_from_pr_id(pr_id, pr_sha1)
            github.update_state(payload, "failure", "Job cancelled!")
Example #2
    def add(self, payload):
        """Responsible of adding new jobs the the job queue."""
        if payload is None:
            log.error("Missing payload when trying to add job")
            return

        pr_id = github.pr_id(payload)
        pr_number = github.pr_number(payload)
        pr_sha1 = github.pr_sha1(payload)
        pr_full_name = github.pr_full_name(payload)

        with self.lock:
            log.info("Got GitHub initiated add {}/{} --> PR#{}".format(
                     pr_id, pr_sha1, pr_number))
            # Check whether the jobs in the current queue touch the same PR
            # number as this incoming request does. Iterate over a copy, since
            # matching entries may be removed from the queue.
            for elem in list(self.q):
                job_in_queue = self.job_dict[elem]
                # Remove existing jobs as long as they are not user initiated
                # jobs.
                if (job_in_queue.pr_number() == pr_number and
                        job_in_queue.pr_full_name() == pr_full_name):
                    if not job_in_queue.user_initiated:
                        log.debug("Non user initiated job found in queue, "
                                  "removing {}".format(elem))
                        self.q.remove(elem)
                        db.update_job(job_in_queue.pr_id(),
                                      job_in_queue.pr_sha1(),
                                      status.d[status.CANCEL], "N/A")
                        github.update_state(job_in_queue.payload,
                                            "failure", "Job cancelled!")

            # Check whether the current job should also be stopped (i.e., same
            # PR, but _not_ user initiated).
            if (self.jt is not None and
                    self.jt.job.pr_number() == pr_number and
                    self.jt.job.pr_full_name() == pr_full_name and not
                    self.jt.job.user_initiated):
                log.debug("Non user initiated job found running, "
                          "stopping {}".format(self.jt.job))
                self.jt.stop()

            pr_id_sha1 = "{}-{}".format(pr_id, pr_sha1)
            self.q.append(pr_id_sha1)
            new_job = job.Job(payload, False)
            self.job_dict[pr_id_sha1] = new_job
            db.add_build_record(new_job.payload)
            db.update_job(pr_id, pr_sha1, status.d[status.PENDING], "N/A")
            github.update_state(payload, "pending", "Job added to queue")
Example #3
def submit_impl(details, job_parameters):
    print("Submitting new job...")

    # Create and enter the working directory
    wk_dir = create_working_directory(details)

    # Get the ini ready for job submission
    args = prepare_ini_data(job_parameters, wk_dir)

    # Write the updated ini file
    args, inputs = write_ini_file(args)

    # Generate the submission scripts
    submission_script = write_submission_scripts(inputs)

    # If the job is open, we need to run the data generation step on the head nodes (ozstar specific) because compute
    # nodes do not have internet access. This is only applicable for slurm on ozstar.
    if ((not args.gaussian_noise or args.n_simulation == 0)
            and settings.scheduler == EScheduler.SLURM):
        # Process the slurm scripts to remove the data generation step
        data_gen_command = refactor_slurm_data_generation_step(
            submission_script)

        # Run the data generation step now
        run_data_generation(data_gen_command, wk_dir)

    # Actually submit the job
    sched = get_scheduler()
    submit_bash_id = sched.submit(submission_script, wk_dir)

    # If the job was not submitted, simply return. When the job controller does a status update, we'll detect that
    # the job doesn't exist and report an error
    if not submit_bash_id:
        return None

    # Create a new job to store details
    job = {
        'job_id': get_unique_job_id(),
        'submit_id': submit_bash_id,
        'working_directory': wk_dir,
        'submit_directory': inputs.submit_directory
    }

    # Save the job in the database
    update_job(job)

    # return the job id
    return job['job_id']
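
A short sketch of how a caller might consume submit_impl()'s return value, in line with the comment above: a falsy scheduler id makes submit_impl() return None, and the next status poll is expected to surface the error. The submit() wrapper name is hypothetical.

def submit(details, job_parameters):
    # Returns the internal job id on success, or None if the scheduler
    # rejected the submission (the status update path reports the error).
    job_id = submit_impl(details, job_parameters)
    if job_id is None:
        print("Job submission failed; a later status update will report it")
    return job_id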
Example #4
def get_submit_status(job):
    """
    Gets the status of the job submission step for slurm. If the job submission step is successful, it removes the
    submit_id from the job record and updates it. If the job submission fails for some reason, the job is deleted.

    :param job: The internal job db record for the job to get the submit status of
    :return: A single job status object, True/False if the job errored
    """
    sched = get_scheduler()

    if 'submit_id' in job:
        _status, info = sched.status(job['submit_id'], job)

        # If the job is in a state less than or equal to running, return its state
        if _status <= JobStatus.RUNNING:
            result = {
                'what': 'submit',
                'status': _status,
                'info': info
            }

            return result, False

        # If the job is not completed, then some other error has occurred
        if _status != JobStatus.COMPLETED:
            # Delete the job from the database
            db.delete_job(job)

            # Report the error
            result = {
                'what': 'submit',
                'status': _status,
                'info': info
            }

            return result, True

        # The batch submission was successful, remove the submit id from the job
        del job['submit_id']
        db.update_job(job)

    result = {
        'what': 'submit',
        'status': JobStatus.COMPLETED,
        'info': "Completed"
    }

    return result, False
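
A hedged sketch of how a status-polling loop might use get_submit_status(); the loop itself and the 'job_id' field are assumptions for illustration.

def poll_submit_status(jobs):
    # Collect one status object per job and log submission failures.
    results = []
    for job in jobs:
        result, errored = get_submit_status(job)
        results.append(result)
        if errored:
            # get_submit_status() has already deleted the failed job record.
            print("Job {} failed at the submit step: {}".format(
                job.get('job_id'), result['info']))
    return results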
Example #5
    def user_add(self, pr_id, pr_sha1):
        if pr_id is None or pr_sha1 is None:
            log.error("Missing pr_id or pr_sha1 when trying to submit user "
                      "job")
            return

        with self.lock:
            log.info("Got user initiated add {}/{}".format(pr_id, pr_sha1))
            payload = db.get_payload_from_pr_id(pr_id, pr_sha1)
            if payload is None:
                log.error("Didn't find payload for ID:{}".format(pr_id))
                return

            pr_id_sha1 = "{}-{}".format(pr_id, pr_sha1)
            self.q.append(pr_id_sha1)
            self.job_dict[pr_id_sha1] = job.Job(payload, True)
            db.update_job(pr_id, pr_sha1, status.d[status.PENDING], "N/A")
            github.update_state(payload, "pending", "Job added to queue")
Example #6
    def run(self):
        """This is the main function for running a complete clone, build, flash
        and test job."""
        global export_history
        current_status = status.d[status.RUNNING]

        log.debug("Job/{} : {}".format(current_status, self.job))
        time_start = time.time()

        pr_id = self.job.pr_id()
        pr_sha1 = self.job.pr_sha1()

        db.update_job(pr_id, pr_sha1, current_status, "N/A")
        github.update_state(self.job.payload, "pending", "Job running!")

        current_status = status.d[self.start_job()]

        export_history.clear()

        running_time = utils.get_running_time(time_start)
        log.debug("Job/{} : {} --> {}".format(current_status, self.job,
                  running_time))
        db.update_job(pr_id, pr_sha1, current_status, running_time)
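
A hypothetical sketch of the utils.get_running_time() helper used above, returning a human-readable duration since time_start; the real helper's formatting may differ.

import time

def get_running_time(time_start):
    # Elapsed wall-clock time since time_start, formatted as hours/minutes/seconds.
    seconds = int(time.time() - time_start)
    minutes, seconds = divmod(seconds, 60)
    hours, minutes = divmod(minutes, 60)
    return "{}h {}m {}s".format(hours, minutes, seconds)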
Example #7
def spawn_worker(data):
    # Update job status to running
    db.update_job(data['job_id'], 'running')

    # Get list of all target IPs
    start = ip_address(data['ip_range']['start'])
    end = ip_address(data['ip_range']['end'])
    targets = []
    while start <= end:
        targets.append(str(start))
        start += 1
    exclude = []
    if 'exclude_ips' in data:
        exc_start = ip_address(data['exclude_ips']['start'])
        exc_end = ip_address(data['exclude_ips']['end'])
        while exc_start <= exc_end:
            exclude.append(str(exc_start))
            exc_start += 1
    targets = [t for t in targets if t not in exclude]

    # Create a job directory
    os.mkdir('/var/tmp/jobs/{}'.format(data['job_id']))
    data['job_dir'] = '/var/tmp/jobs/{}'.format(data['job_id'])

    # Conduct ICMP scan
    if 'icmp' in data['protocol']:
        icmp_scan(data, targets)

    # Conduct TCP scan
    if 'tcp' in data['protocol']:
        tcp_scan(data, targets)

    # Update job status to finished
    db.update_job(data['job_id'], 'finished', int(time.time()))

    return
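
An alternative sketch of the inclusive IP-range expansion above, using ipaddress.summarize_address_range instead of manual incrementing; it is intended to produce the same list of target strings. For example, expand_ip_range('192.168.0.1', '192.168.0.5') yields five addresses.

from ipaddress import ip_address, summarize_address_range

def expand_ip_range(start_ip, end_ip):
    # Every address from start_ip to end_ip inclusive, as strings.
    start = ip_address(start_ip)
    end = ip_address(end_ip)
    return [str(addr)
            for network in summarize_address_range(start, end)
            for addr in network]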