    def testCreateSubmission(self):
        """Test create_submission correctly loads dummy information to submission_queue.json"""
        service = 'service'
        submissions = dict()
        submissions[service] = dict()
        presubmit_ids = ['foo', 'bar']

        for pre_id in presubmit_ids:
            submissions[service][pre_id] = pre_id
        pre_len = len(submissions[service])

        orch = orchestrator.Orchestrator()

        util.save_json(self.queue_loc, submissions)
        new_id = orch.create_submission(service, 'data', 'wf_type', 'wf_name',
                                        'sample')
        submissions = util.get_json(self.queue_loc)

        for pre_id in presubmit_ids:
            self.assertEqual(submissions[service][pre_id], pre_id)

        # Check that new entry was added appropriately.
        self.assertEqual(pre_len + 1, len(submissions[service]))
        self.assertNotIn(new_id, presubmit_ids)
        self.assertIn(new_id, submissions[service])

        # Check that entry has correct data.
        self.assertEqual(submissions[service][new_id]['status'], 'RECEIVED')
        self.assertEqual(submissions[service][new_id]['data'], 'data')
        self.assertEqual(submissions[service][new_id]['wf_id'], 'wf_name')
        self.assertEqual(submissions[service][new_id]['type'], 'wf_type')
        self.assertEqual(submissions[service][new_id]['sample'], 'sample')
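The test above round-trips the submission queue through util.save_json and util.get_json. A minimal sketch of what those helpers might look like, assuming they are thin wrappers around the standard json module (not necessarily the project's actual implementation):

import json

def get_json(json_path):
    # Read a JSON file and return its contents as a dict.
    with open(json_path, 'r') as f:
        return json.load(f)

def save_json(json_path, data):
    # Overwrite the JSON file with the updated dict.
    with open(json_path, 'w') as f:
        json.dump(data, f, indent=4, default=str)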
def update_submission_run(eval_id, submission_id, run_data):
    """
    Update information for a workflow run.
    """
    evals = get_json(EVALS_PATH)
    evals[eval_id][submission_id]['run'] = run_data
    save_json(EVALS_PATH, evals)
def update_submission_status(eval_id, submission_id, status):
    """
    Update the status of a submission.
    """
    evals = get_json(EVALS_PATH)
    evals[eval_id][submission_id]['status'] = status
    save_json(EVALS_PATH, evals)
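A brief usage sketch for the two helpers above, assuming the evaluation queue entry and submission already exist in the evals file; the IDs and run payload are hypothetical:

# Hypothetical IDs; in practice these come from create_submission().
eval_id = 'example_eval_queue'
submission_id = '0101120000000000'

# Attach run metadata to the submission, then update its status.
update_submission_run(eval_id, submission_id, {'run_id': 'run-001', 'state': 'QUEUED'})
update_submission_status(eval_id, submission_id, 'SUBMITTED')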
def set_json(self, section, service, var2add):
    """Add or update an entry in a section of the orchestrator config file."""
    try:
        orchestrator_config = get_json(self.config_path)
        orchestrator_config.setdefault(section, {})[service] = var2add
        save_json(self.config_path, orchestrator_config)
    except AttributeError:
        raise AttributeError('The config file needs to be set: ' +
                             self.config_path)
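A short usage sketch for set_json, assuming an instance (here called config_obj) whose config_path points at an existing JSON config file; the service name and endpoint details are placeholders:

# Register a hypothetical WES endpoint under the 'workflowservices' section.
config_obj.set_json(
    section='workflowservices',
    service='local_wes',
    var2add={'host': '0.0.0.0:8080', 'proto': 'http'}
)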
def create_submission(wes_id, submission_data, wf_type, wf_name, sample):
    """Add a new job submission to the queue for a WES endpoint."""
    submissions = get_json(queue_path())
    submission_id = dt.datetime.now().strftime('%d%m%d%H%M%S%f')

    submissions.setdefault(wes_id, {})[submission_id] = {
        'status': 'RECEIVED',
        'data': submission_data,
        'wf_id': wf_name,
        'type': wf_type,
        'sample': sample
    }
    save_json(queue_path(), submissions)
    logger.info(" Queueing Job for '{}' endpoint:"
                "\n - submission ID: {}".format(wes_id, submission_id))
    return submission_id
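A usage sketch for the queue-based create_submission above; all argument values are placeholders:

# Queue a hypothetical CWL workflow run against a registered WES endpoint.
submission_id = create_submission(
    wes_id='local_wes',
    submission_data='path/to/params.json',
    wf_type='CWL',
    wf_name='md5sum_workflow',
    sample='example_sample'
)
# The returned ID is the timestamp-derived key stored in the queue file.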
    def testConfigs(self):
        """
        Make sure that the various config fetching functions read the right data from the config file.

        This test checks that the following functions return as expected:
            config.wf_config()
            config.trs_config()
            config.wes_config()
        """
        c = config.Config(self.config_loc)
        config_entries = {'workflows': c.wf_config,
                          'toolregistries': c.trs_config,
                          'workflowservices': c.wes_config}

        for entry, get_func in config_entries.items():
            config_file = util.get_json(c.config_path)
            config_file[entry] = entry  # X_config() returns whatever is stored here.
            util.save_json(c.config_path, config_file)
            self.assertEqual(get_func(), entry)
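The test above implies that Config exposes wf_config(), trs_config(), and wes_config(), each returning one top-level section of the JSON config file. A minimal sketch consistent with that behavior, assuming the same get_json helper used elsewhere (not necessarily the project's actual implementation):

class Config(object):
    def __init__(self, config_path):
        self.config_path = config_path

    def wf_config(self):
        # Return the 'workflows' section of the config file.
        return get_json(self.config_path)['workflows']

    def trs_config(self):
        # Return the 'toolregistries' section of the config file.
        return get_json(self.config_path)['toolregistries']

    def wes_config(self):
        # Return the 'workflowservices' section of the config file.
        return get_json(self.config_path)['workflowservices']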
def create_submission(eval_id, submission_data, wes_id, type=None):
    """
    Submit a new job request to an evaluation queue.
    """
    evals = get_json(EVALS_PATH)
    submission_id = dt.datetime.now().strftime('%d%m%d%H%M%S%f')

    evals.setdefault(eval_id, {})[submission_id] = {
        'status': 'RECEIVED',
        'data': submission_data,
        'wes_id': wes_id,
        'type': type
    }
    save_json(EVALS_PATH, evals)
    logger.info("Created new job submission:\n - submission ID: {}".format(
        submission_id))
    logger.debug("\n - evaluation queue: {} ({})"
                 "\n - data:\n{}".format(
                     eval_id, config.eval_config[eval_id]['workflow_id'],
                     json.dumps(submission_data, indent=2)))
    return submission_id
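A usage sketch for the evaluation-queue variant above, assuming the evaluation queue ID already exists in config.eval_config (the debug log looks up its workflow_id); all values are placeholders:

# Submit hypothetical run parameters to an existing evaluation queue.
submission_id = create_submission(
    eval_id='example_eval_queue',
    submission_data={'params': 'path/to/params.json'},
    wes_id='local_wes',
    type='params'
)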
def update_submission_run(wes_id, submission_id, param, status):
    """Update a single field of a submission's workflow run record."""
    submissions = get_json(queue_path())
    submissions[wes_id][submission_id]['run'][param] = status
    save_json(queue_path(), submissions)
def update_submission(self, wes_id, submission_id, param, status):
    """Update a single field of a queued submission."""
    submissions = get_json(self.queue_path)
    submissions[wes_id][submission_id][param] = status
    save_json(self.queue_path, submissions)
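A one-line usage sketch for the method above, assuming an instance (here called orch, as in the first test) with a valid queue_path; the IDs are placeholders:

# Mark a previously queued submission as submitted on its WES endpoint.
orch.update_submission('local_wes', submission_id, 'status', 'SUBMITTED')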