def schedule_horizon():
    """Get the length of the server's scheduling horizon"""

    cursor = dbcursor_query(
        "SELECT schedule_horizon FROM configurables", onerow=True)

    return ok_json(pscheduler.timedelta_as_iso8601(cursor.fetchone()[0]))

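# Example response body (illustrative; the actual horizon is whatever
# the configurables table holds), as serialized by ok_json():
#
#   "P1D"
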
def get_status():

    response = {}
    response["time"] = pscheduler.datetime_as_iso8601(pscheduler.time_now())

    # Get the heartbeat status
    try:
        services = dbcursor_query(
            "SELECT * FROM heartbeat_json", onerow=True).fetchone()[0]
    except Exception:
        services = {}

    # This must be in place before the database status is added below.
    response["services"] = services

    # Add the database status
    try:
        # Query the database and calculate the server's run time
        cursor = dbcursor_query(
            "SELECT extract(epoch from"
            " current_timestamp - pg_postmaster_start_time())",
            onerow=True)
        time_val = pscheduler.seconds_as_timedelta(cursor.fetchone()[0])
        response["services"]["database"] = {
            "uptime": str(pscheduler.timedelta_as_iso8601(time_val))
        }
    except Exception:
        pass

    runs = {}

    # Query the database for last-finished information
    try:
        cursor = dbcursor_query(
            "SELECT times_actual FROM run WHERE state=run_state_finished()")
        times = cursor.fetchall()
        formatted = []
        for val in times:
            # times_actual is a tstzrange; .upper is the range's upper bound.
            formatted.append(val[0].upper)
        runs["last-finished"] = str(
            pscheduler.datetime_as_iso8601(max(formatted)))
    except Exception:
        # Handles an empty result or a faulty query
        runs["last-finished"] = None

    # Query the database for last-scheduled information
    try:
        cursor = dbcursor_query("SELECT added FROM run")
        times = cursor.fetchall()
        formatted = []
        for val in times:
            formatted.append(val[0])
        runs["last-scheduled"] = str(
            pscheduler.datetime_as_iso8601(max(formatted)))
    except Exception:
        # Handles an empty result or a faulty query
        runs["last-scheduled"] = None

    response["runs"] = runs

    return ok_json(response)

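# A sketch of the response this endpoint produces (field names from the
# code above; values are illustrative, and the "services" content is
# whatever the heartbeat_json view provides):
#
#   {
#     "time": "2024-01-01T12:34:56+00:00",
#     "services": {"database": {"uptime": "P1DT2H3M4S"}},
#     "runs": {
#       "last-finished": "2024-01-01T12:00:00+00:00",
#       "last-scheduled": "2024-01-01T12:30:00+00:00"
#     }
#   }
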
def schedule_horizon():
    """Get the length of the server's scheduling horizon"""

    try:
        cursor = dbcursor_query(
            "SELECT schedule_horizon FROM configurables", onerow=True)
    except Exception as ex:
        log.exception()
        return error(str(ex))

    return ok_json(pscheduler.timedelta_as_iso8601(cursor.fetchone()[0]))

def __evaluate_limits(
        task,        # Task UUID
        start_time   # When the task should start
):
    """Evaluate the limits for a run."""

    log.debug("Applying limits")

    # Let this throw what it may; callers have to catch it.
    cursor = dbcursor_query(
        "SELECT json, duration, hints FROM task WHERE uuid = %s", [task])

    if cursor.rowcount == 0:
        # TODO: This or bad_request when the task isn't there?
        return False, None, not_found()

    task_spec, duration, hints = cursor.fetchone()
    cursor.close()
    log.debug("Task is %s, duration is %s" % (task_spec, duration))

    limit_input = {
        'type': task_spec['test']['type'],
        'spec': task_spec['test']['spec'],
        'schedule': {
            'start': pscheduler.datetime_as_iso8601(start_time),
            'duration': pscheduler.timedelta_as_iso8601(duration)
        }
    }

    log.debug("Checking limits against %s" % str(limit_input))

    processor, whynot = limitprocessor()
    if processor is None:
        log.debug("Limit processor is not initialized. %s", whynot)
        return False, None, no_can_do(
            "Limit processor is not initialized: %s" % whynot)

    # Don't pass hints since that would have been covered when the
    # task was submitted and only the scheduler will be submitting
    # runs.
    passed, limits_passed, diags = processor.process(limit_input, hints)

    log.debug("Passed: %s. Diags: %s" % (passed, diags))

    # This prevents the run from being put in a non-starter state
    if passed:
        diags = None

    return passed, diags, None

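# How callers consume this three-value contract (taken from the run PUT
# handler below): the third value is a ready-made HTTP response to hand
# back when evaluation could not proceed.
#
#   passed, diags, response = __evaluate_limits(task, start_time)
#   if response is not None:
#       return response
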
def stat_control_pause():

    cursor = dbcursor_query("""
        SELECT control_is_paused(),
               date_trunc('second', pause_runs_until - now()),
               pause_runs_until = tstz_infinity()
        FROM control""")

    if cursor.rowcount != 1:
        pscheduler.fail("Got back an unexpected amount of data.")

    (is_paused, left, infinite) = cursor.fetchone()
    cursor.close()

    result = {"is_paused": is_paused}
    if is_paused:
        result["infinite"] = infinite
        if not infinite:
            result["remaining"] = pscheduler.timedelta_as_iso8601(left)

    return ok_json(result)

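# Example response bodies (illustrative values):
#
#   {"is_paused": false}
#   {"is_paused": true, "infinite": false, "remaining": "PT42S"}
#   {"is_paused": true, "infinite": true}
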
def __evaluate_limits(
        task,        # Task UUID
        start_time   # When the task should start
):
    """Evaluate the limits for a run."""

    log.debug("Applying limits")

    # Let this throw what it may; callers have to catch it.
    cursor = dbcursor_query(
        "SELECT json, duration, hints FROM task WHERE uuid = %s", [task])

    if cursor.rowcount == 0:
        # TODO: This or bad_request when the task isn't there?
        return not_found()

    task_spec, duration, hints = cursor.fetchone()
    log.debug("Task is %s, duration is %s" % (task_spec, duration))

    limit_input = {
        'type': task_spec['test']['type'],
        'spec': task_spec['test']['spec'],
        'schedule': {
            'start': pscheduler.datetime_as_iso8601(start_time),
            'duration': pscheduler.timedelta_as_iso8601(duration)
        }
    }

    log.debug("Checking limits against %s" % str(limit_input))

    processor, whynot = limitprocessor()
    if processor is None:
        log.debug("Limit processor is not initialized. %s", whynot)
        return no_can_do("Limit processor is not initialized: %s" % whynot)

    # Don't pass hints since that would have been covered when the
    # task was submitted and only the scheduler will be submitting
    # runs.
    passed, diags = processor.process(limit_input, hints)

    log.debug("Passed: %s. Diags: %s" % (passed, diags))

    # This prevents the run from being put in a non-starter state
    if passed:
        diags = None

    return passed, diags

def tasks_uuid(uuid):

    if request.method == 'GET':

        # Get a task, adding server-derived details if a 'detail'
        # argument is present.

        try:
            cursor = dbcursor_query("""
                SELECT
                    task.json,
                    task.added,
                    task.start,
                    task.slip,
                    task.duration,
                    task.post,
                    task.runs,
                    task.participants,
                    scheduling_class.anytime,
                    scheduling_class.exclusive,
                    scheduling_class.multi_result,
                    task.participant,
                    task.enabled,
                    task.cli
                FROM
                    task
                    JOIN test ON test.id = task.test
                    JOIN scheduling_class
                         ON scheduling_class.id = test.scheduling_class
                WHERE uuid = %s
                """, [uuid])
        except Exception as ex:
            return error(str(ex))

        if cursor.rowcount == 0:
            return not_found()

        row = cursor.fetchone()
        if row is None:
            return not_found()
        json = row[0]

        # Redact anything in the test spec or archivers that's marked
        # private as well as _key at the top level if there is one.

        if "_key" in json:
            json["_key"] = None

        json["test"]["spec"] = pscheduler.json_decomment(
            json["test"]["spec"], prefix="_", null=True)

        try:
            for archive in range(0, len(json["archives"])):
                json["archives"][archive]["data"] = pscheduler.json_decomment(
                    json["archives"][archive]["data"], prefix="_", null=True)
        except KeyError:
            pass  # Don't care if not there.

        # Add details if we were asked for them.

        if arg_boolean('detail'):

            part_list = row[7]

            # The database is not supposed to allow this, but spit out
            # a sane default as a last resort in case it happens.
            if part_list is None:
                part_list = [None]

            # row[11] is this server's participant number.
            if row[11] == 0 and part_list[0] is None:
                part_list[0] = pscheduler.api_this_host()

            json['detail'] = {
                'added': None if row[1] is None
                         else pscheduler.datetime_as_iso8601(row[1]),
                'start': None if row[2] is None
                         else pscheduler.datetime_as_iso8601(row[2]),
                'slip': None if row[3] is None
                        else pscheduler.timedelta_as_iso8601(row[3]),
                'duration': None if row[4] is None
                            else pscheduler.timedelta_as_iso8601(row[4]),
                'post': None if row[5] is None
                        else pscheduler.timedelta_as_iso8601(row[5]),
                'runs': None if row[6] is None
                        else int(row[6]),
                'participants': part_list,
                'anytime': row[8],
                'exclusive': row[9],
                'multi-result': row[10],
                'enabled': row[12],
                'cli': row[13]
            }

        return ok_json(json)

    elif request.method == 'POST':

        log.debug("Posting to %s", uuid)
        log.debug("Data is %s", request.data)

        # TODO: This is only for participant 1+
        # TODO: This should probably be a PUT and not a POST.

        try:
            json_in = pscheduler.json_load(request.data)
        except ValueError:
            return bad_request("Invalid JSON")
        log.debug("JSON is %s", json_in)

        try:
            participant = arg_cardinal('participant')
        except ValueError as ex:
            return bad_request("Invalid participant: " + str(ex))
        log.debug("Participant %d", participant)

        # Evaluate the task against the limits and reject the request
        # if it doesn't pass.

        log.debug("Checking limits on task")

        processor, whynot = limitprocessor()
        if processor is None:
            message = "Limit processor is not initialized: %s" % whynot
            log.debug(message)
            return no_can_do(message)

        # TODO: This is cooked up in two places.  Make a function of it.
hints = { "ip": request.remote_addr } hints_data = pscheduler.json_dump(hints) passed, diags = processor.process(json_in["test"], hints) if not passed: return forbidden("Task forbidden by limits:\n" + diags) log.debug("Limits passed") # TODO: Pluck UUID from URI uuid = url_last_in_path(request.url) log.debug("Posting task %s", uuid) try: cursor = dbcursor_query( "SELECT * FROM api_task_post(%s, %s, %s, %s)", [request.data, hints_data, participant, uuid]) except Exception as ex: return error(str(ex)) if cursor.rowcount == 0: return error("Task post failed; poster returned nothing.") # TODO: Assert that rowcount is 1 log.debug("All done: %s", base_url()) return ok(base_url()) elif request.method == 'DELETE': parsed = list(urlparse.urlsplit(request.url)) parsed[1] = "%s" template = urlparse.urlunsplit(parsed) try: cursor = dbcursor_query( "SELECT api_task_disable(%s, %s)", [uuid, template]) except Exception as ex: return error(str(ex)) return ok() else: return not_allowed()
def __tasks_get_filtered(uri_base,
                         where_clause='TRUE',
                         args=[],
                         expanded=False,
                         detail=False,
                         single=True):
    """Get one or more tasks from a table using a WHERE clause."""

    # Let this throw; callers are responsible for catching.
    cursor = dbcursor_query(
        """
        SELECT
            task.json,
            task.added,
            task.start,
            task.slip,
            task.duration,
            task.post,
            task.runs,
            task.participants,
            scheduling_class.anytime,
            scheduling_class.exclusive,
            scheduling_class.multi_result,
            task.participant,
            task.enabled,
            task.cli,
            task.limits_passed,
            task.participant,
            task.uuid
        FROM
            task
            JOIN test ON test.id = task.test
            JOIN scheduling_class
                 ON scheduling_class.id = test.scheduling_class
        WHERE %s
        """ % (where_clause), args)

    tasks_returned = []

    for row in cursor:

        uri = uri_base if single else "%s/%s" % (uri_base, row[16])

        if not expanded:
            tasks_returned.append(uri)
            continue

        json = row[0]

        # The lead participant passes the participant list to the
        # others within the JSON, but that shouldn't come out when
        # querying it.
        try:
            del json["participants"]
        except KeyError:
            pass

        # Add details if we were asked for them.

        if detail:

            part_list = row[7]

            # The database is not supposed to allow this, but spit out
            # a sane default as a last resort in case it happens.
            if part_list is None:
                part_list = [None]

            # row[11] is this server's participant number.
            if row[11] == 0 and part_list[0] is None:
                part_list[0] = server_netloc()

            json['detail'] = {
                'added': None if row[1] is None
                         else pscheduler.datetime_as_iso8601(row[1]),
                'start': None if row[2] is None
                         else pscheduler.datetime_as_iso8601(row[2]),
                'slip': None if row[3] is None
                        else pscheduler.timedelta_as_iso8601(row[3]),
                'duration': None if row[4] is None
                            else pscheduler.timedelta_as_iso8601(row[4]),
                'post': None if row[5] is None
                        else pscheduler.timedelta_as_iso8601(row[5]),
                'runs': None if row[6] is None
                        else int(row[6]),
                'participants': part_list,
                'anytime': row[8],
                'exclusive': row[9],
                'multi-result': row[10],
                'enabled': row[12],
                'cli': row[13],
                'spec-limits-passed': row[14],
                'participant': row[15],
                'href': uri,
                'runs-href': "%s/runs" % (uri),
                'first-run-href': "%s/runs/first" % (uri),
                'next-run-href': "%s/runs/next" % (uri)
            }

        tasks_returned.append(json)

    return tasks_returned

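# Example internal use (hypothetical caller), fetching a single task,
# expanded and with detail, by UUID:
#
#   tasks = __tasks_get_filtered(request.base_url,
#                                where_clause="task.uuid = %s",
#                                args=[uuid],
#                                expanded=True,
#                                detail=True,
#                                single=True)
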
def tasks_uuid_runs_run(task, run):

    if not uuid_is_valid(task):
        return not_found()

    if ((request.method in ['PUT', 'DELETE'] and not uuid_is_valid(run))
            or (run not in ['first', 'next'] and not uuid_is_valid(run))):
        return not_found()

    if request.method == 'GET':

        # Wait for there to be a local result
        wait_local = arg_boolean('wait-local')

        # Wait for there to be a merged result
        wait_merged = arg_boolean('wait-merged')

        if wait_local and wait_merged:
            return bad_request("Cannot wait on local and merged results")

        # Figure out how long to wait in seconds.  Zero means don't
        # wait.

        wait_time = arg_integer('wait')
        if wait_time is None:
            wait_time = 30
        if wait_time < 0:
            return bad_request("Wait time must be >= 0")

        # If asked for 'first', dig up the first run and use its UUID.

        if run in ['next', 'first']:

            future = run == 'next'
            wait_interval = 0.5
            tries = int(wait_time / wait_interval) if wait_time > 0 \
                    else 1

            while tries > 0:
                try:
                    run = __runs_first_run(task, future)
                except Exception as ex:
                    log.exception()
                    return error(str(ex))
                if run is not None:
                    break
                if wait_time > 0:
                    time.sleep(wait_interval)
                tries -= 1

            if run is None:
                return not_found()

        # 60 tries at 0.5s intervals == 30 sec.
        tries = 60 if (wait_local or wait_merged) else 1

        while tries:

            try:
                cursor = dbcursor_query(
                    """
                    SELECT
                        lower(run.times),
                        upper(run.times),
                        upper(run.times) - lower(run.times),
                        task.participant,
                        task.nparticipants,
                        task.participants,
                        run.part_data,
                        run.part_data_full,
                        run.result,
                        run.result_full,
                        run.result_merged,
                        run_state.enum,
                        run_state.display,
                        run.errors,
                        run.clock_survey,
                        run.id,
                        archiving_json(run.id),
                        run.added
                    FROM
                        run
                        JOIN task ON task.id = run.task
                        JOIN run_state ON run_state.id = run.state
                    WHERE
                        task.uuid = %s
                        AND run.uuid = %s""", [task, run])
            except Exception as ex:
                log.exception()
                return error(str(ex))

            if cursor.rowcount == 0:
                cursor.close()
                return not_found()

            row = cursor.fetchone()
            cursor.close()

            if not (wait_local or wait_merged):
                break
            else:
                if (wait_local and row[7] is None) \
                        or (wait_merged and row[9] is None):
                    time.sleep(0.5)
                    tries -= 1
                else:
                    break

        # Return a result whether or not we timed out and let the
        # client sort it out.

        result = {}

        # This strips any query parameters and replaces the last item
        # with the run, which might be needed if the 'first' option
        # was used.
        href_path_parts = urlparse.urlparse(request.url).path.split('/')
        href_path_parts[-1] = run
        href_path = '/'.join(href_path_parts)
        href = urlparse.urljoin(request.url, href_path)

        result['href'] = href
        result['start-time'] = pscheduler.datetime_as_iso8601(row[0])
        result['end-time'] = pscheduler.datetime_as_iso8601(row[1])
        result['duration'] = pscheduler.timedelta_as_iso8601(row[2])
        participant_num = row[3]
        result['participant'] = participant_num
        result['participants'] = [
            server_netloc()
            if participant is None and participant_num == 0
            else participant
            for participant in row[5]
        ]
        result['participant-data'] = row[6]
        result['participant-data-full'] = row[7]
        result['result'] = row[8]
        result['result-full'] = row[9]
        result['result-merged'] = row[10]
        result['state'] = row[11]
        result['state-display'] = row[12]
        result['errors'] = row[13]
        if row[14] is not None:
            result['clock-survey'] = row[14]
        if row[16] is not None:
            result['archivings'] = row[16]
        if row[17] is not None:
            result['added'] = pscheduler.datetime_as_iso8601(row[17])
        result['task-href'] = root_url('tasks/' + task)
        result['result-href'] = href + '/result'

        return json_response(result)

    elif request.method == 'PUT':

        log.debug("Run PUT %s", request.url)

        # Get the JSON from the body
        try:
            run_data = pscheduler.json_load(request.data, max_schema=1)
        except ValueError:
            log.exception()
            log.debug("Run data was %s", request.data)
            return bad_request("Invalid or missing run data")

        # If the run doesn't exist, take the whole thing as if it were
        # a POST.

        try:
            cursor = dbcursor_query(
                "SELECT EXISTS (SELECT * FROM run WHERE uuid = %s)",
                [run], onerow=True)
        except Exception as ex:
            log.exception()
            return error(str(ex))

        fetched = cursor.fetchone()[0]
        cursor.close()

        if not fetched:

            log.debug("Record does not exist; full PUT.")

            try:
                start_time = \
                    pscheduler.iso8601_as_datetime(run_data['start-time'])
            except KeyError:
                return bad_request("Missing start time")
            except ValueError:
                return bad_request("Invalid start time")

            try:

                passed, diags, response = __evaluate_limits(task, start_time)
                if response is not None:
                    return response

                cursor = dbcursor_query(
                    "SELECT * FROM api_run_post(%s, %s, %s)",
                    [task, start_time, run], onerow=True)
                succeeded, uuid, conflicts, error_message = cursor.fetchone()
                cursor.close()
                if conflicts:
                    return conflict(error_message)
                if not succeeded:
                    return error(error_message)
                log.debug("Full put of %s, got back %s", run, uuid)

            except Exception as ex:
                log.exception()
                return error(str(ex))

            return ok()

        # For anything else, only one thing can be updated at a time,
        # and even that is a select subset.
log.debug("Record exists; partial PUT.") if 'part-data-full' in run_data: log.debug("Updating part-data-full from %s", run_data) try: part_data_full = \ pscheduler.json_dump(run_data['part-data-full']) except KeyError: return bad_request("Missing part-data-full") except ValueError: return bad_request("Invalid part-data-full") log.debug("Full data is: %s", part_data_full) try: cursor = dbcursor_query( """ UPDATE run SET part_data_full = %s WHERE uuid = %s AND EXISTS (SELECT * FROM task WHERE UUID = %s) """, [part_data_full, run, task]) except Exception as ex: log.exception() return error(str(ex)) rowcount = cursor.rowcount cursor.close() if rowcount != 1: return not_found() log.debug("Full data updated") return ok() elif 'result-full' in run_data: log.debug("Updating result-full from %s", run_data) try: result_full = \ pscheduler.json_dump(run_data['result-full']) except KeyError: return bad_request("Missing result-full") except ValueError: return bad_request("Invalid result-full") try: succeeded = bool(run_data['succeeded']) except KeyError: return bad_request("Missing success value") except ValueError: return bad_request("Invalid success value") log.debug("Updating result-full: JSON %s", result_full) log.debug("Updating result-full: Run %s", run) log.debug("Updating result-full: Task %s", task) try: cursor = dbcursor_query( """ UPDATE run SET result_full = %s, state = CASE %s WHEN TRUE THEN run_state_finished() ELSE run_state_failed() END WHERE uuid = %s AND EXISTS (SELECT * FROM task WHERE UUID = %s) """, [result_full, succeeded, run, task]) except Exception as ex: log.exception() return error(str(ex)) rowcount = cursor.rowcount cursor.close() if rowcount != 1: return not_found() return ok() elif request.method == 'DELETE': # TODO: If this is the lead, the run's counterparts on the # other participating nodes need to be removed as well. try: requester = task_requester(task) if requester is None: return not_found() if not access_write_ok(requester): return forbidden() except Exception as ex: return error(str(ex)) try: cursor = dbcursor_query( """ DELETE FROM run WHERE task in (SELECT id FROM task WHERE uuid = %s) AND uuid = %s """, [task, run]) except Exception as ex: log.exception() return error(str(ex)) rowcount = cursor.rowcount cursor.close() return ok() if rowcount == 1 else not_found() else: return not_allowed()
import datetime
import io

import pycurl

import pscheduler


def run(input):

    # TODO: Check the spec schema
    try:
        assert input["test"]["type"] == "http"
        source = input['test']['spec']['url']
        always_succeed = input['test']['spec'].get('always-succeed', False)
        keep_content = input['test']['spec'].get('keep-content', None)
        timeout_iso = input['test']['spec'].get('timeout', 'PT10S')
        ip_version = input['test']['spec'].get('ip-version', None)
        timeout = pscheduler.timedelta_as_seconds(
            pscheduler.iso8601_as_timedelta(timeout_iso))
    except KeyError:
        return {"succeeded": False, "error": "Missing data in input"}

    # Can-run should weed these out, but backstop it with a check just
    # in case.
    if source[0:5] == "file:" and keep_content is not None:
        return {
            "succeeded": False,
            "error": "Cannot keep content from file:// URLs",
            "diags": None
        }

    curl = pycurl.Curl()
    curl.setopt(pycurl.URL, str(source))
    # TODO: This test doesn't have bind but needs one.
    # curl.setopt(pycurl.INTERFACE, str(bind))
    # TODO: Redirects as an option?
    # curl.setopt(pycurl.FOLLOWLOCATION, allow_redirects)
    if timeout is not None:
        curl.setopt(pycurl.TIMEOUT_MS, int(timeout * 1000.0))
    curl.setopt(pycurl.SSL_VERIFYHOST, False)
    curl.setopt(pycurl.SSL_VERIFYPEER, False)
    if ip_version is not None:
        curl.setopt(
            pycurl.IPRESOLVE,
            pycurl.IPRESOLVE_V4 if ip_version == 4 else pycurl.IPRESOLVE_V6)

    buf = io.BytesIO()
    curl.setopt(pycurl.WRITEFUNCTION, buf.write)

    text = ""
    try:
        start_time = datetime.datetime.now()
        curl.perform()
        status = curl.getinfo(pycurl.HTTP_CODE)
        # PycURL returns a zero for non-HTTP URLs
        if status == 0:
            status = 200
        text = buf.getvalue().decode()
    except pycurl.error as ex:
        # Treat any libcurl failure as a client-style error.
        code, message = ex.args
        status = 400
        text = message
    finally:
        end_time = datetime.datetime.now()
        curl.close()
        buf.close()

    # 200-299 is success; anything else is an error.
    fetch_succeeded = (status >= 200 and status < 300)
    succeeded = always_succeed or fetch_succeeded

    if succeeded:

        schema = pscheduler.HighInteger(1)

        run_result = {
            "succeeded": True,
            "time": pscheduler.timedelta_as_iso8601(end_time - start_time)
        }

        if always_succeed:
            run_result["status"] = status
            schema.set(2)

        try:
            run_result["found"] = text.find(
                input['test']['spec']["parse"]) >= 0
        except KeyError:
            pass

        # If the fetch failed or we've been told to keep 0 content,
        # plaster it all in.  (keep_content == 0 implies it isn't None.)
        if (not fetch_succeeded) or keep_content == 0:
            run_result["content"] = text
            schema.set(2)
        elif keep_content is not None:
            run_result["content"] = text[:keep_content]
            schema.set(2)

        run_result["schema"] = schema.value()

        return {
            "succeeded": True,
            "diags": None,
            "error": None,
            "result": run_result
        }

    else:

        return {
            "succeeded": False,
            "diags": "Fetch returned non-success status %d" % (status),
            "error": text
        }

    assert False, "Should not be reached."

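# A minimal example of the input this runner expects (illustrative URL;
# 'always-succeed', 'keep-content', 'timeout' and 'ip-version' are
# optional, per the .get() defaults above):
#
#   {
#     "test": {
#       "type": "http",
#       "spec": {
#         "url": "https://www.example.net/",
#         "timeout": "PT10S",
#         "parse": "example"
#       }
#     }
#   }
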
def tasks_uuid_runs_run(task, run):

    if task is None:
        return bad_request("Missing or invalid task")
    if run is None:
        return bad_request("Missing or invalid run")

    if request.method == 'GET':

        # Wait for there to be a local result
        wait_local = arg_boolean('wait-local')

        # Wait for there to be a merged result
        wait_merged = arg_boolean('wait-merged')

        if wait_local and wait_merged:
            return error("Cannot wait on local and merged results")

        # If asked for 'first', dig up the first run and use its UUID.

        if run == 'first':

            # 60 tries at 0.5s intervals == 30 sec.
            tries = 60

            while tries > 0:
                try:
                    run = __runs_first_run(task)
                except Exception as ex:
                    log.exception()
                    return error(str(ex))
                if run is not None:
                    break
                time.sleep(0.5)
                tries -= 1

            if run is None:
                return not_found()

        # 60 tries at 0.5s intervals == 30 sec.
        tries = 60 if (wait_local or wait_merged) else 1

        while tries:

            try:
                cursor = dbcursor_query(
                    """
                    SELECT
                        lower(run.times),
                        upper(run.times),
                        upper(run.times) - lower(run.times),
                        task.participant,
                        task.nparticipants,
                        task.participants,
                        run.part_data,
                        run.part_data_full,
                        run.result,
                        run.result_full,
                        run.result_merged,
                        run_state.enum,
                        run_state.display,
                        run.errors,
                        run.clock_survey
                    FROM
                        run
                        JOIN task ON task.id = run.task
                        JOIN run_state ON run_state.id = run.state
                    WHERE
                        task.uuid = %s
                        AND run.uuid = %s""", [task, run])
            except Exception as ex:
                log.exception()
                return error(str(ex))

            if cursor.rowcount == 0:
                return not_found()

            row = cursor.fetchone()

            if not (wait_local or wait_merged):
                break
            else:
                if (wait_local and row[7] is None) \
                        or (wait_merged and row[9] is None):
                    time.sleep(0.5)
                    tries -= 1
                else:
                    break

        # Return a result whether or not we timed out and let the
        # client sort it out.

        result = {}

        # This strips any query parameters and replaces the last item
        # with the run, which might be needed if the 'first' option
        # was used.

        href_path_parts = urlparse.urlparse(request.url).path.split('/')
        href_path_parts[-1] = run
        href_path = '/'.join(href_path_parts)
        href = urlparse.urljoin(request.url, href_path)

        result['href'] = href
        result['start-time'] = pscheduler.datetime_as_iso8601(row[0])
        result['end-time'] = pscheduler.datetime_as_iso8601(row[1])
        result['duration'] = pscheduler.timedelta_as_iso8601(row[2])
        participant_num = row[3]
        result['participant'] = participant_num
        result['participants'] = [
            pscheduler.api_this_host()
            if participant is None and participant_num == 0
            else participant
            for participant in row[5]
        ]
        result['participant-data'] = row[6]
        result['participant-data-full'] = row[7]
        result['result'] = row[8]
        result['result-full'] = row[9]
        result['result-merged'] = row[10]
        result['state'] = row[11]
        result['state-display'] = row[12]
        result['errors'] = row[13]
        if row[14] is not None:
            result['clock-survey'] = row[14]
        result['task-href'] = root_url('tasks/' + task)
        result['result-href'] = href + '/result'

        return json_response(result)

    elif request.method == 'PUT':

        log.debug("Run PUT %s", request.url)

        # Get the JSON from the body
        try:
            run_data = pscheduler.json_load(request.data)
        except ValueError:
            log.exception()
            log.debug("Run data was %s", request.data)
            return error("Invalid or missing run data")

        # If the run doesn't exist, take the whole thing as if it were
        # a POST.
        try:
            cursor = dbcursor_query(
                "SELECT EXISTS (SELECT * FROM run WHERE uuid = %s)",
                [run], onerow=True)
        except Exception as ex:
            log.exception()
            return error(str(ex))

        if not cursor.fetchone()[0]:

            log.debug("Record does not exist; full PUT.")

            try:
                start_time = \
                    pscheduler.iso8601_as_datetime(run_data['start-time'])
            except KeyError:
                return bad_request("Missing start time")
            except ValueError:
                return bad_request("Invalid start time")

            passed, diags = __evaluate_limits(task, start_time)

            try:
                cursor = dbcursor_query(
                    "SELECT api_run_post(%s, %s, %s)",
                    [task, start_time, run], onerow=True)
                log.debug("Full put of %s, got back %s",
                          run, cursor.fetchone()[0])
            except Exception as ex:
                log.exception()
                return error(str(ex))

            return ok()

        # For anything else, only one thing can be updated at a time,
        # and even that is a select subset.

        log.debug("Record exists; partial PUT.")

        if 'part-data-full' in run_data:

            log.debug("Updating part-data-full from %s", run_data)

            try:
                part_data_full = \
                    pscheduler.json_dump(run_data['part-data-full'])
            except KeyError:
                return bad_request("Missing part-data-full")
            except ValueError:
                return bad_request("Invalid part-data-full")

            log.debug("Full data is: %s", part_data_full)

            try:
                cursor = dbcursor_query(
                    """
                    UPDATE run
                    SET part_data_full = %s
                    WHERE
                        uuid = %s
                        AND EXISTS (SELECT * FROM task WHERE uuid = %s)
                    """, [part_data_full, run, task])
            except Exception as ex:
                log.exception()
                return error(str(ex))

            if cursor.rowcount != 1:
                return not_found()

            log.debug("Full data updated")

            return ok()

        elif 'result-full' in run_data:

            log.debug("Updating result-full from %s", run_data)

            try:
                result_full = \
                    pscheduler.json_dump(run_data['result-full'])
            except KeyError:
                return bad_request("Missing result-full")
            except ValueError:
                return bad_request("Invalid result-full")

            try:
                succeeded = bool(run_data['succeeded'])
            except KeyError:
                return bad_request("Missing success value")
            except ValueError:
                return bad_request("Invalid success value")

            log.debug("Updating result-full: JSON %s", result_full)
            log.debug("Updating result-full: Run  %s", run)
            log.debug("Updating result-full: Task %s", task)

            try:
                cursor = dbcursor_query(
                    """
                    UPDATE run
                    SET
                        result_full = %s,
                        state = CASE %s
                                WHEN TRUE THEN run_state_finished()
                                ELSE run_state_failed()
                                END
                    WHERE
                        uuid = %s
                        AND EXISTS (SELECT * FROM task WHERE uuid = %s)
                    """, [result_full, succeeded, run, task])
            except Exception as ex:
                log.exception()
                return error(str(ex))

            if cursor.rowcount != 1:
                return not_found()

            return ok()

    elif request.method == 'DELETE':

        # TODO: If this is the lead, the run's counterparts on the
        # other participating nodes need to be removed as well.

        try:
            cursor = dbcursor_query(
                """
                DELETE FROM run
                WHERE
                    task IN (SELECT id FROM task WHERE uuid = %s)
                    AND uuid = %s
                """, [task, run])
        except Exception as ex:
            log.exception()
            return error(str(ex))

        return ok() if cursor.rowcount == 1 else not_found()

    else:
        return not_allowed()