Example #1
def next_action():
    inp, out = options()

    with open(inp, 'r') as istream:
        payload = json.loads(istream.read())

    print "Query with pid=%s is completed, fetch results" % payload['workflow']['pid']
Example #2
def poll_request():
    inp, out = options()

    with open(inp, 'r') as istream:
        payload = json.loads(istream.read())

    # Emulate polling for the PID by waiting a random amount of time since
    # making the request
    # You would obviously poll DAS with the PID in question to see if it had
    # finished, instead
    host = payload['workflow']['das_server']
    query = payload['workflow']['query']
    print "poll_request, host=%s, query=%s" % (host, query)
    pid = payload['workflow']['pid']
    timestamp = payload['workflow']['timestamp']

    now = int(time.time())
    if now - timestamp > random.randint(10, 30):
        print("poll_request: pid=%s Complete!" % pid)
    else:  # Push the event for this script back on the stack, so it loops...
        print("poll_request: pid=%s Still waiting..." % pid)
        payload['workflow']['Events'].insert(0, 'das_poll_request')

    with open(out, 'w') as ostream:
        ostream.write(json.dumps(payload))
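
The comment above notes that a real implementation would ask DAS whether the PID has finished instead of waiting on the clock. A minimal sketch of that substitution, assuming a hypothetical check_das_status(host, pid) helper that returns True once the server reports the request as done (the actual DAS status call is not shown in these examples):

def poll_request_real(payload):
    # Same bookkeeping as poll_request above, but the completion test queries
    # the server instead of comparing timestamps.
    workflow = payload['workflow']
    if check_das_status(workflow['das_server'], workflow['pid']):  # hypothetical helper
        print("poll_request: pid=%s Complete!" % workflow['pid'])
    else:
        print("poll_request: pid=%s Still waiting..." % workflow['pid'])
        workflow['Events'].insert(0, 'das_poll_request')
    return payload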
Example #3
def make_request():
    "Make request action"
    inp, out = options()

    with open(inp, "r") as istream:
        payload = json.loads(istream.read())

    host = payload["workflow"]["das_server"]
    query = payload["workflow"]["query"]
    # emulate fake request which returns uid and we assign it
    payload["workflow"]["pid"] = generate_uid(32, query)
    payload["workflow"]["timestamp"] = int(time.time())

    print "make_request: pid=%s" % payload["workflow"]["pid"]

    with open(out, "w") as ostream:
        ostream.write(json.dumps(payload))
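
make_request depends on a generate_uid(32, query) helper that is not included in the examples. One plausible sketch, assuming the goal is simply a 32-character identifier derived from the query plus some randomness (the real implementation may differ):

import hashlib
import os

def generate_uid(length, query):
    # Hash the query together with a random salt and truncate the hex digest
    # to the requested number of characters.
    digest = hashlib.sha256(query.encode('utf-8') + os.urandom(16)).hexdigest()
    return digest[:length]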
Example #4
def spawn_workflow():
    inp, out = options()

    with open(inp, 'r') as istream:
        payload = json.loads(istream.read())

    # This just does a repeated clone of the workflow and modifies a parameter
    # (which isn't used, it's just for show). You could adapt this to fetch a
    # bunch of dataset names, for example, and create a new payload for each one
    host = payload['workflow'].get('das_server', 'https://cmsweb.cern.ch')
    payloads = []
    for idx in range(2):
        pld = copy.deepcopy(payload)
        query = 'das-query-%s' % idx
        pld['workflow']['query'] = query
        pld['workflow']['das_server'] = host
        print "spawn-workflows: query=%s" % query
        payloads.append(pld)

    with open(out, 'w') as ostream:
        ostream.write(json.dumps(payloads))
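
Every action in these examples obtains its input and output payload paths from an options() helper that is not shown. A minimal sketch of such a helper, assuming it simply parses two command-line flags (the flag names are an assumption made for illustration):

import argparse

def options():
    # Return (input_path, output_path) for the JSON payload files.
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', required=True, help='path to the input payload')
    parser.add_argument('--output', required=True, help='path to the output payload')
    args = parser.parse_args()
    return args.input, args.output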
Example #5
            # Reconstructed: the snippet is truncated above this call; the name
            # of the first argument (pred_bboxes) is assumed from the two calls
            # that follow.
            detector_stats = eval_detection_voc(pred_bboxes,
                                                pred_labels,
                                                pred_scores,
                                                gt_bboxes,
                                                gt_labels,
                                                iou_thresh=iou_thresh)
            bbox_align_stats = eval_detection_voc(refn_bboxes,
                                                  pred_labels,
                                                  pred_scores,
                                                  gt_bboxes,
                                                  gt_labels,
                                                  iou_thresh=iou_thresh)
            straddling_stats = eval_detection_voc(mtsm_bboxes,
                                                  pred_labels,
                                                  pred_scores,
                                                  gt_bboxes,
                                                  gt_labels,
                                                  iou_thresh=iou_thresh)

            sstr = self.pprint(detector_stats, bbox_align_stats,
                               straddling_stats, iou_thresh)
            # join is assumed to be os.path.join, which takes separate path
            # components rather than a list.
            with open(join(logsdir, 'metrics_{:.2f}.table'.format(iou_thresh)),
                      'w') as f:
                f.write(sstr)


if __name__ == '__main__':
    opts = options().parse(train_mode=False)
    evaluater = Evaulate(opts)

    logsdir = opts['logs_root']
    evaluater.evaulate_single_model(logsdir)
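
Example #5 evaluates three sets of boxes against the same ground truth. Assuming eval_detection_voc is ChainerCV's chainercv.evaluations.eval_detection_voc (the import is not shown in the snippet), each call takes per-image lists of boxes, labels, and scores and returns a dict with per-class 'ap' values and the overall 'map':

import numpy as np
from chainercv.evaluations import eval_detection_voc  # assumed source of the function

# One image with a single predicted box and a single ground-truth box,
# in (ymin, xmin, ymax, xmax) order.
pred_bboxes = [np.array([[10., 10., 50., 50.]])]
pred_labels = [np.array([0])]
pred_scores = [np.array([0.9])]
gt_bboxes = [np.array([[12., 12., 48., 48.]])]
gt_labels = [np.array([0])]

stats = eval_detection_voc(pred_bboxes, pred_labels, pred_scores,
                           gt_bboxes, gt_labels, iou_thresh=0.5)
print(stats['map'])  # mean average precision across classes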