Example #1
 def __init__(self, pid, request=None):
     """Parse *pid* and populate per-request attributes: time series label,
     bin LID, schema version, ADC columns, canonical PID, extension,
     timestamp, and product.

     Aborts the request with 404 if *pid* cannot be parsed.
     """
     try:
         self.parsed = parse_pid(pid)
     except StopIteration:
         # parse_pid's resolver yielded nothing: unknown/unparseable pid
         abort(404)
     self.time_series = self.parsed['ts_label']
     self.bin_lid = self.parsed['bin_lid']
     self.lid = self.parsed[LID]
     self.schema_version = self.parsed[SCHEMA_VERSION]
     # adc_cols arrives as a single space-delimited string; split to a list
     self.adc_cols = re.split(' ', self.parsed['adc_cols'])
     if request is not None:
         self.url_root = get_url_root()
     # NOTE(review): if request is None, self.url_root is never assigned here,
     # and the canonicalize() call below would raise AttributeError unless the
     # attribute is provided elsewhere -- confirm all callers pass request.
     self.canonical_pid = canonicalize(self.url_root, self.time_series, self.lid)
     self.extension = 'json' # default
     if 'extension' in self.parsed:
         self.extension = self.parsed['extension']
     self.timestamp = get_timestamp(self.parsed)
     self.product = self.parsed[PRODUCT]
Example #2
def serve_after_before(ts_label, after_before, n=1, pid=None):
    """Serve a JSON list of the *n* bins immediately before or after a bin.

    :param ts_label: time series label to search within
    :param after_before: either 'before' or 'after' (anything else -> 400)
    :param n: how many neighboring bins to return
    :param pid: pid identifying the reference bin (404 if unresolvable)
    :returns: JSON response: list of {'pid': ..., 'date': ...} dicts
    """
    # idiomatic membership test; original read `not after_before in [...]`
    if after_before not in ('before', 'after'):
        abort(400)
    try:
        parsed = next(ifcb().pid(pid))
    except StopIteration:
        # resolver produced nothing for this pid
        abort(404)
    bin_lid = parsed['bin_lid']
    with Feed(session, ts_label) as feed:
        if after_before == 'before':
            bins = list(feed.before(bin_lid, n))
        else:
            bins = list(feed.after(bin_lid, n))
    resp = []
    # avoid shadowing the builtin `bin` and clobbering the `pid` parameter
    for b in bins:
        sample_time_str = iso8601(b.sample_time.timetuple())
        bin_pid = canonicalize(get_url_root(), ts_label, b.lid)
        resp.append(dict(pid=bin_pid, date=sample_time_str))
    return Response(json.dumps(resp), mimetype=MIME_JSON)
Example #3
def acc_wakeup(wakeup_key):
    """- wake up and expire the session
    - acquire a mutex on the acquisition key
    - query for the instrument
    - run the copy job
    - schedule the accession job
    - wakeup accession workers"""
    # figure out if this wakeup matters to us
    if not is_acc_key(wakeup_key):
        return
    time_series = get_time_series(wakeup_key)
    # attempt to acquire mutex. if it fails, that's fine: it means batch
    # accession for this key is already underway in another worker
    try:
        then = time.time()
        count = 0
        with Mutex(wakeup_key, ttl=45) as mutex:
            session.expire_all() # don't be stale!
            accession = Accession(session, time_series)
            # logging.warn is a deprecated alias; use warning() with lazy args
            logging.warning('START BATCH %s', time_series)
            for fs in accession.list_filesets(): # FIXME debug, do all
                lid = fs[LID]
                if accession.bin_exists(lid):
                    continue # don't schedule accession if bin exists
                pid = canonicalize(URL_PREFIX, time_series, lid)
                count += 1
                if count % 100 == 0:
                    logging.warning('batch %s: scheduled %d bins', time_series, count)
                schedule_accession(client, pid)
                elapsed = time.time() - then
                if elapsed > 25: # don't send heartbeats too often
                    mutex.heartbeat() # retain mutex
                    then = time.time()
                client.wakeup() # wakeup workers
            logging.warning('END BATCH %s: %d bins scheduled', time_series, count)
            client.wakeup()
    except Busy:
        # another worker holds the mutex; batch is already in progress
        logging.warning('BATCH not waking up')
Example #4
def serve_mosaic_image(time_series=None, pid=None, params='/'):
    """Generate a mosaic of ROIs from a sample bin.
    params include the following, with default values
    - series (mvco) - time series (FIXME: handle with resolver, clarify difference between namespace, time series, pid, lid)
    - size (1024x1024) - size of the mosaic image
    - page (1) - page. for typical image sizes the entire bin does not fit and so is split into pages.
    - scale - scaling factor for image dimensions """
    # parse params
    size, scale, page = parse_mosaic_params(params)
    (w,h) = size
    parsed = parse_pid(pid)
    schema_version = parsed['schema_version']
    try:
        paths = get_fileset(parsed)
    except NotFound:
        # no fileset on disk for this pid
        abort(404)
    adc_path = paths['adc_path']
    roi_path = paths['roi_path']
    bin_pid = canonicalize(get_url_root(), time_series, parsed['bin_lid'])
    # perform layout operation
    # layout is computed at the scaled-down size, then composited and
    # thumbnailed back to the requested (w,h) below
    scaled_size = (int(w/scale), int(h/scale))
    layout = list(get_mosaic_layout(adc_path, schema_version, bin_pid, scaled_size, page))
    extension = parsed['extension']
    # serve JSON on request
    if extension == 'json':
        return Response(json.dumps(list(layout2json(layout, scale))), mimetype=MIME_JSON)
    mimetype = mimetypes.types_map['.' + extension]
    # read all images needed for compositing and inject into Tiles
    image_layout = []
    with open(roi_path,'rb') as roi_file:
        for tile in layout:
            target = tile.image # in mosaic API, the record is called 'image'
            # FIXME 1. replace PIL
            image = get_target_image(parsed, target, file=roi_file, raw_stitch=True)
            image_layout.append(Tile(PIL.Image.fromarray(image), tile.size, tile.position))
    # produce and serve composite image
    # NOTE(review): mode 'L'/bgcolor 160 presumably match the grayscale ROI
    # data -- confirm against get_target_image's output dtype
    mosaic_image = thumbnail(mosaic.composite(image_layout, scaled_size, mode='L', bgcolor=160), (w,h))
    #pil_format = filename2format('foo.%s' % extension)
    return Response(as_bytes(mosaic_image), mimetype=mimetype)
Example #5
def serve_pid(pid):
    """Serve a bin or single-target resource identified by *pid*.

    Dispatches on whether the parsed pid names an individual target (ROI)
    or a whole bin, then on product and extension. Aborts with 404 when
    the fileset for the pid is not on disk.
    """
    req = DashboardRequest(pid, request)
    try:
        paths = get_fileset(req.parsed)
    except NotFound:
        abort(404)
    adc = Adc(paths['adc_path'], req.schema_version)
    if 'target' in req.parsed:
        resp = _serve_target(req, paths, adc)
    else:
        resp = _serve_bin(req, paths, adc)
    if resp is not None:
        return resp
    # fall-through for extensions/products no branch handled
    return 'unimplemented'

def _serve_target(req, paths, adc):
    """Serve one target (ROI) of a bin: image products, or metadata as
    json/xml/rdf/html. Returns None for unhandled extensions."""
    # fix: the original left `extension` unbound (NameError -> 500) when the
    # pid had no extension; fall back to the request default instead
    if 'extension' in req.parsed:
        extension = req.parsed['extension']
    else:
        extension = req.extension
    canonical_bin_pid = canonicalize(req.url_root, req.time_series, req.bin_lid)
    target_no = int(req.parsed['target'])
    # pull three targets around the requested one, then find any stitched pair
    targets = adc.get_some_targets(target_no-1, 3)
    targets = list_stitched_targets(targets)
    target = None
    for t in targets:
        if t[TARGET_NUMBER] == target_no:
            target = t
    if target is None:
        # fix: original would NameError (500) here; a missing target is a 404
        abort(404)
    add_pid(target, canonical_bin_pid)
    # check for image
    mimetype = mimetypes.types_map['.' + extension]
    if mimetype.startswith('image/'):
        roi_path = paths['roi_path']
        if req.product == 'raw':
            img = get_target_image(req.parsed, target, roi_path)
            return Response(as_bytes(img, mimetype), mimetype=mimetype)
        if req.product == 'blob':
            return serve_blob_image(req.parsed, mimetype)
        if req.product == 'blob_outline':
            img = get_target_image(req.parsed, target, roi_path)
            return serve_blob_image(req.parsed, mimetype, outline=True, target_img=img)
    # not an image, so get more metadata
    targets = get_targets(adc, canonical_bin_pid)
    # not an image, check for JSON
    if extension == 'json':
        return Response(json.dumps(target), mimetype=MIME_JSON)
    target = get_target_metadata(target, targets)
    # NOTE(review): header is parsed but not passed to any representation
    # below -- retained for parity with prior behavior (fails fast on a
    # bad/missing hdr file); confirm whether it can be dropped
    hdr = parse_hdr_file(paths['hdr_path'])
    if extension == 'xml':
        return Response(target2xml(req.canonical_pid, target, req.timestamp, canonical_bin_pid), mimetype='text/xml')
    if extension == 'rdf':
        return Response(target2rdf(req.canonical_pid, target, req.timestamp, canonical_bin_pid), mimetype='text/xml')
    if extension in ('html', 'htm'):
        template = {
            'static': STATIC,
            'target_pid': req.canonical_pid,
            'bin_pid': canonical_bin_pid,
            'properties': target,
            'target': target.items(), # FIXME use order_keys
            'date': req.timestamp
        }
        return template_response('target.html', **template)
    return None

def _serve_bin(req, paths, adc):
    """Serve a whole-bin resource: raw files, derived products (blobs,
    features, class scores), or metadata as csv/html/json/xml/rdf/zip.
    Returns None for unhandled extensions."""
    hdr_path = paths['hdr_path']
    adc_path = paths['adc_path']
    roi_path = paths['roi_path']
    if req.extension in ('hdr', 'adc', 'roi'):
        # stream the raw file with the appropriate mimetype
        path = dict(hdr=hdr_path, adc=adc_path, roi=roi_path)[req.extension]
        mimetype = dict(hdr='text/plain', adc='text/csv', roi='application/octet-stream')[req.extension]
        return Response(open(path, 'rb'), direct_passthrough=True, mimetype=mimetype)
    try:
        if req.product in ('blobs', 'blob'): # accept old url pattern
            return serve_blob_bin(req.parsed)
        if req.product == 'features':
            return serve_features_bin(req.parsed)
        if req.product == 'class_scores':
            return serve_class_scores_bin(req.parsed)
    except NotFound:
        abort(404)
    # gonna need targets unless the product is 'short'
    targets = []
    if req.product != 'short':
        targets = get_targets(adc, req.canonical_pid)
    # not a special view, handle representations of targets
    if req.extension == 'csv':
        adc_cols = req.parsed[ADC_COLS].split(' ')
        lines = targets2csv(targets, adc_cols)
        return Response('\n'.join(lines)+'\n', mimetype='text/csv')
    # we'll need the header for the other representations
    hdr = parse_hdr_file(hdr_path)
    if req.extension in ('html', 'htm'):
        targets = list(targets)
        context, props = split_hdr(hdr)
        template = {
            'static': STATIC,
            'bin_pid': req.canonical_pid,
            'time_series': req.time_series,
            'context': context,
            'properties': props,
            'targets': targets,
            'target_pids': [t['pid'] for t in targets],
            'date': req.timestamp,
            'files': get_files(req.parsed, check=True) # note: ORM call!
        }
        # fix: removed leftover debug `print get_files(req.parsed)` (an
        # extra ORM round-trip on every HTML bin view)
        return template_response('bin.html', **template)
    if req.extension == 'json':
        if req.product == 'short':
            return Response(bin2json_short(req.canonical_pid, hdr, req.timestamp), mimetype=MIME_JSON)
        if req.product == 'medium':
            return Response(bin2json_medium(req.canonical_pid, hdr, targets, req.timestamp), mimetype=MIME_JSON)
        return Response(bin2json(req.canonical_pid, hdr, targets, req.timestamp), mimetype=MIME_JSON)
    if req.extension == 'xml':
        return Response(bin2xml(req.canonical_pid, hdr, targets, req.timestamp), mimetype='text/xml')
    if req.extension == 'rdf':
        return Response(bin2rdf(req.canonical_pid, hdr, targets, req.timestamp), mimetype='text/xml')
    if req.extension == 'zip':
        # serve a pregenerated zipfile if one is resolvable on disk
        try:
            zip_path = get_product_file(req.parsed, 'binzip')
            if os.path.exists(zip_path):
                # fix: open in binary mode; original used text-mode file()
                return Response(open(zip_path, 'rb'), direct_passthrough=True, mimetype='application/zip')
        except NotFound:
            pass # fall through to on-the-fly generation
        # otherwise generate the zip in memory
        zip_buf = BytesIO() # renamed: `buffer` shadows the py2 builtin
        bin2zip(req.parsed, req.canonical_pid, targets, hdr, req.timestamp, roi_path, zip_buf)
        return Response(zip_buf.getvalue(), mimetype='application/zip')
    return None
Example #6
def canonicalize_bin(ts_label, b):
    """Describe bin *b* as a dict with its canonical pid and ISO8601 sample date."""
    bin_pid = canonicalize(get_url_root(), ts_label, b.lid)
    sample_date = iso8601(b.sample_time.timetuple())
    return dict(pid=bin_pid, date=sample_date)