Code example #1
    def _list():
        # 'path' and 'type' come from the enclosing scope (not shown in this snippet).
        resource = find_resource(path)
        children = Resource.query.filter(Resource.parent_id == resource.id,
                                         Resource.deleted == False)
        items = []

        for child in children:
            metadata = None

            #
            # For datasets, populate metadata.
            #
            if type == 'datasets':
                ds_path = path + "/" + child.name
                file = find_resource(ds_path + "/metadata")
                if file is not None:
                    metadata = read_resource(file)
                    if metadata is not None:
                        metadata = json.loads(metadata)
                        metadata['recording_location'] = ds_path
            elif type == 'sequences':
                if child.name == 'metadata':
                    ds_path = path + "/" + child.name
                    file = find_resource(ds_path)
                    if file is not None:
                        metadata = read_resource(file)
                        if metadata is not None:
                            metadata = json.loads(metadata)
            elif type == 'programs':
                ds_path = path + "/" + child.name
                file = find_resource(ds_path + "/metadata")
                if file is not None:
                    metadata = read_resource(file)
                    if metadata is not None:
                        metadata = json.loads(metadata)

            items.append({'name': child.name, 'metadata': metadata})

        return json.dumps({
            'success': True,
            'message': 'Listed %s' % (path),
            'items': items
        })
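The three branches above repeat the same find/read/parse-JSON sequence. A minimal sketch of a helper that factors it out is shown below; it relies on the project's find_resource and read_resource helpers used throughout these examples, and the helper name load_json_metadata is hypothetical.

import json

def load_json_metadata(metadata_path):
    # Hypothetical helper: look up the resource at metadata_path, read its
    # contents, and parse them as JSON. Returns None if anything is missing.
    file = find_resource(metadata_path)
    if file is None:
        return None
    raw = read_resource(file)
    if raw is None:
        return None
    return json.loads(raw)

Each branch could then reduce to a call such as metadata = load_json_metadata(ds_path + "/metadata").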
Code example #2
File: ext.py Project: concord-consortium/flow-server
    def _load():
        resource    = find_resource(path)
        data        = read_resource(resource)
        if data is not None:
            data = data.decode('utf-8')

        return json.dumps({
                    'success': True,
                    'message': 'Loaded file %s.' % (resource.name),
                    'content': data
                }, ensure_ascii=False)
Code example #3
def get_flow_userinfo(username):

    path = '%s/%s/%s/userinfo' % ('testing', 'student-folders', username)
    resource = find_resource(path)

    if resource is None:
        return {}

    data = read_resource(resource)
    userinfo = json.loads(data)

    return userinfo
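read_resource can return None (example #4 below treats that as a missing file), so json.loads(data) may fail here. A defensive variant is sketched below under that assumption; the names are unchanged and the empty-dict fallback is an assumption.

import json

def get_flow_userinfo(username):
    path = '%s/%s/%s/userinfo' % ('testing', 'student-folders', username)
    resource = find_resource(path)
    if resource is None:
        return {}

    data = read_resource(resource)
    if data is None:
        return {}  # storage missing; mirror the "no resource" case

    try:
        return json.loads(data)
    except ValueError:
        return {}  # malformed userinfo file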
Code example #4
File: views.py Project: sgrimm/rhizo-server
def file_viewer(resource, check_timing=False, is_home_page=False):
    contents = read_resource(
        resource, check_timing=check_timing
    )  # returns binary data; must decode if expecting a string
    if contents is None:
        print('file_viewer: storage not found (resource: %d, path: %s)' %
              (resource.id, resource.path()))
        abort(404)
    if resource.name.endswith('.md'):  # fix(soon): revisit this
        if 'edit' in request.args:
            return render_template(
                'resources/text-editor.html',
                resource=resource,
                contents=contents.decode(),
                show_view_button=True,
            )
        else:
            file_html = process_doc_page(contents.decode())
            allow_edit = access_level(
                resource.query_permissions()) >= ACCESS_LEVEL_WRITE
            title = current_app.config[
                'SYSTEM_NAME'] if is_home_page else resource.name  # fix(later): allow specify title for doc page?
            return render_template(
                'resources/doc-viewer.html',
                resource=resource,
                allow_edit=allow_edit,
                file_html=file_html,
                hide_loc_nav=is_home_page,
                title=title,
            )
    else:
        file_ext = resource.name.rsplit('.', 1)[-1]
        edit = request.args.get('edit', False)
        if file_ext == 'csv' and edit is False:
            reader = csv.reader(StringIO(contents.decode()))
            data = list(reader)
            return render_template('resources/table-editor.html',
                                   resource=resource,
                                   data_json=json.dumps(data))
        elif file_ext == 'txt' or file_ext == 'csv':
            return render_template('resources/text-editor.html',
                                   resource=resource,
                                   contents=contents.decode())
        return Response(response=contents,
                        status=200,
                        mimetype=mime_type_from_ext(resource.name))
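mime_type_from_ext is not shown in these examples. A minimal stand-in built on the standard mimetypes module might look like the sketch below; the fallback type is an assumption, and the project's real helper may differ.

import mimetypes

def mime_type_from_ext(filename):
    # Guess a MIME type from the file name; fall back to a generic binary
    # type when the extension is unknown. (Sketch only.)
    mime_type, _encoding = mimetypes.guess_type(filename)
    return mime_type or 'application/octet-stream'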
Code example #5
def thumbnail_viewer(resource):
    width = int(request.args.get('width', 100))
    # fix(later): switch back to .one() query after fix issues with duplicates
    thumbnails = Thumbnail.query.filter(Thumbnail.resource_id == resource.id, Thumbnail.width == width)
    if thumbnails.count():
        thumbnail = thumbnails[0]
        thumbnail_contents = thumbnail.data
    else:
        contents = read_resource(resource)
        # fix(later): if this returns something other than requested width, we'll keep missing the cache
        (thumbnail_contents, thumbnail_width, thumbnail_height) = compute_thumbnail(contents, width)
        thumbnail = Thumbnail()
        thumbnail.resource_id = resource.id
        thumbnail.width = thumbnail_width
        thumbnail.height = thumbnail_height
        thumbnail.format = 'jpg'
        thumbnail.data = thumbnail_contents
        db.session.add(thumbnail)
        db.session.commit()
    return Response(response=thumbnail_contents, status=200, mimetype='image/jpeg')
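compute_thumbnail is also not shown here. A possible implementation using Pillow, returning JPEG bytes plus the actual thumbnail dimensions, is sketched below; the library choice and the exact scaling behaviour are assumptions.

import io
from PIL import Image

def compute_thumbnail(contents, width):
    # Sketch: scale the image to the requested width (preserving aspect
    # ratio) and return (jpeg_bytes, actual_width, actual_height).
    image = Image.open(io.BytesIO(contents))
    src_width, src_height = image.size
    height = max(1, int(src_height * width / float(src_width)))
    image = image.convert('RGB')  # JPEG has no alpha channel
    image = image.resize((width, height))
    out = io.BytesIO()
    image.save(out, format='JPEG')
    return out.getvalue(), width, height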
Code example #6
def add_to_zip(zip, resource, path_prefix, uncompressed_size):
    name = (path_prefix + '/' + resource.name) if path_prefix else resource.name

    # add file contents
    if resource.type == Resource.FILE:

        # read data
        data = read_resource(resource)
        if not data:
            abort(404)
        uncompressed_size[0] += len(data)
        if uncompressed_size[0] >= 500 * 1024 * 1024:
            abort(400, 'Batch download only supported if total file size is less than 500MB.')  # fix(later): friendlier error handling

        # add to zip file
        zip.writestr(name, data)

    # add folder contents
    elif resource.type == Resource.BASIC_FOLDER:
        resources = Resource.query.filter(Resource.parent_id == resource.id, Resource.deleted == False)
        for r in resources:
            add_to_zip(zip, r, name, uncompressed_size)  # fix(soon): should we check permissions on each resource?
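A minimal sketch of how add_to_zip might be driven for a batch download: build the archive in memory and pass a one-element list as the running uncompressed-size accumulator that the function above mutates. The driver function build_zip itself is hypothetical.

import io
import zipfile

def build_zip(resources):
    buffer = io.BytesIO()
    uncompressed_size = [0]  # mutable total shared across recursive calls
    with zipfile.ZipFile(buffer, 'w', zipfile.ZIP_DEFLATED) as zip_file:
        for resource in resources:
            add_to_zip(zip_file, resource, '', uncompressed_size)
    return buffer.getvalue()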
Code example #7
    def get(self, resource_path):
        args = request.values
        result = {}

        # handle case of controller requesting about self
        if resource_path == 'self':
            if 'authCode' in request.values:
                auth_code = request.values.get('authCode', '')  # fix(soon): remove auth codes
                key = find_key_by_code(auth_code)
            elif request.authorization:
                key = find_key(request.authorization.password)
            else:
                key = None
            if key and key.access_as_controller_id:
                try:
                    r = Resource.query.filter(Resource.id == key.access_as_controller_id).one()
                except NoResultFound:
                    abort(404)
            else:
                abort(403)

        # look up the resource record
        else:
            r = find_resource('/' + resource_path)
            if not r:
                abort(404)  # fix(later): revisit to avoid leaking file existence
            if access_level(r.query_permissions()) < ACCESS_LEVEL_READ:
                abort(403)

        # if request meta-data
        if request.values.get('meta', False):
            result = r.as_dict(extended = True)
            if request.values.get('include_path', False):
                result['path'] = r.path()

        # if request data
        else:

            # if folder, return contents list or zip of collection of files
            if r.type >= 10 and r.type < 20:

                # multi-file download
                if 'ids' in args and args.get('download', False):
                    ids = args['ids'].split(',')
                    return batch_download(r, ids)

                # contents list
                else:
                    recursive = request.values.get('recursive', False)
                    type_name = request.values.get('type', None)
                    if type_name:
                        type = resource_type_number(type_name)
                    else:
                        type = None
                    filter = request.values.get('filter', None)
                    extended = request.values.get('extended', False)
                    result = resource_list(r.id, recursive, type, filter, extended)

            # if sequence, return value(s)
            # fix(later): merge with file case?
            elif r.type == Resource.SEQUENCE:

                # get parameters
                text = request.values.get('text', '')
                download = request.values.get('download', False)
                count = int(request.values.get('count', 1))
                start_timestamp = request.values.get('start_timestamp', '')
                end_timestamp = request.values.get('end_timestamp', '')
                if start_timestamp:
                    try:
                        start_timestamp = parse_json_datetime(start_timestamp)
                    except Exception:
                        abort(400, 'Invalid date/time.')
                if end_timestamp:
                    try:
                        end_timestamp = parse_json_datetime(end_timestamp)
                    except Exception:
                        abort(400, 'Invalid date/time.')

                # if filters specified, assume we want a sequence of values
                if text or start_timestamp or end_timestamp or count > 1:

                    # get summary of values
                    if int(request.values.get('summary', False)):
                        return sequence_value_summary(r.id)

                    # get preliminary set of values
                    resource_revisions = ResourceRevision.query.filter(ResourceRevision.resource_id == r.id)

                    # apply filters (if any)
                    if text:
                        resource_revisions = resource_revisions.filter(ResourceRevision.data.contains(text))  # substring match; Python's 'in' does not build a SQL filter
                    if start_timestamp:
                        resource_revisions = resource_revisions.filter(ResourceRevision.timestamp >= start_timestamp)
                    if end_timestamp:
                        resource_revisions = resource_revisions.filter(ResourceRevision.timestamp <= end_timestamp)
                    resource_revisions = resource_revisions.order_by('timestamp')
                    if resource_revisions.count() > count:
                        resource_revisions = resource_revisions[-count:]  # fix(later): is there a better/faster way to do this?

                    # return data
                    if download:
                        #timezone = r.root().system_attributes['timezone']  # fix(soon): use this instead of UTC
                        lines = ['utc_timestamp,value\n']
                        for rr in resource_revisions:
                            lines.append('%s,%s\n' % (rr.timestamp.strftime('%Y-%m-%d %H:%M:%S.%f'), rr.data))
                        result = make_response(''.join(lines))
                        result.headers['Content-Type'] = 'application/octet-stream'
                        result.headers['Content-Disposition'] = 'attachment; filename=' + r.name + '.csv'
                        return result
                    else:
                        epoch = datetime.datetime.utcfromtimestamp(0)  # fix(clean): merge with similar code for sequence viewer
                        timestamps = [(rr.timestamp.replace(tzinfo = None) - epoch).total_seconds() for rr in resource_revisions]  # fix(clean): use some sort of unzip function
                        values = [rr.data for rr in resource_revisions]
                        units = json.loads(r.system_attributes).get('units', None)
                        return {'name': r.name, 'units': units, 'timestamps': timestamps, 'values': values}

                # if no filter assume just want current value
                # fix(later): should instead provide all values and have a separate way to get more recent value?
                else:
                    rev = request.values.get('rev')
                    if rev:
                        rev = int(rev)  # fix(soon): save int conversion
                    result = make_response(read_resource(r, revision_id = rev))
                    data_type = json.loads(r.system_attributes)['data_type']
                    if data_type == Resource.IMAGE_SEQUENCE:
                        result.headers['Content-Type'] = 'image/jpeg'
                    else:
                        result.headers['Content-Type'] = 'text/plain'

            # if file, return file data/contents
            else:
                data = read_resource(r)
                if not data:
                    abort(404)
                name = r.name
                if request.values.get('convert_to', request.values.get('convertTo', '')) == 'xls' and r.name.endswith('csv'):
                    data = convert_csv_to_xls(data)
                    name = name.replace('csv', 'xls')
                result = make_response(data)
                result.headers['Content-Type'] = 'application/octet-stream'
                if request.values.get('download', False):
                    result.headers['Content-Disposition'] = 'attachment; filename=' + name
        return result
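A hedged client-side sketch of fetching a filtered sequence through this handler, using the query parameters it reads above. The base URL, resource path, and credentials are assumptions; per the 'self' branch, the API key travels in the HTTP basic-auth password field.

import requests

resp = requests.get(
    'https://flow.example.com/api/v1/resources/my-folder/temperature',  # hypothetical URL
    params={
        'count': 100,
        'start_timestamp': '2020-01-01T00:00:00Z',  # format assumed; parsed by parse_json_datetime
        'end_timestamp': '2020-01-02T00:00:00Z',
    },
    auth=('', 'MY_API_KEY'),
)
data = resp.json()  # e.g. {'name': ..., 'units': ..., 'timestamps': [...], 'values': [...]}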