Code Example #1
File: views.py Project: hnkien/sirikata-cdn
def download_path(request, filename):
    parts = posixpath.normpath(filename).split("/")
    if len(parts) < 3:
        return HttpResponseBadRequest()
    
    requested_file = parts[-1]

    try: version_num = str(int(parts[-2]))
    except ValueError: version_num = None
    
    if version_num is None:
        base_path = "/".join(parts[:-2])
        type_id = parts[-2]
        versions = get_versions('/' + base_path)
        if versions is None:
            return HttpResponseNotFound()
        version_num = str(max(map(int, versions)))
    else:
        base_path = "/".join(parts[:-3])
        type_id = parts[-3]

    try:
        file_metadata = get_file_metadata("/%s/%s" % (base_path, version_num))
    except NotFoundError:
        return HttpResponseNotFound()

    # Reject unknown type segments (the same guard appears in the dns view
    # below); otherwise a bad type_id raises KeyError instead of returning 404.
    if type_id not in file_metadata['types']:
        return HttpResponseNotFound()

    if requested_file == posixpath.basename(base_path):
        is_mesh = True
        hash = file_metadata['types'][type_id]['hash']
    else:
        is_mesh = False
        subfile_map = {}
        for subfile in file_metadata['types'][type_id]['subfiles']:
            (subfile_base, vers) = posixpath.split(subfile)
            subfile_basename = posixpath.basename(subfile_base)
            subfile_map[subfile_basename] = subfile

        if requested_file not in subfile_map:
            return HttpResponseNotFound()

        subfile_metadata = get_file_metadata(subfile_map[requested_file])
        hash = subfile_metadata['hash']

    try:
        rec = get_hash(hash)
    except NotFoundError:
        return HttpResponseNotFound()

    data = rec['data']
    mime = rec['mimetype']

    response = HttpResponse(data, mimetype=mime)
    response['Content-Length'] = str(len(data))
    response['Access-Control-Allow-Origin'] = '*'
    return response
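For orientation, a Django 1.x URLconf wiring up this view might look like the following; the regex and the 'download' prefix are assumptions, not taken from the sirikata-cdn source.

# Hypothetical urls.py for the view above. Only the view name comes from
# the example; the pattern and module prefix are placeholders.
from django.conf.urls.defaults import patterns, url

urlpatterns = patterns('content.views',
    url(r'^download/(?P<filename>.+)$', 'download_path'),
)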
Code Example #2
File: views.py Project: hnkien/sirikata-cdn
def clone_file(request, filename):
    
    try: file_metadata = get_file_metadata("/%s" % filename)
    except NotFoundError: return HttpResponseNotFound()
    
    split = filename.split("/")
    file_username = split[0]
    basepath = "/" + "/".join(split[:-1])
    postpath = "/".join(split[1:-1])
    version = split[-1]
    
    if request.method == 'POST':
        form = CloneFile(request.POST)
        if form.is_valid():
            title = form.cleaned_data['title']
            description = form.cleaned_data['description']
            path = "/%s/%s" % (request.session['username'], form.cleaned_data['path'])
            updated_info = {'title': title, 'description': description}
            try:
                new_filename = copy_file(basepath, version, path, updated_info)
            except Exception:
                return HttpResponseServerError("There was an error cloning your file.")
            return redirect('content.views.view', new_filename[1:])
    else:
        form = CloneFile(initial={'path':postpath, 'title':file_metadata['title'], 'description':file_metadata['description']})
    
    view_params = {}
    view_params['clone_path'] = filename
    view_params['form'] = form
    return render_to_response('content/clone.html', view_params, context_instance = RequestContext(request))
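The CloneFile form itself is not shown on this page; below is a minimal sketch inferred from the fields the view accesses (path, title, description). The real definition in sirikata-cdn may add validation or widgets.

from django import forms

class CloneFile(forms.Form):
    # Hypothetical reconstruction based only on the fields read above.
    path = forms.CharField()
    title = forms.CharField()
    description = forms.CharField(widget=forms.Textarea, required=False)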
Code Example #3
File: views.py Project: hnkien/sirikata-cdn
def update_labels(request, filename):
    try: file_metadata = get_file_metadata(filename)
    except NotFoundError: return HttpResponseNotFound()

    split = filename.split("/")
    file_username = split[0]
    basepath = "/".join(split[:-1])
    version = split[-1]

    if request.method != 'POST':
        return HttpResponseBadRequest()

    form = UpdateLabels(request.POST)
    if not form.is_valid():
        return HttpResponseBadRequest()
    
    labels = form.cleaned_data['labels'].split(',')
    labels = [label.strip() for label in labels if len(label.strip()) > 0]

    updated_info = {
        'labels': labels,
    }
    add_base_metadata(basepath, version, updated_info)

    json_result = {'state': 'SUCCESS',
                   'updated_labels': ', '.join(labels)}
    return HttpResponse(simplejson.dumps(json_result, default=json_handler), mimetype='application/json')
Code Example #4
File: rerun.py Project: hnkien/sirikata-cdn
def do_single(task, path, modeltype=None):
    metadata = get_file_metadata(path)
    if modeltype is None:
        for t in metadata['types']:
            do_task(task, path, t, metadata=metadata)
    else:
        if modeltype not in metadata['types']:
            print >> sys.stderr, 'Invalid type', modeltype, 'for path', path
            return
        do_task(task, path, modeltype, metadata=metadata)
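Taken together, these examples imply a rough shape for the records returned by get_file_metadata. The sketch below is illustrative only, assembled from the fields the examples read; it is not an authoritative schema. Image subfile records instead carry top-level 'hash' and 'size' keys, as the dns view in Example #16 shows.

# Illustrative only: a mesh metadata record as implied by the field
# accesses in these examples. All values are placeholders.
example_mesh_metadata = {
    'title': 'Example Model',
    'description': 'Example description',
    'labels': ['example'],
    'type': 'mesh',                      # or 'image' for texture records
    'types': {
        'original': {
            'hash': '<sha256 of the mesh data>',
            'size': 12345,
            'subfiles': ['user/model.dae/original/texture.png/0'],
        },
        'progressive': {
            'hash': '<sha256 of the base mesh>',
            'size': 12345,
            'subfiles': [],
            'progressive_stream': '<hash of the .pdae stream, or None>',
            'progressive_stream_num_triangles': 1234,
            'mipmaps': {
                'texture.png': {
                    'hash': '<hash of the mipmap tar>',
                    'byte_ranges': [{'offset': 0, 'length': 100,
                                     'width': 128, 'height': 128}],
                },
            },
        },
    },
}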
Code Example #5
def generate_screenshot(filename, typeid):
    metadata = get_file_metadata(filename)
    hash = metadata['types'][typeid]['hash']
    subfiles = metadata['types'][typeid]['subfiles']
    
    dae_data = get_hash(hash)['data']

    subfile_map = {}
    for subfile in subfiles:
        img_meta = get_file_metadata(subfile)
        img_hash = img_meta['hash']
        img_data = get_hash(img_hash)['data']
        base_name = os.path.basename(os.path.split(subfile)[0])
        subfile_map[base_name] = img_data
    
    #The below is a total hack and I feel really dirty doing it, but
    # there is no way to get panda3d to clean up after itself except to
    # exit the process. Celery workers are run as a daemon, so they can't
    # create child processes. Doing so could cause orphaned, defunct processes.
    # I'm doing it anyway because I haven't found any other way to do this. Sorry.
    q = multiprocessing.Queue()
    daemonic = multiprocessing.current_process()._daemonic
    multiprocessing.current_process()._daemonic = False
    p = multiprocessing.Process(target=_get_screenshot, args=[q, dae_data, subfile_map])
    p.start()
    main_screenshot = q.get()
    p.join()
    multiprocessing.current_process()._daemonic = daemonic
    
    im = Image.open(StringIO(main_screenshot))
    thumbnail = StringIO()
    im.thumbnail((96,96), Image.ANTIALIAS)
    im.save(thumbnail, "PNG", optimize=1)
    thumbnail = thumbnail.getvalue()
    
    main_key = hashlib.sha256(main_screenshot).hexdigest()
    thumb_key = hashlib.sha256(thumbnail).hexdigest()
    save_file_data(main_key, main_screenshot, "image/png")
    save_file_data(thumb_key, thumbnail, "image/png")
    
    ss_info = {'screenshot': main_key, 'thumbnail': thumb_key}
    base_filename, version_num = os.path.split(filename)
    add_metadata(base_filename, version_num, typeid, ss_info)
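Both generate_screenshot above and generate_progressive_errors below repeat the same daemon-flag dance; it can be factored into a helper. A minimal sketch under that assumption (run_in_child is my name, not the project's):

import multiprocessing

def run_in_child(target, *args):
    # Hypothetical helper: temporarily clear the daemon flag so a Celery
    # worker process is allowed to fork, run target(queue, *args) in the
    # child, and collect a single result from the queue.
    q = multiprocessing.Queue()
    proc = multiprocessing.current_process()
    saved = proc._daemonic
    proc._daemonic = False
    try:
        p = multiprocessing.Process(target=target, args=[q] + list(args))
        p.start()
        result = q.get()
        p.join()
    finally:
        proc._daemonic = saved
    return result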
Code Example #6
File: views.py Project: hnkien/sirikata-cdn
def view_json(request, filename):
    try: file_metadata = get_file_metadata("/%s" % filename)
    except NotFoundError: return HttpResponseNotFound()

    view_params = {'metadata': file_metadata}

    split = filename.split("/")
    view_params['version'] = split[-1]
    view_params['basename'] = split[-2]
    view_params['basepath'] = "/".join(split[:-1])
    view_params['fullpath'] = filename
    response = HttpResponse(simplejson.dumps(view_params, default=json_handler, indent=4), mimetype='application/json')
    response['Access-Control-Allow-Origin'] = '*'
    return response
Code Example #7
File: views.py Project: hnkien/sirikata-cdn
def delete_file(request, filename):
    try: file_metadata = get_file_metadata("/%s" % filename)
    except NotFoundError: return HttpResponseNotFound()

    split = filename.split("/")
    file_username = split[0]
    basepath = "/" + "/".join(split[:-1])
    version = split[-1]

    if file_username != request.user.get('username') and not request.user.get('is_superuser', False):
        return HttpResponseForbidden()

    delete_file_metadata(basepath, version)

    return redirect('users.views.uploads')
Code Example #8
File: views.py Project: hnkien/sirikata-cdn
def view(request, filename):    
    split = filename.split("/")
    try:
        version = str(int(split[-1]))
    except ValueError:
        version = None

    if version is None:
        basename = split[-1]
        basepath = filename
    else:
        basename = split[-2]
        basepath = '/'.join(split[:-1])

    versions = get_versions('/' + basepath)
    if versions is None:
        return HttpResponseNotFound()

    latest_version = str(max(map(int, versions)))
    if version is None:
        version = latest_version

    try: file_metadata = get_file_metadata("/%s/%s" % (basepath, version))
    except NotFoundError: return HttpResponseNotFound()

    view_params = {'metadata': file_metadata}

    view_params['version'] = version
    view_params['basename'] = basename
    view_params['basepath'] = basepath
    view_params['fullpath'] = filename
    view_params['all_versions'] = map(str, sorted(map(int, versions)))
    view_params['latest_version'] = latest_version
    file_username = split[0]

    view_params['can_change'] = False
    if file_username == request.user.get('username') or request.user.get('is_superuser', False):
        view_params['can_change'] = True
        
    view_params['can_clone'] = False
    if request.user['is_authenticated'] and file_username != request.user.get('username'):
        view_params['can_clone'] = True
    
    if file_metadata['type'] == 'image':
        html_page = 'content/view_image.html'
    else:
        html_page = 'content/view.html'
    return render_to_response(html_page, view_params, context_instance = RequestContext(request))
Code Example #9
File: search.py Project: hnkien/sirikata-cdn
def do_update(full_path):
    SOLR_URL = getattr(settings, 'SOLR_WRITE_URL', None)
    SOLR_CONNECTION = None if SOLR_URL is None else pysolr.Solr(SOLR_URL)
    
    if SOLR_CONNECTION is None:
        return 0
        
    model_data = get_model_data_from_path(full_path)
    try:
        model_data['metadata'] = get_file_metadata(full_path)
    except NotFoundError:
        SOLR_CONNECTION.delete(id=full_path)
        return
    
    if model_data['metadata'].get('ephemeral', False):
        return 1
    
    model_data['timestamp'] = datetime.datetime.fromtimestamp(model_data['metadata']['timestamp'] / 1e6)
    to_insert = [item_to_search_fields(model_data)]
    SOLR_CONNECTION.add(to_insert)
    return 1
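do_update reads SOLR_WRITE_URL from the Django settings and indexes nothing when it is None. A placeholder settings entry might look like this (the URL is an assumption):

# settings.py (hypothetical value)
SOLR_WRITE_URL = 'http://localhost:8983/solr/'   # or None to disable indexing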
Code Example #10
def generate_progressive_errors(filename, typeid):
    if typeid != 'progressive':
        return
    
    metadata = get_file_metadata(filename)
    hash = metadata['types'][typeid]['hash']
    subfiles = metadata['types'][typeid]['subfiles']
    progressive_stream_hash = metadata['types'][typeid]['progressive_stream']
    mipmap_tar_hash = metadata['types'][typeid]['mipmaps'].values()[0]['hash']
    
    dae_data = get_hash(hash)['data']
    pm_data = get_hash(progressive_stream_hash)['data'] if progressive_stream_hash is not None else None
    mipmap_tar_data = get_hash(mipmap_tar_hash)['data']
    
    #The below is a total hack and I feel really dirty doing it, but
    # there is no way to get panda3d to clean up after itself except to
    # exit the process. Celery workers are run as a daemon, so they can't
    # create child processes. Doing so could cause orphaned, defunct processes.
    # I'm doing it anyway because I haven't found any other way to do this. Sorry.
    q = multiprocessing.Queue()
    daemonic = multiprocessing.current_process()._daemonic
    multiprocessing.current_process()._daemonic = False
    p = multiprocessing.Process(target=_get_progressive_errors, args=[q, dae_data, pm_data, mipmap_tar_data])
    p.start()
    success, error_data = q.get()
    p.join()
    multiprocessing.current_process()._daemonic = daemonic
    
    if not success:
        print 'Exception from worker, %s' % str(error_data)
        raise Exception("got exception from worker: %s" % str(error_data))
    
    if error_data is None:
        return
    
    error_info = {'progressive_perceptual_error': error_data}
    base_filename, version_num = os.path.split(filename)
    add_metadata(base_filename, version_num, typeid, error_info)
Code Example #11
File: views.py Project: hnkien/sirikata-cdn
def edit_file(request, filename):
    try: file_metadata = get_file_metadata("/%s" % filename)
    except NotFoundError: return HttpResponseNotFound()

    split = filename.split("/")
    file_username = split[0]
    basepath = "/" + "/".join(split[:-1])
    version = split[-1]

    if file_username != request.user.get('username') and not request.user.get('is_superuser', False):
        return HttpResponseForbidden()

    if request.method == 'POST':
        form = EditFile(request.POST)
        if form.is_valid():
            title = form.cleaned_data['title']
            description = form.cleaned_data['description']
            labels = form.cleaned_data['labels'].split(',')
            labels = [label.strip() for label in labels if len(label.strip()) > 0]
            
            updated_info = {
                'title': title,
                'description': description,
                'labels': labels,
            }
            add_base_metadata(basepath, version, updated_info)

            return redirect('content.views.view', filename)
    else:
        form = EditFile(initial={
            'title': file_metadata['title'],
            'description' : file_metadata['description'],
            'labels': ', '.join(file_metadata.get('labels', []))
        })

    view_params = {'form': form,
                   'filename': filename}
    return render_to_response('content/edit.html', view_params, context_instance = RequestContext(request))
Code Example #12
def generate_optimized(filename, typeid):
    metadata = get_file_metadata(filename)
    hash = metadata['types'][typeid]['hash']
    subfiles = metadata['types'][typeid]['subfiles']
    path, version = posixpath.split(filename)

    dae_data = get_hash(hash)['data']

    subfile_map = {}
    for subfile in subfiles:
        img_meta = get_file_metadata(subfile)
        img_hash = img_meta['hash']
        img_data = get_hash(img_hash)['data']
        base_name = posixpath.basename(posixpath.split(subfile)[0])
        subfile_map[base_name] = img_data

    def customImageLoader(filename):
        return subfile_map[posixpath.basename(filename)]

    mesh = collada.Collada(StringIO(dae_data), aux_file_loader=customImageLoader)

    med_opts = meshtool.filters.factory.getInstance('medium_optimizations')
    mesh = med_opts.apply(mesh)

    #Make sure image paths are just the base name
    current_prefix = "optimized"
    subfile_names = []
    subfile_map = {}
    for img in mesh.images:
        base_name = posixpath.basename(img.path)
        subfile_map[base_name] = img.data

        img_hex_key = hashlib.sha256(subfile_map[base_name]).hexdigest()
        save_file_data(img_hex_key, subfile_map[base_name], "image/%s" % img.pilimage.format.lower())
        img_path = "%s/%s/%s" % (path, current_prefix, base_name)
        img_len = len(subfile_map[base_name])
        img_version_num = get_new_version_from_path(img_path, file_type="image")
        save_file_name(img_path, img_version_num, img_hex_key, img_len)
        subfile_names.append("%s/%s" % (img_path, img_version_num))

    str_buffer = StringIO()
    mesh.write(str_buffer)
    orig_save_data = str_buffer.getvalue()
    orig_hex_key = hashlib.sha256(orig_save_data).hexdigest()

    save_file_data(orig_hex_key, orig_save_data, "application/xml")

    zip_buffer = StringIO()
    combined_zip = zipfile.ZipFile(zip_buffer, mode='w', compression=zipfile.ZIP_DEFLATED)
    combined_zip.writestr(posixpath.basename(path), orig_save_data)
    for img_name, img_data in subfile_map.iteritems():
        combined_zip.writestr(img_name, img_data)
    combined_zip.close()

    zip_save_data = zip_buffer.getvalue()
    zip_hex_key = hashlib.sha256(zip_save_data).hexdigest()
    save_file_data(zip_hex_key, zip_save_data, "application/zip")

    save_version_type(path, version, orig_hex_key, len(orig_save_data),
                      subfile_names, zip_hex_key, "optimized")

    send_task("celery_tasks.generate_screenshot.generate_screenshot", args=[filename, "optimized"])
    send_task("celery_tasks.generate_metadata.generate_metadata", args=[filename, "optimized"])
Code Example #13
def generate_metadata(filename, typeid):
    
    metadata = get_file_metadata(filename)
    hash = metadata['types'][typeid]['hash']
    subfiles = metadata['types'][typeid]['subfiles']
    
    dae_data = get_hash(hash)['data']

    subfile_map = {}
    subfile_sizes = {}
    subfile_sizes_gzip = {}
    for subfile in subfiles:
        img_meta = get_file_metadata(subfile)
        img_hash = img_meta['hash']
        img_data = get_hash(img_hash)['data']
        subfile_sizes[subfile] = len(img_data)
        subfile_sizes_gzip[subfile] = get_gzip_size(img_data)
        base_name = os.path.basename(os.path.split(subfile)[0])
        subfile_map[base_name] = img_data
    
    def customImageLoader(filename):
        return subfile_map[posixpath.basename(filename)]
    
    mesh = collada.Collada(StringIO(dae_data), aux_file_loader=customImageLoader)
    
    stream_hash = metadata['types'][typeid].get('progressive_stream', None)
    stream_data = get_hash(stream_hash)['data'] if stream_hash is not None else None
    
    if stream_data is not None:
        # add back the progressive stream so we get accurate metadata
        mesh = add_back_pm(mesh, StringIO(stream_data), 100)
    
    json_data = json.loads(getJSON(mesh))

    metadata_info = {}
    metadata_info['num_triangles'] = json_data['num_triangles']
    metadata_info['num_materials'] = len(json_data['materials'])
    metadata_info['num_images'] = len(json_data['images'])
    metadata_info['texture_ram_usage'] = json_data['texture_ram']
    metadata_info['num_draw_calls'] = json_data['num_draw_with_batching']
    metadata_info['num_vertices'] = json_data['num_vertices']
    metadata_info['bounds_info'] = json_data['bounds_info']

    triangulate = meshtool.filters.factory.getInstance('triangulate')
    mesh = triangulate.apply(mesh)
    save_ply = meshtool.filters.factory.getInstance('save_ply')
    ply_temp_file = tempfile.mktemp(suffix='.ply', prefix='meshtool-genmetadata-zernike')
    save_ply.apply(mesh, ply_temp_file)
    
    zernike_calc = os.path.join(os.path.dirname(__file__), 'zernike_calculator')
    zernike_output = subprocess.check_output([zernike_calc, ply_temp_file])
    zernike_nums = zernike_output.split(',')
    zernike_nums = map(float, zernike_nums)
    metadata_info['zernike'] = zernike_nums
    os.remove(ply_temp_file)

    split = filename.split("/")
    version = split[-1]
    file_key = "/".join(split[:-1])
    added_metadata = { 'metadata': metadata_info }
    
    # the size of the mesh, gzipped
    added_metadata['size_gzip'] = get_gzip_size(dae_data)
    
    # the size of each subfile
    added_metadata['subfile_sizes'] = subfile_sizes
    # the size of each subfile, gzipped
    added_metadata['subfile_sizes_gzip'] = subfile_sizes_gzip
    
    if stream_data is not None:
        # the size of the progressive stream, if exists
        added_metadata['progressive_stream_size'] = len(stream_data)
        added_metadata['progressive_stream_size_gzip'] = get_gzip_size(stream_data)
    
    
    add_metadata(file_key, version, typeid, added_metadata)
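The get_gzip_size helper is not shown on this page; one plausible implementation, assuming it simply measures the in-memory gzipped length (the project's actual helper may differ):

import gzip
from StringIO import StringIO

def get_gzip_size(data):
    # Hypothetical reconstruction: compress the bytes in memory and
    # return the compressed length.
    buf = StringIO()
    gz = gzip.GzipFile(fileobj=buf, mode='wb')
    gz.write(data)
    gz.close()
    return len(buf.getvalue())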
Code Example #14
def generate_panda3d(filename, typeid):
    metadata = get_file_metadata(filename)
    hash = metadata['types'][typeid]['hash']
    subfiles = metadata['types'][typeid]['subfiles']
    progressive_stream = metadata['types'][typeid].get('progressive_stream')
    progressive_data = get_hash(progressive_stream)['data'] if progressive_stream else None
    mipmaps = metadata['types'][typeid].get('mipmaps')
    pathinfo = PathInfo(filename)
    dae_data = get_hash(hash)['data']

    if mipmaps is not None:

        mipmap_data = {}
        for mipmap_name, mipmap_info in mipmaps.iteritems():
            tar_hash = mipmap_info['hash']
            tar_data = get_hash(tar_hash)['data']
            
            min_range = None
            max_range = None
            min_size = 128
            for byte_range in mipmap_info['byte_ranges']:
                if byte_range['width'] <= min_size and byte_range['height'] <= min_size:
                    min_range = (byte_range['offset'], byte_range['length'])
                max_range = (byte_range['offset'], byte_range['length'])
    
            mipmap_data[mipmap_name] = {}
            mipmap_data[mipmap_name]['base'] = tar_data[min_range[0]:min_range[0]+min_range[1]]
            mipmap_data[mipmap_name]['full'] = tar_data[max_range[0]:max_range[0]+max_range[1]]
    
        def base_loader(filename):
            return mipmap_data[filename]['base']
        def full_loader(filename):
            return mipmap_data[filename]['full']
    
        base_mesh = collada.Collada(StringIO(dae_data), aux_file_loader=base_loader)
        base_bam_data = getBam(base_mesh, 'base_' + filename)
        base_bam_hex_key = hashlib.sha256(base_bam_data).hexdigest()
        save_file_data(base_bam_hex_key, base_bam_data, "model/x-bam")
    
        full_mesh = collada.Collada(StringIO(dae_data), aux_file_loader=full_loader)
        if progressive_data is not None:
            full_mesh = add_back_pm.add_back_pm(full_mesh, StringIO(progressive_data), 100)
        full_bam_data = getBam(full_mesh, 'full_' + filename)
        full_bam_hex_key = hashlib.sha256(full_bam_data).hexdigest()
        save_file_data(full_bam_hex_key, full_bam_data, "model/x-bam")
    
        add_metadata(pathinfo.basepath, pathinfo.version, typeid, {'panda3d_base_bam': base_bam_hex_key,
                                                                   'panda3d_full_bam': full_bam_hex_key})
    else:
        
        subfile_map = {}
        for subfile in subfiles:
            img_meta = get_file_metadata(subfile)
            img_hash = img_meta['hash']
            img_data = get_hash(img_hash)['data']
            base_name = os.path.basename(os.path.split(subfile)[0])
            subfile_map[base_name] = img_data
        
        def customImageLoader(filename):
            return subfile_map[posixpath.basename(filename)]
        
        mesh = collada.Collada(StringIO(dae_data), aux_file_loader=customImageLoader)
        other_bam_data = getBam(mesh, typeid + '_' + filename)
        other_bam_hex_key = hashlib.sha256(other_bam_data).hexdigest()
        save_file_data(other_bam_hex_key, other_bam_data, "model/x-bam")
        
        add_metadata(pathinfo.basepath, pathinfo.version, typeid, {'panda3d_bam': other_bam_hex_key})
        
Code Example #15
def generate_progressive(filename, typeid):
    metadata = get_file_metadata(filename)
    hash = metadata['types'][typeid]['hash']
    subfiles = metadata['types'][typeid]['subfiles']
    path, version = posixpath.split(filename)

    dae_data = get_hash(hash)['data']

    subfile_map = {}
    for subfile in subfiles:
        img_meta = get_file_metadata(subfile)
        img_hash = img_meta['hash']
        img_data = get_hash(img_hash)['data']
        base_name = posixpath.basename(posixpath.split(subfile)[0])
        subfile_map[base_name] = img_data

    def customImageLoader(filename):
        return subfile_map[posixpath.basename(filename)]

    mesh = collada.Collada(StringIO(dae_data), aux_file_loader=customImageLoader)

    strip_lines = meshtool.filters.factory.getInstance('strip_lines')
    mesh = strip_lines.apply(mesh)
    med_opts = meshtool.filters.factory.getInstance('medium_optimizations')
    mesh = med_opts.apply(mesh)

    progressive_stream = StringIO()
    sander_simplify = SanderSimplify(mesh, progressive_stream)
    mesh = sander_simplify.simplify()
    
    if sander_simplify.base_tri_count != sander_simplify.orig_tri_count:
        progressive_stream = progressive_stream.getvalue()
        progressive_hex_key = hashlib.sha256(progressive_stream).hexdigest()
        save_file_data(progressive_hex_key, progressive_stream, "model/vnd.pdae")
        progressive_stream_num_triangles = sander_simplify.orig_tri_count - sander_simplify.base_tri_count
    else:
        progressive_hex_key = None
        progressive_stream_num_triangles = 0

    mipmap_metadata = {}
    mipmaps = getMipMaps(mesh)
    for imgpath, (tarbuf, ranges) in mipmaps.iteritems():
        mipmap_tar_hex_key = hashlib.sha256(tarbuf).hexdigest()
        save_file_data(mipmap_tar_hex_key, tarbuf, "application/x-tar")
        mipmap_metadata[imgpath] = {'hash':mipmap_tar_hex_key, 'byte_ranges':ranges}

    #Make sure image paths are just the base name
    current_prefix = "progressive"
    subfile_names = []
    subfile_map = {}
    for img in mesh.images:
        base_name = posixpath.basename(img.path)
        subfile_map[base_name] = img.data

        img_hex_key = hashlib.sha256(subfile_map[base_name]).hexdigest()
        save_file_data(img_hex_key, subfile_map[base_name], "image/%s" % img.pilimage.format.lower())
        img_path = "%s/%s/%s" % (path, current_prefix, base_name)
        img_len = len(subfile_map[base_name])
        img_version_num = get_new_version_from_path(img_path, file_type="image")
        save_file_name(img_path, img_version_num, img_hex_key, img_len)
        subfile_names.append("%s/%s" % (img_path, img_version_num))

    str_buffer = StringIO()
    mesh.write(str_buffer)
    orig_save_data = str_buffer.getvalue()
    orig_hex_key = hashlib.sha256(orig_save_data).hexdigest()

    save_file_data(orig_hex_key, orig_save_data, "application/xml")

    zip_buffer = StringIO()
    combined_zip = zipfile.ZipFile(zip_buffer, mode='w', compression=zipfile.ZIP_DEFLATED)
    combined_zip.writestr(posixpath.basename(path), orig_save_data)
    for img_name, img_data in subfile_map.iteritems():
        combined_zip.writestr(img_name, img_data)
    combined_zip.close()

    zip_save_data = zip_buffer.getvalue()
    zip_hex_key = hashlib.sha256(zip_save_data).hexdigest()
    save_file_data(zip_hex_key, zip_save_data, "application/zip")

    save_version_type(path, version, orig_hex_key, len(orig_save_data),
                      subfile_names, zip_hex_key, "progressive")

    add_metadata(path, version, "progressive", { 'progressive_stream': progressive_hex_key,
                                                 'progressive_stream_num_triangles': progressive_stream_num_triangles,
                                                 'mipmaps': mipmap_metadata  })

    send_task("celery_tasks.generate_screenshot.generate_screenshot", args=[filename, "progressive"])
    send_task("celery_tasks.generate_metadata.generate_metadata", args=[filename, "progressive"])
Code Example #16
File: views.py Project: hnkien/sirikata-cdn
def dns(request, filename):
    if request.method != 'HEAD' and request.method != 'GET':
        return HttpResponseBadRequest()

    #check if filename exists as-is, otherwise try meerkat URI
    try:
        file_metadata = get_file_metadata("/" + filename)
        pathinfo = PathInfo(filename)
        requested_file = pathinfo.basename
        version_num = pathinfo.version
        base_path = pathinfo.basepath
        hash = file_metadata['hash']
        file_size = file_metadata['size']
        is_mesh = False
        meerkat = False
    except NotFoundError:
        meerkat = True

    if meerkat:
        parts = posixpath.normpath(filename).split("/")
        if len(parts) < 3:
            return HttpResponseBadRequest()
        
        requested_file = parts[-1]
    
        try: version_num = str(int(parts[-2]))
        except ValueError: version_num = None
        
        if version_num is None:
            base_path = "/".join(parts[:-2])
            type_id = parts[-2]
            versions = get_versions('/' + base_path)
            if versions is None:
                return HttpResponseNotFound()
            version_num = str(max(map(int, versions)))
        else:
            base_path = "/".join(parts[:-3])
            type_id = parts[-3]
    
        try: 
            file_metadata = get_file_metadata("/%s/%s" % (base_path, version_num))
        except NotFoundError:
            return HttpResponseNotFound()

        if type_id not in file_metadata['types']:
            return HttpResponseNotFound()

        if requested_file == posixpath.basename(base_path):
            is_mesh = True
            hash = file_metadata['types'][type_id]['hash']
            file_size = file_metadata['types'][type_id]['size']
        else:
            is_mesh = False
            subfile_map = {}
            for subfile in file_metadata['types'][type_id]['subfiles']:
                (subfile_base, vers) = posixpath.split(subfile)
                subfile_basename = posixpath.basename(subfile_base)
                subfile_map[subfile_basename] = subfile
    
            if requested_file not in subfile_map:
                return HttpResponseNotFound()
    
            subfile_metadata = get_file_metadata(subfile_map[requested_file])
            hash = subfile_metadata['hash']
            file_size = subfile_metadata['size']
    
    if request.method == 'GET':
        body = {'Hash': hash, 'File-Size': file_size}
        bodydata = simplejson.dumps(body)
        response = HttpResponse(bodydata, mimetype='application/json')
    else:
        response = HttpResponse()
        
    if is_mesh and 'progressive_stream' in file_metadata['types'][type_id] and file_metadata['types'][type_id]['progressive_stream'] is not None:
        response['Progresive-Stream'] = file_metadata['types'][type_id]['progressive_stream']
    if is_mesh and 'progressive_stream_num_triangles' in file_metadata['types'][type_id]:
        response['Progresive-Stream-Num-Triangles'] = file_metadata['types'][type_id]['progressive_stream_num_triangles']

    if is_mesh and 'metadata' in file_metadata['types'][type_id]:
        extra_metadata = file_metadata['types'][type_id]['metadata']
        if 'num_triangles' in extra_metadata:
            response['Num-Triangles'] = extra_metadata['num_triangles']
        if 'zernike' in extra_metadata:
            response['Zernike']  = ','.join(map(str, extra_metadata['zernike']))

    if is_mesh and 'subfiles' in file_metadata['types'][type_id]:
        subfiles = file_metadata['types'][type_id]['subfiles']
        response['Subfiles'] = len(subfiles)
        for subfile_number, subfile_path in enumerate(subfiles):
            pathinfo = PathInfo(subfile_path)
            response['Subfile-%d-Name' % subfile_number] = pathinfo.basename
            response['Subfile-%d-Path' % subfile_number] = pathinfo.normpath

    if is_mesh and 'mipmaps' in file_metadata['types'][type_id]:
        mipmaps = file_metadata['types'][type_id]['mipmaps']
        response['Mipmaps'] = len(mipmaps)
        for mipmap_number, (mipmap_name, mipmap_data) in enumerate(mipmaps.iteritems()):
            response['Mipmap-%d-Name' % mipmap_number] = mipmap_name
            response['Mipmap-%d-Hash' % mipmap_number] = mipmap_data['hash']
            for mipmap_level_number, mipmap_level in enumerate(mipmap_data['byte_ranges']):
                response['Mipmap-%d-Level-%d' % (mipmap_number,mipmap_level_number)] = '%s,%s,%s,%s' % (mipmap_level['offset'], mipmap_level['length'], mipmap_level['width'], mipmap_level['height'])

    response['Access-Control-Allow-Origin'] = '*'
    response['Access-Control-Expose-Headers'] = 'Hash, File-Size'
    response['Hash'] = hash
    response['File-Size'] = file_size

    return response
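A client might consume this endpoint as sketched below; the host, URL prefix, and file path are placeholders rather than the CDN's actual routing.

import urllib2

# Hypothetical client for the dns view above. A HEAD request returns only
# the headers; a GET also returns the small JSON body.
resp = urllib2.urlopen('http://cdn.example.com/dns/user/model.dae/original/0/model.dae')
print resp.info().getheader('Hash'), resp.info().getheader('File-Size')
print resp.read()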