Example #1
def upload():
    """
    The upload method takes an uploaded file and puts it into the Celery
    processing queue using the Redis backend. Filename integrity is enforced
    by renaming the uploaded file to a unique name and deleting it after
    successful processing.
    """
    logger = current_app.logger
    if request.method == 'POST':
        # FIXME: convert this to WTForms
        meshlab = request.form.getlist('meshlab')
        template = request.form['template']

        if 'email_to' in request.form:
            email_to = request.form['email_to']
        else:
            email_to = None

        file = request.files['file']
        metadata = request.files['metadata']
        url = request.form['url']

        # options to pass to the conversion task
        options = dict()

        # create a UUID like hash for temporary file storage
        hash = uuid.uuid4().hex
        options.update(hash=hash)



        # This whole section is kind of flimsy.
        # - code could be more idiomatic
        # - the download should be performed asynchronously, though some checks
        #   should be performed here (allowed hosts, filenames, etc.)
        # - downloading a URL triggered via a public web form is a HUGE security
        #   risk. Basically anyone can kill our server by pasting a URL to be
        #   downloaded. Therefore, at the moment, URLs are restricted to
        #   the ALLOWED_DOWNLOAD_HOSTS setting.
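
        # A hedged sketch of what security.is_allowed_host might check
        # (hypothetical implementation; the real check lives in the
        # security module):
        #
        #   from urlparse import urlparse
        #
        #   def is_allowed_host(url):
        #       allowed = current_app.config['ALLOWED_DOWNLOAD_HOSTS']
        #       return urlparse(url).hostname in allowed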

        # error handling can be done smarter
        # URL instead of file
        if url:

            # basic security
            if not security.is_allowed_host(url):
                flash("Tried to download from a insecure source ({0}). Only the following hosts are allowed: {1}".format(url, ", ".join(current_app.config['ALLOWED_DOWNLOAD_HOSTS'])), 'error')
                return render_template('frontend/index.html')

            # download file to disk
            r = requests.get(url, stream=True, verify=False)
            filename = secure_filename(os.path.split(url)[-1].split("?")[0])

            # FIXME: this should check the mimetype in the http response header
            # as well
            if not security.is_allowed_file(filename):
                flash("Please upload a file of the following type: %s" %
                ", ".join(current_app.config['ALLOWED_EXTENSIONS']), 'error')
                return render_template('frontend/index.html')

            if r.status_code == requests.codes.ok:

                # the content-length header may be missing; default to 0
                if int(r.headers.get('content-length', 0)) > current_app.config['MAX_CONTENT_LENGTH']:
                    flash("File too big. Please don't upload files greater than {0}".format(humanize.bytes(current_app.config['MAX_CONTENT_LENGTH'])), 'error')
                    return render_template('frontend/index.html')
                else:
                    
                    upload_directory = os.path.join(current_app.config['UPLOAD_PATH'], hash)
                    os.mkdir(upload_directory)
                    filename = os.path.join(upload_directory, filename)

                    with open(filename, "wb") as data:
                        data.write(r.content)

            else:
                flash("Could not download file {0} Status code: {1}".format(url, r.status_code), 'error')
                return render_template('frontend/index.html')

        else:
            # in case of file upload place the uploaded file in a folder
            # named <uuid>
            if file and security.is_allowed_file(file.filename):
                filename = secure_filename(file.filename)
                upload_directory = os.path.join(current_app.config['UPLOAD_PATH'], hash)
                os.mkdir(upload_directory)
                # use the sanitized name, not the raw client-supplied one
                filename = os.path.join(upload_directory, filename)
                file.save(filename)

                # in case the user uploaded a meta file, store this as well
                # FIXME: make sure this is only processed for a valid template selection
                if metadata and not compression.is_archive(filename):
                    meta_filename = os.path.join(upload_directory, 'metadata' + os.path.splitext(metadata.filename)[1])
                    metadata.save(meta_filename)
                    # options for task
                    options.update(meta_filename=meta_filename)

            else:
                flash("Please upload a file of the following type: %s" %
                    ", ".join(current_app.config['ALLOWED_EXTENSIONS']), 'error')
                return render_template('frontend/index.html')


        if email_to:
            # We need at least a captcha system to protect against spammers.
            # For now, setting the sender env var enables the email system;
            # use with care behind password protection.
            if current_app.config['DEFAULT_MAIL_SENDER'] == 'noreply@localhost':
                options.update(email_to=None)
            else:
                options.update(email_to=email_to)

       
        if meshlab:
            options.update(meshlab=meshlab)

        
        options.update(
            template=template
        )

        retval = tasks.convert_model.apply_async((filename, options))
        
        return redirect(url_for('frontend.status', task_id=retval.task_id))


    return render_template('frontend/index.html')
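
# A minimal sketch of the WTForms conversion hinted at by the FIXME near the
# top of upload() ("convert this to WTForms"). Field names mirror the request
# keys used above; the field types and validators are assumptions, not part
# of the original code.
from wtforms import Form, StringField, validators

class UploadForm(Form):
    template = StringField('template', [validators.DataRequired()])
    url = StringField('url', [validators.Optional()])
    email_to = StringField('email_to', [validators.Optional(),
                                        validators.Email()])
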
def convert_model(input_file, options=None):
    """
    Task to run the model convestion. This task is written in a manner
    that is can run independently from the web application. You can call
    this task from the command line or other rich gui. The only requirement
    is a settings module in the current package as well as the requierd 
    dependencies (celery, jinja, aopt, meshlab, etc.).
    
    This tasks is currently very monolithic and does too much.
    To make this more flexible, this task should really only convert
    and optimize one file. If multiple files are uploaded, tasks
    can be chained.

    This task is currently a spaghetti monster mess from hell.
    """

    # used for configuration handling
    TASK_CONFIG_SECTION = 'task:modelconvert.tasks.convert_model'


    update_progress("Warming up...")

    # # need this so pubsub will work, it's probably due to too fast processing
    # # and the reconnect timeout of EventSource
    # # FIXME find out why we need to wait a bit and fix it
    # import time
    # time.sleep(10)

    logger = current_app.logger

    # The current task's id
    task_id = current_task.request.id
    
    # options:
    #   meshlab
    #   template
    #   hash
    #   meta
    
    # keep the generated hash for filenames
    # use the task id for status only
    # delete the uploaded file on successful conversion, otherwise
    # log the filename in the error trace

    # the hash should always be provided; if not,
    # i.e. running outside a web application, we reuse the task id
    hash = options.get('hash', task_id)

    # meshlab options
    meshlab = options.get('meshlab', None)
    
    # alternative template
    template = options.get('template', 'basic')

    # copy metadata to output dir if present
    meta_filename = options.get('meta_filename', None)


    # get the filename without extension
    # i.e. /path/to/foo.obj     -> foo
    #      /path/to/foo.bar.obj -> foo.bar
    # FIXME: get rid of this, in case of zip file this is hash.zip
    # and not usable the way we use it right now
    input_filename = os.path.splitext(os.path.basename(input_file))[0]


    update_progress("Starting to process uploaded file")
    logger.info("Uploaded file: {0}".format(input_file))

    download_path = current_app.config['DOWNLOAD_PATH']
    bundles_path = current_app.config['BUNDLES_PATH']
    upload_path = current_app.config['UPLOAD_PATH']

    # this should actually come in as a parameter, as we assume too much here
    upload_directory = os.path.join(current_app.config['UPLOAD_PATH'], hash)
    
    # first create where everything is stored
    output_directory = os.path.join(download_path, hash)
    os.mkdir(output_directory)


    # {{{ Per bundle configuration
    update_progress("Reading output template configuration")

    # TODO FIXME
    # This whole config stuff is a very naive implementation and belongs in a
    # task superclass or composite object which then scopes for configuration
    # relevant to the related task. E.g. self.config.get_boolean('aopt.option',
    # default) should actually be task_config_parser.get_boolean('aopt.option',
    # default_value), where task_config_parser is a subclass of the config
    # parser that performs:
    # config_parser.getboolean('task:path.to.this_task', 'aopt.option', default_value)
    import ConfigParser

    bundle_config = ConfigParser.SafeConfigParser()
#    bundle_config = ConfigParser.SafeConfigParser(allow_no_value=True)  # Python 2.7

    # read the defaults and template config
    bundle_config.read([
        os.path.abspath(os.path.join(os.path.dirname(__file__), 'convert_model.defaults')),
        os.path.join(bundles_path, template, 'settings.ini')
    ])
    
    # convert this to no_value with 2.7 requirement
    try:
        if bundle_config.getboolean(TASK_CONFIG_SECTION, 'meshlab.disabled'):
            meshlab = None
    except ConfigParser.NoOptionError:
        pass
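
    # With the Python 2.7 allow_no_value variant above, the try/except could
    # become an explicit existence check (sketch, same behaviour):
    #
    #   if (bundle_config.has_option(TASK_CONFIG_SECTION, 'meshlab.disabled')
    #           and bundle_config.getboolean(TASK_CONFIG_SECTION,
    #                                        'meshlab.disabled')):
    #       meshlab = None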

    meshlab_log = bundle_config.getboolean(TASK_CONFIG_SECTION, 'meshlab.log')
    aopt_log = bundle_config.getboolean(TASK_CONFIG_SECTION, 'aopt.log')


    # {{{ ZIPFILES
    # If the uploaded file is an archive, uncompress it.
    # Note that this step should be moved to the controller (or another task)
    # once we switch to a different workflow (upload file->select template->convert)
    # for each model. But for now, we are using the naive approach.
    # Refactor this out.
    

    # format ('infile.obj','outfile.x3d', ['outfile.xml'] or None)
    # it might make sense to create a class or at least a dict for this 
    # in the future,
    models_to_convert = []  

    if compression.is_archive(input_file):
        update_progress("Uncompressing archive")
        
        uncompressed_path = upload_directory + '.tmp'
        os.mkdir(uncompressed_path)

        compression.unzip(input_file, uncompressed_path)

        update_progress("Archive uncompressed")

        resources_to_copy = []  # entry format path/to/resource
        found_models = [] 
        found_metadata = []

        update_progress("Detecting models and resources")

        # detect and collect models, metadata, and resources
        for root, dirs, files in os.walk(uncompressed_path, topdown=False):
            for name in files:
                if security.is_model_file(name):
                    update_progress("Found model: {0}".format(name))
                    found_models.append(name)
                elif security.is_meta_file(name):
                    update_progress("Found meta data: {0}".format(name))
                    found_metadata.append(name)
                    resources_to_copy.append(name)
                else:
                    update_progress("Found resource: {0}".format(name))
                    resources_to_copy.append(name)
                    # just copy over
                    
            for name in dirs:
                update_progress("Found directory: {0}".format(name))
                resources_to_copy.append(name)

        if not found_models:
            raise ConversionError("No models found in archive. Be sure to put all models at the root level of your archive.")

        logger.info("****** FOUND_META: {0}".format(found_metadata))

        update_progress("Associating meta data to models")

        # FIXME: this could be improved
        for model in found_models:
            m_root = os.path.splitext(os.path.basename(model))[0]

            m_output_inline = m_root + '.x3d'
            m_output_html = m_root + '.html'

            # now we have a list of metas belonging to the current model
            # store that in the master list
            model_base_split = os.path.splitext(os.path.basename(model))
            model_info_dict = {
                'name':   model_base_split[0],

                'input':  model,
                'input_format': model_base_split[1][1:], # fixme, use magic|mime instead of extension
                'input_path': os.path.abspath(uncompressed_path),

                'output': model_base_split[0] + '.x3d',
                'output_format': 'x3d',
                
                'inline': model_base_split[0] + '.x3d',
                'preview': model_base_split[0] + '.html',
                
                'resources': resources_to_copy,
            }

            # then walk each metadata to find match for model
            m_metas = []
            for r in found_metadata:
                # found matching metadata file, mark it
                r_root = os.path.splitext(os.path.basename(r))[0]
                r_ext = os.path.splitext(os.path.basename(r))[1][1:]
                if r_root == m_root:
                    m_metas.append({
                        'file': r,
                        'type': r_ext,
                    })

            if m_metas:
                model_info_dict.update(metadata=m_metas)

            models_to_convert.append(model_info_dict)
#            models_to_convert.append( (model, m_output_inline, m_output_html, m_metas,) )

        # we now have a list of models with associated metadata
        # and a list of plain resources that simply need to be copied
        #   models_to_convert
        #   resources_to_copy
        logger.info("****** MODELS: {0}".format(models_to_convert))
        logger.info("****** RESOURCES: {0}".format(resources_to_copy))
        

        ####### First copy the resources

        # simplified: we just copy everything that is dir blindly as well
        # as all files in the root level which are not models.
        for resource in resources_to_copy:
            src = os.path.join(uncompressed_path, resource)
            dest = os.path.join(output_directory, resource)

            if os.path.isdir(src):
                fs.copytree(src, dest)
            else:
                shutil.copy(src, dest)
    # }}}
    else:
        # no compression, no multi-model, no textures...
        current_input_filename = os.path.splitext(os.path.basename(input_file))[0]

        model_base_split = os.path.splitext(os.path.basename(input_file))
        model_info_dict = {
            'name':   model_base_split[0],

            'input':  os.path.basename(input_file),
            'input_format': model_base_split[1][1:], # fixme, use magic|mime instead of extension
            'input_path': os.path.abspath(os.path.dirname(input_file)),

            'output': model_base_split[0] + '.x3d',
            'output_format': 'x3d',
            
            'inline': model_base_split[0] + '.x3d',
            'preview': model_base_split[0] + '.html',
            
            'resources': None,
        }

        # We have a meta file which could be named anything, so normalize it.
        # This is a mess - but it works for the review. It should be handled
        # by the zip code above, since it's the same logic for just one file.
        if meta_filename:
            
            meta_dest_filename = input_filename + os.path.splitext(meta_filename)[1]
            shutil.copy(meta_filename, os.path.join(output_directory, meta_dest_filename))

            meta_data_list = []
            meta_data_list.append({
                'file': meta_dest_filename,
                'type': os.path.splitext(meta_filename)[1][1:]
            })

            model_info_dict.update(metadata=meta_data_list)

        models_to_convert.append(model_info_dict)
        logger.info("***** MODELS TO CONVERT: {0} ".format(models_to_convert))

#        models_to_convert = [ (input_file, current_input_filename+'.x3d', current_input_filename+'.html', meta_dest_filename) ]



    logger.info("***** MODELS TO CONVERT: {0} ".format(models_to_convert))

    # ------------------------------------------------------------------
    # The following step only generates templates for the uploaded
    # data. This can be refactored out. The reason this runs before aopt
    # is to allow live preview of partially optimized models later on,
    # as well as reading configuration for aopt/meshlab.


    # first copy static assets
    output_directory_static = os.path.join(output_directory, 'static')
    input_directory_static = os.path.join(bundles_path, template, 'static')
    input_directory_shared = os.path.join(bundles_path, '_shared')
        
    # copy shared resources
    fs.copytree(input_directory_shared, output_directory)

    # copy template resources
    fs.copytree(input_directory_static, output_directory_static)


    # init template engine
    jinja = Environment(loader=FileSystemLoader(os.path.join(bundles_path, template)))

    tpl_job_context = {
        # fixme assuming too much here
        'archive_uri': hash + '.zip'
    }

    list_template = 'list.html'
    model_template = 'view.html'

    # first render index template if it's present in the template bundle.
    # we always do this, even if there's only one model to convert
    try:
        update_progress("Starting to render list template")

        tpl = jinja.get_template(list_template)
        context = { }
        context.update(models=models_to_convert, job=tpl_job_context)
        # we need info on the models, probably an object would be nice

        tpl_output = os.path.join(output_directory, 'list.html')
        # finally render template bundle
        with open(tpl_output, 'w+') as f:
            f.write(tpl.render(context))

    except TemplateNotFound:
        # not sure if we should stop here, for the moment we proceed
        # since the list.html list view is technically not necessary
        # to complete rendering
        update_progress("List template not found, proceeding without")
        logger.error("Template '{0}' not found - ignoring list view".format(list_template))
    finally:
        update_progress("Done processing list template")

    
    model_template_context = dict()
    try:
        model_template_renderer = jinja.get_template(model_template)

        for model in models_to_convert:
            # now render templates for individual models
            update_progress("Rendering template for model: {0}".format(model['name']))

            # write out template
            model_template_context.update(
                model=model,
                # the job this model belongs to (used for getting archive name)
                job=tpl_job_context  
            )

            tpl_output = os.path.join(output_directory, model['preview'])
            with open(tpl_output, 'w+') as f:
                f.write(model_template_renderer.render(model_template_context))

    except TemplateNotFound:
        logger.error("Template '{0}'' not found - ignoring list view".format(view_template))
    


    ### temp
    output_filename = models_to_convert[0]['output']
    output_template_filename = models_to_convert[0]['preview']


    # end template generation
    # ------------------------------------------------------------------


    # ------------------------------------------------------------------
    # Meshlab doing its thing, generating temporary outputs
    # this should live in its own task
    # ------------------------------------------------------------------
    working_directory = os.getcwd()
    os.chdir(output_directory)
    
    logger.info("Output filename:   {0}".format(output_filename) )
    logger.info("Output directory: {0}".format(output_directory) )
    logger.info("Working directory: {0}".format(working_directory) )
    logger.info("Aopt binary: {0}".format(current_app.config['AOPT_BINARY']))
    logger.info("Meshlab binary: {0}".format(current_app.config['MESHLAB_BINARY']))

    
    if meshlab:
        # input file == output file could lead to problems later on
        
        update_progress("Meshlab optimization...")
        
        env = os.environ.copy()
        env['DISPLAY'] = current_app.config['MESHLAB_DISPLAY']

                
        meshlab_filter = ""
        meshlab_filter += "<!DOCTYPE FilterScript><FilterScript>"

        for item in meshlab:
            # fixme, parameterization could be dynamic, not hardcoded
            if item == "Remove Isolated pieces (wrt Face Num.)":
                meshlab_filter += '<filter name="' + item + '">'
                meshlab_filter += '<Param type="RichInt" value="2500" name="MinComponentSize"/>'
                meshlab_filter += '</filter>'
                meshlab_filter += '<filter name="Remove Unreferenced Vertex"/>'
            else:
                meshlab_filter += '<filter name="' + item + '"/>' 

        meshlab_filter += "</FilterScript>"



        # TODO: name this after the model
        meshlab_filter_filename = os.path.join(output_directory, hash + '.mlx')
        with open(meshlab_filter_filename, 'w+') as f:
            f.write(meshlab_filter)
        
        # Subprocess in combination with PIPE/STDOUT could deadlock,
        # be careful with this. Prefer the Python 2.7 check_output version
        # commented out below.
        
        for model in models_to_convert:
            update_progress("Meshlab optimization: {0}".format(model['input']))
            
            meshlab_input_file = os.path.join(model['input_path'], model['input'])

            # options to account for many different attributes: -om vc fc vn vt wt
            # build the command as a list; only request vertex colors (vc)
            # for formats that support them (i.e. not obj), instead of
            # passing an empty string argument
            meshlab_cmd = [
                current_app.config['MESHLAB_BINARY'],
                "-i", meshlab_input_file,
                "-o", meshlab_input_file,
                "-s", meshlab_filter_filename,
                "-om",
            ]
            if model['input_format'] != "obj":
                meshlab_cmd.append("vc")
            meshlab_cmd.extend(["vt", "wt", "ff"])

            proc = subprocess.Popen(meshlab_cmd,
                                    env=env,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)
            
            output = proc.communicate()[0]
            returncode = proc.wait()

            # create a meshlab log in debug mode
            if current_app.config['DEBUG'] or meshlab_log:
                with open(os.path.join(output_directory, 'meshlab.log'), 'a+') as f:
                    f.write(output)


            logger.info("Meshlab optimization model: {0} return: {1}".format(meshlab_input_file, returncode))
            logger.info(output)

            if returncode == 0:
                update_progress("Meshlab successfull")
            else:
                update_progress("Meshlab failed!")
                logger.error("Meshlab problem: {0} return: {1}".format(meshlab_input_file, returncode))

        

        # Python 2.7
        # try:
        #     check_output([
        #         current_app.config['MESHLAB_BINARY'], 
        #         "-i", 
        #         input_file, 
        #         "-o",
        #         input_file, 
        #         "-s",
        #         meshlab_filter_filename,
        #         "-om",
        #         "ff"
        #         ], env=env)
        #         
        # except CalledProcessError as e:
        #     logger.info("Meshlab problem exit code {0}".format(e.returncode))
        #     logger.error("Meshlab: " + e.output)
            

        # if status == 0:
        #     logger.info("Meshlab optimization {0}".format(status))
        # else:
        #     logger.info("Meshlab problem exit code {0}".format(status))

    else:
        update_progress("Skipping Meshlab optimization")


    # end Meshlab
    # ------------------------------------------------------------------

    # ------------------------------------------------------------------
    # Aopt call
    # this should live in its own task (or maybe transcoder in the future)
    # ------------------------------------------------------------------
    update_progress("Starting AOPT conversion")

    status = -100

    for model in models_to_convert:

        update_progress("Converting: {0}".format(model['input']))

        aopt_bingeo = "{0}_bin".format(model['name'])
        os.mkdir(os.path.join(output_directory, aopt_bingeo))

        infile  = os.path.join(model['input_path'], model['input'])
        outfile = os.path.join(output_directory, model['output'])

        ## Config aopt call {{{

        ### FIXME: naive impl for the config stuff
        ### build a custom config parser, set defaults on init
        ### maybe using argparse to reconstruct arguments
        ### http://stackoverflow.com/questions/14823363/is-it-possible-to-reconstruct-a-command-line-with-pythons-argparse
        ### however, that would probably make it less straightforward
        aopt_bingeo_param = bundle_config.get(TASK_CONFIG_SECTION, 'aopt.binGeoParams')
        # validation check is not strictly necessary, since all combinations
        # are possible and invalid ones are just ignored...
        aopt_bingeo_valid = ['sa', 'sac', 'sacp']
        if aopt_bingeo_param not in aopt_bingeo_valid:
            logger.warning("AOPT binGeo param {0} invalid, using default 'sa'".format(aopt_bingeo_param))
            aopt_bingeo_param = 'sa'

        aopt_gencam = bundle_config.getboolean(TASK_CONFIG_SECTION, 'aopt.genCam')
        aopt_flatten_graph =  bundle_config.getboolean(TASK_CONFIG_SECTION, 'aopt.flattenGraph')        


        aopt_cmd = [
            current_app.config['AOPT_BINARY'], 
            '-i', 
            infile
        ]
        
        
        if aopt_flatten_graph:
            aopt_cmd.append('-F')
            aopt_cmd.append('Scene:"cacheopt(true)"')
        
        aopt_cmd.append('-f')
        aopt_cmd.append('PrimitiveSet:creaseAngle:4')
        aopt_cmd.append('-f')
        aopt_cmd.append('PrimitiveSet:normalPerVertex:TRUE')
        
        if aopt_gencam:
            aopt_cmd.append('-V')
        
        aopt_cmd.append('-G')
        aopt_cmd.append(aopt_bingeo + '/:' + aopt_bingeo_param)
        
        aopt_cmd.append('-x')
        aopt_cmd.append(outfile)
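
        # For illustration, with flattenGraph and genCam enabled the
        # assembled command is roughly:
        #   aopt -i <infile> -F Scene:"cacheopt(true)"
        #        -f PrimitiveSet:creaseAngle:4
        #        -f PrimitiveSet:normalPerVertex:TRUE
        #        -V -G <name>_bin/:sa -x <outfile>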
        

        # }}} end config aopt call

        # aopt_cmd = [
        #     current_app.config['AOPT_BINARY'], 
        #     '-i', 
        #     infile, 
        #     '-F',
        #     'Scene:"cacheopt(true)"',
        #     '-f',
        #     'PrimitiveSet:creaseAngle:4',              
        #     '-f',
        #     'PrimitiveSet:normalPerVertex:TRUE',
        #     '-V',
        #     '-G',
        #     aopt_bingeo + '/:sacp',
        #     '-x', 
        #     outfile
        # ]

        try:
        
            update_progress("Running AOPT on: {0}".format(model['input']))
            #status = subprocess.call(aopt_cmd)


            process = subprocess.Popen(aopt_cmd,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.STDOUT)
            output = process.communicate()[0]

            status = process.wait()

            # create an aopt log in debug mode
            # this is a security concern as long as aopt includes
            # full path names in the log output
            if current_app.config['DEBUG'] or aopt_log:
                with open(os.path.join(output_directory, 'aopt.log'), 'a+') as f:
                    f.write(output)


            logger.info("Aopt return: {0}".format(status))
            logger.info(output)

        except OSError:
            update_progress("Failure to execute AOPT")
            err_msg = "Error: AOPT not found or not executable {0}".format(repr(aopt_cmd))
            logger.error(err_msg)
            raise ConversionError(err_msg)


    if status < 0:
        # FIXME error handling and cleanup (breaking early is good but
        # cleanup calls for try/catch/finally or contextmanager)
        os.chdir(working_directory)

        # fixme: put this in exception code
        update_progress("Error on conversion process")
        logger.error("Error converting file")
        raise ConversionError('AOPT RETURNS: {0}'.format(status))
    
    else:
        # ------------------------------------------------------------------
        # Final creation of deliverable, could live in its own task
        # ------------------------------------------------------------------
        update_progress("Assembling deliverable...")
        zip_path = os.path.join(download_path, hash)
        compression.zipdir(zip_path, '%s.zip' % hash)

        os.chdir(working_directory)
    


    # ------------------------------------------------------------------
    # cleaning up
    # ------------------------------------------------------------------

    if not current_app.config['DEBUG']:
        # delete the uploaded file
        update_progress("Cleaning up...")

        # todo remove upload directory
        uncompressed_path = upload_directory + '.tmp'
        if os.path.exists(uncompressed_path):
            shutil.rmtree(uncompressed_path)
        if os.path.exists(upload_directory):
            shutil.rmtree(upload_directory)

    update_progress("Done")
    
    # import time
    # time.sleep(10)

    if len(models_to_convert) > 1:
        preview = 'list.html'
    else:
        preview = models_to_convert[0]['preview']

    result_set = dict(
        hash = hash,
        filenames = [preview, '%s.zip' % hash],
        input_file = input_file,
    )
    return result_set
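
# Minimal standalone usage sketch (hypothetical paths), mirroring the
# tasks.convert_model.apply_async((filename, options)) call in upload():
#
#   options = {'hash': 'abc123', 'template': 'basic'}
#   result = convert_model.apply_async(
#       ('/tmp/uploads/abc123/model.obj', options))
#   print result.task_id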
Example #3
def convert_model(input_file, options=None):
    """
    Task to run the model convestion. This task is written in a manner
    that is can run independently from the web application. You can call
    this task from the command line or other rich gui. The only requirement
    is a settings module in the current package as well as the requierd 
    dependencies (celery, jinja, aopt, meshlab, etc.).
    
    This tasks is currently very monolithic and does too much.
    To make this more flexible, this task should really only convert
    and optimize one file. If multiple files are uploaded, tasks
    can be chained.

    This task is currently a spaghetti monster mess from hell.
    """

    # used for configuration handling
    TASK_CONFIG_SECTION = 'task:modelconvert.tasks.convert_model'

    update_progress("Warming up...")

    # # need this so pubsub will work, it's probably due to too fast processing
    # # and the reconnect timeout of EventSource
    # # FIXME find out why we need to wait a bit and fix it
    # import time
    # time.sleep(10)

    logger = current_app.logger

    # The current task's id
    task_id = current_task.request.id

    # options:
    #   meshlab
    #   template
    #   hash
    #   meta

    # keep the generated hash for filenames
    # use the task id for status only
    # delete the uploaded file on successful conversion, otherwise
    # log the filename in the error trace

    # the hash should always be provided; if not,
    # i.e. running outside a web application, we reuse the task id
    hash = options.get('hash', task_id)

    # meshlab options
    meshlab = options.get('meshlab', None)

    # alternative template
    template = options.get('template', 'basic')

    email_to = options.get('email_to', None)

    # copy metadata to output dir if present
    meta_filename = options.get('meta_filename', None)

    # get the filename without extension
    # i.e. /path/to/foo.obj     -> foo
    #      /path/to/foo.bar.obj -> foo.bar
    # FIXME: get rid of this, in case of zip file this is hash.zip
    # and not usable the way we use it right now
    input_filename = os.path.splitext(os.path.basename(input_file))[0]

    update_progress("Starting to process uploaded file")
    logger.info("Uploaded file: {0}".format(input_file))

    download_path = current_app.config['DOWNLOAD_PATH']
    bundles_path = current_app.config['BUNDLES_PATH']
    upload_path = current_app.config['UPLOAD_PATH']

    # this should actually come in as a parameter, as we assume too much here
    upload_directory = os.path.join(current_app.config['UPLOAD_PATH'], hash)

    # first create where everything is stored
    output_directory = os.path.join(download_path, hash)
    os.mkdir(output_directory)

    # {{{ Per bundle configuration
    update_progress("Reading output template configuration")

    # TODO FIXME
    # This whole config stuff is a very naive implementation and belongs in a
    # task superclass or composite object which then scopes for configuration
    # relevant to the related task. E.g. self.config.get_boolean('aopt.option',
    # default) should actually be task_config_parser.get_boolean('aopt.option',
    # default_value), where task_config_parser is a subclass of the config
    # parser that performs:
    # config_parser.getboolean('task:path.to.this_task', 'aopt.option', default_value)
    import ConfigParser

    bundle_config = ConfigParser.SafeConfigParser()
    #    bundle_config = ConfigParser.SafeConfigParser(allow_no_value=True)  # Python 2.7

    # read the defaults and template config
    bundle_config.read([
        os.path.abspath(
            os.path.join(os.path.dirname(__file__), 'convert_model.defaults')),
        os.path.join(bundles_path, template, 'settings.ini')
    ])

    # convert this to no_value with 2.7 requirement
    try:
        if bundle_config.getboolean(TASK_CONFIG_SECTION, 'meshlab.disabled'):
            meshlab = None
    except ConfigParser.NoOptionError:
        pass

    meshlab_log = bundle_config.getboolean(TASK_CONFIG_SECTION, 'meshlab.log')
    aopt_log = bundle_config.getboolean(TASK_CONFIG_SECTION, 'aopt.log')
    #nexus_log = bundle_config.getboolean(TASK_CONFIG_SECTION, 'nexus.log')

    # {{{ ZIPFILES
    # If the uploaded file is an archive, uncompress it.
    # Note that this step should be moved to the controller (or another task)
    # once we switch to a different workflow (upload file->select template->convert)
    # for each model. But for now, we are using the naive approach.
    # Refactor this out.

    # format ('infile.obj','outfile.x3d', ['outfile.xml'] or None)
    # it might make sense to create a class or at least a dict for this
    # in the future,
    models_to_convert = []

    if compression.is_archive(input_file):
        update_progress("Uncompressing archive")

        uncompressed_path = upload_directory + '.tmp'
        os.mkdir(uncompressed_path)

        compression.unzip(input_file, uncompressed_path)

        update_progress("Archive uncompressed")

        resources_to_copy = []  # entry format path/to/resource
        found_models = []
        found_metadata = []

        update_progress("Detecting models and resources")

        # detect and collect models, metadata, and resources
        for root, dirs, files in os.walk(uncompressed_path, topdown=False):
            for name in files:
                if security.is_model_file(name):
                    update_progress("Found model: {0}".format(name))
                    found_models.append(name)
                elif security.is_meta_file(name):
                    update_progress("Found meta data: {0}".format(name))
                    found_metadata.append(name)
                    resources_to_copy.append(name)
                else:
                    update_progress("Found resource: {0}".format(name))
                    resources_to_copy.append(name)
                    # just copy over

            for name in dirs:
                update_progress("Found directory: {0}".format(name))
                resources_to_copy.append(name)

        if not found_models:
            raise ConversionError(
                "No models found in archive. Be sure to put all models at the root level of your archive."
            )

        logger.info("****** FOUND_META: {0}".format(found_metadata))

        update_progress("Associating meta data to models")

        # FIXME: this could be improved
        for model in found_models:
            m_root = os.path.splitext(os.path.basename(model))[0]

            m_output_inline = m_root + '.x3d'
            m_output_html = m_root + '.html'

            # now we have a list of metas belonging to the current model
            # store that in the master list
            model_base_split = os.path.splitext(os.path.basename(model))
            model_info_dict = {
                'name': model_base_split[0],
                'input': model,
                # fixme, use magic|mime instead of extension
                'input_format': model_base_split[1][1:],
                'input_path': os.path.abspath(uncompressed_path),
                'output': model_base_split[0] + '.x3d',
                'output_format': 'x3d',
                'inline': model_base_split[0] + '.x3d',
                'preview': model_base_split[0] + '.html',
                'resources': resources_to_copy,
            }

            # then walk each metadata to find match for model
            m_metas = []
            for r in found_metadata:
                # found matching metadata file, mark it
                r_root = os.path.splitext(os.path.basename(r))[0]
                r_ext = os.path.splitext(os.path.basename(r))[1][1:]
                if r_root == m_root:
                    m_metas.append({
                        'file': r,
                        'type': r_ext,
                    })

            if m_metas:
                model_info_dict.update(metadata=m_metas)

            models_to_convert.append(model_info_dict)
#            models_to_convert.append( (model, m_output_inline, m_output_html, m_metas,) )

        # we now have a list of models with associated metadata
        # and a list of plain resources that simply need to be copied
        #   models_to_convert
        #   resources_to_copy
        logger.info("****** MODELS: {0}".format(models_to_convert))
        logger.info("****** RESOURCES: {0}".format(resources_to_copy))

        ####### First copy the resources

        # simplified: we just copy everything that is dir blindly as well
        # as all files in the root level which are not models.
        for resource in resources_to_copy:
            src = os.path.join(uncompressed_path, resource)
            dest = os.path.join(output_directory, resource)

            if os.path.isdir(src):
                fs.copytree(src, dest)
            else:
                shutil.copy(src, dest)
    # }}}
    else:
        # no compression, no multi-model, no textures...
        current_input_filename = os.path.splitext(
            os.path.basename(input_file))[0]

        model_base_split = os.path.splitext(os.path.basename(input_file))
        model_info_dict = {
            'name': model_base_split[0],
            'input': os.path.basename(input_file),
            # fixme, use magic|mime instead of extension
            'input_format': model_base_split[1][1:],
            'input_path': os.path.abspath(os.path.dirname(input_file)),
            'output': model_base_split[0] + '.x3d',
            'output_format': 'x3d',
            'inline': model_base_split[0] + '.x3d',
            'preview': model_base_split[0] + '.html',
            'resources': None,
        }

        # We have a meta file which could be named anything, so normalize it.
        # This is a mess - but it works for the review. It should be handled
        # by the zip code above, since it's the same logic for just one file.
        if meta_filename:

            meta_dest_filename = input_filename + os.path.splitext(
                meta_filename)[1]
            shutil.copy(meta_filename,
                        os.path.join(output_directory, meta_dest_filename))

            meta_data_list = []
            meta_data_list.append({
                'file': meta_dest_filename,
                'type': os.path.splitext(meta_filename)[1][1:]
            })

            model_info_dict.update(metadata=meta_data_list)

        models_to_convert.append(model_info_dict)
        logger.info("***** MODELS TO CONVERT: {0} ".format(models_to_convert))

#        models_to_convert = [ (input_file, current_input_filename+'.x3d', current_input_filename+'.html', meta_dest_filename) ]

    logger.info("***** MODELS TO CONVERT: {0} ".format(models_to_convert))

    # ------------------------------------------------------------------
    # The following step only generates templates for the uploaded
    # data. This can be refactored out. The reason this runs before aopt
    # is to allow live preview of partially optimized models later on,
    # as well as reading configuration for aopt/meshlab.

    # first copy static assets
    output_directory_static = os.path.join(output_directory, 'static')
    input_directory_static = os.path.join(bundles_path, template, 'static')
    input_directory_shared = os.path.join(bundles_path, '_shared')

    # copy shared resources
    fs.copytree(input_directory_shared, output_directory)

    # copy template resources
    fs.copytree(input_directory_static, output_directory_static)

    # init template engine
    jinja = Environment(
        loader=FileSystemLoader(os.path.join(bundles_path, template)))

    tpl_job_context = {
        # fixme assuming too much here
        'archive_uri': hash + '.zip'
    }

    list_template = 'list.html'
    model_template = 'view.html'

    # first render index template if it's present in the template bundle.
    # we always do this, even if there's only one model to convert
    try:
        update_progress("Starting to render list template")

        tpl = jinja.get_template(list_template)
        context = {}
        context.update(models=models_to_convert, job=tpl_job_context)
        # we need info on the models, probably an object would be nice

        tpl_output = os.path.join(output_directory, 'list.html')
        # finally render template bundle
        with open(tpl_output, 'w+') as f:
            f.write(tpl.render(context))

    except TemplateNotFound:
        # not sure if we should stop here, for the moment we proceed
        # since the list.html list view is technically not necessary
        # to complete rendering
        update_progress("List template not found, proceeding without")
        logger.error("Template '{0}' not found - ignoring list view".format(
            list_template))
    finally:
        update_progress("Done processing list template")

    model_template_context = dict()
    try:
        model_template_renderer = jinja.get_template(model_template)

        for model in models_to_convert:
            # now render templates for individual models
            update_progress("Rendering template for model: {0}".format(
                model['name']))

            # write out template
            model_template_context.update(
                model=model,
                # the job this model belongs to (used for getting archive name)
                job=tpl_job_context)

            tpl_output = os.path.join(output_directory, model['preview'])
            with open(tpl_output, 'w+') as f:
                f.write(model_template_renderer.render(model_template_context))

    except TemplateNotFound:
        logger.error("Template '{0}' not found - ignoring model view".format(
            model_template))

    ### temp
    output_filename = models_to_convert[0]['output']
    output_template_filename = models_to_convert[0]['preview']

    # end template generation
    # ------------------------------------------------------------------

    # ------------------------------------------------------------------
    # Meshlab doing its thing, generating temporary outputs
    # this should live in its own task
    # ------------------------------------------------------------------
    working_directory = os.getcwd()
    os.chdir(output_directory)

    logger.info("Output filename:   {0}".format(output_filename))
    logger.info("Output directory: {0}".format(output_directory))
    logger.info("Working directory: {0}".format(working_directory))
    logger.info("Aopt binary: {0}".format(current_app.config['AOPT_BINARY']))
    logger.info("Meshlab binary: {0}".format(
        current_app.config['MESHLAB_BINARY']))
    logger.info("Nexus binary: {0}".format(current_app.config['NEXUS_BINARY']))

    if meshlab:
        # input file == output file could lead to problems later on

        update_progress("Meshlab optimization...")

        env = os.environ.copy()
        env['DISPLAY'] = current_app.config['MESHLAB_DISPLAY']

        meshlab_filter = ""
        meshlab_filter += "<!DOCTYPE FilterScript><FilterScript>"

        for item in meshlab:
            # fixme, parameterization could be dynamic, not hardcoded
            if item == "Remove Isolated pieces (wrt Face Num.)":
                meshlab_filter += '<filter name="' + item + '">'
                meshlab_filter += '<Param type="RichInt" value="2500" name="MinComponentSize"/>'
                meshlab_filter += '</filter>'
                meshlab_filter += '<filter name="Remove Unreferenced Vertex"/>'
            else:
                meshlab_filter += '<filter name="' + item + '"/>'

        meshlab_filter += "</FilterScript>"

        # TODO: name this after the model
        meshlab_filter_filename = os.path.join(output_directory, hash + '.mlx')
        with open(meshlab_filter_filename, 'w+') as f:
            f.write(meshlab_filter)

        # Subprocess in combination with PIPE/STDOUT could deadlock,
        # be careful with this. Prefer the Python 2.7 check_output version
        # commented out below.

        for model in models_to_convert:
            update_progress("Meshlab optimization: {0}".format(model['input']))

            meshlab_input_file = os.path.join(model['input_path'],
                                              model['input'])

            # options to account for many different attributes: -om vc fc vn vt wt
            # build the command as a list; only request vertex colors (vc)
            # for formats that support them (i.e. not obj), instead of
            # passing an empty string argument
            meshlab_cmd = [
                current_app.config['MESHLAB_BINARY'],
                "-i", meshlab_input_file,
                "-o", meshlab_input_file,
                "-s", meshlab_filter_filename,
                "-om",
            ]
            if model['input_format'] != "obj":
                meshlab_cmd.append("vc")
            meshlab_cmd.extend(["vt", "wt", "ff"])

            proc = subprocess.Popen(meshlab_cmd,
                                    env=env,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)

            output = proc.communicate()[0]
            returncode = proc.wait()

            # create a meshlab log in debug mode
            if current_app.config['DEBUG'] or meshlab_log:
                with open(os.path.join(output_directory, 'meshlab.log'),
                          'a+') as f:
                    f.write(output)

            logger.info("Meshlab optimization model: {0} return: {1}".format(
                meshlab_input_file, returncode))
            logger.info(output)

            if returncode == 0:
                update_progress("Meshlab successfull")
            else:
                update_progress("Meshlab failed!")
                logger.error("Meshlab problem: {0} return: {1}".format(
                    meshlab_input_file, returncode))

        # Python 2.7
        # try:
        #     check_output([
        #         current_app.config['MESHLAB_BINARY'],
        #         "-i",
        #         input_file,
        #         "-o",
        #         input_file,
        #         "-s",
        #         meshlab_filter_filename,
        #         "-om",
        #         "ff"
        #         ], env=env)
        #
        # except CalledProcessError as e:
        #     logger.info("Meshlab problem exit code {0}".format(e.returncode))
        #     logger.error("Meshlab: " + e.output)

        # if status == 0:
        #     logger.info("Meshlab optimization {0}".format(status))
        # else:
        #     logger.info("Meshlab problem exit code {0}".format(status))

    else:
        update_progress("Skipping Meshlab optimization")

    # end Meshlab
    # ------------------------------------------------------------------

    # NEXUS TEMPLATE CONVERSION (a template which needs processing..)
    # ------------------------------------------------------------------

    if template == "nexus":
        update_progress("Starting NEXUS conversion")
        logger.info("Nexus processing is started!")

        env = os.environ.copy()
        for model in models_to_convert:
            nexus_input_file = os.path.join(model['input_path'],
                                            model['input'])
            name = model['name'] + ".nxs"
            output_file = os.path.join(output_directory, name)
            proc = subprocess.Popen([
                current_app.config['NEXUS_BINARY'], nexus_input_file, "-o",
                output_file
            ],
                                    env=env,
                                    stdout=subprocess.PIPE,
                                    stderr=subprocess.STDOUT)

            output = proc.communicate()[0]
            returncode = proc.wait()

            if returncode == 0:
                msg = "Model " + model['name'] + " successfully converted."
                update_progress(msg)
                logger.info(msg)
            else:
                update_progress("Nexus failed!")
                logger.error("Nexus problem: {0} return: {1}".format(
                    nexus_input_file, returncode))
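
        # For illustration: each model is converted with a plain
        #   <NEXUS_BINARY> <input> -o <name>.nxs
        # call, writing the .nxs next to the other outputs.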

    # END NEXUS PROCESSING
    # ------------------------------------------------------------------

    # ------------------------------------------------------------------
    # Aopt call
    # this should live in its own task (or maybe transcoder in the future)
    # ------------------------------------------------------------------
    update_progress("Starting AOPT conversion")

    status = -100
    if template != "nexus":
        for model in models_to_convert:

            update_progress("Converting: {0}".format(model['input']))

            aopt_geo_prefix = "{0}_bin".format(model['name'])
            os.mkdir(os.path.join(output_directory, aopt_geo_prefix))

            infile = os.path.join(model['input_path'], model['input'])
            outfile = os.path.join(output_directory, model['output'])

            ## Config aopt call {{{

            ### FIXME: naive impl for the config stuff
            ### build a custom config parser, set defaults on init
            ### maybe using argparse to reconstruct arguments
            ### http://stackoverflow.com/questions/14823363/is-it-possible-to-reconstruct-a-command-line-with-pythons-argparse
            ### however, that would probably make it less straightforward

            aopt_geo_output = bundle_config.get(TASK_CONFIG_SECTION,
                                                'aopt.geoOutput')

            aopt_geo_param = bundle_config.get(TASK_CONFIG_SECTION,
                                               'aopt.geoParams')
            # validation check is not strictly necessary, since all
            # combinations are possible and invalid ones are just ignored...
            aopt_geo_valid = ['sa', 'sac', 'sacp', 'i']
            if aopt_geo_param not in aopt_geo_valid:
                # note: the 'sa' default is not applied here (assignment is
                # commented out); the value is passed through to aopt
                logger.warning(
                    "AOPT geo param {0} not recognized, passing it "
                    "through unchanged".format(aopt_geo_param))
                #aopt_geo_param = 'sa'

            # set output mode
            if aopt_geo_output == 'pop':
                aopt_geo_switch = '-K'  # POP
            else:  # assume binary
                aopt_geo_switch = '-G'  # binary
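
            # For illustration (values from settings.ini): aopt.geoOutput=pop
            # with aopt.geoParams=i yields "-K <name>_bin/:i", while the
            # binary default yields "-G <name>_bin/:sa".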

            aopt_gencam = bundle_config.getboolean(TASK_CONFIG_SECTION,
                                                   'aopt.genCam')
            aopt_flatten_graph = bundle_config.getboolean(
                TASK_CONFIG_SECTION, 'aopt.flattenGraph')

            aopt_cmd = [current_app.config['AOPT_BINARY'], '-i', infile]

            if aopt_flatten_graph:
                aopt_cmd.append('-F')
                aopt_cmd.append('Scene:"cacheopt(true)"')

            aopt_cmd.append('-f')
            aopt_cmd.append('PrimitiveSet:creaseAngle:4')
            aopt_cmd.append('-f')
            aopt_cmd.append('PrimitiveSet:normalPerVertex:TRUE')

            if aopt_gencam:
                aopt_cmd.append('-V')

            aopt_cmd.append(aopt_geo_switch)
            aopt_cmd.append(aopt_geo_prefix + '/:' + aopt_geo_param)

            aopt_cmd.append('-x')
            aopt_cmd.append(outfile)

            # }}} end config aopt call

            # aopt_cmd = [
            #     current_app.config['AOPT_BINARY'],
            #     '-i',
            #     infile,
            #     '-F',
            #     'Scene:"cacheopt(true)"',
            #     '-f',
            #     'PrimitiveSet:creaseAngle:4',
            #     '-f',
            #     'PrimitiveSet:normalPerVertex:TRUE',
            #     '-V',
            #     '-G',
            #     aopt_bingeo + '/:sacp',
            #     '-x',
            #     outfile
            # ]

            try:

                update_progress("Running AOPT on: {0}".format(model['input']))
                #status = subprocess.call(aopt_cmd)

                process = subprocess.Popen(aopt_cmd,
                                           stdout=subprocess.PIPE,
                                           stderr=subprocess.STDOUT)
                output = process.communicate()[0]

                status = process.wait()

                # create an aopt log in debug mode
                # this is a security concern as long as aopt includes
                # full path names in the log output
                if current_app.config['DEBUG'] or aopt_log:
                    with open(os.path.join(output_directory, 'aopt.log'),
                              'a+') as f:
                        f.write(output)

                logger.info("Aopt return: {0}".format(status))
                logger.info(output)

            except OSError:
                update_progress("Failure to execute AOPT")
                err_msg = "Error: AOPT not found or not executable {0}".format(
                    repr(aopt_cmd))
                logger.error(err_msg)
                raise ConversionError(err_msg)
    else:
        status = 0

    if status < 0:
        # FIXME error handling and cleanup (breaking early is good but
        # cleanup calls for try/catch/finally or contextmanager)
        os.chdir(working_directory)

        # fixme: put this in exception code
        update_progress("Error on conversion process")
        logger.error("Error converting file")
        raise ConversionError('AOPT RETURNS: {0}'.format(status))

    else:
        # ------------------------------------------------------------------
        # Final creation of deliverable, could live in its own task
        # ------------------------------------------------------------------
        update_progress("Assembling deliverable...")
        zip_path = os.path.join(download_path, hash)
        compression.zipdir(zip_path, '%s.zip' % hash)

        os.chdir(working_directory)

    # ------------------------------------------------------------------
    # cleaning up
    # ------------------------------------------------------------------

    if not current_app.config['DEBUG']:
        # delete the uploaded file
        update_progress("Cleaning up...")

        # todo remove upload directory
        uncompressed_path = upload_directory + '.tmp'
        if os.path.exists(uncompressed_path):
            shutil.rmtree(uncompressed_path)
        if os.path.exists(upload_directory):
            shutil.rmtree(upload_directory)

    # import time
    # time.sleep(10)

    # hah: hackish as hell

    if len(models_to_convert) > 1:
        preview = 'list.html'
    else:
        preview = models_to_convert[0]['preview']

    # send mail, if requested
    # the email address needs to be trusted; no checks are made
    if email_to:
        update_progress("Sending email")
        mail = Mail(current_app)
        msg = Message(subject="Your models are ready",
                      recipients=[email_to],
                      sender=current_app.config['DEFAULT_MAIL_SENDER'])
        msg.body = "Preview: {0}\nDownload:{1}".format(
            flask.url_for('frontend.preview',
                          _external=True,
                          hash=hash,
                          filename=preview),
            flask.url_for('frontend.download',
                          _external=True,
                          hash=hash,
                          filename='%s.zip' % hash))
        mail.send(msg)

    update_progress("Done")

    result_set = dict(
        hash=hash,
        filenames=[preview, '%s.zip' % hash],
        input_file=input_file,
    )
    return result_set
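
# Sketch of the chaining refactor the docstring proposes ("If multiple files
# are uploaded, tasks can be chained"); convert_one is a hypothetical
# per-model task, composed with standard Celery primitives:
#
#   from celery import group
#   job = group(convert_one.s(model, options) for model in models_to_convert)
#   job.apply_async()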
Example #4
def convert_model(input_file, options=None):
    """
    Task to run the model convestion. This task is written in a manner
    that is can run independently from the web application. You can call
    this task from the command line or other rich gui. The only requirement
    is a settings module in the current package as well as the requierd 
    dependencies (celery, jinja, aopt, meshlab, etc.).
    
    This tasks is currently very monolithic and does too much.
    To make this more flexible, this task should really only convert
    and optimize one file. If multiple files are uploaded, tasks
    can be chained. Also assembly of templates should not go here.


    TODO:
      - make directories hash.tmp and rename after successfull transcoding
        this is to distinguish between orphaned files and completed ones
    """

    update_progress("Warming up...")

    # need this so pubsub will work; it's probably due to too fast processing
    # and the reconnect timeout of EventSource
    # FIXME: find out why we need to wait a bit and fix it
    import time
    time.sleep(4)

    logger = current_app.logger


    # The current task's id
    task_id = current_task.request.id
    
    # options:
    #   meshlab
    #   template
    #   hash
    #   meta
    
    # keep the generated hash for filenames
    # use the taskid for status only
    # delete the uploaded file on successful conversion, otherwise
    # log the filename in the error trace
    
    # the hash should always be provided, however if not
    # i.e. running outside a web application, we reuse the taskid
    hash = options.get('hash', task_id)

    # meshlab options
    meshlab = options.get('meshlab', None)
    
    # alternative template
    template = options.get('template', None)
    
    # aopt options
    aopt = options.get('aopt', None)

    update_progress("Starting to process uploaded file")
    logger.info("Uploaded file: {0}".format(input_file))

    download_path = current_app.config['DOWNLOAD_PATH']
    template_path = current_app.config['TEMPLATE_PATH']
    upload_path = current_app.config['UPLOAD_PATH']
    

    # {{{ start code which will be refactored to the frontend
    # If the uploaded file is an archive, uncompress it.
    # Note that this step should be moved to the controller once we support
    # a wizard style: upload file, select template, analyze contents and
    # present options for each model. But for now, we use the naive approach.
    if compression.is_archive(input_file):
        update_progress("Umcompressing archive")
        
        uncompressed_path = os.path.join(upload_path, hash)
        compression.unzip(input_file, uncompressed_path)

        update_progress("Archive uncompressed")

        resources_to_copy = []  # entry format: path/to/resource
        found_models = []
        found_metadata = []

        update_progress("Detecting models and resources")

        for root, dirs, files in os.walk(uncompressed_path, topdown=False):
            for name in files:

                if security.is_model_file(name):
                    update_progress("Found model: {0}".format(name))
                    found_models.append(os.path.join(root, name))
                elif security.is_meta_file(name):
                    update_progress("Found meta data: {0}".format(name))
                    found_metadata.append(os.path.join(root, name))
                else:
                    update_progress("Found resource: {0}".format(name))
                    resources_to_copy.append(os.path.join(root, name))
                    # just copy over
                    
            for name in dirs:
                dirpath = os.path.join(root, name)
                update_progress("Found directory: {0}".format(name))
                resources_to_copy.append(dirpath)

        models_to_convert = []  # entry format ('filename.x3d', ['meta.xml'] or None)

        logger.info("****** FOUND_META: {0}".format(found_metadata))


        update_progress("Associating meata data to models")

        # FIXME: this could be improved with map/reduce-style grouping
        # (see the sketch after this loop); going for explicit for now.
        # walk each found model
        for model in found_models:
            m_root = os.path.splitext(os.path.basename(model))[0]
            m_metas = []
            
            # then walk each metadata to find match for model
            for r in found_metadata:
                # found matching metadata file, mark it
                r_root = os.path.splitext(os.path.basename(r))[0]
                if r_root == m_root:
                    m_metas.append(r)

            # now we have a list of metas belonging to the current model
            # store that in the master list
            models_to_convert.append((model, m_metas))
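
        # A sketch of the grouping the FIXME above hints at: index the
        # metadata by basename once instead of rescanning it per model
        # (assumes the same basename-matching rule as the loop above):
        #
        # from collections import defaultdict
        #
        # metas_by_stem = defaultdict(list)
        # for r in found_metadata:
        #     metas_by_stem[os.path.splitext(os.path.basename(r))[0]].append(r)
        #
        # models_to_convert = [
        #     (m, metas_by_stem[os.path.splitext(os.path.basename(m))[0]])
        #     for m in found_models
        # ]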

        # we now have list of models with associated metadata
        # and a list of plain resources that simply need to be copied
        #   models_to_convert
        #   resources_to_copy
        logger.info("****** MODELS: {0}".format(models_to_convert))
        logger.info("****** RESOURCES: {0}".format(resources_to_copy))
    # }}}


    # specifically not using current_app.jinja_env in order to separate
    # user templates entirely from the application. The user should not have
    # access to the request, global objects, and app configuration.
    jinja = Environment(loader=FileSystemLoader(template_path))
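
    # If user-supplied templates are ever allowed, Jinja's sandbox would be
    # the safer choice (sketch; same loader, restricted execution):
    #
    # from jinja2.sandbox import SandboxedEnvironment
    #
    # jinja = SandboxedEnvironment(loader=FileSystemLoader(template_path))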

    output_directory = os.path.join(download_path, hash)
    os.mkdir(output_directory)
    
    if template in ('fullsize', 'metadataBrowser'):
        output_extension = '.x3d'
        aopt_output_switch = '-x'

        # render the template with an inline reference to the generated X3D
        output_template_filename = os.path.join(download_path,
                                                hash, hash + '.html')
        user_tpl = jinja.get_template(template + '/' + template + '_template.html')
        
        with open(output_template_filename, 'w+') as f:
            f.write(user_tpl.render(X3D_INLINE_URL='%s.x3d' % hash))

        output_directory_static = os.path.join(output_directory, 'static')
        input_directory_static = os.path.join(template_path, template, 'static')

        shutil.copytree(input_directory_static, output_directory_static)
        
        # copy metadata to output dir if present
        meta_filename = options.get('meta_filename', None)
        if meta_filename:
            shutil.copy(meta_filename, output_directory)

    else:
        output_extension = '.html'
        aopt_output_switch = '-N'

    output_filename = hash + output_extension
    working_directory = os.getcwd()
    os.chdir(output_directory)
    
    logger.info("Output filename:   {0}".format(output_filename) )
    logger.info("Output directory: {0}".format(output_directory) )
    logger.info("Working directory: {0}".format(working_directory) )
    logger.info("Aopt binary: {0}".format(current_app.config['AOPT_BINARY']))
    logger.info("Meshlab binary: {0}".format(current_app.config['MESHLAB_BINARY']))

    # NOTE: for meshlab below, the input and output file are the same, so
    # the uploaded file is overwritten in place

    if meshlab:
        
        update_progress("Meshlab optimization...")
        
        env = os.environ.copy()
        env['DISPLAY'] = current_app.config['MESHLAB_DISPLAY']
        
        mehlab_filter = ""
        mehlab_filter += "<!DOCTYPE FilterScript><FilterScript>"

        for item in meshlab:
            mehlab_filter += '<filter name="' + item + '"/>' 

        mehlab_filter += "</FilterScript>"

        mehlab_filter_filename = os.path.join(current_app.config['UPLOAD_PATH'], hash + '.mlx')
        filter_file = open(mehlab_filter_filename, "w") 
        filter_file.write(mehlab_filter) 
        filter_file.close()
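
        # The filter names come straight from the web form and are
        # interpolated into XML without escaping; a hedged sketch of a safer
        # build using only the stdlib (quoteattr escapes and quotes
        # attribute values):
        #
        # from xml.sax.saxutils import quoteattr
        #
        # filters = ''.join('<filter name=%s/>' % quoteattr(item)
        #                   for item in meshlab)
        # meshlab_filter = ('<!DOCTYPE FilterScript><FilterScript>'
        #                   + filters + '</FilterScript>')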
        
        # Subprocess in combination with PIPE/STDOUT can deadlock when
        # wait() is called while a pipe buffer fills up; communicate() below
        # avoids this, but be careful. Prefer the Python 2.7 check_output
        # version below, or maybe switch to using envoy or similar.
        proc = subprocess.Popen([
            current_app.config['MESHLAB_BINARY'], 
            "-i", 
            input_file, 
            "-o",
            input_file, 
            "-s",
            meshlab_filter_filename,
            "-om",
            "ff"
            ],
            env=env, 
            stdout=subprocess.PIPE, 
            stderr=subprocess.STDOUT)
        
        # communicate() waits for the process to finish
        out = proc.communicate()[0]
        returncode = proc.returncode

        logger.info("Meshlab optimization {0}".format(returncode))
        logger.info(out)

        if returncode == 0:
            update_progress("Meshlab successfull")
        else:
            update_progress("Meshlab failed!")
            logger.error("Meshlab problem exit code {0}".format(returncode))

        # Python 2.7
        # try:
        #     check_output([
        #         current_app.config['MESHLAB_BINARY'],
        #         "-i",
        #         input_file,
        #         "-o",
        #         input_file,
        #         "-s",
        #         meshlab_filter_filename,
        #         "-om",
        #         "ff"
        #         ], env=env)
        #
        # except CalledProcessError as e:
        #     logger.info("Meshlab problem exit code {0}".format(e.returncode))
        #     logger.error("Meshlab: " + e.output)

    else:
       update_progress("Skipping Meshlab optimization")


    update_progress("Starting AOPT conversion")

    status = -100
    aopt_cmd = []

    if aopt == 'restuctedBinGeo':
        update_progress("AOPT restructured binary geo optimization...")

        output_directory_binGeo = os.path.join(output_directory, "binGeo")
        os.mkdir(output_directory_binGeo)

        aopt_cmd = [
            current_app.config['AOPT_BINARY'],
            '-i',
            input_file,
            '-F',
            'Scene:maxtris(23000)',  # no shell involved, so no quoting needed
            '-f',
            'PrimitiveSet:creaseAngle:4',
            '-f',
            'PrimitiveSet:normalPerVertex:TRUE',
            '-f',
            'PrimitiveSet:optimizationMode:none',
            '-V',
            '-G',
            'binGeo/:sacp',
            aopt_output_switch,
            output_filename
        ]

        # shell equivalent:
        # aopt -i input.ply -F Scene:"maxtris(23000)" -f PrimitiveSet:creaseAngle:4
        #  -f PrimitiveSet:normalPerVertex:TRUE -f PrimitiveSet:optimizationMode:none
        #  -V -G binGeo/:sacp -x model.x3d -N model.html
        
    elif aopt == 'binGeo':
        update_progress("AOPT binary geo optimization...")
        output_directory_binGeo = os.path.join(output_directory, "binGeo")
        os.mkdir(output_directory_binGeo)

        aopt_cmd = [
            current_app.config['AOPT_BINARY'],
            "-i",
            input_file,
            "-G",
            'binGeo/:saI',
            aopt_output_switch,
            output_filename
        ]


    else:  

        update_progress("AOPT standard conversion in progress...")

        aopt_cmd = [
            current_app.config['AOPT_BINARY'], 
            "-i", 
            input_file, 
            aopt_output_switch, 
            output_filename
        ]


    
    try:
        update_progress("Running AOPT")
        status = subprocess.call(aopt_cmd)
    except OSError:
        update_progress("Failure to execute AOPT")
        err_msg = "Error: AOPT not found or not executable {0}".format(repr(aopt_cmd))
        logger.error(err_msg)
        raise ConversionError(err_msg)


    if status < 0:
        # FIXME error handling and cleanup (breaking early is good, but
        # cleanup calls for try/except/finally or a context manager).
        # NOTE: subprocess.call only returns a negative value when the
        # process was killed by a signal; nonzero positive exit codes
        # slip through this check.
        os.chdir(working_directory)

        # FIXME: move this into the exception handling
        update_progress("Error on conversion process")
        logger.error("Error converting file")
        raise ConversionError('AOPT returned: {0}'.format(status))
    
    else:
        update_progress("Assembling deliverable...")
        zip_path = os.path.join(download_path, hash)
        compression.zipdir(zip_path, '%s.zip' % hash)
        os.chdir(working_directory)

    if not current_app.config['DEBUG']:
        # delete the uploaded file
        update_progress("Cleaning up...")
        # TODO: remove the upload directory as well
        os.remove(input_file)

    result_set = dict(
        hash=hash,
        input_file=input_file,
    )
    return result_set
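
# A hedged usage sketch, assuming a configured app context and worker as
# described in the docstring (paths and option values here are made up):
#
# options = {
#     'hash': 'abc123',                       # storage/output identifier
#     'meshlab': ['Remove Duplicate Faces'],  # optional meshlab filter names
#     'template': 'fullsize',                 # optional template name
#     'aopt': 'binGeo',                       # optional aopt mode
# }
# convert_model('/path/to/upload/model.ply', options=options)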