def process(cls, request):
    """Accept one chunk of a (possibly multi-chunk) client file upload.

    Chunks are appended to a file in a class-level temp directory; when
    the final chunk arrives, the file is parsed and stored permanently
    inside a transaction, which is rolled back on any error.

    Args:
        request: the Django HTTP request carrying POST fields
            'filename', 'chunks', 'chunk' and the FILES part 'file'.
    """
    # Create a temporary directory for file uploads.
    if not os.path.exists(cls.dtemp):
        cls.dtemp = tempfile.mkdtemp()
    # request.POST['filename'] = the client-side filename.
    # request.FILES['file'] = the name of a part.
    # These will be equal for small files only.
    filename = request.POST['filename']
    fullpath = os.path.join(cls.dtemp, filename)
    chunks = int(request.POST.get('chunks', 1))
    chunk = int(request.POST.get('chunk', 0))
    # Start a new file or append the next chunk.
    # NB: Django manages its own chunking.
    with open(fullpath, 'wb' if chunk == 0 else 'ab') as f:
        for b in request.FILES['file'].chunks():
            f.write(b)
    # On successful parsing, store the uploaded file in its permanent
    # location. Some information will be stored into the database as
    # well for convenience and performance. Roll back on any error.
    if chunk == chunks - 1:
        with transaction.commit_on_success():
            objects = []
            parsers.parse(fullpath, objects)
            # Fix: use a context manager so the handle is closed even
            # if the_file.save() raises (was open()/close() pair).
            with open(fullpath) as f:
                upload = Upload()
                upload.the_file.save(filename, File(f))
            for o in objects:
                o.upload = upload
                o.save()
            # Exactly one PUT in a SUFRIB must have been designated
            # as the sink. This is better handled by the parser?
            if upload.suffix.lower() == ".rib":
                try:
                    Put.objects.filter(upload=upload).get(_CAR='Xs')
                # Fix: was a bare `except:` — catch exactly the two
                # cases meaning "not exactly one sink": none found, or
                # more than one found.
                except (Put.DoesNotExist, Put.MultipleObjectsReturned):
                    msg = ("Precies 1 put in %s moet als sink zijn "
                           "gedefinieerd (CAR=Xs).") % upload.filename
                    logger.error(msg)
                    # Delete from filesystem.
                    upload.the_file.delete()
                    # Trigger rollback.
                    raise Exception(msg)
        # Update the lost capacity percentages asynchronously.
        # NOTE(review): placed after the transaction commits, inside the
        # final-chunk branch — confirm against the original indentation.
        tasks.compute_lost_capacity_async()
def _init_pool_and_graph(self):
    """Parse the file into a pool dict plus graph, and cache both.

    Returns:
        (pool, graph): the parsed pool dict and its networkx.Graph,
        either fetched from the cache or rebuilt from the RMB file.
    """
    pool_cache_key = "pool_%d" % self.uploaded_file_id
    pool = cache.get(pool_cache_key, {})
    graph_cache_key = "graph_%d" % self.uploaded_file_id
    graph = cache.get(graph_cache_key, {})
    if not pool or not graph:
        # NOTE(review): if only the pool was cached, parse() appends
        # into the already-populated dict — presumably harmless, but
        # verify against the parser's behavior.
        parsers.parse(self.rmb_file.full_path, pool)
        graph = networkx.Graph()
        parsers.convert_to_graph(pool, graph)
        parsers.correct_z_values(pool)
        # Fix: the docstring promised caching, but the results were
        # never stored, so every call re-parsed the file from disk.
        cache.set(pool_cache_key, pool)
        cache.set(graph_cache_key, graph)
    return pool, graph