def clipMesh(mesh_to_cut, clip_mesh, progress_callback=NullCallback()):
    """Clip every face of ``mesh_to_cut`` against the clip polygons derived
    from ``clip_mesh`` and return a new mesh built from the clipped faces.

    :param mesh_to_cut: mesh whose faces are clipped.
    :param clip_mesh: mesh that defines the clipping outline
        (converted via ``_getMeshClipPolygons``).
    :param progress_callback: callable receiving ``Progress`` updates;
        defaults to a no-op callback.
    :return: a new ``mesh.Mesh`` containing the clipped geometry with
        duplicate vertices merged.
    """
    # Fix: removed the `clip_time` / `add_vertex_time` accumulators — they
    # were measured on every face but never reported or returned (dead code).
    return_mesh = mesh.Mesh()
    clip_polygons = _getMeshClipPolygons(clip_mesh)
    number_of_faces = len(mesh_to_cut.getFaces())
    for i, face in enumerate(mesh_to_cut.getFaces()):
        progress_callback(
            Progress(progress=i, message="Cutting mesh",
                     max_progress=number_of_faces))
        mesh_vertices = [
            mesh_to_cut.getVertex(index).position for index in face
        ]
        polygon_to_clip = clip_polygon.Polygon(mesh_vertices)
        for clipper_polygon in clip_polygons:
            # NOTE(review): each clipper is applied to the *original* face
            # polygon, not to the previous clip result — confirm this is
            # intended when more than one clip polygon exists.
            clipped_polygon = clipper_polygon.clip(polygon_to_clip)
            # A clip that leaves no points means the face lies entirely
            # outside this clipper; skip it.
            if len(clipped_polygon.points) > 0:
                return_mesh.addFaceFromVertices([
                    mesh.MeshVertex(np.array(point))
                    for point in clipped_polygon.points
                ])
    progress_callback(
        Progress(progress=1.0, message="Removing duplicate vertices"))
    # Clipping re-creates shared edge vertices per face; merge them.
    return_mesh.removeDuplicates()
    return return_mesh
def createFromRaster(self, raster_lookup, geo_transform, heightmap_size,
                     progress_callback=NullCallback()):
    """Sample elevations from ``raster_lookup`` into ``self.heightmap``.

    Iterates every pixel of the target heightmap, converts the pixel
    location to a geo location via ``geo_transform``, and looks up the
    elevation there. Out-of-bounds pixels keep their 0 initial value;
    in-bounds pixels with no data get ``self.nodata_fillin``.

    :param raster_lookup: elevation source with ``locationInBounds`` and
        ``getElevationAtPosition``.
    :param geo_transform: pixel -> geo coordinate transform.
    :param heightmap_size: ``(width, height)`` of the heightmap in pixels.
    :param progress_callback: callable receiving ``Progress`` updates.
    :return: ``self`` (fluent style), with ``self.heightmap`` set.
    """
    width, height = heightmap_size[0], heightmap_size[1]
    pixel_count = width * height
    self.pixels = [0 for i in range(pixel_count)]
    for y in range(height):
        for x in range(width):
            geo_pos = geo_transform.transformPixelLocationToGeoLocation(x, y)
            # x varies fastest: linear index is x + y * width.
            pixel_index = x + y * width
            if raster_lookup.locationInBounds(geo_pos[0], geo_pos[1]):
                elevation = raster_lookup.getElevationAtPosition(
                    geo_pos[0], geo_pos[1])
                if elevation is not None:
                    self.pixels[pixel_index] = elevation
                else:
                    # In bounds but no data: fill and count it.
                    self.pixels[pixel_index] = self.nodata_fillin
                    self.nodata_count += 1
            else:
                # Outside all rasters: leave the 0 default, count it.
                self.out_of_bounds_count += 1
            progress_callback(
                Progress(
                    progress=pixel_index + 1,
                    message="Creating heightmap",
                    max_progress=pixel_count,
                ))
    # Fix: pixels are stored x-fastest (index == x + y * width), so the
    # row-major matrix shape must be (height, width). The previous
    # reshape(heightmap_size) produced (width, height), which scrambles
    # the data for non-square heightmaps.
    raster_matrix = np.array(self.pixels).reshape((height, width))
    self.heightmap = raster_matrix
    return self
def displaceMesh(self, mesh, displacement_map, progress_callback=NullCallback()):
    """Displace every vertex of *mesh* according to *displacement_map*.

    Stores both arguments on the instance (they are read by the private
    per-vertex helper), then walks the vertices, reporting progress as
    it goes.

    :param mesh: mesh whose vertices are displaced in place.
    :param displacement_map: lookup used by ``__displaceVertex``.
    :param progress_callback: callable receiving ``Progress`` updates.
    """
    self.mesh = mesh
    self.displacement_map = displacement_map
    vertex_total = len(mesh.vertices)
    for done, current_vertex in enumerate(mesh.vertices, start=1):
        progress_callback(
            Progress(done,
                     message='Displacing vertex',
                     max_progress=vertex_total))
        self.__displaceVertex(current_vertex)
def train(args):
    """Train an rnnsearch NMT model (Python 2).

    Resumes from ``args.model`` if it exists, optionally seeds weights
    from ``args.initialize``, then runs the epoch/batch training loop
    with periodic validation, autosaving and sampling. Exits the process
    on interrupt or failure.
    """
    option = default_option()
    # predefined model names
    pathname, basename = os.path.split(args.model)
    modelname = get_filename(basename)
    # autoname_format is a template; {epoch}/{batch} are filled at save time.
    autoname_format = os.path.join(pathname, modelname + ".iter{epoch}-{batch}.pkl")
    bestname = os.path.join(pathname, modelname + ".best.pkl")
    # load models: an existing checkpoint overrides defaults and means "resume".
    if os.path.exists(args.model):
        opt, params = load_model(args.model)
        override(option, opt)
        init = False
    else:
        init = True
    if args.initialize:
        # Pre-trained weights: only the parameter list (index 1) is kept.
        pretrain_params = load_model(args.initialize)
        pretrain_params = pretrain_params[1]
        pretrain = True
    else:
        pretrain = False
    # Command-line arguments take final precedence over checkpoint options.
    override(option, args_to_dict(args))
    # check external validation script
    ext_val_script = option['ext_val_script']
    if not os.path.exists(ext_val_script):
        raise ValueError('File doesn\'t exist: %s' % ext_val_script)
    elif not os.access(ext_val_script, os.X_OK):
        raise ValueError('File is not executable: %s' % ext_val_script)
    # check references format
    ref_stem = None
    if option['validation'] and option['references']:
        ref_stem = misc.infer_ref_stem([option['validation']], option['references'])
        ref_stem = ref_stem[0]
    # .yaml for ultimate options: dump the effective settings once per run.
    yaml_name = "%s.settings.yaml" % modelname
    if init or not os.path.exists(yaml_name):
        with open(yaml_name, "w") as w:
            _opt = args.__dict__.copy()
            for k, v in _opt.iteritems():
                if k in option:
                    _opt[k] = option[k]
            yaml.dump(_opt, w, default_flow_style=False)
            del _opt
    print_option(option)
    # reader: batch * sortk sentences are read then sorted for length bucketing.
    batch = option["batch"]
    sortk = option["sort"]
    shuffle = option["shuffle"]
    reader = textreader(option["corpus"], shuffle)
    processor = [data_length, data_length]
    stream = textiterator(reader, [batch, batch * sortk], processor,
                          option["limit"], option["sort"])
    # progress
    # initialize before building model
    progress = Progress(option["delay_val"], stream, option["seed"])
    # create model
    regularizer = []
    if option["l1_scale"]:
        regularizer.append(ops.l1_regularizer(option["l1_scale"]))
    if option["l2_scale"]:
        regularizer.append(ops.l2_regularizer(option["l2_scale"]))
    scale = option["scale"]
    initializer = ops.random_uniform_initializer(-scale, scale)
    regularizer = ops.sum_regularizer(regularizer)
    option["scope"] = "rnnsearch"
    model = build_model(initializer=initializer, regularizer=regularizer, **option)
    variables = None
    if pretrain:
        # Manually match pre-trained parameters to trainable variables by
        # stripping the leading scope from each name. Embeddings keep the
        # full (scope-less) path; everything else drops one more level.
        print "using pretrain"
        _pp1 = {}
        for name, val in pretrain_params:
            names = name.split('/')[1:]
            if "embedding" in names[0]:
                _pp1['/'.join(names)] = val
            else:
                _pp1['/'.join(names[1:])] = val
        matched = []
        not_matched = []
        for var in ops.trainable_variables():
            names = var.name.split('/')[1:]
            # "decoder2" variables are new in this model; no pre-trained value.
            if "decoder2" in var.name:
                not_matched.append((var.name, var.get_value().size))
                continue
            if "embedding" in names[0]:
                match_name = '/'.join(names)
                var.set_value(_pp1[match_name])
            else:
                match_name = '/'.join(names[1:])
                var.set_value(_pp1[match_name])
            matched.append((var.name, var.get_value().size))
        print "------------------- matched -------------------"
        for name, size in matched:
            print name, size
        print "------------------- not matched -------------------"
        for name, size in not_matched:
            print name, size
        print "------------------- end -------------------\n"
    if not init:
        # Resuming: restore checkpoint parameters over the fresh build.
        set_variables(ops.trainable_variables(), params)
    print "parameters: %d\n" % count_parameters(ops.trainable_variables())
    # tuning option
    tune_opt = {}
    tune_opt["algorithm"] = option["optimizer"]
    tune_opt["constraint"] = ("norm", option["norm"])
    tune_opt["norm"] = True
    tune_opt["variables"] = variables
    # create optimizer over all trainable variables
    scopes = [".*"]
    trainer = optimizer(model.inputs, model.outputs, model.cost, scopes, **tune_opt)
    # vocabulary and special symbol
    svocabs, tvocabs = option["vocabulary"]
    svocab, isvocab = svocabs
    tvocab, itvocab = tvocabs
    unk_sym = option["unk"]
    eos_sym = option["eos"]
    alpha = option["alpha"]
    maxepoch = option["maxepoch"]
    # restore right before training to avoid randomness changing when trying to resume progress
    if not args.reset:
        if "#progress" in option:
            print 'Restore progress >>'
            progress = (option["#progress"])
            stream = progress.iterator
            stream.set_processor(processor)
            # Mark all pending validation tasks as finished (status 4) so
            # they are not re-run after a resume.
            for ttt in progress.task_manager.tasks:
                ttt.status = 4
                ttt.result = 0.0
        else:
            print 'New progress >>'
    else:
        print 'Discard progress >>'
    # setup progress
    progress.oldname = args.model
    progress.serializer = serialize
    stream = progress.iterator
    overwrite = not args.no_overwrite
    if progress.task_manager:
        print progress.task_manager
    try:
        while progress.epoch < maxepoch:
            epc = progress.epoch
            for data in stream:
                progress.tic()
                if progress.failed():
                    raise RuntimeError("progress failure")
                xdata, xmask = convert_data(data[0], svocab, unk_sym, eos_sym)
                ydata, ymask = convert_data(data[1], tvocab, unk_sym, eos_sym)
                # bydata: target data converted with the reverse flag set.
                bydata, _ = convert_data(data[1], tvocab, unk_sym, eos_sym, True)
                t1 = time.time()
                tot_cost, soft_cost, true_cost, norm = trainer.optimize(
                    xdata, xmask, ydata, ymask, bydata)
                trainer.update(alpha=alpha)
                t2 = time.time()
                # per word cost
                w_cost = true_cost * ymask.shape[1] / ymask.sum()
                progress.batch_count += 1
                progress.batch_total += 1
                progress.loss_hist.append(w_cost)
                count = progress.batch_count
                # Print every batch unless a print frequency is given.
                if not args.pfreq or count % args.pfreq == 0:
                    print epc + 1, progress.batch_count, w_cost, tot_cost, soft_cost, true_cost, norm, t2 - t1
                # Periodic external validation (may be skipped on resume).
                if count % option["vfreq"] == 0 and not should_skip_val(
                        args.skip_val, option["vfreq"], epc, progress.batch_total):
                    if option["validation"] and option["references"]:
                        progress.add_valid(option['scope'], option['validation'],
                                           ref_stem, ext_val_script, __file__,
                                           option, modelname, bestname, serialize)
                # save after validation
                progress.toc()
                if count % option["freq"] == 0:
                    progress.save(option, autoname_format, overwrite)
                progress.tic()
                # Periodic sampling: beam-search one random sentence of the batch.
                if count % option["sfreq"] == 0:
                    n = len(data[0])
                    ind = numpy.random.randint(0, n)
                    sdata = data[0][ind]
                    tdata = data[1][ind]
                    xdata = xdata[:, ind:ind + 1]
                    xmask = xmask[:, ind:ind + 1]
                    hls = beamsearch(model, xdata, xmask)
                    best, score = hls[0]
                    print "--", sdata
                    print "--", tdata
                    print "--", " ".join(best[:-1])
                progress.toc()
            print "--------------------------------------------------"
            progress.tic()
            # End-of-epoch validation.
            if option["validation"] and option["references"]:
                progress.add_valid(option['scope'], option['validation'],
                                   ref_stem, ext_val_script, __file__, option,
                                   modelname, bestname, serialize)
            print "--------------------------------------------------"
            progress.toc()
            # early stopping: decay the learning rate after option["stop"] epochs.
            if epc + 1 >= option["stop"]:
                alpha = alpha * option["decay"]
            stream.reset()
            progress.epoch += 1
            progress.batch_count = 0
            # update autosave
            option["alpha"] = alpha
            progress.save(option, autoname_format, overwrite)
        stream.close()
        progress.tic()
        print "syncing ..."
        progress.barrier()  # hangup and wait
        progress.toc()
        # Report the best validation BLEU seen during training.
        best_valid = max(progress.valid_hist, key=lambda item: item[1])
        (epc, count), score = best_valid
        print "best bleu {}-{}: {:.4f}".format(epc + 1, count, score)
        if progress.delay_val:
            task_elapse = sum(
                [task.elapse for task in progress.task_manager.tasks])
            print "training finished in {}({})".format(
                datetime.timedelta(seconds=int(progress.elapse)),
                datetime.timedelta(seconds=int(progress.elapse + task_elapse)))
        else:
            print "training finished in {}".format(
                datetime.timedelta(seconds=int(progress.elapse)))
        progress.save(option, autoname_format, overwrite)
    except KeyboardInterrupt:
        traceback.print_exc()
        progress.terminate()
        sys.exit(1)
    except Exception:
        traceback.print_exc()
        progress.terminate()
        sys.exit(1)
def train(args): option = default_option() # predefined model names pathname, basename = os.path.split(args.model) modelname = get_filename(basename) autoname_format = os.path.join(pathname, modelname + ".iter{epoch}-{batch}.pkl") bestname = os.path.join(pathname, modelname + ".best.pkl") # load models if os.path.exists(args.model): opt, params = load_model(args.model) override(option, opt) init = False else: init = True if args.initialize: print "initialize:", args.initialize pretrain_params = load_model(args.initialize) pretrain_params = pretrain_params[1] pretrain = True else: pretrain = False override(option, args_to_dict(args)) # check external validation script ext_val_script = option['ext_val_script'] if not os.path.exists(ext_val_script): raise ValueError('File doesn\'t exist: %s' % ext_val_script) elif not os.access(ext_val_script, os.X_OK): raise ValueError('File is not executable: %s' % ext_val_script) # check references format ref_stem = option['references'] if option['validation'] and option['references']: ref_stem = misc.infer_ref_stem([option['validation']], option['references']) ref_stem = ref_stem[0] # .yaml for ultimate options yaml_name = "%s.settings.yaml" % modelname if init or not os.path.exists(yaml_name): with open(yaml_name, "w") as w: _opt = args.__dict__.copy() for k, v in _opt.iteritems(): if k in option: _opt[k] = option[k] yaml.dump(_opt, w, default_flow_style=False) del _opt print_option(option) # reader batch = option["batch"] sortk = option["sort"] shuffle = option["shuffle"] reader = textreader(option["corpus"][:3], shuffle) processor = [data_length, data_length, data_length] stream = textiterator(reader, [batch, batch * sortk], processor, option["limit"], option["sort"]) reader = textreader(option["corpus"][3:], shuffle) processor = [data_length, data_length, data_length] dstream = textiterator(reader, [batch, batch * sortk], processor, None, option["sort"]) # progress # initialize before building model progress = 
Progress(option["delay_val"], stream, option["seed"]) # create model regularizer = [] if option["l1_scale"]: regularizer.append(ops.l1_regularizer(option["l1_scale"])) if option["l2_scale"]: regularizer.append(ops.l2_regularizer(option["l2_scale"])) scale = option["scale"] initializer = ops.random_uniform_initializer(-scale, scale) regularizer = ops.sum_regularizer(regularizer) option["scope"] = "rnnsearch" model = build_model(initializer=initializer, regularizer=regularizer, **option) variables = None if pretrain: matched, not_matched = match_variables(ops.trainable_variables(), pretrain_params) if args.finetune: variables = not_matched if not variables: raise RuntimeError("no variables to finetune") if pretrain: restore_variables(matched, not_matched) if not init: set_variables(ops.trainable_variables(), params) print "parameters: %d\n" % count_parameters(ops.trainable_variables()) # tuning option tune_opt = {} tune_opt["algorithm"] = option["optimizer"] tune_opt["constraint"] = ("norm", option["norm"]) tune_opt["norm"] = True tune_opt["variables"] = variables # create optimizer scopes = ["((?!Shared).)*$"] trainer = optimizer(model.inputs, model.outputs, model.cost, scopes, **tune_opt) clascopes = [".*(Shared).*"] clatrainer = optimizer(model.inputs_cla, model.outputs_cla, model.cost_cla, clascopes, **tune_opt) #scopes = [".*(DSAenc).*"] #domain_trainer = optimizer(model.inputs, model.toutputs, model.domaincost, scopes, **tune_opt) # vocabulary and special symbol svocabs, tvocabs = option["vocabulary"] svocab, isvocab = svocabs tvocab, itvocab = tvocabs unk_sym = option["unk"] eos_sym = option["eos"] alpha = option["alpha"] maxepoch = option["maxepoch"] # restore right before training to avoid randomness changing when trying to resume progress if not args.reset: if "#progress" in option: print 'Restore progress >>' progress = (option["#progress"]) stream = progress.iterator stream.set_processor(processor) else: print 'New progress >>' else: print 'Discard 
progress >>' if args.drop_tasks: print 'drop tasks' progress.drop_tasks() # setup progress progress.oldname = args.model progress.serializer = serialize stream = progress.iterator overwrite = not args.no_overwrite if progress.task_manager: print progress.task_manager register_killer() tagvocab = {} for idx, d in enumerate(option["dvocab"]): tagvocab[d] = idx if len(tagvocab) != option["dnum"]: raise ValueError('length of domain vocab %f not equal to domain num %f!' % (len(tagvocab), option["dnum"])) try: while progress.epoch < maxepoch: epc = progress.epoch for data in stream: progress.tic() if progress.failed(): raise RuntimeError("progress failure") # data = _stream.next() xdata, xmask = convert_data(data[0], svocab, unk_sym, eos_sym) ydata, ymask = convert_data(data[1], tvocab, unk_sym, eos_sym) tag = convert_tag(data[2], tagvocab) t1 = time.time() cost, dcost, scost, tdcost, norm = trainer.optimize(xdata, xmask, ydata, ymask, tag) clacost, _ = clatrainer.optimize(xdata, xmask, tag) trainer.update(alpha=alpha) clatrainer.update(alpha=alpha) t2 = time.time() # per word cost w_cost = cost * ymask.shape[1] / ymask.sum() progress.batch_count += 1 progress.batch_total += 1 progress.loss_hist.append(w_cost) if not args.pfreq or count % args.pfreq == 0: print epc + 1, progress.batch_count, w_cost, dcost, tdcost, scost, clacost, norm, t2 - t1 count = progress.batch_count if count % option["sfreq"] == 0: dright = 0.0 sright = 0.0 tdright = 0.0 total = 0.0 for ddata in dstream: txdata, txmask = convert_data(ddata[0], svocab, unk_sym, eos_sym) tydata, tymask = convert_data(ddata[1], tvocab, unk_sym, eos_sym) txtag = convert_tag(ddata[2], tagvocab) dtag_pred, stag_pred = model.tag_predict(txdata, txmask) txtag = txtag[0] dpretag = [] for i in dtag_pred: dpretag.append(int(i)) spretag = [] for i in stag_pred: spretag.append(int(i)) tdtag_pred = model.tgt_tag_predict(txdata, txmask, tydata, tymask) tdpretag = [] for i in tdtag_pred[0]: tdpretag.append(int(i)) dright = dright 
+ sum([m == n for m, n in zip(txtag, dpretag)]) sright = sright + sum([m == n for m, n in zip(txtag, spretag)]) tdright = tdright + sum([m == n for m, n in zip(txtag, tdpretag)]) total = total + len(dpretag) dstream.reset() dacc = dright * 1.0 / total sacc = sright * 1.0 / total tdacc = tdright * 1.0 / total print "dacc:", dright, dacc print "sacc", sright, sacc print "tdacc", tdright, tdacc if count % option["vfreq"] == 0 and not should_skip_val(args.skip_val, option["vfreq"], epc, progress.batch_total): if option["validation"] and option["references"]: progress.add_valid(option['scope'], option['validation'], ref_stem, ext_val_script, __file__, option, modelname, bestname, serialize) # save after validation progress.toc() if count % option["freq"] == 0: progress.save(option, autoname_format, overwrite) progress.tic() if count % option["sfreq"] == 0: n = len(data[0]) ind = numpy.random.randint(0, n) sdata = data[0][ind] tdata = data[1][ind] xdata = xdata[:, ind: ind + 1] xmask = xmask[:, ind: ind + 1] hls = beamsearch(model, xdata, xmask) best, score = hls[0] print "--", sdata print "--", tdata print "--", " ".join(best[:-1]) progress.toc() print "--------------------------------------------------" progress.tic() if option["validation"] and option["references"]: progress.add_valid(option['scope'], option['validation'], ref_stem, ext_val_script, __file__, option, modelname, bestname, serialize) print "--------------------------------------------------" progress.toc() print "epoch cost {}".format(numpy.mean(progress.loss_hist)) progress.loss_hist = [] # early stopping if epc + 1 >= option["stop"]: alpha = alpha * option["decay"] stream.reset() progress.epoch += 1 progress.batch_count = 0 # update autosave option["alpha"] = alpha progress.save(option, autoname_format, overwrite) stream.close() progress.tic() print "syncing ..." 
progress.barrier() # hangup and wait progress.toc() best_valid = max(progress.valid_hist, key=lambda item: item[1]) (epc, count), score = best_valid print "best bleu {}-{}: {:.4f}".format(epc + 1, count, score) if progress.delay_val: task_elapse = sum([task.elapse for task in progress.task_manager.tasks]) print "training finished in {}({})".format(datetime.timedelta(seconds=int(progress.elapse)), datetime.timedelta(seconds=int(progress.elapse + task_elapse))) else: print "training finished in {}".format(datetime.timedelta(seconds=int(progress.elapse))) progress.save(option, autoname_format, overwrite) except KeyboardInterrupt: traceback.print_exc() progress.terminate() sys.exit(1) except Exception: traceback.print_exc() progress.terminate() sys.exit(1)
def add_routes(config):
    """
    Called once per thread start, in order to call
    :func:`solute.epfl.core.epflcomponentbase.ComponentBase.add_pyramid_routes`
    for every component provided by epfl through this package.
    """
    epfl_components = (
        Box, LoginBox, ModalBox, TabsLayout, NavLayout, ColLayout,
        CardinalLayout, Link, RecursiveTree, ListLayout, PrettyListLayout,
        PaginatedListLayout, LinkListLayout, GroupedLinkListLayout,
        HoverLinkListLayout, ContextListLayout, TableLayout, SelectableList,
        TypeAhead, Form, Button, TextInput, TextEditor, CodeEditor,
        NumberInput, Textarea, Radio, ButtonRadio, Toggle, SimpleToggle,
        Checkbox, Select, Upload, Download, ColorPicker, ColorThief,
        DatetimeInput, AutoCompleteInput, PasswordInput, Badge, Diagram,
        Progress, StackedProgress, Image, Text, Placeholder, PlainHtml,
        Breadcrumb, Carousel, Popover, TextList, Dropdown, EmbeddedVideo,
    )
    # Register each component's pyramid routes, preserving the original order.
    for component in epfl_components:
        component.add_pyramid_routes(config)
def createTileFromTileConfig(tile_config):
    """Build a 3D printable/renderable terrain tile from ``tile_config``.

    Pipeline: generate a grid mesh, clip it to the tile shape (hexagon),
    displace it with geotiff elevation data, UV-project it, render the
    texture image, scale, apply modifiers, extrude the edges, and export
    the result as an OBJ file. Progress is printed at each stage.
    """
    modifiers_set = modifiers.modifiers_loader.setupModifiersFromParameters(tile_config.modifiers)
    progress_printer = progress.default_printer.Printer()
    progress_printer(Progress(0.0, "Creating grid mesh " + str(tile_config.mesh_resolution_x) + "x" + str(tile_config.mesh_resolution_y)))
    grid_mesh = grid.generate((tile_config.mesh_resolution_x, tile_config.mesh_resolution_y))
    progress_printer(Progress(0.05, "Creating cut shape (default hexagon)"))
    tile_shape = getHexagon()
    # clipMesh reports its own progress through this scaled callback.
    mesh_clipper_callback = progress.callback.Callback(progress_printer, start_at=0.05, end_at=0.5, message='Carving tile shape')
    clipped_mesh = clipMesh(grid_mesh, tile_shape, mesh_clipper_callback)
    progress_printer(Progress(0.51, "Computing geo transform"))
    # (1, 1): the mesh spans a single "pixel" between the two geo corners.
    geo_transform = geo_utils.computeGdalGeoTransformFrom2Points(
        tile_config.geo_top_left, tile_config.geo_top_right, (1, 1))
    progress_printer(Progress(0.55, "Loading geotiff files"))
    raster_files = geotiff_raster.createRastersFromFiles(findGeoTiffFiles(tile_config.tiff_directory))
    heightmap_raster = raster_lookup.MultiGeoRaster(raster_files)
    # Normalize the configured multiplier by the tile's geo distance so
    # elevation scales with tile size.
    elevation_multiplier = tile_config.elevation_multiplier * (1 / tile_config.geo_distance)
    displacement_lookup = HeightmapDisplaceLookup(
        heightmap_raster, geo_transform, elevation_multiplier)
    mesh_displace_callback = progress.callback.Callback(progress_printer, start_at=0.60, end_at=0.89, message='Computing tile displacement')
    MeshDisplace().displaceMesh(clipped_mesh, displacement_lookup, mesh_displace_callback)
    # NOTE(review): progress value drops back from 0.89 to 0.501 here and then
    # jumps to 0.8999 — likely typos (0.9?); confirm the intended values.
    progress_printer(Progress(0.501, "Creating uv coordinates"))
    UVProjector().topViewProject(clipped_mesh)
    progress_printer(Progress(0.8999, 'Creating texture'))
    input_texture_image = Image.open(tile_config.texture_path)
    output_texture_image = Image.new('RGB', (tile_config.texture_resolution_x, tile_config.texture_resolution_y))
    input_image_geo_transform = geo_utils.computeGdalGeoTransformFrom2Points(tile_config.texture_geo_top_left, tile_config.texture_geo_top_right,
                                                                             input_texture_image.size)
    output_texture_geo_transform = geo_utils.computeGdalGeoTransformFrom2Points(tile_config.geo_top_left, tile_config.geo_top_right, output_texture_image.size)
    output_texture = image_raster.ImageRaster(output_texture_image, output_texture_geo_transform)
    input_texture = image_raster.ImageRaster(input_texture_image, input_image_geo_transform)
    image_maker = image_creator.ImageCreator(input_texture, output_texture)
    progress_printer(Progress(0.8999, 'Creating image'))
    # Resamples the input texture into the output's geo frame, then saves it.
    image_maker.createImage()
    output_texture_image.save(tile_config.texture_result_path)
    progress_printer(Progress(0.9, 'Scaling mesh'))
    clipped_mesh.scale(tile_config.size)
    progress_printer(Progress(0.92, 'Applying modifiers'))
    modifiers_set.apply_modifiers_to_mesh(clipped_mesh)
    progress_printer(Progress(0.93, 'Extruding tile edges'))
    # Give the tile physical thickness by extruding its border downward.
    extruder = EdgeExtruder(thickness=tile_config.tile_thickness)
    extruder.extrudeMesh(clipped_mesh)
    progress_printer(Progress(0.97, 'Converting to obj'))
    obj_exporter = obj_mesh.ObjMesh(mesh=clipped_mesh, object_name='Heightmap tile', texture_path=tile_config.texture_result_path)
    obj_exporter.save(tile_config.output_path)
    progress_printer.finish()