def retrival(self):
    ########### UI ###########
    size = bimpy.Vec2(500, 750)
    bimpy.set_next_window_size(size, bimpy.Condition.Once)
    # end_popup() must be called whenever begin_popup_modal() returns True,
    # so the content checks live inside the popup block.
    if bimpy.begin_popup_modal('{}: {}'.format(LANG.retrieve, self.select_label)):
        if self.select_label != '' and self.first_init:
            for idx, file in enumerate(self.r.label2pic[self.select_label.lower()]):
                if idx != 0:
                    bimpy.separator()
                img = self.r.get_thumbnail(file)
                bimpy.text(file)
                bimpy.image(img)
            bimpy.separator()
            if bimpy.button(LANG.retrieve_close):
                bimpy.close_current_popup()
        bimpy.end_popup()
    ##########################
    t = {
        'x': bimpy.get_window_pos().x,
        'y': bimpy.get_window_pos().y,
        'w': bimpy.get_window_size().x,
        'h': bimpy.get_window_size().y,
        'self': self,
    }
    return t
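# retrival() only draws the modal: begin_popup_modal() returns False until some
# caller opens the popup by the exact same title string (the image-shower window
# further below does this via bimpy.open_popup). A minimal, self-contained sketch
# of that open/begin pairing; the window title and label text are illustrative only.
import bimpy

ctx = bimpy.Context()
ctx.init(600, 400, "popup demo")
while not ctx.should_close():
    with ctx:
        bimpy.begin("main")
        if bimpy.button("show results"):
            bimpy.open_popup("Retrieve: cat")       # title must match below
        if bimpy.begin_popup_modal("Retrieve: cat"):
            bimpy.text("...thumbnails would go here...")
            if bimpy.button("Close"):
                bimpy.close_current_popup()
            bimpy.end_popup()
        bimpy.end()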
sel_str = "" img = None fbo = glGenFramebuffers(1) _converted = glGenTextures(1) while not ctx.should_close(): ctx.new_frame() try: tex1 = make_tex(val1) server.publish_frame_texture(tex1, syphonpy.MakeRect(0, 0, 640, 480), syphonpy.MakeSize(640, 480), False) val1 += 1 bimpy.image(tex1, bimpy.Vec2(640, 480)) bimpy.text(str(_texture)) bimpy.text(server.context()) pass except Exception as e: bimpy.text(str(e)) bimpy.begin("Client") if bimpy.begin_combo("Source", sel_str): lst = syphonpy.ServerDirectory.servers() for k in lst: if bimpy.selectable(k.name + " : " + k.app_name): client = syphonpy.SyphonClient(k) sel_str = k.name + " : " + k.app_name
import bimpy as b
from PIL import Image

c = b.Context()
c.init(1200, 1200, "bimpy test")

# Build a 512x512 RGBA gradient test image.
img = Image.new("RGBA", (512, 512))
px = img.load()
for x in range(512):
    for y in range(512):
        r = int(255.0 * float(x) / 512.0)
        g = int(255.0 * float(y) / 512.0)
        px[x, y] = (r, g, max(255 - r - g, 0), 255)

b_img = b.Image(img)
b_f1 = b.Float()
b_f2 = b.Float()
b_f3 = b.Float()

while not c.should_close():
    with c:
        b.text("hi")
        if b.button("cat"):
            print("mew")
        b.input_float("float1", b_f1, 0.0, 1.0)
        b.image(b_img)
        b.slider_float3("float", b_f1, b_f2, b_f3, 0.0, 1.0)
bimpy.same_line()
if (bimpy.button(" Next > (x) ") or bimpy.is_key_released(ord('X'))
        or is_autoplay.value):
    b_i.value += 1

if display_frame != b_i.value:
    simulate_to_frame(b_i.value)

bimpy.combo('Label used for annotation', current_label_idx, all_labels)

img_display_w = bimpy.get_window_content_region_width()
img_display_h = img_display_w * video_h / video_w

# Get the zero coordinate of the actual image rendering.
img_display_zero = bimpy.get_cursor_pos() + bimpy.get_window_pos()
bimpy.image(img_view, bimpy.Vec2(img_display_w, img_display_h))

# Logic for deleting rects: click an active rect to make it inactive,
# click an inactive rect to delete it.
delete_rect = None
for key, r in rect_db[display_frame].items():
    color = 0xFF00FF00 if is_rect_active(r) else 0xFFFFFF00
    x1, y1, x2, y2 = ui_get_screen_rect(r.x1, r.y1, r.x2, r.y2)
    if bimpy.is_mouse_hovering_rect(bimpy.Vec2(x1, y1), bimpy.Vec2(x2, y2), 0):
        color = 0xFFFFFF00 if is_rect_active(r) else 0xFF0000FF
        is_placing_rect = False
        bimpy.begin_tooltip()
        bimpy.text(r.label)
        bimpy.text('Idx: %d' % r.idx)
        bimpy.end_tooltip()
def sample(cfg, logger):
    model = Model(
        startf=cfg.MODEL.START_CHANNEL_COUNT,
        layer_count=cfg.MODEL.LAYER_COUNT,
        maxf=cfg.MODEL.MAX_CHANNEL_COUNT,
        latent_size=cfg.MODEL.LATENT_SPACE_SIZE,
        truncation_psi=cfg.MODEL.TRUNCATIOM_PSI,
        truncation_cutoff=cfg.MODEL.TRUNCATIOM_CUTOFF,
        mapping_layers=cfg.MODEL.MAPPING_LAYERS,
        channels=3)
    model.eval()

    logger.info("Trainable parameters generator:")
    count_parameters(model.generator)

    model_dict = {
        'generator_s': model.generator,
        'mapping_fl_s': model.mapping,
        'dlatent_avg': model.dlatent_avg,
    }

    checkpointer = Checkpointer(cfg, model_dict, logger=logger, save=True)
    checkpointer.load()

    ctx = bimpy.Context()
    remove = bimpy.Bool(False)
    layers = bimpy.Int(8)
    ctx.init(1800, 1600, "Styles")

    rnd = np.random.RandomState(5)
    latents = rnd.randn(1, cfg.MODEL.LATENT_SPACE_SIZE)
    sample = torch.tensor(latents).float().cuda()

    def update_image(sample):
        with torch.no_grad():
            torch.manual_seed(0)
            model.eval()
            x_rec = model.generate(layers.value, remove.value, z=sample)
            # model.generator.set(l.value, c.value)
            resultsample = ((x_rec * 0.5 + 0.5) * 255).type(torch.long).clamp(0, 255)
            resultsample = resultsample.cpu()[0, :, :, :]
            return resultsample.type(torch.uint8).transpose(0, 2).transpose(0, 1)

    with torch.no_grad():
        save_image(model.generate(8, True, z=sample) * 0.5 + 0.5, 'sample.png')

    im = bimpy.Image(update_image(sample))

    while not ctx.should_close():
        with ctx:
            bimpy.set_window_font_scale(2.0)
            if bimpy.checkbox('REMOVE BLOB', remove):
                im = bimpy.Image(update_image(sample))
            if bimpy.button('NEXT'):
                latents = rnd.randn(1, cfg.MODEL.LATENT_SPACE_SIZE)
                sample = torch.tensor(latents).float().cuda()
                im = bimpy.Image(update_image(sample))
            if bimpy.slider_int("Layers", layers, 0, 8):
                im = bimpy.Image(update_image(sample))
            bimpy.image(im, bimpy.Vec2(1024, 1024))
def sample(cfg, logger):
    torch.cuda.set_device(0)
    model = Model(startf=cfg.MODEL.START_CHANNEL_COUNT,
                  layer_count=cfg.MODEL.LAYER_COUNT,
                  maxf=cfg.MODEL.MAX_CHANNEL_COUNT,
                  latent_size=cfg.MODEL.LATENT_SPACE_SIZE,
                  truncation_psi=cfg.MODEL.TRUNCATIOM_PSI,
                  truncation_cutoff=cfg.MODEL.TRUNCATIOM_CUTOFF,
                  mapping_layers=cfg.MODEL.MAPPING_LAYERS,
                  channels=cfg.MODEL.CHANNELS,
                  generator=cfg.MODEL.GENERATOR,
                  encoder=cfg.MODEL.ENCODER)
    model.cuda(0)
    model.eval()
    model.requires_grad_(False)

    decoder = model.decoder
    encoder = model.encoder
    mapping_tl = model.mapping_d
    mapping_fl = model.mapping_f
    dlatent_avg = model.dlatent_avg

    logger.info("Trainable parameters generator:")
    count_parameters(decoder)

    logger.info("Trainable parameters discriminator:")
    count_parameters(encoder)

    arguments = dict()
    arguments["iteration"] = 0

    model_dict = {
        'discriminator_s': encoder,
        'generator_s': decoder,
        'mapping_tl_s': mapping_tl,
        'mapping_fl_s': mapping_fl,
        'dlatent_avg': dlatent_avg
    }

    checkpointer = Checkpointer(cfg, model_dict, {}, logger=logger, save=False)
    extra_checkpoint_data = checkpointer.load()

    model.eval()

    layer_count = cfg.MODEL.LAYER_COUNT

    def encode(x):
        Z, _ = model.encode(x, layer_count - 1, 1)
        Z = Z.repeat(1, model.mapping_f.num_layers, 1)
        return Z

    def decode(x):
        layer_idx = torch.arange(2 * layer_count)[np.newaxis, :, np.newaxis]
        ones = torch.ones(layer_idx.shape, dtype=torch.float32)
        coefs = torch.where(layer_idx < model.truncation_cutoff, ones, ones)
        # x = torch.lerp(model.dlatent_avg.buff.data, x, coefs)
        return model.decoder(x, layer_count - 1, 1, noise=True)

    path = 'dataset_samples/faces/realign1024x1024'

    paths = list(os.listdir(path))
    paths.sort()
    paths_backup = paths[:]
    randomize = bimpy.Bool(True)
    current_file = bimpy.String("")

    ctx = bimpy.Context()

    attribute_values = [bimpy.Float(0) for i in indices]

    W = [torch.tensor(np.load("principal_directions/direction_%d.npy" % i),
                      dtype=torch.float32) for i in indices]

    rnd = np.random.RandomState(5)

    def loadNext():
        img = np.asarray(Image.open(path + '/' + paths[0]))
        current_file.value = paths[0]
        paths.pop(0)
        if len(paths) == 0:
            paths.extend(paths_backup)

        if img.shape[2] == 4:
            img = img[:, :, :3]
        im = img.transpose((2, 0, 1))
        x = torch.tensor(np.asarray(im, dtype=np.float32),
                         device='cpu', requires_grad=True).cuda() / 127.5 - 1.
        if x.shape[0] == 4:
            x = x[:3]

        needed_resolution = model.decoder.layer_to_resolution[-1]
        while x.shape[2] > needed_resolution:
            x = F.avg_pool2d(x, 2, 2)
        if x.shape[2] != needed_resolution:
            x = F.adaptive_avg_pool2d(x, (needed_resolution, needed_resolution))

        img_src = ((x * 0.5 + 0.5) * 255).type(torch.long).clamp(0, 255).cpu().type(
            torch.uint8).transpose(0, 2).transpose(0, 1).numpy()

        latents_original = encode(x[None, ...].cuda())
        latents = latents_original[0, 0].clone()
        latents -= model.dlatent_avg.buff.data[0]

        for v, w in zip(attribute_values, W):
            v.value = (latents * w).sum()

        for v, w in zip(attribute_values, W):
            latents = latents - v.value * w

        return latents, latents_original, img_src

    def loadRandom():
        latents = rnd.randn(1, cfg.MODEL.LATENT_SPACE_SIZE)
        lat = torch.tensor(latents).float().cuda()
        dlat = mapping_fl(lat)
        layer_idx = torch.arange(2 * layer_count)[np.newaxis, :, np.newaxis]
        ones = torch.ones(layer_idx.shape, dtype=torch.float32)
        coefs = torch.where(layer_idx < model.truncation_cutoff, ones, ones)
        dlat = torch.lerp(model.dlatent_avg.buff.data, dlat, coefs)
        x = decode(dlat)[0]
        img_src = ((x * 0.5 + 0.5) * 255).type(torch.long).clamp(0, 255).cpu().type(
            torch.uint8).transpose(0, 2).transpose(0, 1).numpy()

        latents_original = dlat
        latents = latents_original[0, 0].clone()
        latents -= model.dlatent_avg.buff.data[0]

        for v, w in zip(attribute_values, W):
            v.value = (latents * w).sum()

        for v, w in zip(attribute_values, W):
            latents = latents - v.value * w

        return latents, latents_original, img_src

    latents, latents_original, img_src = loadNext()

    ctx.init(1800, 1600, "Styles")

    def update_image(w, latents_original):
        with torch.no_grad():
            w = w + model.dlatent_avg.buff.data[0]
            w = w[None, None, ...].repeat(1, model.mapping_f.num_layers, 1)

            layer_idx = torch.arange(model.mapping_f.num_layers)[np.newaxis, :, np.newaxis]
            cur_layers = (7 + 1) * 2
            mixing_cutoff = cur_layers
            styles = torch.where(layer_idx < mixing_cutoff, w, latents_original)

            x_rec = decode(styles)
            resultsample = ((x_rec * 0.5 + 0.5) * 255).type(torch.long).clamp(0, 255)
            resultsample = resultsample.cpu()[0, :, :, :]
            return resultsample.type(torch.uint8).transpose(0, 2).transpose(0, 1)

    im_size = 2 ** (cfg.MODEL.LAYER_COUNT + 1)
    im = update_image(latents, latents_original)
    print(im.shape)
    im = bimpy.Image(im)

    display_original = True

    seed = 0

    while not ctx.should_close():
        with ctx:
            new_latents = latents + sum(
                [v.value * w for v, w in zip(attribute_values, W)])

            if display_original:
                im = bimpy.Image(img_src)
            else:
                im = bimpy.Image(update_image(new_latents, latents_original))

            bimpy.begin("Principal directions")
            bimpy.columns(2)
            bimpy.set_column_width(0, im_size + 20)
            bimpy.image(im)
            bimpy.next_column()

            for v, label in zip(attribute_values, labels):
                bimpy.slider_float(label, v, -40.0, 40.0)

            bimpy.checkbox("Randomize noise", randomize)
            if randomize.value:
                seed += 1
            torch.manual_seed(seed)

            if bimpy.button('Next'):
                latents, latents_original, img_src = loadNext()
                display_original = True
            if bimpy.button('Display Reconstruction'):
                display_original = False
            if bimpy.button('Generate random'):
                latents, latents_original, img_src = loadRandom()
                display_original = False

            if bimpy.input_text("Current file", current_file, 64) and os.path.exists(
                    path + '/' + current_file.value):
                paths.insert(0, current_file.value)
                latents, latents_original, img_src = loadNext()

            bimpy.end()
def sample(cfg, logger):
    model = Model(startf=cfg.MODEL.START_CHANNEL_COUNT,
                  layer_count=cfg.MODEL.LAYER_COUNT,
                  maxf=cfg.MODEL.MAX_CHANNEL_COUNT,
                  latent_size=cfg.MODEL.LATENT_SPACE_SIZE,
                  truncation_psi=cfg.MODEL.TRUNCATIOM_PSI,
                  truncation_cutoff=cfg.MODEL.TRUNCATIOM_CUTOFF,
                  mapping_layers=cfg.MODEL.MAPPING_LAYERS,
                  channels=3)
    del model.discriminator
    model.eval()
    # torch.cuda.manual_seed_all(110)

    logger.info("Trainable parameters generator:")
    count_parameters(model.generator)

    if False:
        model_dict = {
            'generator': model.generator,
            'mapping': model.mapping,
            'dlatent_avg': model.dlatent_avg,
        }
    else:
        model_dict = {
            'generator_s': model.generator,
            'mapping_s': model.mapping,
            'dlatent_avg': model.dlatent_avg,
        }

    checkpointer = Checkpointer(cfg, model_dict, logger=logger, save=True)

    file_name = 'karras2019stylegan-ffhq'
    # file_name = 'results/model_final'

    checkpointer.load(file_name=file_name + '.pth')
    # checkpointer.save('final_stripped')

    # sample_b = torch.randn(1, cfg.MODEL.LATENT_SPACE_SIZE).view(-1, cfg.MODEL.LATENT_SPACE_SIZE)
    # for i in range(100):
    #     if i % 20 == 0:
    #         sample_a = sample_b
    #         sample_b = torch.randn(1, cfg.MODEL.LATENT_SPACE_SIZE).view(-1, cfg.MODEL.LATENT_SPACE_SIZE)
    #     x = (i % 20) / 20.0
    #     sample = sample_a * (1.0 - x) + sample_b * x
    #     save_sample(model, sample, i)

    print(model.generator.get_statistics(8))
    # print(model.discriminator.get_statistics(8))

    ctx = bimpy.Context()
    ctx.init(1800, 1600, "Styles")

    rnd = np.random.RandomState(5)
    latents = rnd.randn(1, cfg.MODEL.LATENT_SPACE_SIZE)
    sample = torch.tensor(latents).float().cuda()

    def update_image(sample):
        with torch.no_grad():
            model.eval()
            x_rec = model.generate(model.generator.layer_count - 1, 1, z=sample)
            resultsample = ((x_rec * 0.5 + 0.5) * 255).type(torch.long).clamp(0, 255)
            resultsample = resultsample.cpu()[0, :, :, :]
            return resultsample.type(torch.uint8).transpose(0, 2).transpose(0, 1)

    im = update_image(sample)
    print(im.shape)
    im = bimpy.Image(im)

    while not ctx.should_close():
        with ctx:
            im = bimpy.Image(update_image(sample))
            bimpy.image(im)
            # if bimpy.button('Ok'):
            if bimpy.button('NEXT'):
                latents = rnd.randn(1, cfg.MODEL.LATENT_SPACE_SIZE)
                sample = torch.tensor(latents).float().cuda()
                # im = bimpy.Image(update_image(sample))
            # bimpy.set_window_font_scale(2.0)

    exit()

    # Note: everything below is unreachable because of exit() above.
    rnd = np.random.RandomState(111011)
    latents = rnd.randn(1, cfg.MODEL.LATENT_SPACE_SIZE)
    sample = torch.tensor(latents).float().cuda()
    # torch.randn(16, cfg.MODEL.LATENT_SPACE_SIZE).view(-1, cfg.MODEL.LATENT_SPACE_SIZE)
    save_sample(model, sample, 0)

    im_count = 16
    canvas = np.zeros([3, im_size * (im_count + 2), im_size * (im_count + 2)])

    cut_layer_b = 0
    cut_layer_e = 2

    styles = model.mapping(sample)
    styles = list(styles.split(1, 1))

    for i in range(im_count):
        torch.cuda.manual_seed_all(110)
        style = [x[i] for x in styles]
        style = torch.cat(style, dim=0)[None, ...]
        rec = model.generator.decode(style, cfg.MODEL.LAYER_COUNT - 1, 0.7)
        place(canvas, rec[0], 1, 2 + i)
        place(canvas, rec[0], 2 + i, 1)

    for i in range(im_count):
        for j in range(im_count):
            style_a = [x[i] for x in styles[:cut_layer_b]]
            style_b = [x[j] for x in styles[cut_layer_b:cut_layer_e]]
            style_c = [x[i] for x in styles[cut_layer_e:]]
            style = style_a + style_b + style_c
            torch.cuda.manual_seed_all(110)
            style = torch.cat(style, dim=0)[None, ...]
            rec = model.generator.decode(style, cfg.MODEL.LAYER_COUNT - 1, 0.7)
            place(canvas, rec[0], 2 + i, 2 + j)

    save_image(torch.Tensor(canvas), 'reconstruction.png')
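# The update_image() helpers above all repeat the same tensor-to-image conversion.
# A standalone version of that step, going through PIL (an earlier snippet shows
# bimpy.Image accepting a PIL image); permute(1, 2, 0) is equivalent to the
# .transpose(0, 2).transpose(0, 1) chain used above. The function name is ours.
import bimpy
import torch
from PIL import Image

def tensor_to_bimpy_image(x_rec):
    # x_rec: float tensor in [-1, 1], shape (1, C, H, W)
    arr = ((x_rec * 0.5 + 0.5) * 255).clamp(0, 255)              # -> [0, 255]
    arr = arr[0].to(torch.uint8).permute(1, 2, 0).cpu().numpy()  # CHW -> HWC
    return bimpy.Image(Image.fromarray(arr))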
facestr = "" while (not ctx.should_close()): with ctx: TakeWebCamIamge() image, width, height = getCamImage() faces = DetectFace(image) if (len(faces) > 0): facestr, headx, heady = DrawFaces(image, faces) dirvecx, dirvecy, dirvecxnorm, dirvecynorm, dirvecmag, dirvecmagnorm = DrawHUD( image, width, height, headx, heady) im = bimpy.Image(image) bimpy.begin("Image") bimpy.image(im) bimpy.end() bimpy.begin("Analysis") bimpy.text("Hello, world!") f1.value = abs(dirvecxnorm) f2.value = abs(dirvecynorm) f3.value = abs(dirvecmag) bimpy.slider_float3("Face Vector", f1, f2, f3, 0.0, 1.0) bimpy.end() bimpy.begin("Console") bimpy.text("Image read:" + str(f1.value) + str(f2.value) +
def main():
    parser = ArgumentParser(description="Preview animations")
    parser.add_argument("--version", action="version",
                        version="%(prog)s " + __version__)
    parser.add_argument("--width", dest="width", type=int, default=DEF_WIDTH,
                        help="frame width (default: %s)" % DEF_WIDTH)
    parser.add_argument("--height", dest="height", type=int, default=DEF_HEIGHT,
                        help="frame height (default: %s)" % DEF_HEIGHT)
    parser.add_argument("--scale", dest="scale", type=int, default=DEF_SCALE,
                        help="scale preview (default: %s)" % DEF_SCALE)
    parser.add_argument("--double-w", dest="dw", action="store_true",
                        help="double width for 2:1")
    parser.add_argument("--mtime", dest="mtime", type=int, default=DEF_MTIME,
                        help="seconds between checks for changes (default: %s)" % DEF_MTIME)
    parser.add_argument("image", help="image to convert")
    args = parser.parse_args()

    def load_image(filename):
        @with_retry
        def load():
            return Image.open(filename).convert("RGB")

        try:
            image = load()
        except IOError:
            parser.error("failed to open the image")

        (w, h) = image.size
        if w % args.width or h % args.height:
            parser.error("%s size is not a multiple of tile size (%s, %s)"
                         % (filename, args.width, args.height))

        # Cut the sheet into per-frame boxes, row by row.
        frames = []
        for y in range(0, h, args.height):
            for x in range(0, w, args.width):
                frames.append((x, y, x + args.width, y + args.height))
        return image, frames

    image, frames = load_image(args.image)
    frame_list = list(range(len(frames)))

    def scale_image(scale, frameno):
        scale_w = scale if not args.dw else scale * 2
        current = image.resize((args.width * scale_w, args.height * scale),
                               box=frames[frame_list[frameno]], resample=0)
        return bimpy.Image(current)

    ctx = bimpy.Context()
    ctx.init(320, 420, "Preview animation")
    orig = bimpy.Image(image)
    scale = bimpy.Int(args.scale)
    fps = bimpy.Int(args.scale)  # FPS slider; starts at the --scale value (there is no --fps flag)
    frame_list_str = bimpy.String(','.join(map(str, frame_list)))
    im = scale_image(scale.value, 0)
    cur_frame = 0
    paused = False
    start_time = time()
    check_mtime = time()
    last_mtime = os.stat(args.image).st_mtime

    while not ctx.should_close():
        # Periodically reload the image if it changed on disk.
        if time() - check_mtime > args.mtime:
            check_mtime = time()
            if os.stat(args.image).st_mtime != last_mtime:
                last_mtime = os.stat(args.image).st_mtime
                image, frames = load_image(args.image)
                cur_frame = 0
                start_time = time()
                if any(f >= len(frames) for f in frame_list):
                    frame_list = list(range(len(frames)))
                    frame_list_str = bimpy.String(','.join(map(str, frame_list)))

        ctx.new_frame()
        bimpy.set_next_window_pos(bimpy.Vec2(10, 10), bimpy.Condition.Once)
        bimpy.set_next_window_size(bimpy.Vec2(300, 400), bimpy.Condition.Once)
        bimpy.begin("Image: %s" % args.image)

        if not paused:
            if time() - start_time >= 1. / fps.value:
                start_time = time()
                cur_frame += 1
                if cur_frame == len(frame_list):
                    cur_frame = 0
                im = scale_image(scale.value, cur_frame)

        bimpy.image(orig)
        bimpy.image(im)
        bimpy.text("Frame: %02d" % frame_list[cur_frame])

        if bimpy.slider_int("Scale", scale, 1, 20):
            im = scale_image(scale.value, cur_frame)
        if bimpy.slider_int("FPS", fps, 1, 30):
            start_time = time()
            cur_frame = 0
        if bimpy.input_text("Frames", frame_list_str, 64,
                            bimpy.InputTextFlags.EnterReturnsTrue):
            try:
                frame_list = [int(i.strip())
                              for i in frame_list_str.value.split(",")]
                start_time = time()
                cur_frame = 0
            except Exception as ex:
                print("Error parsing frame list: %s" % ex)
        if bimpy.button("Play" if paused else "Pause"):
            paused = not paused

        bimpy.end()
        ctx.render()
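# load_image() relies on a with_retry decorator that is not shown. A minimal
# sketch under the assumption that it simply retries the wrapped call when the
# file is briefly unreadable (e.g. mid-save); the attempt count and delay are
# hypothetical.
import functools
import time as _time

def with_retry(fn, attempts=3, delay=0.1):
    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        for i in range(attempts):
            try:
                return fn(*args, **kwargs)
            except IOError:
                if i == attempts - 1:
                    raise            # give up after the final attempt
                _time.sleep(delay)   # wait for the writer to finish, then retry
    return wrapper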
print("saved:", saved) if time.clock() - saved_time <= 2: bimpy.same_line() bimpy.text(f"Saved to {saved}.png") width, height = 64, 64 first = True col_count = 0 row_count = 0 padding = 0 disp_count = 0 for url in img_urls: if first: padding = bimpy.get_cursor_pos().y first = False bimpy.set_cursor_pos( bimpy.Vec2(col_count * width, row_count * height + padding)) col_count += 1 if col_count == b_col_count.value: row_count += 1 col_count = 0 b_img = bimpy_imgdict.get(url, None) if b_img: bimpy.image(b_img) disp_count += 1 bimpy.same_line() bimpy.end() bimpy.end()
def render(self, ctx, windows_info):
    # Position and size are computed automatically, next to the file browser.
    self.pos = bimpy.Vec2(
        windows_info['file_brewswer_ui']['x'] +
        windows_info['file_brewswer_ui']['w'] + conf.margin,
        conf.margin)
    self.size = bimpy.Vec2(ctx.width() - self.pos.x - conf.margin,
                           ctx.height() - 3 * conf.margin - conf.meta_info_height)

    bimpy.set_next_window_pos(self.pos, bimpy.Condition.Always)
    bimpy.set_next_window_size(self.size, bimpy.Condition.Always)
    bimpy.begin(LANG.image_shower_ui_title, bimpy.Bool(True),
                bimpy.WindowFlags.NoCollapse | bimpy.WindowFlags.NoMove |
                bimpy.WindowFlags.NoResize | bimpy.WindowFlags.HorizontalScrollbar)

    ########### UI ###########
    if self.im is not None:
        bimpy.set_cursor_pos(bimpy.Vec2(0.0, conf.margin * 3))
        bimpy.image(self.im)

        # If detection results are loaded, draw one button per detected label.
        if self.labels is not None:
            for i, label in enumerate(self.labels):
                color = self.COLORS[self.classes.index(label)]

                # Show the button at the bottom-left of the bounding box.
                bimpy.set_cursor_pos(
                    bimpy.Vec2(self.bbox[i][0] + 10, self.bbox[i][3] + 10))

                bimpy.push_id_int(i)
                if conf.show_yolo_confience:
                    bimpy.button(label + ' ' +
                                 str(format(self.confidence[i] * 100, '.2f')) + '%')
                else:
                    bimpy.button(label)
                if bimpy.is_item_hovered(i):
                    label = label[0].upper() + label[1:]
                    s = "{} ({})\n{}".format(
                        label,
                        str(format(self.confidence[i] * 100, '.2f')) + '%',
                        LANG.click_to_view_more)
                    bimpy.set_tooltip(s)
                if bimpy.is_item_active():
                    self.select_label = label
                bimpy.pop_id()

    bimpy.set_cursor_pos(bimpy.Vec2(conf.margin, conf.margin * 1.5))
    if bimpy.button(LANG.smart_analyse):
        self.object_detection()

    bimpy.same_line()
    bimpy.checkbox(LANG.auto, self.auto)

    # Resize control.
    bimpy.same_line()
    bimpy.push_item_width(150)
    bimpy.drag_float(LANG.drag, self.scale, 1.0, 10, 1000)
    bimpy.pop_item_width()

    if abs(self.last_scale - self.scale.value) > 4.:
        xx = self.size.x * self.scale.value / 100.
        yy = (self.size.y - 45 - 40) * self.scale.value / 100.
        im = self.i_s.resize(self.raw_im, xx, yy)
        self.now_im = im
        self.set_im(im)
        # Cache the scale to save computation on subsequent frames.
        self.last_scale = self.scale.value

    # If an object button was clicked, open the retrieval popup for it.
    if self.select_label != '':
        windows_info['retrival_ui']['self'].select_label = self.select_label
        bimpy.open_popup('{}: {}'.format(LANG.retrieve, self.select_label))
        self.select_label = ''  # reset
    windows_info['retrival_ui']['self'].retrival()
    ##########################

    t = {
        'x': bimpy.get_window_pos().x,
        'y': bimpy.get_window_pos().y,
        'w': bimpy.get_window_size().x,
        'h': bimpy.get_window_size().y,
        'self': self,
    }
    bimpy.end()
    return t
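# A sketch of how these render() dicts might be threaded through the frame loop:
# each window publishes its rect plus `self` under its own key, and siblings read
# the entries on subsequent calls. The window objects and loop structure here are
# assumptions; only the dict keys come from the code above.
windows_info = {}
while not ctx.should_close():
    with ctx:
        windows_info['file_brewswer_ui'] = file_brewswer_ui.render(ctx, windows_info)
        windows_info['image_shower_ui'] = image_shower_ui.render(ctx, windows_info)
        windows_info['retrival_ui'] = retrival_ui.render(ctx, windows_info)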