Example #1
    def get_thumbnail(self, file):
        if file not in self.thumbnail_cache:
            img = Image.open(file)
            img = self.i_s.resize(img, 445, 400)
            self.thumbnail_cache[file] = bimpy.Image(img)

        return self.thumbnail_cache[file]
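In context, a getter like this is called every frame from the draw loop; because the wrapped bimpy.Image is cached, the texture upload happens only once per file. A minimal self-contained sketch of that usage, using PIL's own thumbnail() in place of the i_s resize helper (the file name is a placeholder):

import bimpy
from PIL import Image

class Thumbnails:
    def __init__(self):
        self.thumbnail_cache = {}

    def get_thumbnail(self, file):
        # Same caching pattern as above.
        if file not in self.thumbnail_cache:
            img = Image.open(file)
            img.thumbnail((445, 400))  # downscale in place, keeping aspect ratio
            self.thumbnail_cache[file] = bimpy.Image(img)
        return self.thumbnail_cache[file]

ctx = bimpy.Context()
ctx.init(600, 500, "Thumbnail preview")
thumbs = Thumbnails()
while not ctx.should_close():
    with ctx:
        # Cheap after the first frame: the cached texture is reused.
        bimpy.image(thumbs.get_thumbnail("cat.png"))  # placeholder file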
Example #2
    def build_cache(self):
        # Build bimpy.Image thumbnails for every known picture up front.
        for k, v in self.pic2label.items():

            # Skip files that are already cached.
            if k not in self.thumbnail_cache:
                img = Image.open(k)
                img = self.i_s.resize(img, 50, 50)
                self.thumbnail_cache[k] = bimpy.Image(img)
Example #3
def set_video_frame(frame_idx=None):
    global display_frame
    global img_view
    global img_data

    if frame_idx is None:
        frame_idx = display_frame + 1
    if frame_idx < 0 or frame_idx >= video_len:
        return None
    # After read(), the capture position already points at the next frame, so
    # an explicit (and potentially slow) seek is only needed for jumps.
    if frame_idx != display_frame + 1:
        video_file.set(cv2.CAP_PROP_POS_FRAMES, frame_idx)

    display_frame = frame_idx
    ok, frame = video_file.read()
    if not ok:
        return None
    img_data = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    img_view = bimpy.Image(img_data)
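The globals this function reads and writes are created elsewhere in the script; a minimal setup sketch, with a placeholder file name:

import cv2
import bimpy

video_file = cv2.VideoCapture("input.mp4")  # placeholder path
video_len = int(video_file.get(cv2.CAP_PROP_FRAME_COUNT))
display_frame = -1                          # nothing shown yet
img_view = None
img_data = None

set_video_frame(0)                          # decode the first frame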
Example #4
import bimpy as b
from PIL import Image

c = b.Context()
c.init(1200, 1200, "bimpy test")

img = Image.new("RGBA", (512, 512))
px = img.load()
# Fill the canvas with a red/green gradient; blue takes the remainder.
for x in range(512):
    for y in range(512):
        r = int(255.0 * float(x) / 512.0)
        g = int(255.0 * float(y) / 512.0)
        px[x, y] = (r, g, max(255 - r - g, 0), 255)

b_img = b.Image(img)

b_f1 = b.Float()
b_f2 = b.Float()
b_f3 = b.Float()

while not c.should_close():
    with c:
        b.text("hi")
        if b.button("cat"):
            print("mew")

        b.input_float("float1", b_f1, 0.0, 1.0)
        b.image(b_img)
        b.slider_float3("float", b_f1, b_f2, b_f3, 0.0, 1.0)
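The with c: block is shorthand for starting and finishing a frame; Example #11 further down does the same thing explicitly. An equivalent body for the loop above:

while not c.should_close():
    c.new_frame()   # begin the ImGui frame
    b.text("hi")
    c.render()      # draw the frame and swap buffers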
Example #5
def sample(cfg, logger):
    model = Model(
        startf=cfg.MODEL.START_CHANNEL_COUNT,
        layer_count=cfg.MODEL.LAYER_COUNT,
        maxf=cfg.MODEL.MAX_CHANNEL_COUNT,
        latent_size=cfg.MODEL.LATENT_SPACE_SIZE,
        truncation_psi=cfg.MODEL.TRUNCATIOM_PSI,
        truncation_cutoff=cfg.MODEL.TRUNCATIOM_CUTOFF,
        mapping_layers=cfg.MODEL.MAPPING_LAYERS,
        channels=3)
    model.eval()

    logger.info("Trainable parameters generator:")
    count_parameters(model.generator)

    model_dict = {
        'generator_s': model.generator,
        'mapping_fl_s': model.mapping,
        'dlatent_avg': model.dlatent_avg,
    }

    checkpointer = Checkpointer(cfg,
                                model_dict,
                                logger=logger,
                                save=True)

    checkpointer.load()

    ctx = bimpy.Context()
    remove = bimpy.Bool(False)
    layers = bimpy.Int(8)

    ctx.init(1800, 1600, "Styles")

    rnd = np.random.RandomState(5)
    latents = rnd.randn(1, cfg.MODEL.LATENT_SPACE_SIZE)
    sample = torch.tensor(latents).float().cuda()

    def update_image(sample):
        with torch.no_grad():
            torch.manual_seed(0)
            model.eval()
            x_rec = model.generate(layers.value, remove.value, z=sample)
            #model.generator.set(l.value, c.value)
            resultsample = ((x_rec * 0.5 + 0.5) * 255).type(torch.long).clamp(0, 255)
            resultsample = resultsample.cpu()[0, :, :, :]

            return resultsample.type(torch.uint8).transpose(0, 2).transpose(0, 1)

    with torch.no_grad():
        save_image(model.generate(8, True, z=sample) * 0.5 + 0.5, 'sample.png')

    im = bimpy.Image(update_image(sample))
    while not ctx.should_close():
        with ctx:

            bimpy.set_window_font_scale(2.0)

            if bimpy.checkbox('REMOVE BLOB', remove):
                im = bimpy.Image(update_image(sample))
            if bimpy.button('NEXT'):
                latents = rnd.randn(1, cfg.MODEL.LATENT_SPACE_SIZE)
                sample = torch.tensor(latents).float().cuda()
                im = bimpy.Image(update_image(sample))
            if bimpy.slider_int("Layers", layers, 0, 8):
                im = bimpy.Image(update_image(sample))
            bimpy.image(im, bimpy.Vec2(1024, 1024))
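A detail worth noting in this loop: bimpy widgets return True only on the frame the user changes them, so the expensive generator call and bimpy.Image upload happen on interaction rather than every frame. Reduced to its skeleton (render() is a stand-in for any function that produces an image):

im = bimpy.Image(render())
while not ctx.should_close():
    with ctx:
        if bimpy.slider_int("Layers", layers, 0, 8):
            im = bimpy.Image(render())  # rebuild only when the slider moved
        bimpy.image(im)                 # cheap: reuses the uploaded texture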
Example #6
def sample(cfg, logger):
    torch.cuda.set_device(0)
    model = Model(startf=cfg.MODEL.START_CHANNEL_COUNT,
                  layer_count=cfg.MODEL.LAYER_COUNT,
                  maxf=cfg.MODEL.MAX_CHANNEL_COUNT,
                  latent_size=cfg.MODEL.LATENT_SPACE_SIZE,
                  truncation_psi=cfg.MODEL.TRUNCATIOM_PSI,
                  truncation_cutoff=cfg.MODEL.TRUNCATIOM_CUTOFF,
                  mapping_layers=cfg.MODEL.MAPPING_LAYERS,
                  channels=cfg.MODEL.CHANNELS,
                  generator=cfg.MODEL.GENERATOR,
                  encoder=cfg.MODEL.ENCODER)
    model.cuda(0)
    model.eval()
    model.requires_grad_(False)

    decoder = model.decoder
    encoder = model.encoder
    mapping_tl = model.mapping_d
    mapping_fl = model.mapping_f
    dlatent_avg = model.dlatent_avg

    logger.info("Trainable parameters generator:")
    count_parameters(decoder)

    logger.info("Trainable parameters discriminator:")
    count_parameters(encoder)

    arguments = dict()
    arguments["iteration"] = 0

    model_dict = {
        'discriminator_s': encoder,
        'generator_s': decoder,
        'mapping_tl_s': mapping_tl,
        'mapping_fl_s': mapping_fl,
        'dlatent_avg': dlatent_avg
    }

    checkpointer = Checkpointer(cfg, model_dict, {}, logger=logger, save=False)

    extra_checkpoint_data = checkpointer.load()

    model.eval()

    layer_count = cfg.MODEL.LAYER_COUNT

    def encode(x):
        Z, _ = model.encode(x, layer_count - 1, 1)
        Z = Z.repeat(1, model.mapping_f.num_layers, 1)
        return Z

    def decode(x):
        layer_idx = torch.arange(2 * layer_count)[np.newaxis, :, np.newaxis]
        ones = torch.ones(layer_idx.shape, dtype=torch.float32)
        # Both branches are `ones` and the lerp below is commented out, so
        # truncation is effectively disabled in this snippet.
        coefs = torch.where(layer_idx < model.truncation_cutoff, ones, ones)
        # x = torch.lerp(model.dlatent_avg.buff.data, x, coefs)
        return model.decoder(x, layer_count - 1, 1, noise=True)

    path = 'dataset_samples/faces/realign1024x1024'

    paths = list(os.listdir(path))
    paths.sort()
    paths_backup = paths[:]
    randomize = bimpy.Bool(True)
    current_file = bimpy.String("")

    ctx = bimpy.Context()

    # `indices` (and `labels`, used below) are defined at module scope in the
    # original script, one entry per principal direction.
    attribute_values = [bimpy.Float(0) for i in indices]

    W = [
        torch.tensor(np.load("principal_directions/direction_%d.npy" % i),
                     dtype=torch.float32) for i in indices
    ]

    rnd = np.random.RandomState(5)

    def loadNext():
        img = np.asarray(Image.open(path + '/' + paths[0]))
        current_file.value = paths[0]
        paths.pop(0)
        if len(paths) == 0:
            paths.extend(paths_backup)

        if img.shape[2] == 4:
            img = img[:, :, :3]
        im = img.transpose((2, 0, 1))
        x = torch.tensor(np.asarray(im, dtype=np.float32),
                         device='cpu',
                         requires_grad=True).cuda() / 127.5 - 1.
        if x.shape[0] == 4:
            x = x[:3]

        needed_resolution = model.decoder.layer_to_resolution[-1]
        while x.shape[2] > needed_resolution:
            x = F.avg_pool2d(x, 2, 2)
        if x.shape[2] != needed_resolution:
            x = F.adaptive_avg_pool2d(x,
                                      (needed_resolution, needed_resolution))

        img_src = ((x * 0.5 + 0.5) * 255).type(torch.long).clamp(0, 255) \
            .cpu().type(torch.uint8).transpose(0, 2).transpose(0, 1).numpy()

        latents_original = encode(x[None, ...].cuda())
        latents = latents_original[0, 0].clone()
        latents -= model.dlatent_avg.buff.data[0]

        for v, w in zip(attribute_values, W):
            v.value = (latents * w).sum()

        for v, w in zip(attribute_values, W):
            latents = latents - v.value * w

        return latents, latents_original, img_src

    def loadRandom():
        latents = rnd.randn(1, cfg.MODEL.LATENT_SPACE_SIZE)
        lat = torch.tensor(latents).float().cuda()
        dlat = mapping_fl(lat)
        layer_idx = torch.arange(2 * layer_count)[np.newaxis, :, np.newaxis]
        ones = torch.ones(layer_idx.shape, dtype=torch.float32)
        # Both branches are `ones`, so the lerp below leaves dlat unchanged.
        coefs = torch.where(layer_idx < model.truncation_cutoff, ones, ones)
        dlat = torch.lerp(model.dlatent_avg.buff.data, dlat, coefs)
        x = decode(dlat)[0]
        img_src = ((x * 0.5 + 0.5) * 255).type(torch.long).clamp(0, 255) \
            .cpu().type(torch.uint8).transpose(0, 2).transpose(0, 1).numpy()
        latents_original = dlat
        latents = latents_original[0, 0].clone()
        latents -= model.dlatent_avg.buff.data[0]

        for v, w in zip(attribute_values, W):
            v.value = (latents * w).sum()

        for v, w in zip(attribute_values, W):
            latents = latents - v.value * w

        return latents, latents_original, img_src

    latents, latents_original, img_src = loadNext()

    ctx.init(1800, 1600, "Styles")

    def update_image(w, latents_original):
        with torch.no_grad():
            w = w + model.dlatent_avg.buff.data[0]
            w = w[None, None, ...].repeat(1, model.mapping_f.num_layers, 1)

            layer_idx = torch.arange(model.mapping_f.num_layers)[np.newaxis, :,
                                                                 np.newaxis]
            cur_layers = (7 + 1) * 2
            mixing_cutoff = cur_layers
            styles = torch.where(layer_idx < mixing_cutoff, w,
                                 latents_original)

            x_rec = decode(styles)
            resultsample = ((x_rec * 0.5 + 0.5) * 255).type(torch.long).clamp(0, 255)
            resultsample = resultsample.cpu()[0, :, :, :]
            return resultsample.type(torch.uint8).transpose(0, 2).transpose(0, 1)

    im_size = 2**(cfg.MODEL.LAYER_COUNT + 1)
    im = update_image(latents, latents_original)
    print(im.shape)
    im = bimpy.Image(im)

    display_original = True

    seed = 0

    while not ctx.should_close():
        with ctx:
            new_latents = latents + sum(
                [v.value * w for v, w in zip(attribute_values, W)])

            if display_original:
                im = bimpy.Image(img_src)
            else:
                im = bimpy.Image(update_image(new_latents, latents_original))

            bimpy.begin("Principal directions")
            bimpy.columns(2)
            bimpy.set_column_width(0, im_size + 20)
            bimpy.image(im)
            bimpy.next_column()

            for v, label in zip(attribute_values, labels):
                bimpy.slider_float(label, v, -40.0, 40.0)

            bimpy.checkbox("Randomize noise", randomize)

            if randomize.value:
                seed += 1

            torch.manual_seed(seed)

            if bimpy.button('Next'):
                latents, latents_original, img_src = loadNext()
                display_original = True
            if bimpy.button('Display Reconstruction'):
                display_original = False
            if bimpy.button('Generate random'):
                latents, latents_original, img_src = loadRandom()
                display_original = False

            if bimpy.input_text(
                    "Current file", current_file,
                    64) and os.path.exists(path + '/' + current_file.value):
                paths.insert(0, current_file.value)
                latents, latents_original, img_src = loadNext()

            bimpy.end()
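The slider machinery above amounts to a projection: loadNext() and loadRandom() compute each slider value as the dot product of the latent with a principal direction, subtract those components, and the render loop adds them back scaled by the current slider values, so each slider moves the latent along exactly one direction. A tiny 2-D numpy illustration of the same arithmetic:

import numpy as np

w = np.array([1.0, 0.0])          # one "principal direction"
latent = np.array([3.0, 2.0])
v = float((latent * w).sum())     # slider value, as in loadNext()
residual = latent - v * w         # the component the slider does not control
edited = residual + 5.0 * w       # user drags the slider to 5.0
print(edited)                     # [5. 2.]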
Example #7
def sample(cfg, logger):
    model = Model(startf=cfg.MODEL.START_CHANNEL_COUNT,
                  layer_count=cfg.MODEL.LAYER_COUNT,
                  maxf=cfg.MODEL.MAX_CHANNEL_COUNT,
                  latent_size=cfg.MODEL.LATENT_SPACE_SIZE,
                  truncation_psi=cfg.MODEL.TRUNCATIOM_PSI,
                  truncation_cutoff=cfg.MODEL.TRUNCATIOM_CUTOFF,
                  mapping_layers=cfg.MODEL.MAPPING_LAYERS,
                  channels=3)
    del model.discriminator
    model.eval()

    #torch.cuda.manual_seed_all(110)

    logger.info("Trainable parameters generator:")
    count_parameters(model.generator)

    if False:
        model_dict = {
            'generator': model.generator,
            'mapping': model.mapping,
            'dlatent_avg': model.dlatent_avg,
        }
    else:
        model_dict = {
            'generator_s': model.generator,
            'mapping_s': model.mapping,
            'dlatent_avg': model.dlatent_avg,
        }

    checkpointer = Checkpointer(cfg, model_dict, logger=logger, save=True)

    file_name = 'karras2019stylegan-ffhq'
    # file_name = 'results/model_final'

    checkpointer.load(file_name=file_name + '.pth')
    # checkpointer.save('final_stripped')

    #sample_b = torch.randn(1, cfg.MODEL.LATENT_SPACE_SIZE).view(-1, cfg.MODEL.LATENT_SPACE_SIZE)

    # for i in range(100):
    #     if i % 20 == 0:
    #         sample_a = sample_b
    #         sample_b = torch.randn(1, cfg.MODEL.LATENT_SPACE_SIZE).view(-1, cfg.MODEL.LATENT_SPACE_SIZE)
    #     x = (i % 20) / 20.0
    #     sample = sample_a * (1.0 - x) + sample_b * x
    #     save_sample(model, sample, i)

    print(model.generator.get_statistics(8))
    # print(model.discriminator.get_statistics(8))

    ctx = bimpy.Context()

    ctx.init(1800, 1600, "Styles")

    rnd = np.random.RandomState(5)
    latents = rnd.randn(1, cfg.MODEL.LATENT_SPACE_SIZE)
    sample = torch.tensor(latents).float().cuda()

    def update_image(sample):
        with torch.no_grad():
            model.eval()
            x_rec = model.generate(model.generator.layer_count - 1,
                                   1,
                                   z=sample)
            resultsample = ((x_rec * 0.5 + 0.5) * 255).type(torch.long).clamp(0, 255)
            resultsample = resultsample.cpu()[0, :, :, :]

            return resultsample.type(torch.uint8).transpose(0, 2).transpose(0, 1)

    im = update_image(sample)
    print(im.shape)
    im = bimpy.Image(im)

    while not ctx.should_close():
        with ctx:
            im = bimpy.Image(update_image(sample))
            bimpy.image(im)
            # if bimpy.button('Ok'):
            if bimpy.button('NEXT'):
                latents = rnd.randn(1, cfg.MODEL.LATENT_SPACE_SIZE)
                sample = torch.tensor(latents).float().cuda()
                # im = bimpy.Image(update_image(sample))
            #bimpy.set_window_font_scale(2.0)

    exit()

    # Note: exit() above makes everything below unreachable; it was apparently
    # left in the original script as style-mixing experimentation code.

    rnd = np.random.RandomState(111011)
    latents = rnd.randn(1, cfg.MODEL.LATENT_SPACE_SIZE)
    # alternative: torch.randn(16, cfg.MODEL.LATENT_SPACE_SIZE).view(-1, cfg.MODEL.LATENT_SPACE_SIZE)
    sample = torch.tensor(latents).float().cuda()
    save_sample(model, sample, 0)

    im_count = 16
    canvas = np.zeros([3, im_size * (im_count + 2), im_size * (im_count + 2)])
    cut_layer_b = 0
    cut_layer_e = 2

    styles = model.mapping(sample)
    styles = list(styles.split(1, 1))

    for i in range(im_count):
        torch.cuda.manual_seed_all(110)
        style = [x[i] for x in styles]
        style = torch.cat(style, dim=0)[None, ...]
        rec = model.generator.decode(style, cfg.MODEL.LAYER_COUNT - 1, 0.7)
        place(canvas, rec[0], 1, 2 + i)

        place(canvas, rec[0], 2 + i, 1)

    for i in range(im_count):
        for j in range(im_count):
            style_a = [x[i] for x in styles[:cut_layer_b]]
            style_b = [x[j] for x in styles[cut_layer_b:cut_layer_e]]
            style_c = [x[i] for x in styles[cut_layer_e:]]
            style = style_a + style_b + style_c
            torch.cuda.manual_seed_all(110)
            style = torch.cat(style, dim=0)[None, ...]
            rec = model.generator.decode(style, cfg.MODEL.LAYER_COUNT - 1, 0.7)
            place(canvas, rec[0], 2 + i, 2 + j)

    save_image(torch.Tensor(canvas), 'reconstruction.png')
Example #8
# The webcam and face-detection helpers (WebCamInit, TakeWebCamIamge,
# getCamImage, DetectFace, DrawFaces, DrawHUD) and the bimpy context `ctx`
# are defined earlier in the original script.
WebCamInit()
TakeWebCamIamge()
image, width, height = getCamImage()
dirvecx = 0
dirvecy = 0
dirvecxnorm = 0
dirvecynorm = 0
dirvecmag = 0
dirvecmagnorm = 0

#if (dirvecx<0):

##################################################
#UI
im = bimpy.Image(image)

f1 = bimpy.Float()
f2 = bimpy.Float()
f3 = bimpy.Float()

facestr = ""

while not ctx.should_close():
    with ctx:
        TakeWebCamIamge()
        image, width, height = getCamImage()
        faces = DetectFace(image)
        if len(faces) > 0:
            facestr, headx, heady = DrawFaces(image, faces)
            # The source snippet is truncated here; the DrawHUD arguments
            # below are assumed for illustration.
            (dirvecx, dirvecy, dirvecxnorm, dirvecynorm,
             dirvecmag, dirvecmagnorm) = DrawHUD(image, headx, heady,
                                                 width, height)
Example #9

ctx = bimpy.Context()

ctx.init(800, 800, "Image")

im = None

while not ctx.should_close():
    with ctx:
        bimpy.text(
            "Example showing how to display images from PIL Image and numpy array"
        )

        if bimpy.button("RGB image. Cat"):
            im = bimpy.Image(download_image(img_urls[0]))
        if bimpy.button("RGB image. Turtle"):
            im = bimpy.Image(download_image(img_urls[1]))
        if bimpy.button("RGB with alpha"):
            im = bimpy.Image(download_image(img_urls[2]))

        if bimpy.button("Generate mandelbrot set"):
            import numpy as np

            m = 480
            n = 320

            x = np.linspace(-2, 1, num=m).reshape((1, m))
            y = np.linspace(-1, 1, num=n).reshape((n, 1))
            C = np.tile(x, (n, 1)) + 1j * np.tile(y, (1, m))

            # The source snippet is truncated here; a standard escape-time
            # completion is assumed below.
            Z = np.zeros_like(C)
            M = np.zeros((n, m), dtype=np.uint8)
            for _ in range(50):
                mask = np.abs(Z) <= 2
                Z[mask] = Z[mask] ** 2 + C[mask]
                M[mask] += 5              # brightness grows with escape time
            im = bimpy.Image(np.stack([M, M, M], axis=-1))

        if im is not None:
            bimpy.image(im)
Example #10
def scale_image(scale, frameno):
    # Nested helper excerpted from the preview tool in Example #11; `args`,
    # `image`, `frames`, and `frame_list` come from the enclosing scope.
    scale_w = scale if not args.dw else scale * 2
    current = image.resize((args.width * scale_w, args.height * scale),
                           box=frames[frame_list[frameno]],
                           resample=0)
    return bimpy.Image(current)
Example #11
def main():
    # Requires: argparse.ArgumentParser, time.time, os, bimpy and PIL.Image;
    # the DEF_* constants and the with_retry decorator are defined elsewhere
    # in the original script.

    parser = ArgumentParser(description="Preview animations")

    parser.add_argument("--version",
                        action="version",
                        version="%(prog)s " + __version__)
    parser.add_argument("--width",
                        dest="width",
                        type=int,
                        default=DEF_WIDTH,
                        help="frame width (default: %s)" % DEF_WIDTH)
    parser.add_argument("--height",
                        dest="height",
                        type=int,
                        default=DEF_HEIGHT,
                        help="frame height (default: %s)" % DEF_HEIGHT)
    parser.add_argument("--scale",
                        dest="scale",
                        type=int,
                        default=DEF_SCALE,
                        help="scale preview (default: %s)" % DEF_SCALE)
    parser.add_argument("--double-w",
                        dest="dw",
                        action="store_true",
                        help="double width for 2:1")
    parser.add_argument(
        "--mtime",
        dest="mtime",
        type=int,
        default=DEF_MTIME,
        help="seconds between checks for changes (default: %s)" % DEF_MTIME)

    parser.add_argument("image", help="image to convert")

    args = parser.parse_args()

    def load_image(filename):
        @with_retry
        def load():
            return Image.open(filename).convert("RGB")

        try:
            image = load()
        except IOError:
            parser.error("failed to open the image")

        (w, h) = image.size

        if w % args.width or h % args.height:
            parser.error("%s size is not multiple of tile size (%s, %s)" %
                         (filename, args.width, args.height))

        frames = []
        for y in range(0, h, args.height):
            for x in range(0, w, args.width):
                frames.append((x, y, x + args.width, y + args.height))

        return image, frames

    image, frames = load_image(args.image)
    frame_list = list(range(len(frames)))

    def scale_image(scale, frameno):
        scale_w = scale if not args.dw else scale * 2
        current = image.resize((args.width * scale_w, args.height * scale),
                               box=frames[frame_list[frameno]],
                               resample=0)
        return bimpy.Image(current)

    ctx = bimpy.Context()

    ctx.init(320, 420, "Preview animation")
    orig = bimpy.Image(image)
    scale = bimpy.Int(args.scale)
    fps = bimpy.Int(args.scale)  # note: seeded from --scale; the parser defines no FPS option
    frame_list_str = bimpy.String(','.join(map(str, frame_list)))
    im = scale_image(scale.value, 0)

    cur_frame = 0
    paused = False
    start_time = time()
    check_mtime = time()
    last_mtime = os.stat(args.image).st_mtime
    while not ctx.should_close():

        if time() - check_mtime > args.mtime:
            if os.stat(args.image).st_mtime != last_mtime:
                last_mtime = os.stat(args.image).st_mtime
                image, frames = load_image(args.image)
                cur_frame = 0
                start_time = time()
                if any(f >= len(frames) for f in frame_list):
                    frame_list = list(range(len(frames)))
                    frame_list_str = bimpy.String(','.join(map(
                        str, frame_list)))

        ctx.new_frame()
        bimpy.set_next_window_pos(bimpy.Vec2(10, 10), bimpy.Condition.Once)
        bimpy.set_next_window_size(bimpy.Vec2(300, 400), bimpy.Condition.Once)
        bimpy.begin("Image: %s" % args.image)

        if not paused:
            if time() - start_time >= 1. / fps.value:
                start_time = time()
                cur_frame += 1
                if cur_frame == len(frame_list):
                    cur_frame = 0
                im = scale_image(scale.value, cur_frame)

        bimpy.image(orig)
        bimpy.image(im)
        bimpy.text("Frame: %02d" % frame_list[cur_frame])

        if bimpy.slider_int("Scale", scale, 1, 20):
            im = scale_image(scale.value, cur_frame)
        if bimpy.slider_int("FPS", fps, 1, 30):
            start_time = time()
            cur_frame = 0
        if bimpy.input_text("Frames", frame_list_str, 64,
                            bimpy.InputTextFlags.EnterReturnsTrue):
            try:
                new_frame_list = [
                    int(i.strip()) for i in frame_list_str.value.split(",")
                ]
                frame_list = new_frame_list
                start_time = time()
                cur_frame = 0
            except Exception as ex:
                print("Error parsing frame list: %s" % ex)

        if bimpy.button("Play" if paused else "Pause"):
            paused = not paused

        bimpy.end()
        ctx.render()
Example #12
        # Excerpt from inside the main render loop of the original script;
        # q, img_urls, refresh, bimpy_imgdict, b_col_count, COL_COUNT and the
        # download-progress variables are defined in the surrounding code.
        try:
            (url, img) = q.get(block=False)
        except queue.Empty:
            pass
        else:
            if url is None or img is None:
                continue  # skip sentinel entries; `pass` in the original made this check a no-op
            if refresh:
                bimpy_imgdict = {}
                if len(img_urls) < COL_COUNT:
                    b_col_count.value = len(img_urls)
                else:
                    b_col_count.value = COL_COUNT
                refresh = False
            # Image.ANTIALIAS was removed in Pillow 10; newer code uses Image.LANCZOS.
            bimpy_imgdict[url] = bimpy.Image(
                img.resize((64, 64), Image.ANTIALIAS))
            q.task_done()

        if img_urls:
            bimpy.set_next_window_pos(bimpy.Vec2(625, 20),
                                      bimpy.Condition.Once)
            bimpy.set_next_window_size(bimpy.Vec2(532, 600),
                                       bimpy.Condition.Once)
            bimpy.begin('Collage', bimpy.Bool(True),
                        bimpy.WindowFlags.HorizontalScrollbar)
            if imgs_downloading:
                bimpy.text("Downloading thumbnails")
                bimpy.progress_bar(percent_downloaded, bimpy.Vec2(-1, 0),
                                   f"{imgs_downloaded}/{imgs_total}")
            else:
                bimpy.text("Collage columns")
Example #13
    def set_im(self, im):
        # Wrap the given PIL image (or array) as a bimpy.Image and store it.
        self.im = bimpy.Image(im)