Example #1
def gen_images(latents,
               truncation_psi_val,
               outfile=None,
               display=False,
               labels=None,
               randomize_noise=False,
               is_validation=True,
               network=None,
               numpy=False):
    if outfile:
        Path(outfile).parent.mkdir(exist_ok=True, parents=True)

    if network is None:
        network = Gs
    n = latents.shape[0]
    grid_size = get_grid_size(n)
    drange_net = [-1, 1]
    with tflex.device('/gpu:0'):
        result = network.run(latents,
                             labels,
                             truncation_psi_val=truncation_psi_val,
                             is_validation=is_validation,
                             randomize_noise=randomize_noise,
                             minibatch_size=sched.minibatch_gpu)
        result = result[:, 0:3, :, :]
        img = misc.convert_to_pil_image(
            misc.create_image_grid(result, grid_size), drange_net)
        if outfile is not None:
            img.save(outfile)
        if display:
            f = BytesIO()
            img.save(f, 'png')
            IPython.display.display(IPython.display.Image(data=f.getvalue()))
    return result if numpy else img
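
The helper get_grid_size is called above but not shown. A minimal sketch of what it presumably does, picking a near-square grid for n images (the exact (width, height) convention is an assumption):

import numpy as np

def get_grid_size(n):
    # Hypothetical helper: choose a near-square (grid_w, grid_h) layout
    # large enough to hold n images.
    grid_w = max(int(np.ceil(np.sqrt(n))), 1)
    grid_h = max((n - 1) // grid_w + 1, 1)
    return (grid_w, grid_h)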
Example #2
    def gen():
        proj = loadProjector()
        #proj.regularize_noise_weight = regularizeNoiseWeight
        proj.start([image_array])
        for step in proj.runSteps(steps):
            print('\rProjecting: %d / %d' % (step, steps), end='', flush=True)

            if step % yieldInterval == 0:
                dlatents = proj.get_dlatents()
                images = proj.get_images()
                pilImage = misc.convert_to_pil_image(
                    misc.create_image_grid(images), drange=[-1, 1])

                fp = io.BytesIO()
                pilImage.save(fp, PIL.Image.registered_extensions()['.png'])

                imgUrl = 'data:image/png;base64,%s' % base64.b64encode(
                    fp.getvalue()).decode('ascii')

                #latentsList = list(dlatents.reshape((-1, dlatents.shape[2])))
                #latentCodes = list(map(lambda latents: latentCode.encodeFloat32(latents).decode('ascii'), latentsList))
                latentCodes = latentCode.encodeFixed16(
                    dlatents.flatten()).decode('ascii')

                yield json.dumps(
                    dict(step=step, img=imgUrl,
                         latentCodes=latentCodes)) + '\n\n'

        print('\rProjecting finished.%s' % (' ' * 8))
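
gen() is defined as a nested generator, which suggests it is returned from a streaming HTTP handler: each yielded chunk is a newline-delimited JSON snapshot of the projection. A hedged sketch of such wiring with Flask (the route name and mimetype are assumptions):

import flask

app = flask.Flask(__name__)

@app.route('/project')
def project():
    # ... parse the request, build image_array, and define gen() as above ...
    # Stream each projection snapshot to the client as it is produced.
    return flask.Response(gen(), mimetype='text/event-stream')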
Example #3
def get_images(tags, seed=0, mu=0, sigma=0, truncation=None):
    print("Generating mammos...")

    Gs_kwargs = dnnlib.EasyDict()
    Gs_kwargs.output_transform = dict(func=tflib.convert_images_to_uint8,
                                      nchw_to_nhwc=True)
    Gs_kwargs.randomize_noise = False
    if truncation is not None:
        Gs_kwargs.truncation_psi = truncation
    rnd = np.random.RandomState(seed)  # unused: each z below seeds its own RandomState

    all_seeds = [seed] * batch_size
    all_z = np.stack([
        np.random.RandomState(seed).randn(*tflex.Gs.input_shape[1:])
        for seed in all_seeds
    ])  # [minibatch, component]
    print(all_z.shape)

    drange_net = [-1, 1]
    with tflex.device('/gpu:0'):
        result = tflex.Gs.run(all_z,
                              None,
                              is_validation=True,
                              randomize_noise=False,
                              minibatch_size=sched.minibatch_gpu)
        if result.shape[1] > 3:
            final = result[:, 3, :, :]  # extra channel beyond RGB (e.g. alpha); currently unused
        else:
            final = None
        result = result[:, 0:3, :, :]  # keep only the RGB channels
        img = misc.convert_to_pil_image(misc.create_image_grid(result, (1, 1)),
                                        drange_net)
        img.save('mammos.png')
        return result, img
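
A hypothetical invocation: batch_size, sched, and tflex.Gs must already exist as module globals, and only seed and truncation actually affect the output (tags, mu, and sigma are accepted but unused):

# Hypothetical call; names of the surrounding globals are assumptions.
result, img = get_images(tags=None, seed=42, truncation=0.7)
print(result.shape)  # (batch_size, 3, H, W), float values in [-1, 1]
img.save('mammos_seed42.png')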
Example #4
def generate():
    latentsStr = flask.request.args.get("latents")
    latentsStrX = flask.request.args.get("xlatents")
    psi = float(flask.request.args.get("psi", 0.5))
    # use_noise = bool(flask.request.args.get('use_noise', True))
    randomize_noise = int(flask.request.args.get("randomize_noise", 0))
    fromW = int(flask.request.args.get("fromW", 0))

    global model_name
    global g_Session
    global g_dLatentsIn
    # print('g_Session.1:', g_Session)

    fetched_model_name = flask.request.args.get("model_name", "ffhq")
    if model_name != fetched_model_name:
        model_name = fetched_model_name
    gs, synthesis = loadGs()

    latent_len = gs.input_shape[1]
    if latentsStrX:
        latents = latentCode.decodeFixed16(latentsStrX, g_dLatentsIn.shape[0])
    else:
        latents = latentCode.decodeFloat32(latentsStr, latent_len)

    t0 = time.time()

    # Generate image.
    fmt = dict(func=dnnlib.tflib.convert_images_to_uint8, nchw_to_nhwc=True)
    with g_Session.as_default():
        if fromW != 0:
            # print('latentsStr:', latentsStr)
            # print('shapes:', g_dLatentsIn.shape, latents.shape)

            if latents.shape[0] < g_dLatentsIn.shape[0]:
                latents = np.tile(latents,
                                  g_dLatentsIn.shape[0] // latents.shape[0])
            images = dnnlib.tflib.run(synthesis, {g_dLatentsIn: latents})
            image = misc.convert_to_pil_image(misc.create_image_grid(images),
                                              drange=[-1, 1])
        else:
            latents = latents.reshape([1, latent_len])
            images = gs.run(
                latents,
                None,
                truncation_psi=psi,
                randomize_noise=randomize_noise != 0,
                output_transform=fmt,
            )
            image = PIL.Image.fromarray(images[0], "RGB")

    print("generation cost:", time.time() - t0)

    # encode to PNG
    fp = io.BytesIO()
    image.save(fp, PIL.Image.registered_extensions()[".png"])

    return flask.Response(fp.getvalue(), mimetype="image/png")
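
A hedged client-side sketch for exercising this handler (the host, port, and /generate route are assumptions, since the Flask route decorator is not shown):

import requests

# Hypothetical request; latents_str must be encoded the way
# latentCode.decodeFloat32 expects.
resp = requests.get('http://localhost:5000/generate',
                    params={'latents': latents_str, 'psi': 0.7,
                            'model_name': 'ffhq'})
with open('generated.png', 'wb') as f:
    f.write(resp.content)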
Example #5
    def make_frame(t):
        frame_idx = int(np.clip(np.round(t * mp4_fps), 0, num_frames - 1))
        latents = all_latents[frame_idx]
        labels = np.zeros([latents.shape[0], 0], np.float32)
        images = Gs.run(latents, None, truncation_psi=psi, randomize_noise=randomize_noise, output_transform=fmt)

        images = images.transpose(0, 3, 1, 2)  # NHWC -> NCHW
        grid = create_image_grid(images, grid_size).transpose(1, 2, 0)  # CHW -> HWC
        # if image_zoom > 1:
        #     grid = scipy.ndimage.zoom(grid, [image_zoom, image_zoom, 1], order=0)
        # if grid.shape[2] == 1:
        #     grid = grid.repeat(3, 2)  # grayscale => RGB
        return grid
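
make_frame follows moviepy's frame-callback convention: it receives a time t in seconds and returns an HWC uint8 frame. A minimal sketch of the wiring, assuming duration_sec and mp4_fps are defined as in the rest of the script:

import moviepy.editor

# Hedged sketch; duration_sec, mp4_fps, and the output filename are assumptions.
clip = moviepy.editor.VideoClip(make_frame, duration=duration_sec)
clip.write_videofile('interpolation.mp4', fps=mp4_fps, codec='libx264')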
Example #6
def main():
    t0 = time.time()
    print('t0:', t0)

    # Initialize TensorFlow.
    tflib.init_tf()  # 0.82s

    print('t1:', time.time() - t0)

    # Load pre-trained network.
    with open('./models/stylegan2-ffhq-config-f.pkl', 'rb') as f:
        print('t2:', time.time() - t0)

        _G, _D, Gs = pickle.load(f)  # 13.09s

        print('t3:', time.time() - t0)

    with open('./models/vgg16_zhang_perceptual.pkl', 'rb') as f:
        lpips = pickle.load(f)

        print('t4:', time.time() - t0)

    proj = Projector()
    proj.set_network(Gs, lpips)

    image = PIL.Image.open('./images/example.png')
    #image = image.resize((Di.input_shape[2], Di.input_shape[3]), PIL.Image.ANTIALIAS)
    image_array = np.array(image).swapaxes(0, 2).swapaxes(1, 2)  # HWC -> CHW
    image_array = misc.adjust_dynamic_range(image_array, [0, 255], [-1, 1])

    print('t5:', time.time() - t0)

    proj.start([image_array])
    for step in proj.runSteps(1000):
        print('\rstep: %d' % step, end='', flush=True)
        if step % 10 == 0:
            results = proj.get_images()
            pilImage = misc.convert_to_pil_image(
                misc.create_image_grid(results), drange=[-1, 1])
            pilImage.save('./images/project-%d.png' % step)

    print('t6:', time.time() - t0)

    dlatents = proj.get_dlatents()
    noises = proj.get_noises()
    print('dlatents:', dlatents.shape)
    print('noises:', len(noises), noises[0].shape, noises[-1].shape)
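
A small follow-up sketch for persisting the recovered latents and noise buffers so the projection can be reused without re-running it (file names are assumptions):

import numpy as np

# Hypothetical save step; dlatents is an ndarray, noises a list of ndarrays.
np.save('./images/example_dlatents.npy', dlatents)
np.savez('./images/example_noises.npz', *noises)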
Example #7
    def make_frame(t):
        frame_idx = int(np.clip(np.round(t * mp4_fps), 0, num_frames - 1))
        latents = all_latents[frame_idx]
        labels = np.zeros([latents.shape[0], 0], np.float32)
        images = Gs.run(latents,
                        labels,
                        minibatch_size=minibatch_size,
                        num_gpus=1,
                        out_mul=127.5,
                        out_add=127.5,
                        out_shrink=image_shrink,
                        out_dtype=np.uint8,
                        truncation_psi=truncation_psi,
                        randomize_noise=randomize_noise)
        grid = misc.create_image_grid(images, grid_size).transpose(1, 2, 0)  # CHW -> HWC
        if image_zoom > 1:
            grid = scipy.ndimage.zoom(grid, [image_zoom, image_zoom, 1], order=0)
        if grid.shape[2] == 1:
            grid = grid.repeat(3, 2)  # grayscale => RGB
        return grid
Example #8
def create_interp_video_row(images,
                            interp_steps,
                            save_path=None,
                            norm=False,
                            px=0,
                            py=0,
                            transpose=False):
    """
    Creates (and optionally saves) a row showing side-by-side videos.
    We use this to generate a comparison of, e.g., interpolating a z component from -2 to +2
    for "batch_size" different samples of the fixed z components.

    :param images: Tensor of shape (interp_steps*batch_size, C, H, W) (rank 4 tensor)
    :param interp_steps: Number of frames in each video
    :param save_path: If specified, directly saves the video with the specified file path
    :param norm: If True, normalizes images before returning them (overridden by save_path)
    :return: If norm is True: (interp_steps, H, W * batch_size, C) tensor in uint8 [0, 255] (frames of video)
             If norm is False: (interp_steps, C, H, W * batch_size) tensor in float [-1, 1] (frames of video)
    """
    N = images.shape[0]
    bs = N // interp_steps
    assert N == bs * interp_steps
    frames = []
    grid_size = (1, bs) if transpose else (bs, 1)
    for i in range(interp_steps):  # Create each frame of the video:
        ixs = list(range(i, N, interp_steps))
        frame_i = create_image_grid(images[ixs],
                                    grid_size=grid_size,
                                    pad_val=-1,
                                    px=px,
                                    py=py)
        frames.append(frame_i)
    frames = np.stack(frames)
    if norm or save_path:  # Normalize the frames before returning?
        frames = prepro_imgs(frames)
    if save_path:  # Optionally directly save the frames as a video with 24fps
        imageio.mimsave(save_path, frames, duration=1 / 24)
    return frames
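
A hypothetical usage with random stand-in images (shapes and the GIF path are assumptions; for GIF output, imageio interprets duration as seconds per frame):

import numpy as np

# 16 interpolation frames for each of 4 samples, 3x64x64, values in [-1, 1].
fake = np.random.uniform(-1, 1, size=(16 * 4, 3, 64, 64)).astype(np.float32)
frames = create_interp_video_row(fake, interp_steps=16, save_path='row.gif')
print(frames.shape)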
Example #9
    def make_frame(t):
        frame_idx = int(np.clip(np.round(t * mp4_fps), 0, num_frames - 1))

        # TIP: Debug run_generator to see what flows in here, especially len(zx)
        z_idx = frame_idx
        z = points[z_idx]

        # Original loop
        if isinstance(z, list):
            z = np.array(z).reshape(1, 512)
        elif isinstance(z, np.ndarray):
            z = z.reshape(1, 512)  # reshape returns a new array; the result must be assigned

        Gs_kwargs.truncation_psi = psi
        noise_rnd = np.random.RandomState(1)  # fix noise
        tflib.set_vars({var: noise_rnd.randn(*var.shape.as_list()) for var in noise_vars})  # [height, width]
        images = Gs.run(z, None, **Gs_kwargs)  # [minibatch, height, width, channel]

        # @todo Get rid of the grid step when we only need a single image
        images = images.transpose(0, 3, 1, 2)  # NHWC -> NCHW
        grid = create_image_grid(images, [1, 1]).transpose(1, 2, 0)  # HWC
        # if grid.shape[2] == 1:
        #     grid = grid.repeat(3, 2)  # grayscale => RGB
        return grid
Example #10
        try:
          channel = [x for x in list(client.get_all_channels()) if channel_name in x.name]
          assert len(channel) == 1
          channel = channel[0]
          print(channel)
          await send_picture(channel, image, kind=kind, name=name, text=text)
        finally:
          await client.logout()

    #@client.event
    #async def on_message(message):
    #    print('Message from {0.author}: {0.content}'.format(message))

    client.run(token)

all_labels = np.array([[1.0 if i == j else 0.0 for j in range(1000)] for i in range(1000)], dtype=np.float32)
n = grid_image_size
labels = [all_labels[i] for i in range(n)]
labels2 = [all_labels[0] for i in range(n)]

for ckpt in tf.train.checkpoints_iterator(model_dir, 1.0):
    print('posting ' + ckpt)
    saver.restore(sess, ckpt)
    seed = np.random.randint(10000)
    for i in range(seed, seed + count):
        print('------- %d -------' % i)
        result = grab_grid(i, n=n, labels=labels, numpy=True)
        post_picture(channel, misc.create_image_grid(result, get_grid_size(n)),
                     "`" + ckpt + ' seed %d`' % i)
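
Side note on the label setup above: the nested comprehension builds a 1000x1000 one-hot matrix, which is simply an identity matrix and can be written more directly:

import numpy as np

# Equivalent construction: row i is the one-hot label for class i.
all_labels = np.eye(1000, dtype=np.float32)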


Example #11
def make_h264_mp4_video(G,
                        interp_z,
                        interp_labels,
                        minibatch_size,
                        interp_steps,
                        interp_batch_size,
                        vis_path,
                        nz_per_vid=1,
                        fps=60,
                        perfect_loop=False,
                        use_pixel_norm=True,
                        n_frames=7,
                        stills_only=False,
                        pad_x=0,
                        pad_y=0,
                        transpose=False):
    """
    Generates interpolation videos using G and interp_z, then saves them in "vis_path".
    This function returns a *much* higher quality video than make_and_save_interpolation_gifs.
    Additionally, if nz_per_vid > 1, this function "stitches" multiple z rows together to
    make visualization easier. The main entry point for using this function is visualize.py.
    """
    assert nz_per_vid > 0
    duration = (1 + perfect_loop) * interp_steps / fps
    nz = interp_z.shape[1]
    interp_grid_fakes = G.run(interp_z,
                              interp_labels,
                              is_validation=True,
                              minibatch_size=minibatch_size,
                              normalize_latents=use_pixel_norm)
    if n_frames > 0:  # Save frames before we make the full video:
        save_flattened_frames(interp_grid_fakes, interp_steps,
                              interp_batch_size, nz, n_frames, vis_path)
        if stills_only:
            return
    interp_grid_fakes = create_multiple_interp_video_rows(interp_grid_fakes,
                                                          interp_steps,
                                                          interp_batch_size,
                                                          norm=False,
                                                          px=pad_x,
                                                          py=pad_y // 2,
                                                          transpose=transpose)
    grid_size = (nz_per_vid, 1) if transpose else (1, nz_per_vid)
    print(f'Saving mp4 visualizations to {vis_path}.')
    for z_start in range(0, nz, nz_per_vid):  # Iterate over the mp4s we are going to create:
        z_end = z_start + nz_per_vid
        row_videos = interp_grid_fakes[z_start:z_end]
        stitched_video = []  # Represents the "final" mp4 video we are going to save
        for i in range(interp_steps):  # iterate over frames
            row_frames = [row_video[i] for row_video in row_videos]
            row_frames = np.stack(row_frames)
            stitched_frame = create_image_grid(row_frames,
                                               grid_size=grid_size,
                                               px=0,
                                               py=pad_y // 2,
                                               pad_val=-1)
            stitched_video.append(stitched_frame)
        if perfect_loop:  # Add a reversed copy of the video onto the end so it "loops"
            stitched_video = stitched_video + stitched_video[::-1]
        stitched_video.append(stitched_video[-1])  # Extra frame at the end
        stitched_video = prepro_imgs(np.asarray(stitched_video))  # Convert to np for normalization
        stitched_video = list(stitched_video)  # Convert back to a list for making the video
        video_path = os.path.join(vis_path,
                                  f'z{z_start:03}_to_z{z_end:03}.mp4')
        save_video(stitched_video, duration, fps, video_path)
    print(f'Done. Visualizations can be found in {vis_path}.')
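
A hypothetical invocation (all argument values and the shapes of interp_z / interp_labels are assumptions; per the docstring, visualize.py is the intended entry point):

# Hypothetical call; G, interp_z, and interp_labels come from the caller.
make_h264_mp4_video(G, interp_z, interp_labels,
                    minibatch_size=8,
                    interp_steps=60,
                    interp_batch_size=4,
                    vis_path='visualizations',
                    nz_per_vid=4,
                    fps=60,
                    perfect_loop=True)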