def generate_training_video(run_id, duration_sec=20.0, time_warp=1.5, mp4=None, mp4_fps=30, mp4_codec='libx265', mp4_bitrate='16M'):
    src_result_subdir = misc.locate_result_subdir(run_id)
    if mp4 is None:
        mp4 = os.path.basename(src_result_subdir) + '-train.mp4'

    # Parse log.
    times = []
    snaps = [] # [(png, kimg, lod), ...]
    with open(os.path.join(src_result_subdir, 'log.txt'), 'rt') as log:
        for line in log:
            k = re.search(r'kimg ([\d\.]+) ', line)
            l = re.search(r'lod ([\d\.]+) ', line)
            t = re.search(r'time (\d+d)? *(\d+h)? *(\d+m)? *(\d+s)? ', line)
            if k and l and t:
                k = float(k.group(1))
                l = float(l.group(1))
                t = [int(t.group(i)[:-1]) if t.group(i) else 0 for i in range(1, 5)]
                t = t[0] * 24*60*60 + t[1] * 60*60 + t[2] * 60 + t[3]
                png = os.path.join(src_result_subdir, 'fakes%06d.png' % int(np.floor(k)))
                if os.path.isfile(png):
                    times.append(t)
                    snaps.append((png, k, l))
    assert len(times)
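    # A hypothetical log line the three regexes above would match:
    #   tick 12   kimg 480.5   lod 3.00   ...   time 1d 2h 3m 4s   ...
    # parsing to kimg=480.5, lod=3.0, and a wallclock of
    # 1*86400 + 2*3600 + 3*60 + 4 = 93784 seconds.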

    # Frame generation func for moviepy.
    png_cache = [None, None] # [png, img]
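    # make_frame maps video time onto training wallclock with a power curve:
    # wallclock(t) = (t / duration_sec) ** time_warp * times[-1]. With
    # time_warp > 1 the video dwells on early training, where progressive
    # growing changes the output fastest; e.g. at time_warp=1.5 the video
    # midpoint shows roughly 0.5**1.5 ~ 35% of total training time. bisect
    # then picks the latest snapshot taken at or before that wallclock.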
    def make_frame(t):
        wallclock = ((t / duration_sec) ** time_warp) * times[-1]
        png, kimg, lod = snaps[max(bisect.bisect(times, wallclock) - 1, 0)]
        if png_cache[0] == png:
            img = png_cache[1]
        else:
            img = scipy.misc.imread(png)
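            # Note: scipy.misc.imread was removed in SciPy 1.2; on newer
            # SciPy, imageio.imread is the suggested drop-in replacement.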
            # Convert grayscale to RGB; MoviePy expects 3-channel frames.
            if img.ndim == 2:
                img = np.stack([img, img, img], axis=-1)
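            # Downscale with repeated 2x2 box filtering until the frame fits
            # in 1920x1080: reshape splits each spatial axis into (half, 2)
            # and the mean over axes (1, 3) averages each 2x2 block.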
            while img.shape[1] > 1920 or img.shape[0] > 1080:
                img = img.astype(np.float32).reshape(img.shape[0]//2, 2, img.shape[1]//2, 2, -1).mean(axis=(1,3))
            png_cache[:] = [png, img]
        img = misc.draw_text_label(img, 'lod %.2f' % lod, 16, img.shape[0]-4, alignx=0.0, aligny=1.0)
        img = misc.draw_text_label(img, misc.format_time(int(np.rint(wallclock))), img.shape[1]//2, img.shape[0]-4, alignx=0.5, aligny=1.0)
        img = misc.draw_text_label(img, '%.0f kimg' % kimg, img.shape[1]-16, img.shape[0]-4, alignx=1.0, aligny=1.0)
        return img

    # Generate video.
    import moviepy.editor # pip install moviepy
    result_subdir = misc.create_result_subdir(config.result_dir, config.desc)
    moviepy.editor.VideoClip(make_frame, duration=duration_sec).write_videofile(os.path.join(result_subdir, mp4), fps=mp4_fps, codec=mp4_codec, bitrate=mp4_bitrate)
    open(os.path.join(result_subdir, '_done.txt'), 'wt').close()
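# Hypothetical invocation (run id and codec are placeholders; requires
# moviepy and a run directory containing log.txt and fakes*.png snapshots):
#   generate_training_video(42, duration_sec=30.0, time_warp=2.0,
#                           mp4_fps=60, mp4_codec='libx264')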
def anomaly_detection_encoder(run_id,
                              test_data_folder,
                              log,
                              test_batch_size=10,
                              start_at_batch=0,
                              end_at_batch=10):

    result_subdir = misc.locate_result_subdir(run_id)
    snapshot_pkls = misc.list_network_pkls(result_subdir, include_final=True)
    dataset_obj, mirror_augment = misc.load_dataset_for_previous_run(
        result_subdir, verbose=True, shuffle_mb=0)
    print('# snapshot_pkls: ' + str(len(snapshot_pkls)))

    with tf.Graph().as_default(), tfutil.create_session(
            config.tf_config).as_default():
        # Load network from specific run.
        G, D, Gs, E = misc.load_pkl(snapshot_pkls[-1])

        # Take off the requirement of the generator having labels
        Ga = tfutil.Network('G_anomaly',
                            num_channels=Gs.output_shapes[0][1],
                            resolution=Gs.output_shapes[0][2],
                            label_size=dataset_obj.label_size,
                            **config.G_anomaly)
        Ga.copy_vars_from(Gs)

        print("Initializing Anomaly detector")
        anoGAN = tfutil.AnomalyDetectorEncoder(config,
                                               Ga,
                                               E,
                                               test_data_folder,
                                               test_batch_size=test_batch_size)
        print('# AnoGAN test data names: ' + str(len(anoGAN.test_data_names)))

        for batch in range(len(anoGAN.filename_batches)):
            if batch < start_at_batch:
                continue
            if batch >= end_at_batch:  # treat end_at_batch as exclusive
                break
            test_data = anoGAN.preprocess_img(anoGAN.filename_batches[batch])
            test_input = test_data
            test_name = anoGAN.filename_batches[batch]
            anoGAN.find_closest_match(test_input, test_name)
            print(f'Batch {batch} complete.')
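# Hypothetical invocation (run id and folder are placeholders; expects a
# snapshot pkl containing (G, D, Gs, E) and config.G_anomaly to be defined):
#   anomaly_detection_encoder(42, 'datasets/anomaly_test', 'anomaly.txt',
#                             test_batch_size=10, end_at_batch=5)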
def evaluate_metrics(run_id, log, metrics, num_images, real_passes, minibatch_size=None):
    metric_class_names = {
        'swd':      'metrics.sliced_wasserstein.API',
        'fid':      'metrics.frechet_inception_distance.API',
        'is':       'metrics.inception_score.API',
        'msssim':   'metrics.ms_ssim.API',
    }

    # Locate training run and initialize logging.
    result_subdir = misc.locate_result_subdir(run_id)
    snapshot_pkls = misc.list_network_pkls(result_subdir, include_final=False)
    assert len(snapshot_pkls) >= 1
    log_file = os.path.join(result_subdir, log)
    print('Logging output to', log_file)
    misc.set_output_log_file(log_file)

    # Initialize dataset and select minibatch size.
    dataset_obj, mirror_augment = misc.load_dataset_for_previous_run(result_subdir, verbose=True, shuffle_mb=0)
    if minibatch_size is None:
        minibatch_size = np.clip(8192 // dataset_obj.shape[1], 4, 256)
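        # Heuristic: minibatch scales inversely with resolution
        # (dataset_obj.shape is [channels, height, width]), clamped to
        # [4, 256]; e.g. 256-pixel images give 8192 // 256 = 32 per batch.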

    # Initialize metrics.
    metric_objs = []
    for name in metrics:
        class_name = metric_class_names.get(name, name)
        print('Initializing %s...' % class_name)
        class_def = tfutil.import_obj(class_name)
        image_shape = [3] + dataset_obj.shape[1:]
        obj = class_def(num_images=num_images, image_shape=image_shape, image_dtype=np.uint8, minibatch_size=minibatch_size)
        tfutil.init_uninited_vars()
        mode = 'warmup'
        obj.begin(mode)
        for idx in range(10):
            obj.feed(mode, np.random.randint(0, 256, size=[minibatch_size]+image_shape, dtype=np.uint8))
        obj.end(mode)
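        # The 'warmup' pass above runs random uint8 noise through the metric
        # so graph construction and device allocation happen now rather than
        # inside the timed evaluation passes below; its results are discarded.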
        metric_objs.append(obj)

    # Print table header.
    print()
    print('%-10s%-12s' % ('Snapshot', 'Time_eval'), end='')
    for obj in metric_objs:
        for name, fmt in zip(obj.get_metric_names(), obj.get_metric_formatting()):
            print('%-*s' % (len(fmt % 0), name), end='')
    print()
    print('%-10s%-12s' % ('---', '---'), end='')
    for obj in metric_objs:
        for fmt in obj.get_metric_formatting():
            print('%-*s' % (len(fmt % 0), '---'), end='')
    print()

    # Feed in reals. Allocate labels up front so the snapshot loop below can
    # reuse them even when real_passes == 0.
    labels = np.zeros([num_images, dataset_obj.label_size], dtype=np.float32)
    for title, mode in [('Reals', 'reals'), ('Reals2', 'fakes')][:real_passes]:
        print('%-10s' % title, end='')
        time_begin = time.time()
        [obj.begin(mode) for obj in metric_objs]
        for begin in range(0, num_images, minibatch_size):
            end = min(begin + minibatch_size, num_images)
            images, labels[begin:end] = dataset_obj.get_minibatch_np(end - begin)
            if mirror_augment:
                images = misc.apply_mirror_augment(images)
            if images.shape[1] == 1:
                images = np.tile(images, [1, 3, 1, 1]) # grayscale => RGB
            [obj.feed(mode, images) for obj in metric_objs]
        results = [obj.end(mode) for obj in metric_objs]
        print('%-12s' % misc.format_time(time.time() - time_begin), end='')
        for obj, vals in zip(metric_objs, results):
            for val, fmt in zip(vals, obj.get_metric_formatting()):
                print(fmt % val, end='')
        print()

    # Evaluate each network snapshot.
    for snapshot_idx, snapshot_pkl in enumerate(reversed(snapshot_pkls)):
        prefix = 'network-snapshot-'
        postfix = '.pkl'
        snapshot_name = os.path.basename(snapshot_pkl)
        assert snapshot_name.startswith(prefix) and snapshot_name.endswith(postfix)
        snapshot_kimg = int(snapshot_name[len(prefix) : -len(postfix)])
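        # e.g. 'network-snapshot-008000.pkl' -> snapshot_kimg == 8000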

        print('%-10d' % snapshot_kimg, end='')
        mode = 'fakes'
        [obj.begin(mode) for obj in metric_objs]
        time_begin = time.time()
        with tf.Graph().as_default(), tfutil.create_session(config.tf_config).as_default():
            G, D, Gs = misc.load_pkl(snapshot_pkl)
            for begin in range(0, num_images, minibatch_size):
                end = min(begin + minibatch_size, num_images)
                latents = misc.random_latents(end - begin, Gs)
                images = Gs.run(latents, labels[begin:end], num_gpus=config.num_gpus, out_mul=127.5, out_add=127.5, out_dtype=np.uint8)
                if images.shape[1] == 1:
                    images = np.tile(images, [1, 3, 1, 1]) # grayscale => RGB
                [obj.feed(mode, images) for obj in metric_objs]
        results = [obj.end(mode) for obj in metric_objs]
        print('%-12s' % misc.format_time(time.time() - time_begin), end='')
        for obj, vals in zip(metric_objs, results):
            for val, fmt in zip(vals, obj.get_metric_formatting()):
                print(fmt % val, end='')
        print()
    print()
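# Hypothetical invocation (run id is a placeholder; metric keys as mapped in
# metric_class_names above):
#   evaluate_metrics(42, log='metric-fid.txt', metrics=['fid'],
#                    num_images=10000, real_passes=1)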
def evaluate_metrics_swd_distributions_training_trad_prog(
        run_id,
        network_dir_conv,
        network_dir_prog,
        log,
        metrics,
        num_images_per_group,
        num_groups,
        real_passes,
        minibatch_size=None):
    metric_class_names = {
        'swd_distri_training_trad_prog':
        'metrics.swd_distributions_training_trad_prog.API',
    }
    # Locate training run and initialize logging.
    result_subdir = misc.locate_result_subdir(run_id)
    log_file = os.path.join(result_subdir, log)
    print('Logging output to', log_file)
    misc.set_output_log_file(log_file)

    # Initialize dataset and select minibatch size.
    dataset_obj, mirror_augment = misc.load_dataset_for_previous_run(
        result_subdir, verbose=True, shuffle_mb=0)

    # Initialize metrics.
    metric_objs = []
    for name in metrics:
        class_name = metric_class_names.get(name, name)
        print('Initializing %s...' % class_name)
        class_def = tfutil.import_obj(class_name)
        image_shape = [3] + dataset_obj.shape[1:]
        obj = class_def(image_shape=image_shape, image_dtype=np.uint8)
        tfutil.init_uninited_vars()
        metric_objs.append(obj)

    # Generate all images once, then feed them to every metric.
    mode = 'fakes'
    [obj.begin(mode) for obj in metric_objs]
    images_real, labels = dataset_obj.get_minibatch_np(
        num_groups * num_images_per_group)

    # Gs cannot generate more than ~3000 images per call, so generate the
    # fakes in 10 chunks; num_groups * num_images_per_group must therefore
    # be divisible by 10.
    assert (num_groups * num_images_per_group) % 10 == 0
    nn = num_groups * num_images_per_group // 10

    with tf.Graph().as_default(), tfutil.create_session(
            config.tf_config).as_default():
        G, D, Gs = misc.load_pkl(network_dir_conv)
        latents = misc.random_latents(num_groups * num_images_per_group, Gs)
        images = images_real
        for k in range(10):
            images_fake = Gs.run(latents[k * nn:(k + 1) * nn],
                                 labels[k * nn:(k + 1) * nn],
                                 num_gpus=config.num_gpus,
                                 out_mul=127.5,
                                 out_add=127.5,
                                 out_dtype=np.uint8)
            images = np.concatenate((images, images_fake), axis=0)

    with tf.Graph().as_default(), tfutil.create_session(
            config.tf_config).as_default():
        G, D, Gs = misc.load_pkl(network_dir_prog)
        latents = misc.random_latents(num_groups * num_images_per_group, Gs)
        for k in range(10):
            images_fake = Gs.run(latents[k * nn:(k + 1) * nn],
                                 labels[k * nn:(k + 1) * nn],
                                 num_gpus=config.num_gpus,
                                 out_mul=127.5,
                                 out_add=127.5,
                                 out_dtype=np.uint8)
            images = np.concatenate((images, images_fake), axis=0)

    if images.shape[1] == 1:
        images = np.tile(images, [1, 3, 1, 1])  # grayscale => RGB
    [obj.feed(mode, images, num_images_per_group, num_groups, result_subdir)
     for obj in metric_objs]
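# Hypothetical invocation (run id and pkl paths are placeholders):
#   evaluate_metrics_swd_distributions_training_trad_prog(
#       42, 'nets/conv.pkl', 'nets/prog.pkl', 'swd-distri.txt',
#       ['swd_distri_training_trad_prog'],
#       num_images_per_group=100, num_groups=10, real_passes=0)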
def evaluate_metrics_swd_distributions(run_id,
                                       log,
                                       metrics,
                                       num_images_per_group,
                                       num_groups,
                                       real_passes,
                                       minibatch_size=None):
    metric_class_names = {
        'swd_distri': 'metrics.swd_distributions.API',
    }
    # Locate training run and initialize logging.
    result_subdir = misc.locate_result_subdir(run_id)
    snapshot_pkls = misc.list_network_pkls(result_subdir, include_final=False)
    assert len(snapshot_pkls) >= 1
    log_file = os.path.join(result_subdir, log)
    print('Logging output to', log_file)
    misc.set_output_log_file(log_file)

    # Initialize dataset and select minibatch size.
    dataset_obj, mirror_augment = misc.load_dataset_for_previous_run(
        result_subdir, verbose=True, shuffle_mb=0)

    # Initialize metrics.
    metric_objs = []
    for name in metrics:
        class_name = metric_class_names.get(name, name)
        print('Initializing %s...' % class_name)
        class_def = tfutil.import_obj(class_name)
        image_shape = [3] + dataset_obj.shape[1:]
        obj = class_def(image_shape=image_shape, image_dtype=np.uint8)
        tfutil.init_uninited_vars()
        metric_objs.append(obj)

    # Evaluate each network snapshot.
    for snapshot_idx, snapshot_pkl in enumerate(reversed(snapshot_pkls)):
        prefix = 'network-snapshot-'
        postfix = '.pkl'
        snapshot_name = os.path.basename(snapshot_pkl)
        assert snapshot_name.startswith(prefix) and snapshot_name.endswith(
            postfix)
        snapshot_kimg = int(snapshot_name[len(prefix):-len(postfix)])

        print('%-10d' % snapshot_kimg, end='')
        mode = 'fakes'
        [obj.begin(mode) for obj in metric_objs]

        images_real, labels = dataset_obj.get_minibatch_np(
            num_groups * num_images_per_group)

        with tf.Graph().as_default(), tfutil.create_session(
                config.tf_config).as_default():
            G, D, Gs = misc.load_pkl(snapshot_pkl)

            latents = misc.random_latents(num_groups * num_images_per_group,
                                          Gs)
            images = images_real
            # Gs cannot generate more than ~3000 images per call, so generate
            # the fakes in 10 chunks; num_groups * num_images_per_group must
            # therefore be divisible by 10.
            assert (num_groups * num_images_per_group) % 10 == 0
            nn = num_groups * num_images_per_group // 10
            for k in range(10):
                images_fake = Gs.run(latents[k * nn:(k + 1) * nn],
                                     labels[k * nn:(k + 1) * nn],
                                     num_gpus=config.num_gpus,
                                     out_mul=127.5,
                                     out_add=127.5,
                                     out_dtype=np.uint8)
                images = np.concatenate((images, images_fake), axis=0)

            if images.shape[1] == 1:
                images = np.tile(images, [1, 3, 1, 1])  # grayscale => RGB
            [
                obj.feed(mode, images, num_images_per_group, num_groups,
                         snapshot_kimg, result_subdir) for obj in metric_objs
            ]
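# Hypothetical invocation (run id is a placeholder):
#   evaluate_metrics_swd_distributions(42, 'swd-distri.txt', ['swd_distri'],
#                                      num_images_per_group=100,
#                                      num_groups=10, real_passes=0)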
def anomaly_detection_encoder(run_id,
                              log,
                              test_data_folder,
                              test_batch_size=64,
                              n_samples=1000):

    result_subdir = misc.locate_result_subdir(run_id)
    snapshot_pkls = misc.list_network_pkls(result_subdir, include_final=False)
    print('# snapshot_pkls: ' + str(len(snapshot_pkls)))

    for idx in range(0, n_samples, test_batch_size):

        with tf.Graph().as_default(), tfutil.create_session(
                config.tf_config).as_default():
            # Load network from specific run.
            G, D, Gs, E = misc.load_pkl(snapshot_pkls[-1])
            print(snapshot_pkls[-1])

            dataset_obj, mirror_augment = misc.load_dataset_for_previous_run(
                result_subdir, verbose=True, shuffle_mb=0)

            Ga = tfutil.Network('G_anomaly',
                                num_channels=G.output_shapes[0][1],
                                resolution=G.output_shapes[0][2],
                                label_size=dataset_obj.label_size,
                                **config.G_anomaly)
            Ga.copy_vars_from(Gs)

            Da_Gout = tfutil.Network('D_anomaly_Gout',
                                     num_channels=G.output_shapes[0][1],
                                     resolution=G.output_shapes[0][2],
                                     label_size=dataset_obj.label_size,
                                     images_in=Ga.output_templates[0],
                                     **config.D_anomaly_Gout)
            image_dims = [
                G.output_shapes[0][1], G.output_shapes[0][2],
                G.output_shapes[0][3]
            ]
            Da_test = tfutil.Network('D_anomaly_test',
                                     num_channels=G.output_shapes[0][1],
                                     resolution=G.output_shapes[0][2],
                                     label_size=dataset_obj.label_size,
                                     **config.D_anomaly_test)

            Da_Gout.copy_vars_from(D)
            Da_test.copy_vars_from(D)

            Da_Gout.print_layers()
            Da_test.print_layers()
            E.print_layers()

            print("Initializing Anomaly detector")
            anoGAN = tfutil.AnomalyDetectorEncoder(config, Ga, Da_Gout,
                                                   Da_test, E,
                                                   test_data_folder)
            print('# AnoGAN test data names: ' +
                  str(len(anoGAN.test_data_names)))
            assert len(anoGAN.test_data_names) > 0

            test_input = anoGAN.test_data[idx:idx + test_batch_size]
            test_name = anoGAN.test_data_names[idx:idx + test_batch_size]
            anoGAN.find_closest_match(test_input, test_name)
    tf.reset_default_graph()
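# Hypothetical invocation of the encoder-based detector above (run id and
# folder are placeholders):
#   anomaly_detection_encoder(42, 'anomaly.txt', 'datasets/anomaly_test',
#                             test_batch_size=64, n_samples=1000)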