def anomaly_detection_encoder(run_id, test_data_folder, log, test_batch_size=10, start_at_batch=0, end_at_batch=10):
    """Run encoder-based anomaly detection over batches of test images.

    Loads the final network snapshot of a previous training run, rebuilds the
    generator as an unlabeled 'G_anomaly' network, and feeds batches of images
    from test_data_folder through tfutil.AnomalyDetectorEncoder.

    Args:
        run_id: identifier passed to misc.locate_result_subdir to find the run.
        test_data_folder: directory of test images for the anomaly detector.
        log: accepted for interface compatibility; currently unused here.
        test_batch_size: images per batch handed to the anomaly detector.
        start_at_batch: first batch index to process (earlier batches skipped).
        end_at_batch: exclusive upper bound on batch index — assumes exclusive
            semantics; TODO confirm against callers.
    """
    result_subdir = misc.locate_result_subdir(run_id)
    snapshot_pkls = misc.list_network_pkls(result_subdir, include_final=True)
    dataset_obj, mirror_augment = misc.load_dataset_for_previous_run(
        result_subdir, verbose=True, shuffle_mb=0)
    print('# snapshot_pkls: ' + str(len(snapshot_pkls)))
    with tf.Graph().as_default(), tfutil.create_session(
            config.tf_config).as_default():
        # Load networks from the most recent snapshot of this run.
        G, D, Gs, E = misc.load_pkl(snapshot_pkls[-1])
        # Take off the requirement of the generator having labels:
        # rebuild Gs under the G_anomaly config, then copy its weights over.
        Ga = tfutil.Network('G_anomaly',
                            num_channels=Gs.output_shapes[0][1],
                            resolution=Gs.output_shapes[0][2],
                            label_size=dataset_obj.label_size,
                            **config.G_anomaly)
        Ga.copy_vars_from(Gs)
        print("Initializing Anomaly detector")
        anoGAN = tfutil.AnomalyDetectorEncoder(config, Ga, E, test_data_folder,
                                               test_batch_size=test_batch_size)
        print('# AnoGAN test data names: ' + str(len(anoGAN.test_data_names)))
        for batch in range(len(anoGAN.filename_batches)):
            if batch < start_at_batch:
                continue
            # BUGFIX: end_at_batch was accepted but never honored, so the loop
            # previously ran over every batch regardless of the argument.
            if batch >= end_at_batch:
                break
            test_data = anoGAN.preprocess_img(anoGAN.filename_batches[batch])
            test_input = test_data
            test_name = anoGAN.filename_batches[batch]
            anoGAN.find_closest_match(test_input, test_name)
            print(f'Batch {batch} complete..')
def evaluate_metrics(run_id, log, metrics, num_images, real_passes, minibatch_size=None):
    """Evaluate image-quality metrics for every snapshot of a previous run.

    Feeds real images (up to real_passes passes) and generator samples from
    each network snapshot into the requested metric objects, printing one
    table row per pass / snapshot.

    Args:
        run_id: identifier passed to misc.locate_result_subdir to find the run.
        log: log filename created inside the run's result subdir.
        metrics: iterable of metric names ('swd', 'fid', 'is', 'msssim') or
            fully qualified class names.
        num_images: number of images fed to each metric per pass.
        real_passes: how many real-image passes to run (0, 1, or 2).
        minibatch_size: images per feed; derived from resolution when None.
    """
    metric_class_names = {
        'swd': 'metrics.sliced_wasserstein.API',
        'fid': 'metrics.frechet_inception_distance.API',
        'is': 'metrics.inception_score.API',
        'msssim': 'metrics.ms_ssim.API',
    }

    # Locate training run and initialize logging.
    result_subdir = misc.locate_result_subdir(run_id)
    snapshot_pkls = misc.list_network_pkls(result_subdir, include_final=False)
    assert len(snapshot_pkls) >= 1
    log_file = os.path.join(result_subdir, log)
    print('Logging output to', log_file)
    misc.set_output_log_file(log_file)

    # Initialize dataset and select minibatch size.
    dataset_obj, mirror_augment = misc.load_dataset_for_previous_run(result_subdir, verbose=True, shuffle_mb=0)
    if minibatch_size is None:
        # Scale the minibatch down as resolution grows, clamped to [4, 256].
        minibatch_size = np.clip(8192 // dataset_obj.shape[1], 4, 256)

    # Initialize metrics.
    metric_objs = []
    for name in metrics:
        class_name = metric_class_names.get(name, name)
        print('Initializing %s...' % class_name)
        class_def = tfutil.import_obj(class_name)
        image_shape = [3] + dataset_obj.shape[1:]
        obj = class_def(num_images=num_images, image_shape=image_shape, image_dtype=np.uint8, minibatch_size=minibatch_size)
        tfutil.init_uninited_vars()
        # Warm up each metric with random data so later timings are clean.
        mode = 'warmup'
        obj.begin(mode)
        for idx in range(10):
            obj.feed(mode, np.random.randint(0, 256, size=[minibatch_size] + image_shape, dtype=np.uint8))
        obj.end(mode)
        metric_objs.append(obj)

    # Print table header.
    print()
    print('%-10s%-12s' % ('Snapshot', 'Time_eval'), end='')
    for obj in metric_objs:
        for name, fmt in zip(obj.get_metric_names(), obj.get_metric_formatting()):
            print('%-*s' % (len(fmt % 0), name), end='')
    print()
    print('%-10s%-12s' % ('---', '---'), end='')
    for obj in metric_objs:
        for fmt in obj.get_metric_formatting():
            print('%-*s' % (len(fmt % 0), '---'), end='')
    print()

    # BUGFIX: allocate labels before the reals loop. It was previously created
    # inside the loop but read by the snapshot-evaluation loop below, so
    # real_passes=0 raised NameError. Behavior is unchanged for real_passes>=1.
    labels = np.zeros([num_images, dataset_obj.label_size], dtype=np.float32)

    # Feed in reals.
    for title, mode in [('Reals', 'reals'), ('Reals2', 'fakes')][:real_passes]:
        print('%-10s' % title, end='')
        time_begin = time.time()
        [obj.begin(mode) for obj in metric_objs]
        for begin in range(0, num_images, minibatch_size):
            end = min(begin + minibatch_size, num_images)
            images, labels[begin:end] = dataset_obj.get_minibatch_np(end - begin)
            if mirror_augment:
                images = misc.apply_mirror_augment(images)
            if images.shape[1] == 1:
                images = np.tile(images, [1, 3, 1, 1])  # grayscale => RGB
            [obj.feed(mode, images) for obj in metric_objs]
        results = [obj.end(mode) for obj in metric_objs]
        print('%-12s' % misc.format_time(time.time() - time_begin), end='')
        for obj, vals in zip(metric_objs, results):
            for val, fmt in zip(vals, obj.get_metric_formatting()):
                print(fmt % val, end='')
        print()

    # Evaluate each network snapshot, newest first.
    for snapshot_idx, snapshot_pkl in enumerate(reversed(snapshot_pkls)):
        prefix = 'network-snapshot-'
        postfix = '.pkl'
        snapshot_name = os.path.basename(snapshot_pkl)
        assert snapshot_name.startswith(prefix) and snapshot_name.endswith(postfix)
        snapshot_kimg = int(snapshot_name[len(prefix):-len(postfix)])

        print('%-10d' % snapshot_kimg, end='')
        mode = 'fakes'
        [obj.begin(mode) for obj in metric_objs]
        time_begin = time.time()
        with tf.Graph().as_default(), tfutil.create_session(config.tf_config).as_default():
            G, D, Gs = misc.load_pkl(snapshot_pkl)
            for begin in range(0, num_images, minibatch_size):
                end = min(begin + minibatch_size, num_images)
                latents = misc.random_latents(end - begin, Gs)
                # out_mul/out_add map generator output from [-1, 1] to [0, 255].
                images = Gs.run(latents, labels[begin:end], num_gpus=config.num_gpus, out_mul=127.5, out_add=127.5, out_dtype=np.uint8)
                if images.shape[1] == 1:
                    images = np.tile(images, [1, 3, 1, 1])  # grayscale => RGB
                [obj.feed(mode, images) for obj in metric_objs]
        results = [obj.end(mode) for obj in metric_objs]
        print('%-12s' % misc.format_time(time.time() - time_begin), end='')
        for obj, vals in zip(metric_objs, results):
            for val, fmt in zip(vals, obj.get_metric_formatting()):
                print(fmt % val, end='')
        print()
    print()
def anomaly_detection_encoder(run_id, log, test_data_folder, test_batch_size=64, n_samples=1000):
    """Run encoder-based anomaly detection, rebuilding the graph per batch.

    NOTE(review): this redefines anomaly_detection_encoder with a different
    signature — the earlier definition of the same name in this module is
    silently shadowed. Consider renaming one of the two (kept as-is here to
    avoid breaking callers).

    For every batch of n_samples test images, loads the last non-final
    snapshot, rebuilds anomaly variants of G and D plus the encoder E, and
    searches for the closest generator match. The per-batch graph rebuild is
    presumably intended to bound TF1 graph/memory growth — TODO confirm.

    Args:
        run_id: identifier passed to misc.locate_result_subdir to find the run.
        log: accepted for interface compatibility; currently unused here.
        test_data_folder: directory of test images for the anomaly detector.
        test_batch_size: images processed per graph rebuild.
        n_samples: total number of test images to process.
    """
    result_subdir = misc.locate_result_subdir(run_id)
    snapshot_pkls = misc.list_network_pkls(result_subdir, include_final=False)
    print('# snapshot_pkls: ' + str(len(snapshot_pkls)))
    for idx in range(0, n_samples, test_batch_size):
        with tf.Graph().as_default(), tfutil.create_session(
                config.tf_config).as_default():
            # Load networks from the most recent snapshot of this run.
            G, D, Gs, E = misc.load_pkl(snapshot_pkls[-1])
            print(snapshot_pkls[-1])
            dataset_obj, mirror_augment = misc.load_dataset_for_previous_run(
                result_subdir, verbose=True, shuffle_mb=0)
            # Rebuild the generator under the G_anomaly config and copy weights.
            Ga = tfutil.Network('G_anomaly',
                                num_channels=G.output_shapes[0][1],
                                resolution=G.output_shapes[0][2],
                                label_size=dataset_obj.label_size,
                                **config.G_anomaly)
            Ga.copy_vars_from(Gs)
            # Discriminator wired to the anomaly generator's output template.
            Da_Gout = tfutil.Network('D_anomaly_Gout',
                                     num_channels=G.output_shapes[0][1],
                                     resolution=G.output_shapes[0][2],
                                     label_size=dataset_obj.label_size,
                                     images_in=Ga.output_templates[0],
                                     **config.D_anomaly_Gout)
            # Discriminator for scoring the test images themselves.
            Da_test = tfutil.Network('D_anomaly_test',
                                     num_channels=G.output_shapes[0][1],
                                     resolution=G.output_shapes[0][2],
                                     label_size=dataset_obj.label_size,
                                     **config.D_anomaly_test)
            # NOTE: a previously computed image_dims list was unused — removed.
            Da_Gout.copy_vars_from(D)
            Da_test.copy_vars_from(D)
            Da_Gout.print_layers()
            Da_test.print_layers()
            E.print_layers()
            print("Initializing Anomaly detector")
            anoGAN = tfutil.AnomalyDetectorEncoder(config, Ga, Da_Gout, Da_test,
                                                   E, test_data_folder)
            print('# AnoGAN test data names: ' + str(len(anoGAN.test_data_names)))
            assert len(anoGAN.test_data_names) > 0
            test_input = anoGAN.test_data[idx:idx + test_batch_size]
            test_name = anoGAN.test_data_names[idx:idx + test_batch_size]
            anoGAN.find_closest_match(test_input, test_name)
        tf.reset_default_graph()
def evaluate_metrics_swd_distributions(run_id, log, metrics, num_images_per_group, num_groups, real_passes, minibatch_size=None):
    """Evaluate SWD-distribution metrics for every snapshot of a previous run.

    For each snapshot, draws num_groups * num_images_per_group real images and
    an equal number of generator samples, then feeds both to each metric
    object together with the grouping parameters.

    Args:
        run_id: identifier passed to misc.locate_result_subdir to find the run.
        log: log filename created inside the run's result subdir.
        metrics: iterable of metric names ('swd_distri') or class names.
        num_images_per_group: images per group fed to the metric.
        num_groups: number of groups.
        real_passes: accepted for interface compatibility; currently unused.
        minibatch_size: accepted for interface compatibility; currently unused.

    Raises:
        ValueError: if num_groups * num_images_per_group is not divisible by
            10 (generation is chunked into 10 equal slices).
    """
    metric_class_names = {
        'swd_distri': 'metrics.swd_distributions.API',
    }

    # Locate training run and initialize logging.
    result_subdir = misc.locate_result_subdir(run_id)
    snapshot_pkls = misc.list_network_pkls(result_subdir, include_final=False)
    assert len(snapshot_pkls) >= 1
    log_file = os.path.join(result_subdir, log)
    print('Logging output to', log_file)
    misc.set_output_log_file(log_file)

    # Initialize dataset.
    dataset_obj, mirror_augment = misc.load_dataset_for_previous_run(
        result_subdir, verbose=True, shuffle_mb=0)

    # Initialize metrics.
    metric_objs = []
    for name in metrics:
        class_name = metric_class_names.get(name, name)
        print('Initializing %s...' % class_name)
        class_def = tfutil.import_obj(class_name)
        image_shape = [3] + dataset_obj.shape[1:]
        obj = class_def(image_shape=image_shape, image_dtype=np.uint8)
        tfutil.init_uninited_vars()
        metric_objs.append(obj)

    # Evaluate each network snapshot, newest first.
    for snapshot_idx, snapshot_pkl in enumerate(reversed(snapshot_pkls)):
        prefix = 'network-snapshot-'
        postfix = '.pkl'
        snapshot_name = os.path.basename(snapshot_pkl)
        assert snapshot_name.startswith(prefix) and snapshot_name.endswith(
            postfix)
        snapshot_kimg = int(snapshot_name[len(prefix):-len(postfix)])
        print('%-10d' % snapshot_kimg, end='')

        mode = 'fakes'
        [obj.begin(mode) for obj in metric_objs]
        total_images = num_groups * num_images_per_group
        images_real, labels = dataset_obj.get_minibatch_np(total_images)
        with tf.Graph().as_default(), tfutil.create_session(
                config.tf_config).as_default():
            G, D, Gs = misc.load_pkl(snapshot_pkl)
            latents = misc.random_latents(total_images, Gs)
            images = images_real
            # Gs cannot generate large batches (roughly >3000 images) in one
            # call, so the work is split into 10 equal chunks. BUGFIX: the
            # old int(total / 10) silently dropped the remainder when the
            # total was not divisible by 10; make the invariant explicit.
            if total_images % 10 != 0:
                raise ValueError(
                    'num_groups * num_images_per_group must be divisible by 10')
            nn = total_images // 10
            for k in range(10):
                # out_mul/out_add map generator output from [-1, 1] to [0, 255].
                images_fake = Gs.run(latents[k * nn:(k + 1) * nn],
                                     labels[k * nn:(k + 1) * nn],
                                     num_gpus=config.num_gpus,
                                     out_mul=127.5,
                                     out_add=127.5,
                                     out_dtype=np.uint8)
                images = np.concatenate((images, images_fake), axis=0)
            if images.shape[1] == 1:
                images = np.tile(images, [1, 3, 1, 1])  # grayscale => RGB
            [
                obj.feed(mode, images, num_images_per_group, num_groups,
                         snapshot_kimg, result_subdir) for obj in metric_objs
            ]