Code example #1
def foe_collect_rec_images(classifier, select_layers=None, select_images=None, rescaled=False):
    # builds one [layers, imgs, h, w, c] array from the saved rec_10000 reconstructions
    tgt_paths = subset10_paths(classifier)
    _, img_hw, layer_names = classifier_stats(classifier)
    log_path = '../logs/opt_inversion/{}/image_rec/'.format(classifier)
    layer_subdirs = select_layers or [l.replace('/', '_') for l in layer_names]
    img_subdirs = select_images or [p.split('/')[-1].split('.')[0] for p in tgt_paths]

    # tgt_images = [np.expand_dims(load_image(p), axis=0) for p in tgt_paths]
    scale_subdir = 'imgs_rescaled' if rescaled else 'imgs'
    rec_filename = 'full512/{}/rec_10000.png'.format(scale_subdir)
    img_list = []
    for layer_subdir in layer_subdirs:
        layer_log_path = '{}{}/'.format(log_path, layer_subdir)
        layer_list = []
        for idx, img_subdir in enumerate(img_subdirs):
            img_log_path = '{}{}/'.format(layer_log_path, img_subdir)
            rec_image = load_image(img_log_path + rec_filename)
            if rec_image.shape[0] == 1:
                rec_image = np.squeeze(rec_image, axis=0)
            layer_list.append(rec_image)

        img_list.append(layer_list)

    return np.asarray(img_list)
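A minimal usage sketch. All snippets in this listing assume the usual imports (os, numpy as np, tensorflow as tf, skimage.io) plus the project's own helpers (subset10_paths, classifier_stats, load_image, selected_img_ids, etc.); the classifier and layer names below are hypothetical, chosen only for illustration.

# hypothetical call: collect the rec_10000 reconstructions of two layers for a 'vgg16' run
rec_mat = foe_collect_rec_images('vgg16', select_layers=['conv1_1', 'conv2_2'], rescaled=False)
print(rec_mat.shape)  # expected: (2, n_images, h, w, c)
np.save('foe_rec_mat.npy', rec_mat)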
Code example #2
def run_mv_scripts(classifier, custom_layers=None):
    _, img_hw, layer_names = classifier_stats(classifier)
    layer_names = custom_layers or layer_names
    img_paths = subset10_paths(classifier)
    # img_paths = img_paths[3:]  # to continue previous run
    for img_path in img_paths:
        for layer_name in layer_names:
            layer_subdir = layer_name.replace('/', '_')
            img_subdir = img_path.split('/')[-1].split('.')[0]
            log_path = '../logs/mahendran_vedaldi/2016/{}/{}/{}/'.format(classifier, layer_subdir, img_subdir)
            mv_script_fun(layer_name, img_path, log_path, classifier)
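A usage sketch, assuming mv_script_fun launches one Mahendran-Vedaldi reconstruction per (layer, image) pair; the classifier and layer names are hypothetical.

# hypothetical: restrict the runs to two layers of an 'alexnet' classifier
run_mv_scripts('alexnet', custom_layers=['conv3/lin', 'conv4/lin'])

# hypothetical: run every layer reported by classifier_stats
run_mv_scripts('alexnet')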
Code example #3
def inv_mse_and_vgg_scores(classifier):
    tgt_paths = subset10_paths(classifier)
    _, img_hw, layer_names = classifier_stats(classifier)
    log_path = '../logs/opt_inversion/{}/image_rec/'.format(classifier)
    layer_subdirs = [n.replace('/', '_') for n in layer_names]
    img_subdirs = ['val{}'.format(i) for i in selected_img_ids()]

    tgt_images = [np.expand_dims(load_image(p), axis=0) for p in tgt_paths]
    rec_filename = 'imgs/rec_5000.png'

    vgg_loss = VggScoreLoss(('tgt_224:0', 'rec_224:0'), weighting=1.0, name=None, input_scaling=1.0)
    mse_loss = MSELoss('tgt_pl:0', 'rec_pl:0')
    nmse_loss = NormedMSELoss('tgt_pl:0', 'rec_pl:0')
    loss_mods = [vgg_loss, mse_loss, nmse_loss]

    found_layers = []
    score_list = []

    with tf.Graph().as_default():
        tgt_pl = tf.placeholder(dtype=tf.float32, shape=(1, img_hw, img_hw, 3), name='tgt_pl')
        rec_pl = tf.placeholder(dtype=tf.float32, shape=(1, img_hw, img_hw, 3), name='rec_pl')
        _ = tf.slice(tgt_pl, begin=[0, 0, 0, 0], size=[-1, 224, 224, -1], name='tgt_224')
        _ = tf.slice(rec_pl, begin=[0, 0, 0, 0], size=[-1, 224, 224, -1], name='rec_224')

        for lmod in loss_mods:
            lmod.build()
        loss_tsr_list = [m.get_loss() for m in loss_mods]

        with tf.Session() as sess:

            for layer_subdir in layer_subdirs:
                layer_log_path = '{}{}/imgs/'.format(log_path, layer_subdir)
                if not os.path.exists(layer_log_path):
                    continue
                found_layers.append(layer_subdir)
                layer_score_list = []

                for idx, img_subdir in enumerate(img_subdirs):
                    img_log_path = '{}{}/'.format(layer_log_path, img_subdir)
                    rec_image = np.expand_dims(load_image(img_log_path + rec_filename), axis=0)
                    if np.max(rec_image) < 2.:
                        rec_image = rec_image * 255.
                    scores = sess.run(loss_tsr_list, feed_dict={tgt_pl: tgt_images[idx], rec_pl: rec_image})
                    layer_score_list.append(scores)
                score_list.append(layer_score_list)

    score_mat = np.asarray(score_list)
    print(score_mat.shape)
    print(found_layers)
    np.save('{}score_mat.npy'.format(log_path), score_mat)
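The saved score_mat has shape [n_layers, n_images, 3], with the last axis ordered as in loss_mods (VGG score, MSE, normalized MSE). A sketch of reading it back, with a hypothetical classifier name:

# hypothetical post-processing of the saved score matrix
score_mat = np.load('../logs/opt_inversion/vgg16/image_rec/score_mat.npy')
vgg_scores, mse_scores, nmse_scores = score_mat[..., 0], score_mat[..., 1], score_mat[..., 2]
print(vgg_scores.mean(axis=1))  # mean VGG score per layer, averaged over images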
Code example #4
def mv_plot_mats(classifier):
    _, img_hw, layer_names = classifier_stats(classifier)
    img_paths = subset10_paths(classifier)
    # img_paths = img_paths[3:]  # uncomment to continue a previous run
    log_points = (1750, 3500)
    for img_path in img_paths:
        for layer_name in layer_names:
            layer_subdir = layer_name.replace('/', '_')
            img_subdir = img_path.split('/')[-1].split('.')[0]
            log_path = '../logs/mahendran_vedaldi/2016/{}/{}/{}/'.format(classifier, layer_subdir, img_subdir)
            if not os.path.exists(log_path + 'imgs/'):
                os.makedirs(log_path + 'imgs/')
            for log_point in log_points:
                img_mat = np.load('{}mats/rec_{}.npy'.format(log_path, log_point))
                skimage.io.imsave('{}imgs/rec_{}.png'.format(log_path, log_point), img_mat)
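A sketch of the same npy-to-png step applied to a single saved reconstruction by hand; the path components (classifier, layer and image subdirectories) are hypothetical.

# hypothetical: convert one rec_3500 matrix to a png next to it
log_path = '../logs/mahendran_vedaldi/2016/alexnet/conv3_lin/val153/'
img_mat = np.load(log_path + 'mats/rec_3500.npy')
skimage.io.imsave(log_path + 'imgs/rec_3500.png', img_mat)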
Code example #5
def run_stacked_module(classifier,
                       start_layer,
                       rec_layer,
                       use_solotrain=False,
                       subdir_name=None,
                       retrieve_special=None):

    subdir_name = subdir_name or '{}_stack_{}_to_{}'.format(
        classifier, start_layer, rec_layer)
    alt_load_subdir = 'solotrain' if use_solotrain else subdir_name
    module_list = get_stacked_module(classifier,
                                     start_layer,
                                     rec_layer,
                                     alt_load_subdir=alt_load_subdir,
                                     subdir_name=subdir_name,
                                     trainable=False)
    save_subdir = 'stacked/' if use_solotrain else 'merged/'
    log_path = cnn_inv_log_path(classifier, start_layer,
                                rec_layer) + save_subdir
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    ni = NetInversion(module_list, log_path, classifier=classifier)
    img_paths = subset10_paths(classifier)
    img_mat = load_and_stack_imgs(img_paths)
    to_fetch = retrieve_special or ('{}/{}:0'.format(
        module_list[-1].name, module_list[-1].rec_name), )
    recs = ni.run_model_on_images(img_mat, to_fetch)
    for name, rec in zip(to_fetch, recs):
        print(rec.shape)
        np.save('{}cnn_rec_{}.npy'.format(log_path, name.replace('/', '_')),
                rec)

    if retrieve_special is None:
        images = [
            np.squeeze(k, axis=0) for k in np.split(
                recs[0], indices_or_sections=recs[0].shape[0], axis=0)
        ]
        images = [np.minimum(np.maximum(k, 0.), 255.) / 255. for k in images]
        image_ids = [
            p.split('/')[-1].split('.')[0][len('val'):] for p in img_paths
        ]
        image_save_paths = [
            log_path + 'img_rec_{}.png'.format(i) for i in image_ids
        ]
        for path, img in zip(image_save_paths, images):
            skimage.io.imsave(path, img)
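A usage sketch; start_layer and rec_layer are passed straight through to get_stacked_module and cnn_inv_log_path, so the classifier name and integer module ids below are hypothetical.

# hypothetical: reconstruct through separately trained modules (results saved under 'stacked/')
run_stacked_module('alexnet', start_layer=7, rec_layer=1, use_solotrain=True)
# hypothetical: same stack with jointly fine-tuned weights (results saved under 'merged/')
run_stacked_module('alexnet', start_layer=7, rec_layer=1, use_solotrain=False)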
Code example #6
def mv_collect_rec_images(classifier, select_layers=None, select_images=None):
    # makes one [layers, imgs, h, w, c] mat for all rec3500 images
    tgt_paths = subset10_paths(classifier)
    _, img_hw, layer_names = classifier_stats(classifier)
    log_path = '../logs/mahendran_vedaldi/2016/{}/'.format(classifier)
    layer_subdirs = select_layers or [l.replace('/', '_') for l in layer_names]
    img_subdirs = select_images or [p.split('/')[-1].split('.')[0] for p in tgt_paths]

    # tgt_images = [np.expand_dims(load_image(p), axis=0) for p in tgt_paths]
    rec_filename = 'imgs/rec_3500.png'
    img_list = []
    for layer_subdir in layer_subdirs:
        layer_log_path = '{}{}/'.format(log_path, layer_subdir)
        layer_list = []
        for idx, img_subdir in enumerate(img_subdirs):
            img_log_path = '{}{}/'.format(layer_log_path, img_subdir)
            rec_image = load_image(img_log_path + rec_filename)
            if rec_image.shape[0] == 1:
                rec_image = np.squeeze(rec_image, axis=0)
            layer_list.append(rec_image)

        img_list.append(layer_list)

    return np.asarray(img_list)
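A sketch pairing this collector with the optimization-based one from code example #1 to obtain [layers, imgs, h, w, c] arrays for a side-by-side comparison; the classifier name is hypothetical.

# hypothetical: collect both reconstruction sets for the same classifier
mv_mat = mv_collect_rec_images('vgg16')
foe_mat = foe_collect_rec_images('vgg16')
print(mv_mat.shape, foe_mat.shape)  # both expected as (n_layers, n_images, h, w, c)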
Code example #7
def db_img_mse_and_vgg_scores(classifier,
                              select_modules=None,
                              select_images=None,
                              merged=True):
    tgt_paths = subset10_paths(classifier)
    _, img_hw, layer_names = classifier_stats(classifier)
    tgt_images = [np.expand_dims(load_image(p), axis=0) for p in tgt_paths]

    log_path = '../logs/cnn_inversion/{}/'.format(classifier)

    stack_mode = 'merged' if merged else 'stacked'
    start_module_ids = select_modules or (1, 4, 7, 8, 9)
    img_subdirs = select_images or selected_img_ids()

    vgg_loss = VggScoreLoss(('tgt_224:0', 'rec_224:0'),
                            weighting=1.0,
                            name=None,
                            input_scaling=1.0)
    mse_loss = MSELoss('tgt_pl:0', 'rec_pl:0')
    nmse_loss = NormedMSELoss('tgt_pl:0', 'rec_pl:0')
    loss_mods = [vgg_loss, mse_loss, nmse_loss]

    found_layers = []
    score_list = []

    with tf.Graph().as_default():
        tgt_pl = tf.placeholder(dtype=tf.float32,
                                shape=(1, img_hw, img_hw, 3),
                                name='tgt_pl')
        rec_pl = tf.placeholder(dtype=tf.float32,
                                shape=(1, img_hw, img_hw, 3),
                                name='rec_pl')
        _ = tf.slice(tgt_pl,
                     begin=[0, 0, 0, 0],
                     size=[-1, 224, 224, -1],
                     name='tgt_224')
        _ = tf.slice(rec_pl,
                     begin=[0, 0, 0, 0],
                     size=[-1, 224, 224, -1],
                     name='rec_224')
        for lmod in loss_mods:
            lmod.build()
        loss_tsr_list = [m.get_loss() for m in loss_mods]

        with tf.Session() as sess:

            for module_id in start_module_ids:
                layer_log_path = '{}stack_{}_to_1/{}/'.format(
                    log_path, module_id, stack_mode)
                layer_list = []
                for idx, img_subdir in enumerate(img_subdirs):

                    rec_image = load_image(layer_log_path +
                                           'img_rec_{}.png'.format(img_subdir))
                    if rec_image.shape[0] != 1:
                        rec_image = np.expand_dims(rec_image, axis=0)

                    scores = sess.run(loss_tsr_list,
                                      feed_dict={
                                          tgt_pl: tgt_images[idx],
                                          rec_pl: rec_image
                                      })
                    layer_list.append(scores)

                score_list.append(layer_list)

    score_mat = np.asarray(score_list)
    print(score_mat.shape)
    print(found_layers)
    np.save('../logs/cnn_inversion/{}/score_mat.npy'.format(classifier),
            score_mat)
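A sketch of loading the resulting score matrix; its shape is [n_start_modules, n_images, 3], with the losses ordered as in loss_mods; the classifier name is hypothetical.

# hypothetical: average the (VGG, MSE, NMSE) scores over images, per start module
score_mat = np.load('../logs/cnn_inversion/alexnet/score_mat.npy')
print(score_mat.shape)
print(score_mat.mean(axis=1))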
Code example #8
def run_image_opt_inversions(classifier, prior_mode, layer_select=None, lr=1., pre_featmap_name='input',
                             do_plot=True, mse_iterations=5000, opt_iterations=5000, jitterations=3200,
                             summary_freq=50, print_freq=500, log_freq=500, grad_clip=10000., select_img=None,
                             custom_images=None):

    _, img_hw, layer_names = classifier_stats(classifier)

    if layer_select is not None:
        layer_names = [n for n in layer_names if layer_select in n]

    tgt_paths = subset10_paths(classifier)
    layer_subdirs = [n.replace('/', '_') for n in layer_names]
    img_subdirs = ['val{}'.format(i) for i in selected_img_ids()]

    if select_img is not None:
        tgt_paths = tgt_paths[select_img:select_img+1]
        img_subdirs = img_subdirs[select_img:select_img+1]

    if custom_images is not None:
        tgt_paths, img_subdirs = custom_images

    log_path = '../logs/opt_inversion/{}/image_rec/'.format(classifier)
    print(layer_names)
    for idx, layer_subdir in enumerate(layer_subdirs):
        cutoff = None  # layer_names[idx] if layer_names[idx].startswith('conv') else None
        jitter_t, weight = get_imagerec_jitter_and_prior_weight(classifier, layer_names[idx])
        print('jitter', jitter_t, 'prior_weight', weight)
        for idy, img_subdir in enumerate(img_subdirs):
            target_image = tgt_paths[idy]
            exp_log_path = '{}{}/{}/'.format(log_path, layer_subdir, img_subdir)
            if not os.path.exists(exp_log_path):
                os.makedirs(exp_log_path)

            lr_lower_points = ((1e+0, lr),)
            print(layer_subdir)
            split = SplitModule(name_to_split=layer_names[idx] + ':0', img_slice_name=layer_subdir + '_img',
                                rec_slice_name=layer_subdir + '_rec')
            feat_mse = MSELoss(target=layer_subdir + '_img:0', reconstruction=layer_subdir + '_rec:0',
                               name='MSE_' + layer_subdir)
            img_mse = MSELoss(target='target_featmap/read:0', reconstruction='pre_featmap/read:0',
                              name='MSE_Reconstruction')
            img_mse.add_loss = False

            modules = [split, feat_mse, img_mse]
            pure_mse_path = exp_log_path + 'pure_mse/'
            ni = NetInversion(modules, pure_mse_path, classifier=classifier, summary_freq=summary_freq,
                              print_freq=print_freq, log_freq=log_freq)

            pre_featmap_init = None
            ni.train_pre_featmap(target_image, n_iterations=mse_iterations, grad_clip=grad_clip,
                                 lr_lower_points=lr_lower_points, jitter_t=jitter_t, range_clip=False,
                                 bound_plots=True,
                                 optim_name='adam', save_as_plot=do_plot, jitter_stop_point=jitterations,
                                 pre_featmap_init=pre_featmap_init, ckpt_offset=0,
                                 pre_featmap_name=pre_featmap_name, classifier_cutoff=cutoff,
                                 featmap_names_to_plot=(), max_n_featmaps_to_plot=10)

            for mod in modules:
                if isinstance(mod, LossModule):
                    mod.reset()

            prior = get_default_prior(prior_mode, custom_weighting=weight, custom_target='pre_featmap:0')
            modules = [split, feat_mse, img_mse, prior]
            prior_path = exp_log_path + prior_mode + '/'
            ni = NetInversion(modules, prior_path, classifier=classifier, summary_freq=summary_freq,
                              print_freq=print_freq, log_freq=log_freq)
            pre_featmap_init = np.load(pure_mse_path + '/mats/rec_{}.npy'.format(mse_iterations))
            ni.train_pre_featmap(target_image, n_iterations=opt_iterations, grad_clip=grad_clip,
                                 lr_lower_points=lr_lower_points, jitter_t=jitter_t, range_clip=False,
                                 bound_plots=True,
                                 optim_name='adam', save_as_plot=do_plot,
                                 jitter_stop_point=mse_iterations + jitterations,
                                 pre_featmap_init=pre_featmap_init, ckpt_offset=mse_iterations,
                                 pre_featmap_name=pre_featmap_name, classifier_cutoff=cutoff,
                                 featmap_names_to_plot=(), max_n_featmaps_to_plot=10)
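A usage sketch. With the default iteration counts the prior stage checkpoints end at rec_10000, which is what foe_collect_rec_images in code example #1 reads back, and the prior_mode value 'full512' matches the subdirectory name used there; the classifier name and layer filter are hypothetical, and prior_mode must be a string that get_default_prior understands.

# hypothetical: invert every layer whose name contains 'conv3' for the first target image,
# first with pure feature MSE, then with the chosen prior on the pre-image
run_image_opt_inversions('alexnet', prior_mode='full512',
                         layer_select='conv3', select_img=0, do_plot=False)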