Example #1
# Imports assumed for this snippet (the Lucid feature-visualization library).
import tensorflow as tf

from lucid.optvis import objectives
from lucid.optvis.render import (import_model, make_optimizer, make_t_image,
                                 make_transform_f)
from lucid.misc.gradient_override import gradient_override_map
from lucid.misc.redirected_relu_grad import (redirected_relu_grad,
                                             redirected_relu6_grad)


def make_vis_T(
    model,
    objective_per_image,
    objective_additional_global,
    param_f=None,
    optimizer=None,
    transforms=None,
    relu_gradient_override=False,
):
    """Even more flexible optimization-base feature vis.

    This function is the inner core of render_vis(), and can be used
    when render_vis() isn't flexible enough. Unfortunately, it's a bit more
    tedious to use:

    >    with tf.Graph().as_default() as graph, tf.Session() as sess:
    >
    >        T = make_vis_T(model, "mixed4a_pre_relu:0",
    >                       objective_additional_global=global_objective)
    >        tf.global_variables_initializer().run()
    >
    >        for i in range(10):
    >            T("vis_op").run()
    >            showarray(T("input").eval()[0])

    This approach allows more control over how the visualization is displayed
    as it renders. It also allows a lot more flexibility in constructing
    objectives / params because the session is already in scope.


    Args:
        model: The model to be visualized, from Alex's modelzoo.
        objective_per_image: The per-image objective our visualization
            maximizes. See the objectives module for more details.
        objective_additional_global: An additional objective applied globally;
            its loss is added to the mean of the per-image losses.
        param_f: Parameterization of the image we're optimizing.
            See the parameterization module for more details.
            Defaults to a naively parameterized [1, 128, 128, 3] image.
        optimizer: Optimizer to optimize with. Either a tf.train.Optimizer
            instance, or a function from (graph, sess) to such an instance.
            Defaults to Adam with lr .05.
        transforms: A list of stochastic transformations that get composed,
            which our visualization should robustly activate the network against.
            See the transform module for more details.
            Defaults to [transform.jitter(8)].
        relu_gradient_override: Whether to use the redirected-ReLU gradient
            override. See the gradient_override module for more details.

    Returns:
        A function T, which allows access to:
            * T("vis_op") -- the operation for to optimize the visualization
            * T("input") -- the visualization itself
            * T("loss") -- the loss for the visualization
            * T(layer) -- any layer inside the network
    """

    # pylint: disable=unused-variable
    t_image = make_t_image(param_f)
    objective_per_image = objectives.as_objective(objective_per_image)
    objective_additional_global = objectives.as_objective(
        objective_additional_global)
    transform_f = make_transform_f(transforms)
    optimizer = make_optimizer(optimizer, [])

    global_step = tf.train.get_or_create_global_step()
    init_global_step = tf.variables_initializer([global_step])
    init_global_step.run()

    if relu_gradient_override:
        with gradient_override_map({
                "Relu": redirected_relu_grad,
                "Relu6": redirected_relu6_grad
        }):
            T = import_model(model, transform_f(t_image), t_image)
    else:
        T = import_model(model, transform_f(t_image), t_image)
    # Total loss: mean of the per-image losses plus the additional global term.
    loss_per_image = objective_per_image(T)
    loss_additional_global = objective_additional_global(T)
    loss = tf.reduce_mean(loss_per_image) + loss_additional_global

    # Maximize the objective by minimizing its negation.
    vis_op = optimizer.minimize(-loss, global_step=global_step)

    local_vars = locals()

    # pylint: enable=unused-variable

    # Accessor: first look up local tensors (vis_op, loss, ...), else defer to T.
    def T2(name):
        if name in local_vars:
            return local_vars[name]
        else:
            return T(name)

    return T2
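
A minimal usage sketch for this two-objective variant, assuming a Lucid
modelzoo model; the layer name and the small L2 input penalty used as the
global term are hypothetical choices:

from lucid.modelzoo import vision_models

model = vision_models.InceptionV1()
model.load_graphdef()

with tf.Graph().as_default(), tf.Session() as sess:
    # Maximize channel 0 of mixed4a_pre_relu per image, minus a small L2
    # penalty on the input as the shared global term (hypothetical choices).
    T = make_vis_T(model,
                   objective_per_image="mixed4a_pre_relu:0",
                   objective_additional_global=-0.05 * objectives.L2("input"))
    tf.global_variables_initializer().run()

    for _ in range(256):
        T("vis_op").run()
    vis = T("input").eval()  # final visualization, shape [1, 128, 128, 3]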
Example #2
# Imports assumed for this snippet (Lucid plus TF's SciPy optimizer interface).
import numpy as np
import tensorflow as tf

from lucid.optvis import objectives, render
from lucid.misc.gradient_override import gradient_override_map
from lucid.misc.redirected_relu_grad import (redirected_relu_grad,
                                             redirected_relu6_grad)


def lbfgs_min(model,
              objective_f,
              param_f=None,
              optimizer=None,
              transforms=None,
              thresholds=(512, ),
              print_objectives=None,
              verbose=True,
              relu_gradient_override=True,
              use_fixed_seed=False):
    with tf.Graph().as_default() as graph, tf.Session() as sess:

        if use_fixed_seed:  # fixing the seed does not make results fully reproducible
            tf.set_random_seed(0)

        # Build the graph by hand instead of via render.make_vis_T(), so the
        # loss can be handed to a SciPy L-BFGS-B optimizer below.
        t_image = param_f()
        objective_f = objectives.as_objective(objective_f)
        transform_f = render.make_transform_f(transforms)

        if relu_gradient_override:
            with gradient_override_map({
                    'Relu': redirected_relu_grad,
                    'Relu6': redirected_relu6_grad
            }):
                T = render.import_model(model, transform_f(t_image), t_image)
        else:
            T = render.import_model(model, transform_f(t_image), t_image)
        loss = objective_f(T)
        t_image = T("input")

        # Gradient of the objective w.r.t. the input, fetched only so the
        # loss callback below can log its magnitude.
        gradient = tf.gradients(loss, t_image)

        i = 0

        def callback(loss_value, var, grad):
            # Called by ScipyOptimizerInterface once per loss evaluation.
            nonlocal i
            print('Loss evaluation #', i, ', loss:', loss_value,
                  ', var max:', np.max(var), ', grad max:', np.max(grad))
            i += 1

        maxcor = 30
        print_disp = 1
        optimizer_kwargs = {
            'maxiter': max(thresholds),
            'maxcor': maxcor,
            'disp': print_disp,
        }
        # Assumes the parameterization created a single trainable variable.
        trainable_variables = tf.trainable_variables()[0]
        print(trainable_variables)
        # Maximize the objective by minimizing its negation with L-BFGS-B.
        optimizer = tf.contrib.opt.ScipyOptimizerInterface(
            -loss, method='L-BFGS-B', options=optimizer_kwargs)

        tf.global_variables_initializer().run()

        var_eval = trainable_variables.eval()
        print('Initialization after variable init:', np.max(var_eval),
              np.min(var_eval))
        var_eval = t_image.eval()
        print('Initialization:', np.max(var_eval), np.min(var_eval))
        images = []
        loss_ = sess.run([loss])
        print("Beginning loss:", loss_)
        # Run L-BFGS-B to convergence (or maxiter), logging each evaluation.
        optimizer.minimize(sess,
                           fetches=[loss, t_image, gradient],
                           loss_callback=callback)
        vis = t_image.eval()
        images.append(vis)
        loss_ = sess.run([loss])
        print("End loss:", loss_)

        return images
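
A minimal usage sketch for lbfgs_min, again with hypothetical layer and size
choices; note that param_f has no default here and must be a callable that
returns the image tensor:

from lucid.modelzoo import vision_models
from lucid.optvis import param, transform

model = vision_models.InceptionV1()
model.load_graphdef()

images = lbfgs_min(model,
                   "mixed4a_pre_relu:0",
                   param_f=lambda: param.image(128),
                   transforms=[transform.jitter(8)],
                   thresholds=(512,))
print(len(images), images[0].shape)  # one visualization, [1, 128, 128, 3]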