Example #1
def uncon(func, x0, epsilon_g, options=None):
    """An algorithm for unconstrained optimization.

    Parameters
    ----------
    func : function handle
        function handle to a function of the form: f, g = func(x)
        where f is the function value and g is a numpy array containing
        the gradient. x contains only the design variables.
    x0 : ndarray
        starting point
    epsilon_g : float
        convergence tolerance.  you should terminate when
        np.max(np.abs(g)) <= epsilon_g.  (the infinity norm of the gradient)
    options : dict
        a dictionary containing options.  You can use this to try out different
        algorithm choices.  I will not pass anything in, so if the input is None
        you should set up some defaults.

    Returns
    -------
    xopt : ndarray
        the optimal solution
    fopt : float
        the corresponding function value
    outputs : dict
        other miscellaneous outputs that you might want, for example an array
        containing a convergence metric at each iteration.
    """

    # Defaults; individual entries in the options dict override them below.
    line_type = LineType.BRACKET
    dir_type = DirType.QUASI
    if options is not None:
        line_type = options.get('line_type', line_type)
        dir_type = options.get('dir_type', dir_type)

    # Your code goes here!  You can (and should) call other functions, but make
    # sure you do not change the function signature for this file.  This is the
    # file I will call to test your algorithm.

    opt = Optimizer(func, x0, line_type, dir_type)
    opt.tau_converge = epsilon_g
    opt.minimize()

    xopt = opt.Xk_1
    fopt = opt.phi0
    outputs = {}
    outputs['iterations'] = opt.iterations
    outputs['function_calls'] = opt.function_calls
    outputs['list_norms'] = opt.list_norm
    outputs['list_function_calls'] = opt.list_funct_calls
    outputs['list_function_values'] = opt.list_funct_values

    return xopt, fopt, outputs
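
A minimal usage sketch for the `uncon` interface above (not part of the original snippet), assuming a simple quadratic objective that follows the `f, g = func(x)` convention from the docstring:

import numpy as np

def quadratic(x):
    # f(x) = x^T x, with analytic gradient 2x.
    return np.dot(x, x), 2.0 * x

xopt, fopt, out = uncon(quadratic, np.array([1.5, -2.0]), 1e-6)
print(xopt, fopt, out['iterations'])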
Example #2
import pprint


def viz_activation(model,
                   layer_idx,
                   filter_indices=None,
                   seed_seq=None,
                   act_max_weight=1,
                   lp_norm_weight=1,
                   **optimizer_params):
    """Generates an input that maximizes the `filter_indices` output of the
    layer at `layer_idx`, regularized by the L2 norm of the input.
    """
    print("Working on filters: {}".format(pprint.pformat(filter_indices)))

    optimizer_params_default = {
        'seed_seq': seed_seq,
        'max_iter': 1000,
        'lr': 0.1,
        'verbose': True
    }

    # User-supplied keyword arguments override the defaults above.
    optimizer_params_default.update(optimizer_params)
    optimizer_params = optimizer_params_default

    losses = [(ActivationMaximization(model.layers[layer_idx],
                                      filter_indices), act_max_weight),
              (LPNorm(model.input, 2), lp_norm_weight)]

    opt = Optimizer(model.input, losses)
    seq = opt.minimize(**optimizer_params)

    return seq
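
A hedged usage sketch (not from the original source); `model` is a hypothetical trained Keras model, and extra keyword arguments flow through **optimizer_params:

# Hypothetical call: maximize output neurons 20 and 22 of the last layer,
# overriding the default iteration budget.
seq = viz_activation(model, layer_idx=-1, filter_indices=[20, 22], max_iter=500)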
Example #3
import cv2
import numpy as np

# The remaining names (ActivationMaximization, Optimizer, utils) are assumed
# to come from the surrounding package, as in the neighboring examples.


def visualize_saliency(img, layer, filter_indices, seed_img, overlay=True):
    """Generates a heatmap indicating the pixels that contributed the most towards
    maximizing `filter_indices` output in the given `layer`.

    For example, if you wanted to visualize which pixels contributed to classifying an image as 'bird', say output
    index 22 on the final `keras.layers.Dense` layer, then `filter_indices = [22]`, `layer = dense_layer`.

    Alternatively, one could use `filter_indices = [22, 23]` and hope that image regions common to output
    categories 22 and 23 show up in the heatmap.

    Args:
        img: 4D input image tensor with shape: `(samples, channels, rows, cols)` if dim_ordering='th'
            or `(samples, rows, cols, channels)` if dim_ordering='tf'.
        layer: The `keras.Layer` layer whose filters need to be visualized.
        filter_indices: filter indices within the layer to be maximized.
            For a `keras.layers.Dense` layer, `filter_idx` is interpreted as the output index.

            If you are visualizing the final `keras.layers.Dense` layer, you tend to get
            better results with a 'linear' activation as opposed to 'softmax'. This is because the 'softmax'
            output can be maximized by minimizing scores for other classes.

        seed_img: The input image for which the activation map is to be visualized.
        overlay: If true, overlays the heatmap over the original image (Default value = True)

    Returns:
        The heatmap image indicating image regions that, when changed, would contribute the most towards maximizing
        the filter output.
    """

    losses = [(ActivationMaximization(layer, filter_indices), 1)]
    opt = Optimizer(img, losses)
    _, grads = opt.minimize(max_iter=1,
                            verbose=True,
                            jitter=0,
                            seed_img=seed_img)

    # Collapse the channel axis, keeping the strongest gradient per pixel.
    s, c, w, h = utils.get_img_indices()
    grads = np.max(np.abs(grads), axis=c, keepdims=True)

    # Smoothen the activation map and normalize to [0, 1].
    grads = utils.deprocess_image(grads[0])
    grads = grads / np.max(grads)

    # Convert to a heatmap and zero out low values for a cleaner output.
    # applyColorMap expects an 8-bit image, hence the uint8 cast.
    heatmap = cv2.applyColorMap(cv2.GaussianBlur(np.uint8(grads * 255), (3, 3), 0),
                                cv2.COLORMAP_JET)
    heatmap[np.where(grads <= 0.2)] = 0

    if overlay:
        return cv2.addWeighted(seed_img, 1, heatmap, 0.5, 0)
    else:
        return heatmap
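
A hedged usage sketch (not from the original source); `model` and `bird_img` are hypothetical stand-ins for a trained Keras model and a preprocessed seed image:

# Hypothetical call: saliency heatmap for output index 22 of the final layer,
# overlaid on the seed image.
overlaid = visualize_saliency(model.input, model.layers[-1],
                              filter_indices=[22], seed_img=bird_img)
cv2.imwrite('saliency.png', overlaid)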
Example #4
import cv2
import numpy as np

# ActivationMaximization, LPNorm, TotalVariation, Optimizer, utils, and
# _get_num_filters are assumed to come from the surrounding package, as in
# the neighboring examples.


def visualize_activation(img,
                         layer,
                         filter_indices=None,
                         seed_img=None,
                         max_iter=200,
                         act_max_weight=1,
                         lp_norm_weight=10,
                         tv_weight=10,
                         verbose=False,
                         show_filter_idx_text=True,
                         idx_label_map=None,
                         cols=5):
    """Generates stitched input image(s) over all `filter_indices` in the given `layer` that maximize
    the filter output activation.

    For example, if you wanted to visualize the input image that would maximize the output index 22, say on the
    final `keras.layers.Dense` layer, then `filter_indices = [22]`, `layer = dense_layer`.

    If `filter_indices = [22, 23]`, then a stitched image comprising two images is generated, each
    corresponding to an entry in `filter_indices`.

    Args:
        img: 4D input image tensor with shape: `(samples, channels, rows, cols)` if dim_ordering='th'
            or `(samples, rows, cols, channels)` if dim_ordering='tf'.
        layer: The `keras.Layer` layer whose filters need to be visualized.
        filter_indices: filter indices within the layer to be maximized.
            If None, all filters are visualized. (Default value = None)

            An input image is generated for each entry in `filter_indices`. An entry can also be an array.
            For example, `filter_indices = [[1, 2], 3, [4, 5, 6]]` would generate three input images. The first
            would maximize the output of filters 1 and 2 jointly, the second filter 3, and the third filters
            4, 5, and 6 jointly. A fun use of this might be to generate a dog-fish image by maximizing 'dog'
            and 'fish' outputs in the final `Dense` layer.

            For `keras.layers.Dense` layers, `filter_idx` is interpreted as the output index.

            If you are visualizing the final `keras.layers.Dense` layer, you tend to get
            better results with a 'linear' activation as opposed to 'softmax'. This is because the 'softmax'
            output can be maximized by minimizing scores for other classes.

        seed_img: Seeds the optimization with a starting image. Initialized with a random value when set to None.
            (Default value = None)
        max_iter: The maximum number of gradient descent iterations. (Default value = 200)
        act_max_weight: The weight param for `ActivationMaximization` loss. Not used if 0 or None. (Default value = 1)
        lp_norm_weight: The weight param for `LPNorm` regularization loss. Not used if 0 or None. (Default value = 10)
        tv_weight: The weight param for `TotalVariation` regularization loss. Not used if 0 or None. (Default value = 10)
        verbose: Shows verbose loss output for each filter; very useful for estimating loss weight factors.
            (Default value = False)
        show_filter_idx_text: Adds filter_idx text to the image if set to True. (Default value = True)
            If the entry in `filter_indices` is an array, then comma separated labels are generated.
        idx_label_map: Map of filter_idx to text label. If not None, this map is used to translate filter_idx
            to text value when show_filter_idx_text = True. (Default value = None)
        cols: Max number of image cols. New row is created when number of images exceed the column size.
            (Default value = 5)

    Returns:
        Stitched image output visualizing input images that maximize the filter output(s).
    """
    if filter_indices is None:
        filter_indices = np.arange(_get_num_filters(layer))

    imgs = []
    for i, idx in enumerate(filter_indices):
        indices = idx if isinstance(idx, list) else [idx]

        # The regularizers act on the input tensor, as in Example #2 (assumed;
        # the original snippet called them with no arguments).
        losses = [(ActivationMaximization(layer, indices), act_max_weight or 0),
                  (LPNorm(img), lp_norm_weight or 0),
                  (TotalVariation(img), tv_weight or 0)]

        opt = Optimizer(img, losses)
        print('Working on filter {}/{}'.format(i + 1, len(filter_indices)))
        opt_img, g = opt.minimize(seed_img=seed_img,
                                  max_iter=max_iter,
                                  verbose=verbose)

        # Add filter text to image if applicable.
        if show_filter_idx_text:
            label = None
            if idx_label_map:
                # Fall back to the numeric index when a label is missing, so
                # the join never sees None.
                label = ', '.join([str(idx_label_map.get(i, i)) for i in indices])
            if label is None:
                label = "Filter {}".format(', '.join([str(i) for i in indices]))
            cv2.putText(opt_img, label, (10, 25), cv2.FONT_HERSHEY_SIMPLEX,
                        0.75, (0, 0, 0), 2)

        imgs.append(opt_img)

    return utils.stitch_images(imgs, cols=cols)
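
A hedged usage sketch (not from the original source), reusing the grouped-indices example from the docstring; `model` is again a hypothetical trained Keras model:

# Hypothetical call: three generated images stitched into one canvas, the
# first maximizing filters 1 and 2 jointly.
stitched = visualize_activation(model.input, model.layers[-1],
                                filter_indices=[[1, 2], 3, [4, 5, 6]],
                                cols=3)
cv2.imwrite('activations.png', stitched)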
Example #5
import time

import numpy as np

# `y0`, `num_pts`, `brachistochrone`, and the Optimizer class (the same one
# as in Example #1) are assumed to be defined or imported earlier in the file.

# Drop the fixed endpoints; only the interior points are design variables.
y0 = y0[1:-1]

all_times = []
all_wall_times = []
all_function_evals = []
all_nit = []

#### Iterate over number of points
for num in num_pts:

    all_x = np.linspace(0, 1, num)

    start = time.time()
    opt = Optimizer(brachistochrone, y0, 1, 1)  # trailing args: line-search/direction flags, cf. Example #1 (assumed)
    # print(opt.gradient_func(y0))
    opt.minimize()
    print(opt.function_calls)
    # set_trace()

    # fit = minimize(fun, y0,  method="BFGS")
    end = time.time()

    #### Saving Values
    all_wall_times.append(end - start)
    # all_function_evals.append(fit.nfev)
    # all_nit.append(fit.nit)

    #### Output for debugging
    print("-------------")
    print(num)
    all_times.append(brachistochrone(opt.Xk_1))
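
The commented-out SciPy comparison inside the loop could be re-enabled with a sketch like the one below (an assumption, not the original code), taking `brachistochrone` to follow the `f, g = func(x)` convention from Example #1:

from scipy.optimize import minimize

# Hypothetical scalar/gradient wrappers around brachistochrone for SciPy.
fun = lambda y: brachistochrone(y)[0]
jac = lambda y: brachistochrone(y)[1]

fit = minimize(fun, y0, method="BFGS", jac=jac)
all_function_evals.append(fit.nfev)
all_nit.append(fit.nit)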
Example #6
from classes.Element import Element
from classes.Pool import Pool
from classes.Species import Species
from optimizer import Optimizer

# `mol=0` marks species that start with zero moles (hence the `m0_` prefix).
m0_H2O2 = Species([Element("H", 2), Element("O", 2)], mol=0)
m0_H2O = Species([Element("H", 2), Element("O", 1)], mol=0)
m0_CO2 = Species([Element("C"), Element("O", 2)], mol=0)
m0_OH = Species([Element("O"), Element("H")], mol=0)
m0_CO = Species([Element("C"), Element("O")], mol=0)
CH4 = Species([Element("C"), Element("H", 4)])
O2 = Species([Element("O", 2)])
H2 = Species([Element("H", 2)])

pool_1 = Pool([O2, H2, m0_OH, m0_H2O, m0_H2O2, CH4, m0_CO, m0_CO2])
pool_2 = Pool([O2, H2, m0_H2O])
pool_3 = Pool([O2, m0_H2O, CH4, m0_CO2])

pools = [pool_1, pool_2, pool_3]
pres = 100_000  # pressure, presumably in Pa (1 bar)
t = 500  # temperature in Kelvin
if __name__ == '__main__':
    for p in pools:
        print("Processing:", p)
        proj = Optimizer(p, t, pres)
        result = proj.minimize()
        print(result)
        print("\n" + "=" * 50 + "\n")