def render_atlas_tile(model, op_name, directions, icon_size=45, n_steps=127,
                      transforms_amount=1, cossim_pow=0, L2_amount=2):
    """Render one tile of an activation atlas for a batch of direction vectors.

    Args:
        model: the model to visualize.
        op_name: name of the op/layer whose directions are visualized.
        directions: array of direction vectors, one visualization per row.
        icon_size: side length in pixels of each rendered icon.
        n_steps: total optimization steps; a midpoint snapshot is also taken.
        transforms_amount: index (0-2) selecting increasingly aggressive
            transform-robustness pipelines.
        cossim_pow: cosine-similarity exponent for the direction objective.
        L2_amount: weight of the (squared) L2 penalty on the input image.

    Returns:
        The batch of rendered images from the final optimization threshold.
    """
    # Three candidate transform pipelines, from mild jitter to heavy
    # jitter/scale/rotate robustness; chosen by transforms_amount.
    transforms_options = [
        [
            transform.jitter(2),
        ],
        [
            transform.pad(12, mode="constant", constant_value=0.5),
            transform.jitter(8),
            transform.random_scale([1 + (i - 5) / 50.0 for i in range(11)]),
            transform.random_rotate(list(range(-10, 11)) + 5 * [0]),
            transform.jitter(4),
        ],
        [
            transform.pad(2, mode="constant", constant_value=0.5),
            transform.jitter(4),
            transform.jitter(4),
            transform.jitter(8),
            transform.jitter(8),
            transform.jitter(8),
            transform.random_scale(
                [0.995 ** n for n in range(-5, 80)]
                + [0.998 ** n for n in 2 * list(range(20, 40))]
            ),
            transform.random_rotate(
                list(range(-20, 20))
                + list(range(-10, 10))
                + list(range(-5, 5))
                + 5 * [0]
            ),
            transform.jitter(2),
        ],
    ]

    def param_f():
        # One image per direction vector in the batch.
        return param.image(icon_size, batch=directions.shape[0])

    direction_objs = [
        objectives.direction_neuron(op_name, vec, batch=idx, cossim_pow=cossim_pow)
        for idx, vec in enumerate(directions)
    ]
    # Sum of per-direction objectives minus a squared L2 penalty on the input
    # (the product of two L2 objectives), scaled by L2_amount.
    obj = (
        objectives.Objective.sum(direction_objs)
        - L2_amount * objectives.L2("input", 0.5) * objectives.L2("input", 0.5)
    )

    thresholds = (n_steps // 2, n_steps)
    rendered = render.render_vis(
        model,
        obj,
        param_f,
        transforms=transforms_options[transforms_amount],
        thresholds=thresholds,
        verbose=False,
    )
    # render_vis returns one batch of images per threshold; keep the last.
    return rendered[-1]
def vis_traditional(
    self,
    feature_list=None,
    *,
    transforms=None,
    l2_coeff=0.0,
    l2_layer_name=None,
):
    """Feature visualization via direct optimization of direction objectives.

    Args:
        feature_list: a feature index or iterable of indices to visualize;
            defaults to every channel direction in ``self.acts_reduced``.
        transforms: list of lucid-style transforms applied during rendering;
            defaults to ``[transform.jitter(2)]``.
        l2_coeff: weight of an optional L2 penalty; 0 disables it.
        l2_layer_name: layer the L2 penalty is applied to; required when
            ``l2_coeff`` is non-zero.

    Returns:
        The batch of rendered images from the final render_vis threshold.
    """
    # Fix: the default transforms list was a mutable default argument
    # (shared across calls); build it fresh per call instead.
    if transforms is None:
        transforms = [transform.jitter(2)]
    if feature_list is None:
        feature_list = list(range(self.acts_reduced.shape[-1]))
    # Accept a single index as well as any iterable of indices.
    try:
        feature_list = list(feature_list)
    except TypeError:
        feature_list = [feature_list]

    obj = sum(
        objectives.direction_neuron(
            self.layer_name, self.channel_dirs[feature], batch=feature
        )
        for feature in feature_list
    )
    if l2_coeff != 0.0:
        # NOTE(review): assert is stripped under `python -O`; kept as-is to
        # preserve the exception type callers may rely on.
        assert (
            l2_layer_name is not None
        ), "l2_layer_name must be specified if l2_coeff is non-zero"
        obj -= objectives.L2(l2_layer_name) * l2_coeff

    param_f = lambda: param.image(64, batch=len(feature_list))
    return render.render_vis(
        self.model, obj, param_f=param_f, transforms=transforms
    )[-1]
def test_L2(inceptionv1):
    """Gradient ascent should make progress on the default L2 objective."""
    # With no arguments, objectives.L2() targets the input by default.
    obj = objectives.L2()
    assert_gradient_ascent(obj, inceptionv1)
def make_regularization(L1=0.0, L2=0.0, TV=0.0):
    """Build a combined (negated) regularization objective.

    Args:
        L1: weight of the L1-norm penalty.
        L2: weight of the L2-norm penalty.
        TV: weight of the total-variation penalty.

    Returns:
        An objective equal to ``-(L1*L1_norm + L2*L2_norm + TV*total_variation)``,
        suitable for adding to a maximization objective.

    Note: the parameter names ``L1``/``L2``/``TV`` shadow no builtins but do
    shadow the objective names; they are kept because they are part of the
    public keyword interface.
    """
    # Fix: the L1-weighted term previously multiplied objectives.L2(),
    # so the L1 coefficient silently applied a second L2 penalty.
    return (
        -L1 * objectives.L1()
        - L2 * objectives.L2()
        - TV * objectives.total_variation()
    )