import numpy
import cv2

from pylearn2.gui.patch_viewer import make_viewer

# rgb2gray and gray2rgb are assumed to be helper functions defined elsewhere
# in this module.


def visualize(imgs, prefix, is_color=False):
    raster = []
    count = 0
    if is_color is True and imgs.shape[3] % 3 != 0:
        # Truncate so the number of filters is a multiple of three channels.
        filts = int(numpy.floor(imgs.shape[3] / 3)) * 3
        imgs = imgs[:, :, :, 0:filts]
    for i in xrange(imgs.shape[3]):
        curr_image = imgs[:, :, :, i]
        flat = curr_image.reshape(
            (curr_image.shape[0],
             curr_image.shape[1] * curr_image.shape[2]))
        if is_color is True:
            raster.append(rgb2gray(numpy.array(
                make_viewer(flat, is_color=False).get_img())))
            if count == 2:
                # Merge the last three grayscale rasters into one RGB image.
                cv2.imwrite(prefix + str(i) + ".jpg",
                            gray2rgb(raster[i - 2], raster[i - 1], raster[i]))
                count = -1
        else:
            raster.append(numpy.array(
                make_viewer(flat, is_color=False).get_img()))
            cv2.imwrite(prefix + str(i) + ".jpg", raster[i])
        count = count + 1
    return raster
def show_weights(model_path="dae"):
    i = 1
    models = []
    weights = []
    Xs = []
    Ys = []
    encode_functs = []
    decode_functs = []
    # Load each pretrained layer and compile its encode/decode functions.
    while os.path.isfile(layerpath(i, model_path)):
        models.append(serial.load(layerpath(i, model_path)))
        I = models[i - 1].get_input_space().make_theano_batch()
        E = models[i - 1].encode(I)
        encode_functs.append(theano.function([I], E))
        H = models[i - 1].get_output_space().make_theano_batch()
        D = models[i - 1].decode(H)
        decode_functs.append(theano.function([H], D))
        weights.append(models[i - 1].get_weights())
        i += 1
    # Decode a one-hot activation of each third-layer unit back to pixel space.
    l1_acts = np.zeros([weights[2].shape[1], weights[0].shape[0]])
    for k in range(len(weights[2].T)):
        feature = np.zeros(len(weights[2].T))
        feature[k] = 1.0
        l3_acts = decode_functs[2](
            np.atleast_2d(feature.astype(np.dtype(np.float32))))
        l2_acts = decode_functs[1](l3_acts)
        l1_acts[k] = decode_functs[0](l2_acts)
    pv = patch_viewer.make_viewer(l1_acts, patch_shape=[28, 28])
    pv.save("mnist_l3_weights_decoder.png")
def show_sample_pairs(generator, Noise_Dim, data_obj, filename):
    if data_obj.pitch_scale:
        pitch_max = 1.0
    else:
        pitch_max = 108.0
    grid_shape = None
    # Draw 100 samples from the generator.
    input_noise = np.random.uniform(-1.0, 1.0, (100, Noise_Dim))
    samples = generator.predict(input_noise)
    grid_shape = (10, 20)
    # Interleave each sample with its nearest neighbor in the real data.
    matched = np.zeros((samples.shape[0] * 2, samples.shape[1]))
    X = np.concatenate((data_obj.X_train, data_obj.X_val, data_obj.X_test),
                       axis=0)
    X = X.reshape(X.shape[0], X.shape[1] * X.shape[2])
    for i in xrange(samples.shape[0]):
        matched[2 * i, :] = samples[i, :].copy()
        dists = np.square(X - samples[i, :]).sum(axis=1)
        j = np.argmin(dists)
        matched[2 * i + 1, :] = X[j, :]
    samples = matched
    is_color = False
    samples = patch_quantize_01(patch_thresholding(samples / pitch_max))
    samples = samples * 2.0 - 1.0
    viewer = make_viewer(samples, grid_shape=grid_shape,
                         patch_shape=(4, samples.shape[-1] / 4),
                         is_color=is_color, rescale=False)
    print "Saving %s ..." % filename
    viewer.save(filename)
def gen_weight_patches(weights, save_filename=None):
    s0, s1, s2, s3, s4 = weights.shape
    weights = weights.reshape(s0 * s1 * s2, s3, s4, 1)
    # not sure this is needed
    weights = weights - weights.min()
    weights = weights / weights.max() * 255
    # this works, but blends the weights
    # weights = scipy.ndimage.zoom(
    #     weights, [1, 15, 15, 1], order=3, mode='nearest')
    out = np.zeros((s0 * s1 * s2, s3 * 10, s4 * 10, 1))
    for i in range(s0 * s1 * s2):
        weight = weights[i, :, :, 0]
        out[i, :, :, 0] = imresize(weight, (s3 * 10, s4 * 10),
                                   interp='nearest')
    # print weights.shape
    viewer = make_viewer(out, grid_shape=(s0 * s2, s3))
    if save_filename:
        viewer.save(save_filename)
    else:
        viewer.show()
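# Minimal usage sketch for gen_weight_patches (hypothetical inputs, not from
# the original script): a random 5-D weight tensor of 8 maps x 2 groups x 3
# channels of 5x5 kernels, written to a placeholder filename.
import numpy as np

dummy_weights = np.random.randn(8, 2, 3, 5, 5)
gen_weight_patches(dummy_weights, save_filename="weight_patches.png")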
def main(model_path, data_path, split, **kwargs):
    model = serial.load(model_path)
    raw_dataset = get_test_data()
    X = get_features(data_path, split, False)
    assert X.shape[0] == 8000
    size = 100
    for start in xrange(0, X.shape[0] - size, size):
        y = raw_dataset.y[start:start + size]
        pred_y = model.predict(X[start:start + size, :])
        wrong_mask = y != pred_y
        raw_X = raw_dataset.X[start:start + size, :]
        pv = make_viewer(raw_X / 127.5, rescale=False, is_color=True,
                         activation=wrong_mask)
        pv.show()
        right = 0
        for i in xrange(y.shape[0]):
            if y[i] == pred_y[i]:
                right += 1
                print str(start + i) + ': correct (' + \
                    raw_dataset.class_names[y[i] - 1] + ')'
            else:
                print str(start + i) + ': mistook ' + \
                    raw_dataset.class_names[y[i] - 1] + ' for ' + \
                    raw_dataset.class_names[pred_y[i] - 1]
        print 'accuracy this batch : ', float(right) / float(size)
        x = raw_input()
        if x == 'q':
            break
def save_as_patches(activations, shape, out_path="patches_out.png",
                    rescale=True):
    pv = patch_viewer.make_viewer(activations, patch_shape=shape,
                                  rescale=rescale)
    pv.save(out_path)
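# Usage sketch for save_as_patches, assuming the activations arrive as a
# design matrix whose rows are flattened 28x28 patches; the random data and
# output path below are placeholders.
import numpy as np

acts = np.random.randn(100, 28 * 28)
save_as_patches(acts, (28, 28), out_path="activations_grid.png")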
def get_mat_product_viewer(W1, W2):
    """
    Show the matrix product of the weights of 2 layers.

    Parameters
    ----------
    W1 : ndarray
        Weight matrix of the first hidden layer.
    W2 : ndarray
        Weight matrix of the second hidden layer.

    Returns
    -------
    pv : PatchViewer
        Viewer displaying the rows of (W1 W2)^T as patches.
    """
    prod = np.dot(W1, W2)
    pv = make_viewer(prod.T)
    return pv
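# Hedged usage sketch for get_mat_product_viewer: the model file and the
# assumption that the first two hidden layers expose get_weights() mirror the
# two-layer script further down, but are placeholders here.
from pylearn2.utils import serial

mlp = serial.load("mlp.pkl")  # hypothetical model path
layer_1, layer_2 = mlp.hidden_layers[0:2]
pv = get_mat_product_viewer(layer_1.get_weights(), layer_2.get_weights())
pv.save("layer2_filters_in_input_space.png")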
from optparse import OptionParser

import numpy
import pylab as pl

from theano import function
import theano.tensor as T
import theano
from pylearn2.gui.patch_viewer import make_viewer
from pylearn2.gui.patch_viewer import PatchViewer
from pylearn2.datasets.dense_design_matrix import DefaultViewConverter
from pylearn2.datasets.dense_design_matrix import DenseDesignMatrix
from pylearn2.utils import serial

parser = OptionParser()
parser.add_option('-m', '--model', action='store', type='string', dest='path')
parser.add_option('--width', action='store', type='int', dest='width')
parser.add_option('--height', action='store', type='int', dest='height')
parser.add_option('--channels', action='store', type='int', dest='chans')
(opts, args) = parser.parse_args()


def get_dims(nf):
    num_rows = numpy.floor(numpy.sqrt(nf))
    return (num_rows, numpy.ceil(nf / num_rows))

# load model and retrieve parameters
model = serial.load(opts.path)
samples = model.neg_ev.get_value()
viewer = make_viewer(samples, (5, 10), (28, 28))
pl.imshow(viewer.get_img())
pl.show()
ip.layer_schedule = [0] * 1
obs = ip.infer(X)
pH, = obs['H_hat']
sample_from_posterior = function([X], theano_rng.binomial(
    size=pH.shape, p=pH, n=1, dtype=pH.dtype))

m = 100
X = dataset.get_batch_design(m)
H = sample_from_posterior(X)

assert len(dbm.rbms) == 1

while True:
    V = dataset.adjust_for_viewer(X)
    viewer = make_viewer(V, is_color=X.shape[1] % 3 == 0)
    viewer.show()
    print 'Waiting...'
    x = raw_input()
    if x == 'q':
        break
    print 'Running...'
    num_updates = 1
    try:
        num_updates = int(x)
    except:
        pass
import numpy as np

from pylearn2.gui.patch_viewer import PatchViewer
from pylearn2.gui.patch_viewer import make_viewer
from pylearn2.config import yaml_parse
from pylearn2.utils import serial


def main(model_path, out_prefix=None):
    model = serial.load(model_path)
    rbm1, rbm2 = model.rbms[0:2]
    W1 = rbm1.hidden_layers[0].get_weights()
    W2 = rbm2.hidden_layers[0].get_weights()
    prod = np.dot(W1, W2)
    pv = make_viewer(prod.T)
    if out_prefix is None:
        pv.show()
    else:
        pv.save(out_prefix + "_prod.png")
    print 'Sorting so largest-norm layer 2 weights are plotted at the top'
    norms = np.square(W2).sum(axis=0)
    idxs = [elem[1] for elem in sorted(zip(-norms, range(norms.shape[0])))]
    new = W2.copy()
    for i in xrange(len(idxs)):
        new[:, i] = W2[:, idxs[i]]
    W2 = new
from pylearn2.utils import serial
import sys

_, model_path = sys.argv
model = serial.load(model_path)

from pylearn2.gui.patch_viewer import make_viewer

space = model.generator.get_output_space()
total_dimension = space.get_total_dimension()

import numpy as np

num_colors = 1
#if total_dimension % 3 == 0:
#    num_colors = 3
w = int(np.sqrt(total_dimension / num_colors))

from pylearn2.space import Conv2DSpace

desired_space = Conv2DSpace(shape=[w, w],
                            num_channels=num_colors,
                            axes=('b', 0, 1, 'c'))
samples = space.format_as(batch=model.generator.sample(100),
                          space=desired_space).eval()
print (samples.min(), samples.mean(), samples.max())
viewer = make_viewer(samples * 2.0 - 1.0)
viewer.show()
i = 1
models = []
weights = []
Xs = []
Ys = []
encode_functs = []
decode_functs = []
while os.path.isfile(layerpath(i)):
    models.append(serial.load(layerpath(i)))
    I = models[i - 1].get_input_space().make_theano_batch()
    E = models[i - 1].encode(I)
    encode_functs.append(theano.function([I], E))
    H = models[i - 1].get_output_space().make_theano_batch()
    D = models[i - 1].decode(H)
    decode_functs.append(theano.function([H], D))
    weights.append(models[i - 1].get_weights())
    i += 1
l1_acts = np.zeros([weights[1].shape[1], weights[0].shape[0]])
for k in range(len(weights[1].T)):
    feature = np.zeros(len(weights[1].T))
    feature[k] = 1
    l2_acts = decode_functs[1](
        np.atleast_2d(feature.astype(np.dtype(np.float32))))
    l1_acts[k] = decode_functs[0](l2_acts)
pv = patch_viewer.make_viewer(l1_acts, patch_shape=[28, 28])
pv.save("mnist_l2_weights_decoder.png")
#scipy.misc.imsave('mnist7_l1_w0.png',l1_act.reshape([28,28]))
temp = numpy.random.randint(0, 2, size=model.neg_g.get_value().shape)
model.neg_g.set_value(temp.astype('float32'))
temp = numpy.random.randint(0, 2, size=model.neg_h.get_value().shape)
model.neg_h.set_value(temp.astype('float32'))
v_std = numpy.sqrt(1. / softplus(model.beta.get_value()))
temp = numpy.random.normal(0, v_std, size=model.neg_v.get_value().shape)
model.neg_v.set_value(temp.astype('float32'))

# Burnin of Markov chain.
for i in xrange(opts.burnin):
    sample_neg_func()

# Start actual sampling.
samples = numpy.zeros((opts.batch_size * opts.n, model.n_v))
indices = numpy.arange(0, len(samples), opts.n)
idx = numpy.random.permutation(model.batch_size)[:opts.batch_size]
for t in xrange(opts.n):
    samples[indices, :] = model.neg_ev.get_value()[idx]
    # skip in between plotted samples
    print t
    for i in xrange(opts.skip):
        sample_neg_func()
    indices += 1

img = make_viewer(samples, (opts.batch_size, opts.n),
                  (opts.height, opts.width), is_color=opts.color)
img.show()
def get_weights_report(model_path=None, model=None, rescale='individual', border=False, norm_sort=False, dataset=None): """ Returns a PatchViewer displaying a grid of filter weights Parameters ---------- model_path : str Filepath of the model to make the report on. rescale : str A string specifying how to rescale the filter images: \ 'individual' (default): scale each filter so that it \ uses as much as possible of the dynamic range \ of the display under the constraint that 0 \ is gray and no value gets clipped \ 'global' : scale the whole ensemble of weights \ 'none' : don't rescale dataset: pylearn2.datasets.dataset.Dataset Dataset object to do view conversion for displaying the weights. If \ not provided one will be loaded from the model's dataset_yaml_src. Returns ------- WRITEME """ if model is None: print 'making weights report' print 'loading model' model = serial.load(model_path) print 'loading done' else: assert model_path is None assert model is not None if rescale == 'none': global_rescale = False patch_rescale = False elif rescale == 'global': global_rescale = True patch_rescale = False elif rescale == 'individual': global_rescale = False patch_rescale = True else: raise ValueError('rescale=' + rescale + ", must be 'none', 'global', or 'individual'") if hasattr(model, 'layers'): if isinstance(model.layers[0], mlp_models.PretrainedLayer): model = model.layers[0].layer_content if isinstance(model, dict): #assume this was a saved matlab dictionary del model['__version__'] del model['__header__'] del model['__globals__'] keys = [key for key in model \ if hasattr(model[key], 'ndim') and model[key].ndim == 2] if len(keys) > 2: key = None while key not in keys: logger.info('Which is the weights?') for key in keys: logger.info('\t{0}'.format(key)) key = input() else: key, = keys weights = model[key] norms = np.sqrt(np.square(weights).sum(axis=1)) print 'min norm: ',norms.min() print 'mean norm: ',norms.mean() print 'max norm: ',norms.max() return patch_viewer.make_viewer(weights, is_color=weights.shape[1] % 3 == 0) weights_view = None W = None try: weights_view = model.get_weights_topo() h = weights_view.shape[0] except NotImplementedError: if dataset is None: print 'loading dataset...' control.push_load_data(False) dataset = yaml_parse.load(model.dataset_yaml_src) control.pop_load_data() print '...done' try: W = model.get_weights()
def get_weights_report(model_path=None, model=None, rescale='individual', border=False, norm_sort=False, dataset=None): """ Returns a PatchViewer displaying a grid of filter weights Parameters: model_path: the filepath of the model to make the report on. rescale: a string specifying how to rescale the filter images 'individual' (default): scale each filter so that it uses as much as possible of the dynamic range of the display under the constraint that 0 is gray and no value gets clipped 'global' : scale the whole ensemble of weights 'none' : don't rescale dataset: a Dataset object to do view conversion for displaying the weights. if not provided one will be loaded from the model's dataset_yaml_src """ if model is None: print 'making weights report' print 'loading model' model = serial.load(model_path) print 'loading done' else: assert model_path is None assert model is not None if rescale == 'none': global_rescale = False patch_rescale = False elif rescale == 'global': global_rescale = True patch_rescale = False elif rescale == 'individual': global_rescale = False patch_rescale = True else: raise ValueError('rescale=' + rescale + ", must be 'none', 'global', or 'individual'") if isinstance(model, dict): #assume this was a saved matlab dictionary del model['__version__'] del model['__header__'] del model['__globals__'] weights, = model.values() norms = np.sqrt(np.square(weights).sum(axis=1)) print 'min norm: ', norms.min() print 'mean norm: ', norms.mean() print 'max norm: ', norms.max() return patch_viewer.make_viewer(weights, is_color=weights.shape[1] % 3 == 0) weights_view = None W = None try: weights_view = model.get_weights_topo() h = weights_view.shape[0] except Exception, e: if dataset is None: print 'loading dataset...' control.push_load_data(False) dataset = yaml_parse.load(model.dataset_yaml_src) control.pop_load_data() print '...done' if hasattr(model, 'get_weights'): W = model.get_weights() if 'weightsShared' in dir(model): W = model.weightsShared.get_value() if 'W' in dir(model): if hasattr(model.W, '__array__'): warnings.warn( 'model.W is an ndarray; I can figure out how to display this but that seems like a sign of a bad bug' ) W = model.W else: W = model.W.get_value() has_D = False if 'D' in dir(model): has_D = True D = model.D if 'enc_weights_shared' in dir(model): W = model.enc_weights_shared.get_value() if W is None: raise AttributeError( 'model does not have a variable with a name like "W", "weights", etc that pylearn2 recognizes' )
def get_weights_report(model_path = None, model = None, rescale = 'individual', border = False, norm_sort = False, dataset = None): """ Returns a PatchViewer displaying a grid of filter weights Parameters: model_path: the filepath of the model to make the report on. rescale: a string specifying how to rescale the filter images 'individual' (default): scale each filter so that it uses as much as possible of the dynamic range of the display under the constraint that 0 is gray and no value gets clipped 'global' : scale the whole ensemble of weights 'none' : don't rescale dataset: a Dataset object to do view conversion for displaying the weights. if not provided one will be loaded from the model's dataset_yaml_src """ if model is None: print 'making weights report' print 'loading model' model = serial.load(model_path) print 'loading done' else: assert model_path is None if rescale == 'none': global_rescale = False patch_rescale = False elif rescale == 'global': global_rescale = True patch_rescale = False elif rescale == 'individual': global_rescale = False patch_rescale = True else: raise ValueError('rescale='+rescale+", must be 'none', 'global', or 'individual'") if isinstance(model, dict): #assume this was a saved matlab dictionary del model['__version__'] del model['__header__'] del model['__globals__'] weights ,= model.values() return patch_viewer.make_viewer(weights, is_color = weights.shape[1] % 3 == 0) if dataset is None: print 'loading dataset...' control.push_load_data(False) dataset = yaml_parse.load(model.dataset_yaml_src) control.pop_load_data() print '...done' W = None if hasattr(model,'get_weights'): W = model.get_weights() if 'weightsShared' in dir(model): W = model.weightsShared.get_value() if 'W' in dir(model): if hasattr(model.W,'__array__'): warnings.warn('model.W is an ndarray; I can figure out how to display this but that seems like a sign of a bad bug') W = model.W else: W = model.W.get_value() has_D = False if 'D' in dir(model): has_D = True D = model.D if 'enc_weights_shared' in dir(model): W = model.enc_weights_shared.get_value() if W is None: raise AttributeError('model does not have a variable with a name like "W", "weights", etc that pylearn2 recognizes') if len(W.shape) == 2: if hasattr(model,'get_weights_format'): weights_format = model.get_weights_format() if hasattr(model, 'weights_format'): weights_format = model.weights_format assert hasattr(weights_format,'__iter__') assert len(weights_format) == 2 assert weights_format[0] in ['v','h'] assert weights_format[1] in ['v','h'] assert weights_format[0] != weights_format[1] if weights_format[0] == 'v': W = W.T h = W.shape[0] if norm_sort: norms = np.sqrt(1e-8+np.square(W).sum(axis=1)) norm_prop = norms / norms.max() hr = int(np.ceil(np.sqrt(h))) hc = hr if 'hidShape' in dir(model): hr, hc = model.hidShape pv = patch_viewer.PatchViewer(grid_shape=(hr,hc), patch_shape=dataset.weights_view_shape()[0:2], is_color = dataset.weights_view_shape()[2] == 3) weights_view = dataset.get_weights_view(W) assert weights_view.shape[0] == h #print 'weights_view shape '+str(weights_view.shape) if global_rescale: weights_view /= np.abs(weights_view).max() if norm_sort: print 'sorting weights by decreasing norm' idx = sorted( range(h), key = lambda l : - norm_prop[l] ) else: idx = range(h) if border: act = 0 else: act = None for i in range(0,h): patch = weights_view[idx[i],...] 
pv.add_patch( patch, rescale = patch_rescale, activation = act) else: e = model.weights d = model.dec_weights_shared.value h = e.shape[0] if len(e.shape) == 8: raise Exception("get_weights_report doesn't support tiled convolution yet, use the show_weights8 app") if e.shape[4] != 1: raise Exception('weights shape: '+str(e.shape)) shape = e.shape[1:3] dur = e.shape[3] show_dec = id(e) != id(d) pv = patch_viewer.PatchViewer( grid_shape = ((1+show_dec)*h,dur), patch_shape=shape) for i in range(0,h): pv.addVid( e[i,:,:,:,0], rescale = rescale) if show_dec: pv.addVid( d[i,:,:,:,0], rescale = rescale) print 'smallest enc weight magnitude: '+str(np.abs(W).min()) print 'mean enc weight magnitude: '+str(np.abs(W).mean()) print 'max enc weight magnitude: '+str(np.abs(W).max()) norms = np.sqrt(np.square(W).sum(axis=1)) assert norms.shape == (h,) print 'min norm: ',norms.min() print 'mean norm: ',norms.mean() print 'max norm: ',norms.max() return pv
nsample.set_value(temp.astype(floatX))

# Burnin of Markov chain.
for i in xrange(opts.burnin):
    model.sample_neg_func()

# Start actual sampling.
samples = numpy.zeros((opts.batch_size * opts.n, model.n_u[0]))
indices = numpy.arange(0, len(samples), opts.n)
energies = numpy.zeros(opts.batch_size * opts.n)
for t in xrange(opts.n):
    samples[indices, :] = e_nsamples0.get_value()
    # skip in between plotted samples
    for i in xrange(opts.skip):
        sample_neg_func()
    energies[indices] = compute_energy()
    indices += 1

# transform energies between 0 and 1
energies -= energies.min()
energies /= energies.max()

import pdb; pdb.set_trace()

img = make_viewer(samples, (opts.batch_size, opts.n),
                  (opts.width, opts.height),
                  activation=energies, is_color=opts.color)
img.show()
import sys
from pylearn2.utils import get_dataless_dataset
from pylearn2.utils import serial
import numpy as np
from pylearn2.gui.patch_viewer import make_viewer

ignore, model_path = sys.argv
model = serial.load(model_path)
dataset = get_dataless_dataset(model)
biases = model.visible_layer.get_biases()
biases = np.zeros((1, biases.shape[0])) + biases
print 'values: ', (biases.min(), biases.mean(), biases.max())
pv = make_viewer(biases)
pv.show()
##############
# PLOT FILTERS
##############
def get_dims(n):
    num_rows = numpy.floor(numpy.sqrt(n))
    return (numpy.int(num_rows), numpy.int(numpy.ceil(n / num_rows)))

nblocks = model.depth - 1
W = [model.W[i].get_value().T for i in xrange(1, model.depth)]
max_filters = max([len(Wi) for Wi in W])
print 'max_filters = ', max_filters

block_viewer = make_viewer(W[0], get_dims(max_filters),
                           (opts.height, opts.width))
layer0_image = copy.copy(block_viewer.image)

main_viewer = PatchViewer(
    (1, 2),
    (block_viewer.image.shape[0], block_viewer.image.shape[1]),
    is_color=opts.color,
    pad=(5, 5))

topo_shape = [opts.height, opts.width, opts.chans]
view_converter = DefaultViewConverter(topo_shape)

for k in xrange(500):
    main_viewer.add_patch(layer0_image[:, :, 0] - 0.5)
    # positive weights
def layerpath(i):
    return model_path + "_l" + str(i) + ".pkl"

i = 1
models = []
weights = []
Xs = []
Ys = []
encode_functs = []
decode_functs = []
while os.path.isfile(layerpath(i)):
    models.append(serial.load(layerpath(i)))
    I = models[i - 1].get_input_space().make_theano_batch()
    E = models[i - 1].encode(I)
    encode_functs.append(theano.function([I], E))
    H = models[i - 1].get_output_space().make_theano_batch()
    D = models[i - 1].decode(H)
    decode_functs.append(theano.function([H], D))
    weights.append(models[i - 1].get_weights())
    i += 1
l1_acts = np.zeros(weights[0].T.shape)
for k in range(len(weights[1])):
    feature = weights[1][k]
    #feature = weights[1].T[k]
    l1_acts[k] = decode_functs[0](np.atleast_2d(feature))
pv = patch_viewer.make_viewer(l1_acts, patch_shape=[28, 28])
pv.save("mnist_l2_weightsmat.png")
#scipy.misc.imsave('mnist7_l1_w0.png',l1_act.reshape([28,28]))
def get_weights_report(model_path = None, model = None, rescale = 'individual', border = False, norm_sort = False, dataset = None): """ Returns a PatchViewer displaying a grid of filter weights Parameters: model_path: the filepath of the model to make the report on. rescale: a string specifying how to rescale the filter images 'individual' (default): scale each filter so that it uses as much as possible of the dynamic range of the display under the constraint that 0 is gray and no value gets clipped 'global' : scale the whole ensemble of weights 'none' : don't rescale dataset: a Dataset object to do view conversion for displaying the weights. if not provided one will be loaded from the model's dataset_yaml_src """ if model is None: print 'making weights report' print 'loading model' model = serial.load(model_path) print 'loading done' else: assert model_path is None assert model is not None if rescale == 'none': global_rescale = False patch_rescale = False elif rescale == 'global': global_rescale = True patch_rescale = False elif rescale == 'individual': global_rescale = False patch_rescale = True else: raise ValueError('rescale='+rescale+", must be 'none', 'global', or 'individual'") if isinstance(model, dict): #assume this was a saved matlab dictionary del model['__version__'] del model['__header__'] del model['__globals__'] weights ,= model.values() norms = np.sqrt(np.square(weights).sum(axis=1)) print 'min norm: ',norms.min() print 'mean norm: ',norms.mean() print 'max norm: ',norms.max() return patch_viewer.make_viewer(weights, is_color = weights.shape[1] % 3 == 0) weights_view = None W = None W0,W1,_ = model.get_weights() G = model.groups weights_format = ('v', 'g', 'h') W1 = W1.T W0 = W0.T h1 = W1.shape[0] h0 = W0.shape[0] print W0.shape, W1.shape weights_view1 = dataset.get_weights_view(W1) weights_view0 = dataset.get_weights_view(W0) hr1 = int(np.ceil(np.sqrt(h1))) hc1 = hr1 pv1 = patch_viewer.PatchViewer(grid_shape=(hr1,hc1), patch_shape=weights_view1.shape[1:3], is_color = weights_view1.shape[-1] == 3) hr0 = G.shape[0] hc0 = G.sum(1).max() pv0 = patch_viewer.PatchViewer(grid_shape=(hr0,hc0), patch_shape=weights_view0.shape[1:3], is_color = weights_view0.shape[-1] == 3) null_patch = np.zeros(weights_view0.shape[1:3]) if border: act = 0 else: act = None for i in range(0,h1): patch = weights_view1[i,...] pv1.add_patch( patch, rescale = patch_rescale, activation = act) for i in range(0,hr0): weights_view = weights_view0[i,...] g = 0 for j in range(0, G.shape[1]): if G[i,j] == 1: patch = weights_view[j,...] pv0.add_patch( patch, rescale = patch_rescale, activation = act) g += 1 assert g <= hc0 for g in range(g,hc0): pv0.add_patch( null_patch, rescale = patch_rescale, activation = act) return pv0, pv1
from pylearn2.models.dbm import load_matlab_dbm

model = load_matlab_dbm('after_joint_train.mat', num_chains=rows * cols)
#model = load_matlab_dbm('after_backprop.mat', num_chains = rows * cols)
dbm = model

from theano import function
import theano.tensor as T

sample_func = function([], updates=model.get_sampling_updates())
render_func = function(
    [], T.nnet.sigmoid(T.dot(dbm.H_chains[0], dbm.W[0].T) + dbm.bias_vis))

from pylearn2.datasets.mnist import MNIST

dataset = MNIST(which_set='train')
X = dataset.get_batch_design(rows * cols)
model.V_chains.set_value(X)

for i in xrange(200):
    print i
    sample_func()

from pylearn2.gui.patch_viewer import make_viewer

pv = make_viewer(dataset.adjust_for_viewer(render_func()))
pv.show()
_, model_path = sys.argv

from pylearn2.utils import serial

model = serial.load(model_path)
d = model.discriminator

import gc
del model
gc.collect()

from pylearn2.utils import sharedX

X = sharedX(d.get_input_space().get_origin_batch(1))
obj = -d.fprop(X).sum()

from pylearn2.optimization.batch_gradient_descent import BatchGradientDescent as BGD
import theano.tensor as T


def norm_constraint(updates):
    assert X in updates
    updates[X] = updates[X] / (1e-7 + T.sqrt(T.sqr(X).sum()))

opt = BGD(objective=obj, params=[X], param_constrainers=[norm_constraint],
          conjugate=True, reset_conjugate=False, reset_alpha=False,
          line_search_mode='exhaustive', verbose=3, max_iter=20)

results = []
import numpy as np
rng = np.random.RandomState([1, 2, 3])
for i in xrange(10):
    X.set_value(rng.randn(*X.get_value().shape).astype(X.dtype) / 10.)
    opt.minimize()
    Xv = X.dimshuffle(3, 1, 2, 0).eval()
    results.append(Xv)
X = np.concatenate(results, axis=0)

from pylearn2.gui.patch_viewer import make_viewer

v = make_viewer(X)
v.show()
from pylearn2.utils import serial

kmeans = serial.load('kmeans.pkl')
mu = kmeans.mu
print (mu.min(), mu.mean(), mu.max())
# Map the centroids from [0, 1] to [-1, 1] for display.
mu -= .5
mu *= 2

from pylearn2.gui.patch_viewer import make_viewer

pv = make_viewer(mu)
pv.show()
def get_weights_report(model_path = None, model = None, rescale = 'individual', border = False, norm_sort = False, dataset = None): """ Returns a PatchViewer displaying a grid of filter weights Parameters: model_path: the filepath of the model to make the report on. rescale: a string specifying how to rescale the filter images 'individual' (default): scale each filter so that it uses as much as possible of the dynamic range of the display under the constraint that 0 is gray and no value gets clipped 'global' : scale the whole ensemble of weights 'none' : don't rescale dataset: a Dataset object to do view conversion for displaying the weights. if not provided one will be loaded from the model's dataset_yaml_src """ if model is None: print 'making weights report' print 'loading model' model = serial.load(model_path) print 'loading done' else: assert model_path is None assert model is not None if rescale == 'none': global_rescale = False patch_rescale = False elif rescale == 'global': global_rescale = True patch_rescale = False elif rescale == 'individual': global_rescale = False patch_rescale = True else: raise ValueError('rescale='+rescale+", must be 'none', 'global', or 'individual'") if isinstance(model, dict): #assume this was a saved matlab dictionary del model['__version__'] del model['__header__'] del model['__globals__'] weights ,= model.values() norms = np.sqrt(np.square(weights).sum(axis=1)) print 'min norm: ',norms.min() print 'mean norm: ',norms.mean() print 'max norm: ',norms.max() return patch_viewer.make_viewer(weights, is_color = weights.shape[1] % 3 == 0) weights_view = None W = None try: weights_view = model.get_weights_topo() h = weights_view.shape[0] except NotImplementedError: if dataset is None: print 'loading dataset...' control.push_load_data(False) dataset = yaml_parse.load(model.dataset_yaml_src) control.pop_load_data() print '...done' try: W = model.get_weights() except AttributeError, e: raise AttributeError(""" Encountered an AttributeError while trying to call get_weights on a model. This probably means you need to implement get_weights for this model class, but look at the original exception to be sure. If this is an older model class, it may have weights stored as weightsShared, etc. Original exception: """+str(e))
model = load_matlab_dbm('joint_trained_dbm_interm.mat',
                        num_chains=rows * cols)
dbm = model

from theano import function
import theano.tensor as T

sample_func = function([], updates=model.get_sampling_updates())
render_func = function(
    [], T.nnet.sigmoid(T.dot(dbm.H_chains[0], dbm.W[0].T) + dbm.bias_vis))

from pylearn2.datasets.mnist import MNIST

dataset = MNIST(which_set='train')
X = dataset.get_batch_design(rows * cols)
model.V_chains.set_value(X)

for i in xrange(200):
    print i
    sample_func()

from pylearn2.gui.patch_viewer import make_viewer

pv = make_viewer(dataset.adjust_for_viewer(render_func()))
pv.show()
r = 6
c = 6
dataset = CIFAR10(which_set='train', one_hot=True, gcn=55.)
ten4 = dataset.get_batch_topo(m)

from pylearn2.utils import sharedX

ten4th = sharedX(ten4)
X = cifar10neighbs(ten4, (r, c))

from theano import function

X = function([], X)()
print X.shape

from pylearn2.gui.patch_viewer import make_viewer
from pylearn2.utils.image import show

stride = (32 - r + 1) * (32 - c + 1)
for i in xrange(m):
    ten4v = ten4[i, :, :, :]
    ten4v -= ten4v.min()
    ten4v /= ten4v.max()
    show(ten4v)
    patch_viewer = make_viewer(X[i * stride:(i + 1) * stride], is_color=True)
    patch_viewer.show()
    print 'waiting...'
    x = raw_input()
results = infer_func(x)
[qg, qh, qs] = [r[0] for r in results]

# given y, get the "meta" g-filters
import pdb; pdb.set_trace()
g_s = numpy.dot(qh, Wh) * qs
h_s = numpy.dot(qg, Wg) * qs
for i in xrange(model.n_g):
    gfilt[i, :] = numpy.dot(Wg[i, :] * g_s, Wv.T)
for j in xrange(model.n_h):
    hfilt[j, :] = numpy.dot(Wh[j, :] * h_s, Wv.T)

########### PLOTTING ###############
x_viewer = make_viewer(x, is_color=is_color)
wg_viewer = make_viewer(gfilt, get_dims(model.n_g), (imgw, imgh),
                        is_color=is_color)
wh_viewer = make_viewer(hfilt, get_dims(model.n_h), (imgw, imgh),
                        is_color=is_color)
wv_viewer = make_viewer(Wv.T, get_dims(model.n_s), (imgw, imgh),
                        is_color=is_color)

fig = pl.figure()
pl.subplot(2, 2, 3); pl.axis('off')
pl.title('input')
pl.imshow(x_viewer.image)
pl.subplot(2, 2, 1); pl.axis('off')
pl.title('g-filters')
pl.imshow(wg_viewer.image)
pl.subplot(2, 2, 2); pl.axis('off')
pl.title('h-filters')
pl.imshow(wh_viewer.image)
pl.subplot(2, 2, 4); pl.axis('off')
        ave_V_h: ave_V_h + T.dot(H.T, V),
        ave_V_g: ave_V_g + T.dot(G.T, V),
    })

print 'loading dataset'
from pylearn2.config import yaml_parse
dataset = yaml_parse.load(model.dataset_yaml_src)

batch_size = 100
batches = 50
for i in xrange(batches):
    print 'batch ', i
    X = dataset.get_batch_design(batch_size)
    f(X)

H = ave_V_h.get_value()
S = H * ave_V_s.get_value()
G = ave_V_g.get_value()

from pylearn2.gui.patch_viewer import make_viewer

pv1 = make_viewer(S)
pv1.show()
pv2 = make_viewer(H)
pv2.show()
pv3 = make_viewer(G)
pv3.show()
def get_weights_report(model_path=None, model=None, rescale='individual', border=False, norm_sort=False, dataset=None): """ Returns a PatchViewer displaying a grid of filter weights Parameters ---------- model_path : str Filepath of the model to make the report on. rescale : str A string specifying how to rescale the filter images: - 'individual' (default) : scale each filter so that it uses as much as possible of the dynamic range of the display under the constraint that 0 is gray and no value gets clipped - 'global' : scale the whole ensemble of weights - 'none' : don't rescale dataset : pylearn2.datasets.dataset.Dataset Dataset object to do view conversion for displaying the weights. If not provided one will be loaded from the model's dataset_yaml_src. Returns ------- WRITEME """ print type(dataset) print type(model) if model is None: logger.info('making weights report') logger.info('loading model') model = serial.load(model_path) logger.info('loading done') else: assert model_path is None assert model is not None if rescale == 'none': global_rescale = False patch_rescale = False elif rescale == 'global': global_rescale = True patch_rescale = False elif rescale == 'individual': global_rescale = False patch_rescale = True else: raise ValueError('rescale=' + rescale + ", must be 'none', 'global', or 'individual'") print "model type: " + str(type(model)) if isinstance(model, dict): #assume this was a saved matlab dictionary del model['__version__'] del model['__header__'] del model['__globals__'] keys = [key for key in model \ if hasattr(model[key], 'ndim') and model[key].ndim == 2] if len(keys) > 2: key = None while key not in keys: logger.info('Which is the weights?') for key in keys: logger.info('\t{0}'.format(key)) key = raw_input() else: key, = keys weights = model[key] norms = np.sqrt(np.square(weights).sum(axis=1)) logger.info('min norm: {0}'.format(norms.min())) logger.info('mean norm: {0}'.format(norms.mean())) logger.info('max norm: {0}'.format(norms.max())) return patch_viewer.make_viewer(weights, is_color=weights.shape[1] % 3 == 0) weights_view = None W = None try: weights_view = model.get_weights_topo() h = weights_view.shape[0] print "h:" + str(h) except NotImplementedError: if dataset is None: logger.info('loading dataset...') control.push_load_data(False) dataset_filename = yaml_parse.load(model.dataset_yaml_src) dataset = serial.load(dataset_filename) control.pop_load_data() logger.info('...done') try: W = model.get_weights() except AttributeError, e: raise AttributeError(""" Encountered an AttributeError while trying to call get_weights on a model. This probably means you need to implement get_weights for this model class, but look at the original exception to be sure. If this is an older model class, it may have weights stored as weightsShared, etc. Original exception: """+str(e))
def show(x, xhat):
    # Interleave originals and reconstructions row by row.
    temp = numpy.zeros((2 * len(x), x.shape[1]))
    temp[::2] = x
    temp[1::2] = xhat
    make_viewer(temp, (10, 20), (28, 28)).show()
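# Usage sketch for show, assuming MNIST-sized data: x and xhat stand in for a
# batch of 100 flattened 28x28 inputs and their reconstructions; random
# arrays are used here only as placeholders.
x = numpy.random.rand(100, 28 * 28)
xhat = numpy.random.rand(100, 28 * 28)
show(x, xhat)  # 10x20 grid, each input displayed next to its reconstruction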
print 'loading dataset'
if cifar10:
    print 'CIFAR10 detected'
    dataset = CIFAR10(which_set="train")
elif cifar100:
    print 'CIFAR100 detected'
    dataset = CIFAR100(which_set='train')
elif stl10:
    print 'STL10 detected'
    dataset = serial.load(
        '${PYLEARN2_DATA_PATH}/stl10/stl10_32x32/train.pkl')

X = dataset.get_design_matrix()[batch_start:batch_start + batch_size, :]
size = np.sqrt(model.nvis / 3)

if cifar10 or cifar100:
    pv1 = make_viewer((X - 127.5) / 127.5, is_color=True, rescale=False)
elif stl10:
    pv1 = make_viewer(X / 127.5, is_color=True, rescale=False)

dataset.set_design_matrix(X)

patchifier = ExtractGridPatches(patch_shape=(size, size),
                                patch_stride=(1, 1))

if size == 8:
    if cifar10:
        pipeline = serial.load(
            '${GOODFELI_TMP}/cifar10_preprocessed_pipeline_2M.pkl')
    elif stl10:
        assert False
elif size == 6:
    if cifar10:
if len(sys.argv) == 2:
    _, model_path = sys.argv
    out_prefix = None
else:
    _, model_path, out_prefix = sys.argv

model = serial.load(model_path)
layer_1, layer_2 = model.hidden_layers[0:2]
W1 = layer_1.get_weights()
W2 = layer_2.get_weights()
print W1.shape
print W2.shape

prod = np.dot(W1, W2)
pv = make_viewer(prod.T)
if out_prefix is None:
    pv.show()
else:
    pv.save(out_prefix + "_prod.png")

print 'Sorting so largest-norm layer 2 weights are plotted at the top'
norms = np.square(W2).sum(axis=0)
idxs = [elem[1] for elem in sorted(zip(-norms, range(norms.shape[0])))]
new = W2.copy()
for i in xrange(len(idxs)):
    new[:, i] = W2[:, idxs[i]]
W2 = new
_, model_path = sys.argv
model = serial.load(model_path)

from pylearn2.gui.patch_viewer import make_viewer

space = model.generator.get_output_space()

from pylearn2.config import yaml_parse
import numpy as np

dataset = yaml_parse.load(model.dataset_yaml_src)
dataset = dataset.get_test_set()
grid_shape = None

from pylearn2.utils import sharedX

X = sharedX(dataset.get_batch_topo(100))
samples, ignore = model.generator.inpainting_sample_and_noise(X)
samples = samples.eval()
total_dimension = space.get_total_dimension()
num_colors = 1
if total_dimension % 3 == 0:
    num_colors = 3
w = int(np.sqrt(total_dimension / num_colors))

from pylearn2.space import Conv2DSpace

desired_space = Conv2DSpace(shape=[w, w],
                            num_channels=num_colors,
                            axes=('b', 0, 1, 'c'))
is_color = samples.shape[-1] == 3
print (samples.min(), samples.mean(), samples.max())
# Hack for detecting MNIST [0, 1] values. Otherwise we assume centered images
if samples.min() > 0:
    samples = samples * 2.0 - 1.0
viewer = make_viewer(samples, grid_shape=grid_shape, is_color=is_color)
viewer.show()
        continue
    else:
        print "examining this element"
        final = elem
    try:
        print "Trying get_weights topo"
        topo = final.get_weights_topo()
        print "It worked"
        success = True
    except Exception:
        pass
    if success:
        print "Making the viewer and showing"
        make_viewer(topo).show()
        quit()
    try:
        print "Trying get_weights"
        weights = final.get_weights()
        print "It worked"
        success = True
    except NotImplementedError:
        i -= 1  # skip over SpaceConverter, etc.

print "Out of the while loop"
print "weights shape ", weights.shape
viewer = make_viewer(weights,
                     is_color=weights.shape[1] % 3 == 0 and
                     weights.shape[1] != 48 * 48)
print "image shape ", viewer.image.shape
def get_weights_report(model_path=None,
                       model=None,
                       rescale='individual',
                       border=False,
                       norm_sort=False,
                       dataset=None):
    """
    Returns a PatchViewer displaying a grid of filter weights

    Parameters
    ----------
    model_path : str
        Filepath of the model to make the report on.
    rescale : str
        A string specifying how to rescale the filter images:
            - 'individual' (default) : scale each filter so that it
              uses as much as possible of the dynamic range
              of the display under the constraint that 0
              is gray and no value gets clipped
            - 'global' : scale the whole ensemble of weights
            - 'none' : don't rescale
    dataset : pylearn2.datasets.dataset.Dataset
        Dataset object to do view conversion for displaying the weights. If
        not provided one will be loaded from the model's dataset_yaml_src.

    Returns
    -------
    WRITEME
    """

    if model is None:
        logger.info('making weights report')
        logger.info('loading model')
        model = serial.load(model_path)
        logger.info('loading done')
    else:
        assert model_path is None
    assert model is not None

    if rescale == 'none':
        global_rescale = False
        patch_rescale = False
    elif rescale == 'global':
        global_rescale = True
        patch_rescale = False
    elif rescale == 'individual':
        global_rescale = False
        patch_rescale = True
    else:
        raise ValueError('rescale=' + rescale +
                         ", must be 'none', 'global', or 'individual'")

    if isinstance(model, dict):
        #assume this was a saved matlab dictionary
        del model['__version__']
        del model['__header__']
        del model['__globals__']
        keys = [key for key in model
                if hasattr(model[key], 'ndim') and model[key].ndim == 2]
        if len(keys) > 2:
            key = None
            while key not in keys:
                logger.info('Which is the weights?')
                for key in keys:
                    logger.info('\t{0}'.format(key))
                key = input()
        else:
            key, = keys
        weights = model[key]

        norms = np.sqrt(np.square(weights).sum(axis=1))
        logger.info('min norm: {0}'.format(norms.min()))
        logger.info('mean norm: {0}'.format(norms.mean()))
        logger.info('max norm: {0}'.format(norms.max()))

        return patch_viewer.make_viewer(weights,
                                        is_color=weights.shape[1] % 3 == 0)

    weights_view = None
    W = None

    try:
        weights_view = model.get_weights_topo()
        h = weights_view.shape[0]
    except NotImplementedError:

        if dataset is None:
            logger.info('loading dataset...')
            control.push_load_data(False)
            dataset = yaml_parse.load(model.dataset_yaml_src)
            control.pop_load_data()
            logger.info('...done')

        try:
            W = model.get_weights()
        except AttributeError as e:
            reraise_as(AttributeError("""
Encountered an AttributeError while trying to call get_weights on a model.
This probably means you need to implement get_weights for this model class,
but look at the original exception to be sure.
If this is an older model class, it may have weights stored as weightsShared,
etc.
Original exception: """ + str(e)))

    if W is None and weights_view is None:
        raise ValueError("model doesn't support any weights interfaces")

    if weights_view is None:
        weights_format = model.get_weights_format()
        assert hasattr(weights_format, '__iter__')
        assert len(weights_format) == 2
        assert weights_format[0] in ['v', 'h']
        assert weights_format[1] in ['v', 'h']
        assert weights_format[0] != weights_format[1]

        if weights_format[0] == 'v':
            W = W.T
        h = W.shape[0]

        if norm_sort:
            norms = np.sqrt(1e-8 + np.square(W).sum(axis=1))
            norm_prop = norms / norms.max()

        weights_view = dataset.get_weights_view(W)
        assert weights_view.shape[0] == h

    try:
        hr, hc = model.get_weights_view_shape()
    except NotImplementedError:
        hr = int(np.ceil(np.sqrt(h)))
        hc = hr
        if 'hidShape' in dir(model):
            hr, hc = model.hidShape

    pv = patch_viewer.PatchViewer(grid_shape=(hr, hc),
                                  patch_shape=weights_view.shape[1:3],
                                  is_color=weights_view.shape[-1] == 3)

    if global_rescale:
        weights_view /= np.abs(weights_view).max()

    if norm_sort:
        logger.info('sorting weights by decreasing norm')
        idx = sorted(range(h), key=lambda l: -norm_prop[l])
    else:
        idx = range(h)

    if border:
        act = 0
    else:
        act = None

    for i in range(0, h):
        patch = weights_view[idx[i], ...]
        pv.add_patch(patch, rescale=patch_rescale, activation=act)

    abs_weights = np.abs(weights_view)
    logger.info('smallest enc weight magnitude: {0}'.format(abs_weights.min()))
    logger.info('mean enc weight magnitude: {0}'.format(abs_weights.mean()))
    logger.info('max enc weight magnitude: {0}'.format(abs_weights.max()))

    if W is not None:
        norms = np.sqrt(np.square(W).sum(axis=1))
        assert norms.shape == (h,)
        logger.info('min norm: {0}'.format(norms.min()))
        logger.info('mean norm: {0}'.format(norms.mean()))
        logger.info('max norm: {0}'.format(norms.max()))

    return pv
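# Minimal usage sketch for get_weights_report; the model path and output
# filename are hypothetical placeholders.
pv = get_weights_report(model_path="mlp_best.pkl", rescale='individual')
pv.save("mlp_best_weights.png")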
        if opts.splitblocks:
            pl.imshow(block_viewer.image, interpolation='nearest')
            pl.axis('off')
            pl.title('Wv - block %i, chan %i' % (bidx, chan_i))
            pl.savefig('filters/filters_chan%i_block%i.png' % (bidx, chan_i))

        chan_viewer.add_patch(block_viewer.image[:, :, viewer_dims] - 0.5)
        block_viewer.clear()

    main_viewer.add_patch(chan_viewer.image[:, :, viewer_dims] - 0.5)
    chan_viewer.clear()

    return copy.copy(main_viewer.image)

viewer_g = make_viewer(wvg, get_dims(model.n_g),
                       (opts.height, opts.width), is_color=True)
viewer_h = make_viewer(wvh, get_dims(model.n_h),
                       (opts.height, opts.width), is_color=True)
w_image = plot(wv)

viewer = PatchViewer((1, 3),
                     (numpy.max((viewer_g.image.shape[0],
                                 viewer_h.image.shape[0],
                                 w_image.shape[0])),
                      numpy.max((viewer_g.image.shape[1],
                                 viewer_h.image.shape[1],
                                 w_image.shape[1]))),
                     is_color=opts.color,
                     pad=(0, 10))

viewer_dims = slice(0, None) if opts.color else 0
viewer.add_patch(viewer_g.image[:, :, viewer_dims] - 0.5)
viewer.add_patch(viewer_h.image[:, :, viewer_dims] - 0.5)
viewer.add_patch(w_image[:, :, viewer_dims] - 0.5)
pl.axis('off')