# Per-hidden-unit weight images; vmin/vmax symmetric about zero so positive
# and negative weights share one color scale.
# NOTE(review): `weights` and `m` are defined earlier in the file — not
# visible in this chunk.
_weights = [[glumpy.Image(w, vmin=-m, vmax=m) for w in ws] for ws in weights]

# Window sized for a rows x (cols + 1) grid of 100px tiles plus a 4px border.
W = 100 * (opts.cols + 1) + 4
H = 100 * opts.rows + 4
win = glumpy.Window(W, H)

loaded = False  # set True once every digit class has more than 10 samples
updates = -1
batches = 0.
recent = collections.deque(maxlen=20)
errors = [collections.deque(maxlen=20) for _ in range(10)]  # one window per digit
testset = [None] * 10
trainset = dict((i, []) for i in range(10))  # digit label -> list of images
loader = idx_reader.iterimages(opts.labels, opts.images, False)

# Capture the classes from the `rbm` module *before* the name `rbm` is
# rebound to a model instance below. The original code evaluated
# `rbm.Trainer(rbm, ...)` *after* the rebinding, which looked Trainer up on
# the instance instead of the module and would raise AttributeError.
RBM = rbm.RBM
Trainer = rbm.Trainer

if opts.model:
    # Resume from a pickled model; close the handle deterministically.
    with open(opts.model, 'rb') as handle:
        rbm = pickle.load(handle)
else:
    rbm = RBM(28 * 28, opts.rows * opts.cols, opts.binary)

trainer = Trainer(
    rbm,
    momentum=opts.momentum,
    target_sparsity=opts.sparsity,
)


def get_pixels():
    # Flip `loaded` once we have cached >10 examples of every digit.
    # NOTE(review): this chunk appears truncated here; the rest of the
    # function is not visible.
    global loaded
    if not loaded and numpy.all([len(trainset[t]) > 10 for t in range(10)]):
        loaded = True
# One frame per row for the visible (input/reconstruction) images, placed in
# the right-most column of an (n+1) x n figure grid.
visible_frames = [
    fig.add_figure(args.n + 1, args.n, position=(args.n, r)).add_frame(aspect=1)
    for r in range(args.n)]

# One frame per hidden unit for its weight image, filling the n x n grid.
weight_frames = [
    fig.add_figure(args.n + 1, args.n, position=(c, r)).add_frame(aspect=1)
    for r in range(args.n) for c in range(args.n)]

loaded = False  # set True once every digit class has more than 50 samples
recent = collections.deque(maxlen=20)
errors = [collections.deque(maxlen=20) for _ in range(10)]  # one window per digit
trainset = dict((i, []) for i in range(10))  # digit label -> list of images
loader = idx_reader.iterimages(args.images, args.labels, True)

# Select the convolutional or dense model class.
Model = lmj.rbm.Convolutional if args.conv else lmj.rbm.RBM

# Explicit if/else instead of the fragile `x and pickle.load(...) or Model(...)`
# idiom (which silently falls through when the loaded value is falsy), and a
# `with` block so the model file handle is closed deterministically.
if args.model:
    with open(args.model, 'rb') as handle:
        rbm = pickle.load(handle)
else:
    rbm = Model(28 * 28, args.n * args.n, not args.gaussian)

Trainer = lmj.rbm.ConvolutionalTrainer if args.conv else lmj.rbm.Trainer
trainer = Trainer(rbm,
                  l2=args.l2,
                  momentum=args.momentum,
                  target_sparsity=args.sparsity)


def get_pixels():
    # Flip `loaded` once we have cached >50 examples of every digit.
    # NOTE(review): this chunk appears truncated here; the rest of the
    # function is not visible.
    global loaded
    if not loaded and all(len(v) > 50 for v in trainset.itervalues()):
        loaded = True
# glumpy images for the hidden-unit activations and the per-unit weights.
# NOTE(review): `_hiddens` and `_weights` are defined earlier in the file —
# not visible in this chunk.
hiddens = glumpy.image.Image(_hiddens)
weights = [glumpy.image.Image(w) for w in _weights]

# One frame per row for the visible (input/reconstruction) images, placed in
# the right-most column of an (n+1) x n figure grid.
visible_frames = [
    fig.add_figure(args.n + 1, args.n, position=(args.n, r)).add_frame(aspect=1)
    for r in range(args.n)]

# One frame per hidden unit for its weight image, filling the n x n grid.
weight_frames = [
    fig.add_figure(args.n + 1, args.n, position=(c, r)).add_frame(aspect=1)
    for r in range(args.n) for c in range(args.n)]

loaded = False  # set True once every digit class has more than 50 samples
recent = collections.deque(maxlen=20)
errors = [collections.deque(maxlen=20) for _ in range(10)]  # one window per digit
trainset = dict((i, []) for i in range(10))  # digit label -> list of images
loader = idx_reader.iterimages(args.images, args.labels, True)

# Select the convolutional or dense model class.
Model = lmj.rbm.Convolutional if args.conv else lmj.rbm.RBM

# Explicit if/else instead of the fragile `x and pickle.load(...) or Model(...)`
# idiom (which silently falls through when the loaded value is falsy), and a
# `with` block so the model file handle is closed deterministically.
if args.model:
    with open(args.model, 'rb') as handle:
        rbm = pickle.load(handle)
else:
    rbm = Model(28 * 28, args.n * args.n, not args.gaussian)

Trainer = lmj.rbm.ConvolutionalTrainer if args.conv else lmj.rbm.Trainer
trainer = Trainer(rbm,
                  l2=args.l2,
                  momentum=args.momentum,
                  target_sparsity=args.sparsity)


def get_pixels():
    # Flip `loaded` once we have cached >50 examples of every digit.
    global loaded
    if not loaded and all(len(v) > 50 for v in trainset.itervalues()):
        loaded = True
    if loaded:
        # NOTE(review): this chunk appears truncated here; the body after
        # choosing a random digit `t` is not visible.
        t = rng.randint(10)
# glumpy images for the hidden activations and the per-unit weight tiles;
# weight vmin/vmax symmetric about zero so positive and negative weights
# share one color scale.
# NOTE(review): `hiddens`, `weights` and `m` are defined earlier in the
# file — not visible in this chunk.
_hiddens = [glumpy.Image(h) for h in hiddens]
_weights = [[glumpy.Image(w, vmin=-m, vmax=m) for w in ws] for ws in weights]

# Window sized for a rows x (cols + 1) grid of 100px tiles plus a 4px border.
W = 100 * (opts.cols + 1) + 4
H = 100 * opts.rows + 4
win = glumpy.Window(W, H)

loaded = False  # set True once every digit class has more than 10 samples
updates = -1
batches = 0.0
recent = collections.deque(maxlen=20)
errors = [collections.deque(maxlen=20) for _ in range(10)]  # one window per digit
testset = [None] * 10
trainset = dict((i, []) for i in range(10))  # digit label -> list of images
loader = idx_reader.iterimages(opts.labels, opts.images, False)

# Capture the classes from the `rbm` module *before* the name `rbm` is
# rebound to a model instance below. The original code evaluated
# `rbm.Trainer(rbm, ...)` *after* the rebinding, which looked Trainer up on
# the instance instead of the module and would raise AttributeError.
RBM = rbm.RBM
Trainer = rbm.Trainer

if opts.model:
    # Resume from a pickled model; close the handle deterministically.
    with open(opts.model, "rb") as handle:
        rbm = pickle.load(handle)
else:
    rbm = RBM(28 * 28, opts.rows * opts.cols, opts.binary)

trainer = Trainer(rbm, momentum=opts.momentum, target_sparsity=opts.sparsity)


def get_pixels():
    # Flip `loaded` once we have cached >10 examples of every digit.
    global loaded
    if not loaded and numpy.all([len(trainset[t]) > 10 for t in range(10)]):
        loaded = True
    if loaded and rng.random() < 0.99:
        # Usually sample a cached image of a random digit ...
        t = rng.randint(10)
        pixels = trainset[t][rng.randint(len(trainset[t]))]
    else:
        # ... occasionally (or until the cache fills) pull a fresh one.
        t, pixels = loader.next()
    # NOTE(review): this chunk appears truncated here; the function
    # presumably returns (t, pixels), but that is not visible.