def kill_app(cls):
    """Ask the front end to send a shutdown request to shut down the server."""
    # This message triggers a shutdown request from the front end.
    if not cls._is_silent:
        print("Shutting down Web App")
    cls._send_to_client("shutdown", {})
    MainLoop.kill_main_loop()
    print("Shutting down Board Listener")
    # Wait for the main-loop thread to finish before exiting.
    if AppManager._main_loop_thread is not None:
        AppManager._main_loop_thread.join()
    exit(1)
def run():
    name = 'colored-mnist'
    epochs = 200
    subdir = name + "-" + time.strftime("%Y%m%d-%H%M%S")
    if not os.path.isdir(subdir):
        os.mkdir(subdir)

    bs = 150
    data_train = CaptionedMNIST(banned=[np.random.randint(0, 10) for i in xrange(12)],
                                dataset='train', num=50000, bs=bs)
    data_valid = CaptionedMNIST(banned=[np.random.randint(0, 10) for i in xrange(12)],
                                dataset='valid', num=10000, bs=bs)

    train_stream = DataStream.default_stream(
        data_train, iteration_scheme=SequentialScheme(data_train.num_examples, bs))
    valid_stream = DataStream.default_stream(
        data_valid, iteration_scheme=SequentialScheme(data_valid.num_examples, bs))

    img_height, img_width = (60, 60)

    x = T.matrix('features')
    #x.tag.test_value = np.random.rand(bs, 60*60).astype('float32')
    y = T.lmatrix('captions')
    #y.tag.test_value = np.random.rand(bs, 12).astype(int)
    mask = T.lmatrix('mask')
    #mask.tag.test_value = np.ones((bs, 12)).astype(int)

    # Model hyperparameters
    K = 29
    lang_N = 14
    N = 32
    read_size = 8
    write_size = 8
    m = 256
    gen_dim = 300
    infer_dim = 300
    z_dim = 150
    l = 512

    model = ImageModel(bs, K, lang_N, N, read_size, write_size, m, gen_dim,
                       infer_dim, z_dim, l, image_size=60 * 60, cinit=-10,
                       channels=3)
    model._inputs = [x, y, mask]

    kl, log_recons, log_likelihood, c = model.train(x, y, mask)
    kl.name = 'kl'
    log_recons.name = 'log_recons'
    log_likelihood.name = 'log_likelihood'
    c.name = 'c'
    model._outputs = [kl, log_recons, log_likelihood, c]

    params = model.params

    from solvers.RMSProp import RMSProp as solver
    lr = theano.shared(np.asarray(0.001).astype(theano.config.floatX))
    updates = solver(log_likelihood, params, lr=lr)
    model._updates = updates

    logger.info('Compiling sample function')
    model.build_sample_function(y, mask)
    logger.info('Compiled sample function')

    # ============= TRAIN =========
    plots = [['train_kl', 'valid_kl'],
             ['train_log_recons', 'valid_log_recons'],
             ['train_log_likelihood', 'valid_log_likelihood']]

    main_loop = MainLoop(
        model, train_stream,
        [FinishAfter(epochs),
         Track(variables=['kl', 'log_recons', 'log_likelihood'], prefix='train'),
         #TrackBest(variables=['kl'], prefix='train'),
         DataStreamTrack(valid_stream, ['kl', 'log_recons', 'log_likelihood'],
                         prefix='valid'),
         SampleSentences(subdir, bs, 60, 60),
         DropLearningRate(lr, 110, 0.00001),
         Plot(name, plots, 'http://nameless-wave-6526.herokuapp.com/'),
         SaveModel(subdir, name + '.model'),
         TimeProfile(),
         Printing()])
    main_loop.run()
def run():
    name = 'coco-nopeep'
    epochs = 200
    subdir = name + "-" + time.strftime("%Y%m%d-%H%M%S")
    if not os.path.isdir(subdir):
        os.mkdir(subdir)

    bs = 200
    data_train = MSCoco(dataset='train', num=82611, bs=bs)
    data_valid = MSCoco(dataset='val', num=4989, bs=bs)

    train_stream = DataStream.default_stream(
        data_train,
        iteration_scheme=SequentialScheme(data_train.num_examples, bs))
    valid_stream = DataStream.default_stream(
        data_valid,
        iteration_scheme=SequentialScheme(data_valid.num_examples, bs))

    img_height, img_width = (32, 32)

    x = T.matrix('features')
    #x.tag.test_value = np.random.rand(bs, 3*32*32).astype('float32')
    y = T.lmatrix('captions')
    #y.tag.test_value = np.random.rand(bs, 57).astype(int)
    mask = T.lmatrix('mask')
    #mask.tag.test_value = np.ones((bs, 57)).astype(int)

    # Model hyperparameters (K: vocabulary size, lang_N: caption length)
    K = 25323
    lang_N = 57
    N = 32
    read_size = 9
    write_size = 9
    m = 256
    gen_dim = 550
    infer_dim = 550
    z_dim = 275
    l = 512

    model = ImageModel(bs, K, lang_N, N, read_size, write_size, m, gen_dim,
                       infer_dim, z_dim, l, image_size=32 * 32, channels=3,
                       cinit=0.0)
    model._inputs = [x, y, mask]

    kl, log_recons, log_likelihood, c = model.train(x, y, mask)
    kl.name = 'kl'
    log_recons.name = 'log_recons'
    log_likelihood.name = 'log_likelihood'
    c.name = 'c'
    model._outputs = [kl, log_recons, log_likelihood, c]

    params = model.params

    lr = theano.shared(np.asarray(0.001).astype(theano.config.floatX))

    # RMSProp with L2 gradient-norm clipping, written out by hand instead of
    # using solvers.RMSProp.
    grads = T.grad(log_likelihood, params)
    his = [theano.shared(p.get_value() * 0) for p in params]
    threshold = 10.0
    decay = 0.9
    updates = OrderedDict()
    for p, ph, g in zip(params, his, grads):
        # Rescale the gradient if its L2 norm exceeds the threshold.
        l2_norm = T.sqrt(T.sqr(g).sum())
        scale = T.switch(l2_norm < threshold, 1, threshold / l2_norm)
        # (the original reused `m` here, shadowing the hyperparameter above)
        grad = scale * g
        # Running average of squared gradients.
        ph_n = decay * ph + (1 - decay) * grad ** 2
        updates[ph] = ph_n
        updates[p] = p - (lr / T.sqrt(ph_n + 1e-6)) * grad
    model._updates = updates

    logger.info('Compiling sample function')
    model.build_sample_function(y, mask)
    logger.info('Compiled sample function')

    # ============= TRAIN =========
    plots = [['train_kl', 'valid_kl'],
             ['train_log_recons', 'valid_log_recons'],
             ['train_log_likelihood', 'valid_log_likelihood']]

    main_loop = MainLoop(
        model, train_stream,
        [FinishAfter(epochs),
         Track(variables=['kl', 'log_recons', 'log_likelihood'], prefix='train'),
         #TrackBest(variables=['kl'], prefix='train'),
         DataStreamTrack(valid_stream, ['kl', 'log_recons', 'log_likelihood'],
                         prefix='valid'),
         SampleSentences(subdir, bs, 32, 32),
         DropLearningRate(lr, 25, 0.0001),
         Plot(name, plots, 'http://nameless-wave-6526.herokuapp.com/'),
         SaveModel(subdir, name + '.model'),
         TimeProfile(),
         Printing()])
    main_loop.run()
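# For reference: the hand-written update above is plain RMSProp with L2
# gradient-norm clipping. A minimal NumPy sketch of the same arithmetic
# (an illustration only, not part of the training graph; the function name
# and signature are mine, not the repo's):
import numpy as np


def rmsprop_clipped_step(p, hist, g, lr=0.001, decay=0.9, threshold=10.0,
                         eps=1e-6):
    """One parameter update mirroring the symbolic loop above."""
    l2_norm = np.sqrt((g ** 2).sum())
    if l2_norm >= threshold:
        # Rescale so the gradient's L2 norm never exceeds the threshold.
        g = g * (threshold / l2_norm)
    # Exponential moving average of squared gradients.
    hist = decay * hist + (1 - decay) * g ** 2
    # Step size adapted by the root of the accumulated second moment.
    p = p - (lr / np.sqrt(hist + eps)) * g
    return p, hist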
# Set up the display, camera, and counter
display = PyBadgeDisplay(status_lights)
camera = Camera()
counter = MAndMCounter()


def take_picture_and_count_mandms():
    # Show a message
    display.show_taking_picture_message()
    # Take the picture
    buffer = camera.take_picture()
    # Show a counting message
    display.show_counting_message()
    # Count the M&Ms
    count = counter.count_mandms(buffer)
    # Show the count of M&Ms found
    display.show_found_message(count)


# Set up the button polling loop
main_loop = MainLoop()
main_loop.on_button_a = take_picture_and_count_mandms

# Start the loop
main_loop.start_loop()
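# For context: a MainLoop like the one above typically polls the button state
# each tick and fires the assigned callback on a press edge. A minimal sketch
# of that pattern (assumed behavior, not the actual library; the class and
# parameter names are mine):
class PollingLoop(object):
    def __init__(self, read_button_a, tick_delay=0.05):
        self.on_button_a = None
        self._read_a = read_button_a  # callable returning True while pressed
        self._tick_delay = tick_delay

    def start_loop(self):
        import time
        was_pressed = False
        while True:
            pressed = self._read_a()
            if pressed and not was_pressed and self.on_button_a:
                self.on_button_a()  # fire once per press edge, not per tick
            was_pressed = pressed
            time.sleep(self._tick_delay)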
def run():
    report = file('report-hmdb-tdd.txt', 'w')
    max_time = 200
    configs = []
    cc = create_config

    for d in ['1', '2', '3']:
        configs.append(cc('tdd-max-pool-h-4000 ' + d, max_time, 4000,
                          'hmdb-tdd-1.hdf5',
                          {'method': 'max', 'hidden_size': 4000},
                          'hidden_2_layer_model', 0.0001))
        configs.append(cc('tdd-mean-pool-h-4000 ' + d, max_time, 4000,
                          'hmdb-tdd.hdf5',
                          {'method': 'mean', 'hidden_size': 4000},
                          'hidden_2_layer_model', 0.0001))
        configs.append(cc('tdd-sum-pool-h-4000 ' + d, max_time, 4000,
                          'hmdb-tdd.hdf5',
                          {'method': 'sum', 'hidden_size': 4000},
                          'hidden_2_layer_model', 0.0005))
        configs.append(cc('tdd-spyramid-1-h-1000', max_time, 4000,
                          'hmdb-tdd.hdf5',
                          {'levels': 1, 'hidden_size': 1000},
                          'temporal_pyramid_model'))
        configs.append(cc('tdd-spyramid-4-h-4000 ' + d, max_time, 4000,
                          '/ssd2/hmdb/hmdb-tdd.hdf5',
                          {'levels': 4, 'hidden_size': 4000},
                          'temporal_pyramid_model', 0.0001))

    for d in ['1', '2', '3']:
        for model in ['temporal_learned_model']:
            s = ' split=' + d  # was `s = s + ...`, a NameError on first use
            for num_f in [3]:
                configs.append(cc('tdd-pyramid-1-N-' + str(num_f) + '-h-1000' + s,
                                  max_time, 4000, 'hmdb-tdd.hdf5',
                                  {'levels': 1, 'hidden_size': 1000, 'N': num_f},
                                  model, 0.05))

    for config in configs:
        name = config['name']
        epochs = 250
        subdir = name + "-" + time.strftime("%Y%m%d-%H%M%S")
        if not os.path.isdir(subdir):
            os.mkdir(subdir)

        bs = 100
        max_time = config['max_time']
        feature_dim = config['feature_dim']

        from uniform_dataset import UniformDataset
        data_train = UniformDataset(bs=bs, filename=config['filename'],
                                    which_sets=['train'],
                                    sources=['features', 'time_mask', 'labels'])
        data_test = UniformDataset(bs=bs, filename=config['filename'],
                                   which_sets=['test'],
                                   sources=['features', 'time_mask', 'labels'])
        train_stream = DataStream.default_stream(
            data_train,
            iteration_scheme=SequentialScheme(data_train.num_examples, bs))
        test_stream = DataStream.default_stream(
            data_test,
            iteration_scheme=SequentialScheme(data_test.num_examples, bs))

        x = T.tensor3('features')
        time_mask = T.wmatrix('time_mask')
        y = T.imatrix('labels')

        mod = importlib.import_module(config['model'])
        classes = 51
        model = mod.TemporalModel([x, time_mask, y], bs, max_time, classes,
                                  feature_dim, **config['model_kwargs'])

        prob, pred, loss, error, acc = model.run(x, time_mask, y)
        prob.name = 'prob'
        acc.name = 'acc'
        pred.name = 'pred'
        loss.name = 'loss'
        error.name = 'error'
        model._outputs = [prob, pred, loss, error, acc]

        params = model.params
        # from solvers.sgd import SGD as solver
        from solvers.RMSProp import RMSProp as solver
        updates = solver(loss, params, lr=config['lr'], clipnorm=10.0)
        # g, sigma, d look like Gaussian-attention filter parameters; their
        # updates are collapsed to a mean, kept as a length-1 vector.
        for i, u in enumerate(updates):
            if u[0].name in ('g', 'sigma', 'd'):
                updates[i] = (u[0], T.mean(u[1]).dimshuffle(['x']))
        model._updates = updates

        # ============= TRAIN =========
        plots = [['train_loss', 'test_loss'], ['train_acc', 'test_acc']]

        main_loop = MainLoop(
            model, train_stream,
            [FinishAfter(epochs),
             Track(variables=['loss', 'error', 'acc'], prefix='train'),
             DataStreamTrack(test_stream, ['loss', 'error', 'acc'],
                             prefix='test', best_method=[min, min, max]),
             #SaveModel(subdir, name+'.model'),
             TimeProfile(),
             Report(os.path.join(subdir, 'report.txt'), name=name),
             Printing()])
        main_loop.run()

        config['best_acc'] = main_loop.log.current_row['best_test_acc']
        print >> report, config['name'], 'best test acc', config['best_acc']
        report.flush()

    print '-' * 79
    print 'FINAL REPORT'
    print '-' * 79
    for config in configs:
        print config['name'], 'best test acc', config['best_acc']
arg_parser.add_argument('--docs-root', '-r', type=pathlib.Path, required=True)
arg_parser.add_argument('--port', '-p', type=int, default=8080)
arg_parser.add_argument('--addr', '-i', type=str, default='127.0.0.1')
arg_parser.add_argument('--workers', '-w', type=int, default=4)
arg_parser.add_argument('--debug', '-d', action='store_true')
args = arg_parser.parse_args()

logging.basicConfig(
    format='[%(asctime)s] %(process)d:%(threadName)s %(levelname).1s - %(message)s',
    datefmt='%Y.%m.%d %H:%M:%S',
    level=logging.DEBUG if args.debug else logging.INFO)

logging.info('started master')
worker_pids = []
for worker in range(args.workers):
    pid = os.fork()
    if pid:
        # in master: remember the child and keep forking
        worker_pids.append(pid)
    else:
        # in worker: serve requests, then exit hard so the worker never
        # falls through to the master's waitpid loop below
        logging.info('started worker')
        serversock = init_serversocket(args.addr, args.port, BACKLOG)
        loop = MainLoop(serversock, args.docs_root)
        loop.run()
        os._exit(0)

for pid in worker_pids:
    os.waitpid(pid, 0)
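# The snippet assumes an init_serversocket helper that is not shown. A plausible
# sketch (my reconstruction, not the original): since each worker binds after
# fork(), SO_REUSEPORT (Linux 3.9+) is needed for every worker to listen on the
# same address; with a single pre-fork bind, SO_REUSEADDR alone would suffice.
import socket


def init_serversocket(addr, port, backlog):
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    if hasattr(socket, 'SO_REUSEPORT'):
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
    sock.bind((addr, port))
    sock.listen(backlog)
    return sock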
def run():
    report = file('report-hmdb-tdd-binary.txt', 'w')
    max_time = 200
    configs = []
    cc = create_config

    for d in ['1', '2', '3']:
        configs.append(cc('tdd-max-pool-h-4000 ' + d, max_time, 4000,
                          'hmdb-tdd-1.hdf5',
                          {'method': 'max', 'hidden_size': 4000},
                          'baseline_binary_model', 0.01))
        configs.append(cc('tdd-mean-pool-h-4000 ' + d, max_time, 4000,
                          'hmdb-tdd.hdf5',
                          {'method': 'mean', 'hidden_size': 4000},
                          'baseline_binary_model', 0.0001))
        configs.append(cc('tdd-sum-pool-h-4000 ' + d, max_time, 4000,
                          'hmdb-tdd.hdf5',
                          {'method': 'sum', 'hidden_size': 4000},
                          'baseline_binary_model', 0.0005))
        #configs.append(cc('tdd-spyramid-1-h-1000', max_time, 4000, 'hmdb-tdd.hdf5',
        #                  {'levels': 1, 'hidden_size': 1000}, 'temporal_pyramid_model'))
        #configs.append(cc('tdd-spyramid-4-h-4000 ' + d, max_time, 4000, 'hmdb-tdd.hdf5',
        #                  {'levels': 3, 'hidden_size': 4000}, 'temporal_pyramid_binary_model', 0.01))

    # for d in ['1', '2', '3']:
    for model in ['binary_learned_model']:  # , 'temporal_random_model']
        # s = s + ' split=' + d  -- stale: `d` comes from the commented-out loop
        for num_f in [3]:
            configs.append(cc('plot-attention-', max_time, 4000,
                              'hmdb-tdd.hdf5',
                              {'levels': 6, 'hidden_size': 4000, 'N': num_f},
                              model, 0.005))

    for config in configs:
        name = config['name'] + sys.argv[1]
        epochs = 150
        subdir = name + "-" + time.strftime("%Y%m%d-%H%M%S")
        if not os.path.isdir(subdir):
            os.mkdir(subdir)

        bs = 64
        max_time = config['max_time']
        feature_dim = config['feature_dim']

        from uniform_dataset import UniformDataset
        data_train = UniformDataset(bs=bs, filename=config['filename'],
                                    which_sets=['train'],
                                    sources=['features', 'time_mask', 'labels'])
        data_test = UniformDataset(bs=bs, filename=config['filename'],
                                   which_sets=['test'],
                                   sources=['features', 'time_mask', 'labels'])
        train_stream = DataStream.default_stream(
            data_train,
            iteration_scheme=SequentialScheme(data_train.num_examples, bs))
        test_stream = DataStream.default_stream(
            data_test,
            iteration_scheme=SequentialScheme(data_test.num_examples, bs))

        x = T.tensor3('features')
        time_mask = T.wmatrix('time_mask')
        y = T.imatrix('labels')

        mod = importlib.import_module(config['model'])
        models = []
        b_model = None
        classes = eval(sys.argv[1])  # e.g. "[1, 2, 3]"; one binary model per class
        for clas in classes:
            model = mod.TemporalModel([x, time_mask, y], bs, max_time, clas,
                                      feature_dim, **config['model_kwargs'])
            models.append(model)
            # The first model collects the outputs and updates of all of them.
            if not b_model:
                b_model = model
                b_model._outputs = []
                b_model._updates = []

            prob, loss, (tp, tn, fp, fn) = model.run(x, time_mask, y)
            prob.name = 'prob_' + str(clas)
            loss.name = 'loss_' + str(clas)
            tp.name = 'tp_' + str(clas)
            tn.name = 'tn_' + str(clas)
            fp.name = 'fp_' + str(clas)
            fn.name = 'fn_' + str(clas)
            b_model._outputs += [prob, loss, tp, tn, fp, fn]

            params = model.params
            from solvers.RMSProp import RMSProp as solver
            updates = solver(loss, params, lr=config['lr'], clipnorm=10.0)
            for i, u in enumerate(updates):
                if u[0].name is None:
                    continue
                # ('shhigma.' was a typo for 'sigma.')
                if ('g.' in u[0].name or 'sigma.' in u[0].name
                        or 'd.' in u[0].name):
                    updates[i] = (u[0], T.mean(u[1]).dimshuffle(['x']))
            b_model._updates += updates

        # ============= TRAIN =========
        tc = classes
        # Per-class tracked variables: summed confusion counts plus recall and
        # precision derived from them after each epoch. float() guards against
        # Python 2 integer division in the ratios.
        var = [['loss_' + str(i),
                ('tp_' + str(i), 'sum'), ('tn_' + str(i), 'sum'),
                ('fp_' + str(i), 'sum'), ('fn_' + str(i), 'sum'),
                ('recall_' + str(i), 'after', 'tp_' + str(i), 'fn_' + str(i),
                 lambda x, y: float(x) / (x + y)),
                ('prec_' + str(i), 'after', 'tp_' + str(i), 'fp_' + str(i),
                 lambda x, y: float(x) / (x + y))]
               for i in tc]
        var = [item for sublist in var for item in sublist]
        bm = [[min, max, max, min, min, max, max] for i in tc]
        bm = [item for sublist in bm for item in sublist]

        main_loop = MainLoop(
            b_model, train_stream,
            [FinishAfter(epochs),
             Track(variables=var, prefix='train'),
             DataStreamTrack(test_stream, var, prefix='test', best_method=bm),
             TimeProfile(),
             SaveAfter(models),
             Report(os.path.join(subdir, 'report.txt'), name=name),
             Printing()])
        main_loop.run()

        config['best_prec'] = main_loop.log.current_row['best_test_prec']
        print >> report, config['name'], 'best test prec', config['best_prec']
        report.flush()

    print '-' * 79
    print 'FINAL REPORT'
    print '-' * 79
    for config in configs:
        print config['name'], 'best test prec', config['best_prec']
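# The tracker entries above accumulate confusion counts per batch ('sum') and
# derive the ratios once per epoch ('after'). A plain-Python sketch of those
# two reducers (assumed semantics; the function names are mine):
def recall(tp, fn):
    # fraction of actual positives recovered; e.g. tp=30, fn=10 -> 0.75
    return float(tp) / (tp + fn) if (tp + fn) else 0.0


def precision(tp, fp):
    # fraction of predicted positives that are correct; e.g. tp=30, fp=5 -> ~0.857
    return float(tp) / (tp + fp) if (tp + fp) else 0.0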
import traceback

from display.drawer import CursesScreen
from main_loop import MainLoop

# Set up the core components
# get display
game_window = CursesScreen()
# get input
game_input = game_window.get_input
# get sound
# get others

# set up the main loop at 60 FPS
main = MainLoop(game_window, game_input, 60)

try:
    main.run()
except Exception as e:
    # Restore the terminal before printing, otherwise curses leaves the
    # screen in an unusable state.
    game_window.close()
    traceback.print_exc()
    print str(e)
    exit(1)

game_window.close()