def __init__(self, dataset_name):
    """Load an image-captioning dataset (Karpathy JSON + VGG features).

    dataset_name: name resolved by config.raw_data_dir() to a directory
        containing 'dataset.json' and 'vgg_feats.mat'.

    Populates self.train / self.val / self.test with data.DataSource
    instances holding caption token groups, image feature vectors, and
    image filenames for each split.
    """
    raw_data_dir = config.raw_data_dir(dataset_name)
    with open(raw_data_dir + '/dataset.json', 'r',
              encoding='utf-8') as captions_f:
        captions_data = json.load(captions_f)['images']
    # Feature matrix is stored one column per image; transpose so that
    # features[i] lines up with captions_data[i].
    features = scipy.io.loadmat(
        raw_data_dir + '/vgg_feats.mat')['feats'].T

    # One bucket of parallel columns per split (previously three
    # copy-pasted dict literals).
    splits = ('train', 'val', 'test')
    raw_dataset = {
        split: {'filenames': [], 'images': [], 'captions': []}
        for split in splits
    }

    for caption_data, image in zip(captions_data, features):
        split = caption_data['split']
        if split == 'restval':
            # 'restval' images belong to none of the three splits.
            continue
        caption_group = [
            caption['tokens'] for caption in caption_data['sentences']
        ]
        #image = image/np.linalg.norm(image)
        raw_dataset[split]['filenames'].append(caption_data['filename'])
        raw_dataset[split]['images'].append(image)
        raw_dataset[split]['captions'].append(caption_group)

    if config.debug:
        # Truncate every column to 500 rows for quick debug runs.
        for split in raw_dataset:
            for column in raw_dataset[split]:
                raw_dataset[split][column] = raw_dataset[split][column][:500]

    # Build the three DataSources uniformly (previously three
    # copy-pasted constructor calls).
    self.train, self.val, self.test = (
        data.DataSource(
            caption_groups=raw_dataset[split]['captions'],
            images=np.array(raw_dataset[split]['images']),
            image_filenames=raw_dataset[split]['filenames'])
        for split in splits)
def Open(self, app, **kwargs):
    """Open the data file named by kwargs['FilePath'] and refresh *app*.

    Loads an allocation summary from the file and pushes it into the
    app's canvas; the frame list is cleared until a frame is selected.
    """
    path = kwargs['FilePath']
    print("Open", path)
    self.__dataSource = data.DataSource(path)
    self.__allocSummary = self.__dataSource.getAllocSummary()
    app.updateCanvas(self.__allocSummary.keys(),
                     self.__allocSummary.values())
    app.updateFrameList([])
def main():
    """Coordinator for multi-process order-embedding training.

    Spawns worker processes that run train(); drives them by pushing
    ("step", None) messages into in_queue and collecting (loss, acc)
    results from out_queue; runs periodic validation; finally saves the
    model and signals all workers to shut down.
    """
    # "spawn" is required so each worker gets a clean CUDA context.
    mp.set_start_method("spawn", force=True)
    args = arg_parse()
    # see test-tube
    #args = hyp_search.hyp_arg_parse()
    if not os.path.exists(os.path.dirname(args.model_path)):
        os.makedirs(os.path.dirname(args.model_path))
    print("Starting {} workers".format(args.n_workers))
    in_queue, out_queue = mp.Queue(), mp.Queue()

    print("Using dataset {}".format(args.dataset))
    # Hyperparameters recorded in the TensorBoard run name.
    record_keys = ["conv_type", "n_layers", "hidden_dim", "margin", "dataset",
        "dataset_type", "max_graph_size", "skip"]
    args_str = ".".join(["{}={}".format(k, v)
        for k, v in sorted(vars(args).items()) if k in record_keys])
    logger = SummaryWriter("log/" + args_str)

    model = build_model(args)
    # Share parameters across worker processes (Hogwild-style updates).
    model.share_memory()

    data_source = data.DataSource(args.dataset)

    workers = []
    for i in range(args.n_workers):
        worker = mp.Process(target=train, args=(args, model, args.dataset,
            in_queue, out_queue))
        worker.start()
        workers.append(worker)

    if args.test:
        validation(args, model, data_source, in_queue, out_queue, logger, 0,
            make_pr_curve=True)
    else:
        batch_n = 0
        for epoch in range(args.n_batches // args.eval_interval):
            # Enqueue one eval-interval's worth of work, then drain the
            # same number of results (order of results is arbitrary).
            for i in range(args.eval_interval):
                in_queue.put(("step", None))
            for i in range(args.eval_interval):
                # msg is always "step" here; only the payload is used.
                msg, params = out_queue.get()
                train_loss, train_acc = params
                print("Batch {}. Loss: {:.4f}. Training acc: {:.4f}".format(
                    batch_n, train_loss, train_acc), end=" \r")
                logger.add_scalar("Loss/train", train_loss, batch_n)
                logger.add_scalar("Accuracy/train", train_acc, batch_n)
                batch_n += 1
            validation(args, model, data_source, in_queue, out_queue, logger,
                batch_n)

    if not args.test:
        print("Saving {}".format(args.model_path))
        torch.save(model.state_dict(), args.model_path)

    # One "done" message per worker, then wait for them to exit.
    for i in range(len(workers)):
        in_queue.put(("done", None))
    for worker in workers:
        worker.join()
def __init__(self, log):
    """Initialize the thermostat UI: data thread, pygame screen, widgets.

    log: shared application logger, handed to every subsystem.
    NOTE(review): heavy side effects here (background thread start,
    pygame/SDL init, hardware controllers) — order is significant.
    """
    self.Log = log
    self.DataSource = data.DataSource(self.Log)
    # self.Temp = self.DataSource.queryCurrentTemps()
    # Sensor readings start empty; dataDaemon fills them in periodically.
    self.Temp = {}
    self.Humidity = {}
    self.InSettings = False
    # Daemon thread so it never blocks interpreter shutdown; polls every
    # DATA_INTERVAL seconds.
    self.DataThread = threading.Thread(target=self.dataDaemon,
        args=(DATA_INTERVAL,), daemon=True)
    self.DataThread.start()
    self.Sleeping = False
    self.LastMovement = time.time()
    if PRODUCTION:
        # Work around for bug in libsdl
        os.environ['SDL_VIDEO_WINDOW_POS'] = "{0},{1}".format(0, 0)
        pygame.init()
        # Borderless window pinned at (0,0) instead of true fullscreen.
        self.Screen = pygame.display.set_mode((0, 0), pygame.NOFRAME)
        pygame.mouse.set_visible(False)
        # self.Screen = pygame.display.set_mode((0, 0), FULLSCREEN)
        # pygame.mouse.set_visible(0)
    else:
        pygame.init()
        self.Screen = pygame.display.set_mode(SCREEN_SIZE)
    self.Clock = pygame.time.Clock()
    self.Background = pygame.image.load(BACKGROUND_IMAGE)
    # Top-right buttons: power, then settings 55px to its left.
    self.PowerButton = widgets.PowerButton((SCREEN_SIZE[0]-55, 5),
        self.handlePower)
    self.SettingsButton = widgets.SettingsButton((SCREEN_SIZE[0] - (55*2),5),
        self.handleSettings)
    self.Font = pygame.font.SysFont("avenir", 18)
    # Hardware control stack: serial Arduino link, temperature control
    # loop, and the settings screen that configures it.
    self.Arduino = control.Arduino(self.Log)
    self.TempController = control.TempControl(self.Log, self.Arduino,
        self.Screen)
    self.Settings = control.Settings(self.Log, self.Screen, self.Arduino,
        self.handleSettings)
    #
    # Sensor Widgets
    #
    self.TimerControl = widgets.TimerControl((250,5),
        self.TempController.handleStart, self.TempController.handleStop)
    # Position will get updated on first render
    self.StartStop = widgets.StartStopButton((250,5), self.TimerControl.start,
        self.TimerControl.stop)
def train(args, model, dataset_name, in_queue, out_queue):
    """Train the order embedding model.

    args: Commandline arguments
    model: shared model (parameters live in shared memory for Hogwild-style
        updates across workers)
    dataset_name: dataset to draw training batches from
    in_queue: input queue to an intersection computation worker
    out_queue: output queue to an intersection computation worker
    """
    opt = optim.Adam(model.parameters(), args.lr)
    data_source = data.DataSource(dataset_name)
    done = False
    # Loop until the coordinator sends "done". With zero workers
    # (single-process mode), stop as soon as the queue runs dry.
    while not done and not (args.n_workers == 0 and in_queue.empty()):
        loaders = data_source.gen_data_loaders(args.batch_size, train=True)
        for batch_target, batch_neg_target, batch_neg_query in zip(*loaders):
            msg, _ = in_queue.get()
            if msg == "done":
                done = True
                break
            # train
            model.train()
            model.zero_grad()
            pos_a, pos_b, neg_a, neg_b = data_source.gen_batch(
                batch_target, batch_neg_target, batch_neg_query, True)
            pos_a = pos_a.to(utils.get_device())
            pos_b = pos_b.to(utils.get_device())
            neg_a = neg_a.to(utils.get_device())
            neg_b = neg_b.to(utils.get_device())
            emb_pos_a, emb_pos_b = model.emb_model(pos_a), model.emb_model(
                pos_b)
            emb_neg_a, emb_neg_b = model.emb_model(neg_a), model.emb_model(
                neg_b)
            emb_as = torch.cat((emb_pos_a, emb_neg_a), dim=0)
            emb_bs = torch.cat((emb_pos_b, emb_neg_b), dim=0)
            # Positive pairs labelled 1, negative pairs 0.
            labels = torch.tensor([1] * pos_a.num_graphs +
                                  [0] * neg_a.num_graphs).to(
                                      utils.get_device())
            pred = model(emb_as, emb_bs)
            loss = model.criterion(pred, labels)
            loss.backward()
            # In test mode gradients are computed but never applied.
            if not args.test:
                opt.step()
            pred = model.predict(pred)
            acc = torch.mean((pred == labels).type(torch.float))
            # FIX: send a plain float (acc.item()), not the live tensor —
            # a (possibly CUDA-backed) tensor pushed through an mp.Queue
            # forces torch's shared-memory pickling and can keep autograd
            # state alive in the consumer process. Matches loss.item().
            out_queue.put(("step", (loss.item(), acc.item())))
class ApiSimpatizantes(object):
    """Falcon resource that serves simpatizante records looked up by cedula.

    NOTE(review): the database connection is a class attribute opened at
    import time and shared by all requests — confirm this is intentional.
    """

    db = data.DataSource()
    db.get_connection()
    db.connect()

    def on_get(self, req, resp, cedula):
        """Handles all GET requests."""
        #origin = req.get_header('Origin')
        normalized = cedula.replace("-", "")
        print(normalized)
        result = self.db.get_simpatizantes(normalized)
        # An unknown cedula yields an empty JSON object, not a 404.
        payload = json.dumps({}) if result is None else result
        resp.set_header('Access-Control-Allow-Origin', '*')
        resp.content_type = 'application/json'
        resp.status = falcon.HTTP_200
        resp.body = payload
def main(dataset, saved_model_path, _config, _log):
    """Evaluate a saved policy on the test split and print its loss."""
    policy = tf.saved_model.load(saved_model_path)

    # The SavedModel exports loss over a flat argument list; rewrap it so
    # callers can keep passing nested structures.
    flat_loss = policy.loss
    policy.loss = lambda *structs: flat_loss(*tf.nest.flatten(structs))

    learner = Learner(policy=policy, **_config['learner'])
    _, test_paths = data.train_test_split(**dataset)

    data_config = dict(
        _config['data'],
        embed_controller=embed.embed_controller_discrete)  # TODO: configure
    source = data.DataSource(test_paths, **data_config)
    manager = train_lib.TrainManager(learner, source, dict(train=False))

    total_steps = 0
    for _ in range(1000):
        # now test
        stats = manager.step()
        train_lib.log_stats(ex, stats, total_steps)
        test_loss = stats['loss'].numpy()
        print(f'test_loss={test_loss:.4f}')
def main():
    """Single-process training / evaluation entry point."""
    args = arg_parse()
    # see test-tube
    #args = hyp_search.hyp_arg_parse()
    model_dir = os.path.dirname(args.model_path)
    if not os.path.exists(model_dir):
        os.makedirs(model_dir)
    print("Starting {} workers".format(args.n_workers))
    print("Using dataset {}".format(args.dataset))

    # Hyperparameters encoded into the TensorBoard run name.
    record_keys = {
        "conv_type", "n_layers", "hidden_dim", "margin", "dataset",
        "dataset_type", "max_graph_size", "skip"
    }
    tags = ["{}={}".format(k, v) for k, v in sorted(vars(args).items())
            if k in record_keys]
    logger = SummaryWriter("log/" + ".".join(tags))

    model = build_model(args)
    data_source = data.DataSource(args.dataset)
    opt = optim.Adam(model.parameters(), args.lr)

    if args.test:
        validation(args, model, data_source, logger, 0, make_pr_curve=True)
        return

    # NOTE(review): batch_n is never advanced here, so every validation
    # logs at step 0 — preserved as-is; confirm whether intentional.
    batch_n = 0
    for epoch in range(args.n_batches // args.eval_interval):
        print("Epoch", epoch)
        train_epoch(args, model, data_source, opt)
        validation(args, model, data_source, logger, batch_n)

    print("Saving {}".format(args.model_path))
    torch.save(model.state_dict(), args.model_path)
def __init__(self, log):
    """Initialize the thermostat UI: data thread, pygame screen, widgets.

    log: shared application logger, handed to every subsystem.
    NOTE(review): heavy side effects here (background thread start,
    pygame/SDL init, control panel) — order is significant.
    """
    self.Log = log
    self.DataSource = data.DataSource(self.Log)
    # self.Temp = self.DataSource.queryCurrentTemps()
    # self.Humidity = self.DataSource.queryCurrentHumidty()
    # Sensor readings start empty; dataDaemon fills them in periodically.
    self.Temp = {}
    self.Humidity = {}
    self.InSettings = False
    # Daemon thread so it never blocks interpreter shutdown; polls every
    # DATA_INTERVAL seconds.
    self.DataThread = threading.Thread(target=self.dataDaemon,
                                       args=(DATA_INTERVAL, ),
                                       daemon=True)
    self.DataThread.start()
    self.Sleeping = False
    self.LastMovement = time.time()
    if PRODUCTION:
        # Work around for bug in libsdl
        os.environ['SDL_VIDEO_WINDOW_POS'] = "{0},{1}".format(0, 0)
        pygame.init()
        # Borderless window pinned at (0,0) instead of true fullscreen.
        self.Screen = pygame.display.set_mode((0, 0), pygame.NOFRAME)
        pygame.mouse.set_visible(False)
        # self.Screen = pygame.display.set_mode((0, 0), FULLSCREEN)
        # pygame.mouse.set_visible(0)
    else:
        pygame.init()
        self.Screen = pygame.display.set_mode(SCREEN_SIZE)
    self.Clock = pygame.time.Clock()
    self.Background = pygame.image.load(BACKGROUND_IMAGE)
    self.PowerButton = widgets.PowerButton((SCREEN_SIZE[0] - 55, 5),
                                           self.handlePower)
    self.SettingsButton = widgets.SettingsButton(
        (SCREEN_SIZE[0] - (55 * 2), 5), self.handleSettings)
    self.Font = pygame.font.SysFont("avenir", 18)
    self.Outdoor = self.Font.render("Outdoor", 1, widgets.BLACK)
    self.ControlPanel = control.Control(self.Log, self.Screen,
                                       self.handleSettings)
    #
    # Sensor Widgets
    #
    # (screen position, sensor key) for each temperature/humidity widget;
    # replaces nine hand-written t1..t9 assignments and appends.
    sensor_specs = [
        ((521, 417), "internal1"),
        ((647, 307), "internal2"),
        ((726, 212), "internal3"),
        ((179, 117), "duct4"),
        ((138, 309), "duct5"),
        ((303, 275), "duct6"),
        ((288, 404), "duct7"),
        ((219, 368), "duct8"),
        ((31, 30), "outdoor9"),
    ]
    self.DisplayObjects = [
        widgets.TempAndHumidity(pos, self.getTempAndHumidity, (key, ))
        for pos, key in sensor_specs
    ]
    self.TimerControl = widgets.TimerControl((250, 5),
                                             self.ControlPanel.handleStart,
                                             self.ControlPanel.handleStop)
    # Position will get updated on first render
    self.StartStop = widgets.StartStopButton(
        (250, 5), self.TimerControl.start, self.TimerControl.stop)