def __init__(self, model, holdout, batch_size=30, iters_per_spin=10,
             validation_period=10):
    """Build a bandit-value trainer around *model*.

    Parameters
    ----------
    model : object
        Value model; wrapped in an rr.BanditValueProblem. Must expose a
        `scope` attribute used for plot titles.
    holdout : dict
        Keyword arguments forwarded to adel.HoldoutWrapper to configure
        the training/validation split.
    batch_size : int
        Number of samples drawn per training step.
    iters_per_spin : int
        Training iterations performed on each spin.
    validation_period : int
        Spins between validation passes.
    """
    # Scalar training schedule / counters.
    self.iters_per_spin = iters_per_spin
    self.val_period = validation_period
    self.batch_size = batch_size
    self.spin_counter = 0

    self.problem = rr.BanditValueProblem(model=model)

    # Raw storage for training and validation data; the holdout wrapper
    # routes newly reported samples into one of the two bases.
    self.tr_base = adel.BasicDataset()
    self.val_base = adel.BasicDataset()
    self.holdout = adel.HoldoutWrapper(training=self.tr_base,
                                       holdout=self.val_base,
                                       **holdout)

    # Uniform sampler over the training base, plus labeled (x, y) views
    # of the sampler, both bases, and the holdout router.
    self.tr_sampler = adel.DatasetSampler(base=self.tr_base,
                                          method='uniform')
    self.tr_sampled = adel.LabeledDatasetTranslator(base=self.tr_sampler)
    self.tr_data = adel.LabeledDatasetTranslator(base=self.tr_base)
    self.val_data = adel.LabeledDatasetTranslator(base=self.val_base)
    self.reporter = adel.LabeledDatasetTranslator(base=self.holdout)

    # Live diagnostic plots keyed by the model's scope name.
    self.error_plotter = rr.LineSeriesPlotter(
        title='Value error over time %s' % model.scope,
        xlabel='Spin iter',
        ylabel='Mean squared loss')
    self.value_plotter = rr.LineSeriesPlotter(
        title='Values %s' % model.scope,
        xlabel='Spin iter',
        ylabel='Value')
def __init__(self, problem, holdout, batch_size=30, iters_per_spin=10,
             validation_period=10):
    """Build a trainer around an existing *problem*.

    Parameters
    ----------
    problem : object
        Problem wrapper exposing `loss_type`, `params`, and `model`
        (with a `using_image` flag).
    holdout : dict
        Keyword arguments forwarded to adel.HoldoutWrapper to configure
        the training/validation split.
    batch_size : int
        Number of samples drawn per training step.
    iters_per_spin : int
        Training iterations performed on each spin.
    validation_period : int
        Spins between validation passes.
    """
    self.problem = problem

    # Scalar training schedule / counters.
    self.iters_per_spin = iters_per_spin
    self.val_period = validation_period
    self.batch_size = batch_size
    self.spin_counter = 0

    # Raw storage for training and validation data; the holdout wrapper
    # routes newly reported samples into one of the two bases.
    self.train_base = adel.BasicDataset()
    self.val_base = adel.BasicDataset()
    self.holdout = adel.HoldoutWrapper(training=self.train_base,
                                       holdout=self.val_base,
                                       **holdout)

    # Uniform sampler over the training base, plus labeled (x, y) views.
    self.train_sampler = adel.DatasetSampler(base=self.train_base,
                                             method='uniform')
    self.train_sampled = adel.LabeledDatasetTranslator(
        base=self.train_sampler)
    self.train_data = adel.LabeledDatasetTranslator(base=self.train_base)
    self.val_data = adel.LabeledDatasetTranslator(base=self.val_base)
    self.reporter = adel.LabeledDatasetTranslator(base=self.holdout)

    # NOTE(review): self.scope is read here but never assigned in this
    # method — presumably provided by a base class or mixin; confirm.
    self.error_plotter = LineSeriesPlotter(
        title='Value error over time %s' % self.scope,
        xlabel='Spin iter',
        ylabel=self.problem.loss_type)
    self.value_plotter = ScatterPlotter(
        title='Value parities %s' % self.scope,
        xlabel='Target value',
        ylabel='Estimated value')
    self.plottables = [self.error_plotter, self.value_plotter]

    # When the model consumes images, also visualize the first parameter
    # tensor as convolution filters.
    if self.problem.model.using_image:
        # TODO HACK
        self.filters = self.problem.params[0]
        n_filters = int(self.filters.shape[-1])
        self.filter_plotter = FilterPlotter(n_filters)
        self.plottables.append(self.filter_plotter)
def get_embedding(self, sess, on_training_data=True):
    """Returns the embedding values for training or validation data.

    Parameters
    ----------
    sess : object
        Session passed through to `self.problem.run_embedding`
        (presumably a tf.Session — confirm).
    on_training_data : bool
        If True, embed the training base; otherwise the validation base.

    Returns
    -------
    (x, y) : x is a stacked array of per-chunk embeddings (empty list
        when no chunks were produced); y is the labels of *all* items
        in the selected dataset.
    """
    if on_training_data:
        data = self.train_base
    else:
        data = self.val_base
    # Process the dataset in fixed-size chunks to bound memory use.
    chunker = adel.DatasetChunker(base=data, block_size=500)  # TODO
    # NOTE(review): zip(*...)[0] selects the first component of each
    # input tuple; this indexing only works on Python 2, where zip
    # returns a list. On Python 3 this would raise TypeError.
    x = [self.problem.run_embedding(sess=sess, ins=zip(*adel.LabeledDatasetTranslator(chunk).all_inputs)[0])
         # ins=adel.LabeledDatasetTranslator(chunk).all_inputs)
         for chunk in chunker.iter_subdata(key=None)]  # HACK key
    # No chunks -> return an empty list rather than calling np.vstack
    # on an empty sequence (which would raise).
    if len(x) == 0:
        x = []
    else:
        x = np.vstack(x)
    y = adel.LabeledDatasetTranslator(data).all_labels
    return x, y
if __name__ == '__main__':
    # Command-line interface: dataset pickle in, YAML config in,
    # checkpoints out.
    parser = argparse.ArgumentParser()
    parser.add_argument('dataset_path', type=str,
                        help='Path to the dataset pickle')
    parser.add_argument('config_path', type=str,
                        help='Path to configuration YAML')
    parser.add_argument('output_path', type=str,
                        help='Path to save checkpoints to')
    args = parser.parse_args()

    # Load dataset
    print 'Loading dataset %s...' % args.dataset_path
    all_data = adel.BasicDataset()
    all_data.load(args.dataset_path)
    all_labeled = adel.LabeledDatasetTranslator(all_data)

    # Parse data properties from the first sample: each state is a
    # (belief vector, image) pair labeled with a scalar reward.
    s, r = all_labeled.all_data[0]
    bel, img = s
    bel_size = len(bel)
    img_size = img.shape
    print 'Found belief size: %d img_size: %s' % (bel_size, str(img_size))

    # Load config
    # SECURITY NOTE(review): yaml.load without an explicit Loader can
    # execute arbitrary Python objects from untrusted files; prefer
    # yaml.safe_load unless custom tags are required.
    with open(args.config_path) as f:
        config = yaml.load(f)
    network_config = config['network']
    problem_config = config['embedding']
    learner_config = config['learner']
# ROS-style data-collection setup: synchronize belief vectors with
# camera images and report (state, reward) pairs into a dataset.
parser.add_argument('--image_topic', default='image', type=str,
                    help='The state image topic (sensor_msgs.Image)')
parser.add_argument('--sync_tol', default=0.1, type=float,
                    help='Tolerance in seconds for belief/image synchronization')
parser.add_argument('--sync_lag', default=1.0, type=float,
                    help='Processing lag for buffering')
parser.add_argument('--dt', default=1.0, type=float,
                    help='Discrete time step in seconds')
parser.add_argument('--gamma', default=0, type=float,
                    help='Discount rate to use for reward integration')
args = parser.parse_args()

dataset = adel.BasicDataset()
data_reporter = adel.LabeledDatasetTranslator(base=dataset)

def queue_data(is_active, payload):
    # Callback invoked by the frontend: active payloads are full SARS
    # tuples; inactive ones carry only (state, action) and get reward 0.
    # NOTE(review): a and sn are unpacked but unused here — only (s, r)
    # is recorded as a labeled pair.
    if is_active:
        s, a, r, sn = payload
    else:
        s, a = payload
        r = 0
    data_reporter.report_label(x=s, y=r)

# Combined state source: belief vector + image, matched within sync_tol.
belief_source = rr.VectorSource()
image_source = rr.ImageSource()
state_source = rr.MultiDataSource([belief_source, image_source],
                                  tol=args.sync_tol)
# NOTE(review): the following call is truncated at the chunk boundary;
# remaining arguments are outside this view.
frontend = rr.SARSFrontend(source=state_source, dt=args.dt, lag=args.sync_lag,