def __init__(self, model, holdout, batch_size=30, iters_per_spin=10,
             validation_period=10):
    self.problem = rr.BanditValueProblem(model=model)

    self.tr_base = adel.BasicDataset()
    self.val_base = adel.BasicDataset()
    self.holdout = adel.HoldoutWrapper(training=self.tr_base,
                                       holdout=self.val_base,
                                       **holdout)

    self.tr_sampler = adel.DatasetSampler(base=self.tr_base, method='uniform')
    self.tr_sampled = adel.LabeledDatasetTranslator(base=self.tr_sampler)
    self.tr_data = adel.LabeledDatasetTranslator(base=self.tr_base)
    self.val_data = adel.LabeledDatasetTranslator(base=self.val_base)
    self.reporter = adel.LabeledDatasetTranslator(base=self.holdout)

    self.iters_per_spin = iters_per_spin
    self.val_period = validation_period
    self.batch_size = batch_size
    self.spin_counter = 0

    self.error_plotter = rr.LineSeriesPlotter(
        title='Value error over time %s' % model.scope,
        xlabel='Spin iter',
        ylabel='Mean squared loss')
    self.value_plotter = rr.LineSeriesPlotter(title='Values %s' % model.scope,
                                              xlabel='Spin iter',
                                              ylabel='Value')
def __init__(self, problem, holdout, batch_size=30, iters_per_spin=10,
             validation_period=10):
    self.problem = problem

    self.train_base = adel.BasicDataset()
    self.val_base = adel.BasicDataset()
    self.holdout = adel.HoldoutWrapper(training=self.train_base,
                                       holdout=self.val_base,
                                       **holdout)

    self.train_sampler = adel.DatasetSampler(base=self.train_base,
                                             method='uniform')
    self.train_sampled = adel.LabeledDatasetTranslator(base=self.train_sampler)
    self.train_data = adel.LabeledDatasetTranslator(base=self.train_base)
    self.val_data = adel.LabeledDatasetTranslator(base=self.val_base)
    self.reporter = adel.LabeledDatasetTranslator(base=self.holdout)

    self.iters_per_spin = iters_per_spin
    self.val_period = validation_period
    self.batch_size = batch_size
    self.spin_counter = 0

    self.error_plotter = LineSeriesPlotter(
        title='Value error over time %s' % self.scope,
        xlabel='Spin iter',
        ylabel=self.problem.loss_type)
    self.value_plotter = ScatterPlotter(title='Value parities %s' % self.scope,
                                        xlabel='Target value',
                                        ylabel='Estimated value')
    self.plottables = [self.error_plotter, self.value_plotter]

    # Additionally visualize learned filters when the model consumes image input
    if self.problem.model.using_image:
        # TODO HACK
        self.filters = self.problem.params[0]
        n_filters = int(self.filters.shape[-1])
        self.filter_plotter = FilterPlotter(n_filters)
        self.plottables.append(self.filter_plotter)
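# A minimal standalone sketch of the adel dataset plumbing shared by these
# constructors, using only the calls that appear above (BasicDataset,
# HoldoutWrapper, DatasetSampler, LabeledDatasetTranslator, report_label).
# The HoldoutWrapper keyword argument used here ('rate') is a hypothetical
# placeholder for whatever the 'holdout' config dict actually supplies.
import adel

train_base = adel.BasicDataset()
val_base = adel.BasicDataset()
holdout_params = {'rate': 0.1}  # hypothetical key; real keys come from config
holdout = adel.HoldoutWrapper(training=train_base, holdout=val_base,
                              **holdout_params)

# Labels reported through the wrapper are routed to either the training or
# the validation base; translators expose each as labeled (x, y) pairs.
reporter = adel.LabeledDatasetTranslator(base=holdout)
train_data = adel.LabeledDatasetTranslator(base=train_base)
val_data = adel.LabeledDatasetTranslator(base=val_base)
for i in range(100):
    reporter.report_label(x=[float(i)], y=float(i % 2))

# A uniform sampler over the training base provides minibatches for training
train_sampler = adel.DatasetSampler(base=train_base, method='uniform')
train_sampled = adel.LabeledDatasetTranslator(base=train_sampler)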
def __init__(self, classifier, holdout, optimizer, visualize=True, vis_res=10):
    self.classifier = classifier
    self.optimizer = optim.parse_optimizers(**optimizer)

    self.training_base = adel.BasicDataset()
    self.tuning_base = adel.BasicDataset()
    self.tuning_holdout = adel.HoldoutWrapper(training=self.training_base,
                                              holdout=self.tuning_base,
                                              **holdout)
    self.report_binary = adel.BinaryDatasetTranslator(self.tuning_holdout)
    self.training_binary = adel.BinaryDatasetTranslator(self.training_base)
    self.tuning_binary = adel.BinaryDatasetTranslator(self.tuning_base)

    self.validation_base = adel.BasicDataset()
    self.validation_binary = adel.BinaryDatasetTranslator(self.validation_base)

    self.update_counter = 0

    self.visualize = visualize
    self.vis_res = vis_res
    if self.visualize:
        # TODO Put scope in name
        self.heat_plotter = ImagePlotter(vmin=0, vmax=1, title='Classifier')
        self.point_plotter = LineSeriesPlotter(other=self.heat_plotter)
        self.error_plotter = LineSeriesPlotter(
            title='Logistic losses over time',
            xlabel='Spin iter',
            ylabel='Logistic loss')
        self.roc_plotter = LineSeriesPlotter(title='ROC',
                                             xlabel='False positive rate',
                                             ylabel='True positive rate')
import argparse

import adel
import matplotlib.pyplot as plt
import yaml

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('dataset_path', type=str,
                        help='Path to the dataset pickle')
    parser.add_argument('config_path', type=str,
                        help='Path to configuration YAML')
    parser.add_argument('output_path', type=str,
                        help='Path to save checkpoints to')
    args = parser.parse_args()

    # Load dataset
    print 'Loading dataset %s...' % args.dataset_path
    all_data = adel.BasicDataset()
    all_data.load(args.dataset_path)
    all_labeled = adel.LabeledDatasetTranslator(all_data)

    # Parse data properties
    s, r = all_labeled.all_data[0]
    bel, img = s
    bel_size = len(bel)
    img_size = img.shape
    print 'Found belief size: %d img_size: %s' % (bel_size, str(img_size))

    # Load config
    with open(args.config_path) as f:
        config = yaml.load(f)
    network_config = config['network']
    problem_config = config['embedding']
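# Standalone sketch of the per-sample layout the script above expects: each
# labeled entry is (state, reward) with state = (belief_vector, image). The
# shapes and values below are synthetic placeholders, not from a real dataset.
import numpy as np
import adel

demo_base = adel.BasicDataset()
demo_labeled = adel.LabeledDatasetTranslator(demo_base)
belief = np.zeros(6)             # belief-state feature vector
image = np.zeros((64, 64, 1))    # state image (height, width, channels)
demo_labeled.report_label(x=(belief, image), y=0.0)

s, r = demo_labeled.all_data[0]
bel, img = s
print 'belief size: %d img_size: %s' % (len(bel), str(img.shape))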
                    help='The belief state feature topic (broadcast.FloatVectorStamped)')
parser.add_argument('--image_topic', default='image', type=str,
                    help='The state image topic (sensor_msgs.Image)')
parser.add_argument('--sync_tol', default=0.1, type=float,
                    help='Tolerance in seconds for belief/image synchronization')
parser.add_argument('--sync_lag', default=1.0, type=float,
                    help='Processing lag for buffering')
parser.add_argument('--dt', default=1.0, type=float,
                    help='Discrete time step in seconds')
parser.add_argument('--gamma', default=0, type=float,
                    help='Discount rate to use for reward integration')
args = parser.parse_args()

dataset = adel.BasicDataset()
data_reporter = adel.LabeledDatasetTranslator(base=dataset)

def queue_data(is_active, payload):
    # Active steps arrive as full (s, a, r, s') tuples; terminal steps carry
    # only (s, a) and are assigned zero reward before being logged.
    if is_active:
        s, a, r, sn = payload
    else:
        s, a = payload
        r = 0
    data_reporter.report_label(x=s, y=r)

belief_source = rr.VectorSource()
image_source = rr.ImageSource()
state_source = rr.MultiDataSource([belief_source, image_source],
                                  tol=args.sync_tol)
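# Hedged usage sketch for the queue_data callback defined above: an active
# step carries a full (s, a, r, s') tuple, while a terminal step carries only
# (s, a) and is logged with zero reward. The state/action values are synthetic.
s0 = ([0.0, 1.0], None)              # stands in for a (belief, image) state
queue_data(True, (s0, 0, 1.5, s0))   # active step -> label (s0, 1.5)
queue_data(False, (s0, 0))           # terminal step -> label (s0, 0)
print 'First logged sample: %s' % str(data_reporter.all_data[0])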