def test_add_scalar_dict(self):
    """Adding a dict of scalars works with and without explicit time/step."""
    client = CrayonClient(port=self.test_server_port)
    exp = client.create_experiment("foo")
    exp.add_scalar_dict({"fizz": 3, "buzz": 5}, wall_time=0, step=5)
    exp.add_scalar_dict({"fizz": 6, "buzz": 10})
def test_get_scalar_values_two_data(self):
    """Two recorded scalar points come back as [wall_time, step, value] rows."""
    client = CrayonClient(port=self.test_server_port)
    exp = client.create_experiment("foo")
    for point in (0, 1):
        exp.add_scalar_value("bar", point, wall_time=point, step=point)
    self.assertEqual(exp.get_scalar_values("bar"),
                     [[0.0, 0, 0.0], [1.0, 1, 1.0]])
def test_init_from_file(self):
    """A new experiment can be initialized from another's zip backup."""
    client = CrayonClient(port=self.test_server_port)
    exp = client.create_experiment("foo")
    exp.add_scalar_value("bar", 2, wall_time=time.time(), step=1)
    backup = exp.to_zip()
    client.create_experiment("new", backup)
    os.remove(backup)
def __init__(self, sizes, cost=CrossEntropyCost):
    """Build the network and wire up Crayon/TensorBoard experiments.

    ``sizes`` lists the neuron count of each layer; e.g. [2, 3, 1] gives
    a three-layer network with 2, 3, and 1 neurons respectively.  Biases
    and weights are initialized via ``self.default_weight_initializer``
    (see its docstring).  Any pre-existing Crayon experiments with the
    same names are removed and recreated so each run starts clean.
    """
    self.num_layers = len(sizes)
    self.sizes = sizes
    self.default_weight_initializer()
    self.cost = cost
    self.cc = CrayonClient(hostname="localhost", port=8889)
    self.timeH = time.time()
    # Drop stale experiments before recreating them.
    for name in ("train_accuracy", "train_loss", "test_accuracy", "test_loss"):
        self.cc.remove_experiment(name)
    self.trainA = self.cc.create_experiment("train_accuracy")
    self.trainL = self.cc.create_experiment("train_loss")
    self.testA = self.cc.create_experiment("test_accuracy")
    self.testL = self.cc.create_experiment("test_loss")
def test_get_scalar_names(self):
    """Every scalar name added to an experiment is listed."""
    client = CrayonClient(port=self.test_server_port)
    exp = client.create_experiment("foo")
    for name in ("fizz", "buzz"):
        exp.add_scalar_value(name, 0, wall_time=0)
    self.assertEqual(sorted(exp.get_scalar_names()),
                     sorted(["fizz", "buzz"]))
def main():
    """Train every named model across all cross-validation folds."""
    global args
    args = parser.parse_args()
    cc = CrayonClient(port=8089)
    for name in args.name.split(','):
        # Start from a clean weights/output tree for this model.
        shutil.rmtree(f'weights/{name}/', ignore_errors=True)
        shutil.rmtree(f'output/{name}/', ignore_errors=True)
        os.makedirs(f'weights/{name}')
        for fold in range(NUM_SPLITS):
            print(f'=> Targeting {name} fold {fold+1}/{NUM_SPLITS}')
            os.makedirs(f'output/{name}/fold{fold}/train')
            os.makedirs(f'output/{name}/fold{fold}/valid')
            # Architecture is encoded before the first underscore.
            arch = name.split('_')[0]
            net = models[arch](1)
            net = nn.DataParallel(net)
            net.cuda()
            train_loader, valid_loader, _ = get_loaders(
                args.batch_size, NUM_SPLITS, fold)
            train_eval(net, name, train_loader, valid_loader, fold,
                       make_experiment(cc, name, fold),
                       init_lr=args.lr, epochs=args.epochs,
                       num_epochs_per_decay=args.num_epochs_per_decay)
            del net
def test_get_histogram_values_two_data(self):
    """Histogram points round-trip as [wall_time, step, histogram] rows."""
    client = CrayonClient(port=self.test_server_port)
    exp = client.create_experiment("foo")
    for point in (0, 1):
        exp.add_histogram_value("bar", {
            "min": 0,
            "max": 100,
            "num": 3,
            "bucket_limit": [10, 50, 30],
            "bucket": [5, 45, 25]
        }, wall_time=point, step=point)
    expected_hist = [0.0, 100.0, 3.0, 0.0, 0.0,
                     [10.0, 50.0, 30.0], [5.0, 45.0, 25.0]]
    self.assertEqual(exp.get_histogram_values("bar"),
                     [[0.0, 0, expected_hist],
                      [1.0, 1, expected_hist]])
def init_logger(tensorboard=True, prepend_text=""):
    """Set up file + stream logging and (optionally) a Crayon experiment.

    Both handlers log at INFO level.  When ``tensorboard`` is true, a
    Crayon experiment named after the save directory is created and
    stored in the module-level ``experimentLogger``.
    """
    global logger, experimentLogger
    logger = logging.getLogger('heel-contour-prediction')
    print(settings.opt)
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    # File handler writes alongside the run's other artifacts.
    fileHandler = logging.FileHandler(
        os.path.join(settings.opt['save'],
                     prepend_text + settings.opt['description'] + '.log'))
    fileHandler.setFormatter(formatter)
    fileHandler.setLevel(logging.INFO)
    logger.addHandler(fileHandler)
    # Mirror everything to the console as well.
    streamHandler = logging.StreamHandler()
    streamHandler.setFormatter(formatter)
    streamHandler.setLevel(logging.INFO)
    logger.addHandler(streamHandler)
    logger.setLevel(logging.INFO)
    logger.info('file handler and stream handler are ready for logging')
    if tensorboard:
        cc = CrayonClient(hostname="localhost")
        experimentLogger = cc.create_experiment(Path(settings.opt['save']).name)
    # Record the configuration for reproducibility.
    logger.info(settings.opt)
def test_add_scalar_dict_wrong_data(self):
    """Non-numeric values or non-string keys raise ValueError."""
    client = CrayonClient(port=self.test_server_port)
    exp = client.create_experiment("foo")
    self.assertRaises(ValueError, exp.add_scalar_dict,
                      {"fizz": "foo", "buzz": 5})
    self.assertRaises(ValueError, exp.add_scalar_dict,
                      {3: 6, "buzz": 10})
def test_open_experiment(self):
    """Reopening an experiment appends to its existing scalar history."""
    client = CrayonClient(port=self.test_server_port)
    exp = client.create_experiment("foo")
    exp.add_scalar_value("bar", 1, step=2, wall_time=0)
    reopened = client.open_experiment("foo")
    reopened.add_scalar_value("bar", 3, wall_time=1)
    self.assertEqual(reopened.get_scalar_values("bar"),
                     [[0.0, 2, 1.0], [1.0, 3, 3.0]])
def __init__(self, experiment_name: str, log_dir=None):
    """Connect to a local Crayon server and (re)create the experiment.

    Any existing experiment with the same name is removed first so the
    run starts with empty logs.  ``log_dir`` is accepted for interface
    compatibility and is unused here.
    """
    from pycrayon import CrayonClient
    self.client = CrayonClient(port=6007)
    self.experiment_name = experiment_name
    try:
        self.client.remove_experiment(experiment_name)
    except ValueError:
        # Nothing to remove -- first run under this name.
        pass
    self.experiment = self.client.create_experiment(experiment_name)
def test_add_histogram_value_wrong_data(self):
    """Passing a list instead of a histogram dict raises ValueError."""
    client = CrayonClient(port=self.test_server_port)
    exp = client.create_experiment("foo")
    bad_data = ["lolz", "lulz", "lelz"]
    self.assertRaises(ValueError, exp.add_histogram_value,
                      "bar", bad_data, tobuild=True)
def tensorboard():
    """Connect to the Crayon/TensorBoard server and return a fresh experiment.

    Any existing experiment named ``EXP_NAME`` is removed first so logging
    starts clean.

    Returns
    -------
    The newly created pycrayon experiment.
    """
    from pycrayon import CrayonClient
    cc = CrayonClient(hostname=TENSORBOARD_SERVER)
    try:
        cc.remove_experiment(EXP_NAME)
    except ValueError:
        # Experiment did not exist yet -- nothing to remove.  A bare
        # except here would also have hidden connection errors.
        pass
    return cc.create_experiment(EXP_NAME)
def test_get_scalar_values_auto_step(self):
    """Omitted steps auto-increment from the last explicitly given step."""
    client = CrayonClient(port=self.test_server_port)
    exp = client.create_experiment("foo")
    exp.add_scalar_value("bar", 0, wall_time=0)
    exp.add_scalar_value("bar", 1, wall_time=1)
    exp.add_scalar_value("bar", 2, wall_time=2, step=10)
    exp.add_scalar_value("bar", 3, wall_time=3)
    expected = [[0.0, 0, 0.0], [1.0, 1, 1.0],
                [2.0, 10, 2.0], [3.0, 11, 3.0]]
    self.assertEqual(exp.get_scalar_values("bar"), expected)
def setup_tensorboard(exp_id, cur_t, hostname, port):
    """Create (or recreate) a Crayon experiment named ``<cur_t>_<exp_id>``.

    If an experiment with that name already exists, it is removed and
    recreated so its data is flushed anew.

    Returns
    -------
    tuple
        ``(experiment, client)``.
    """
    exp_filename = '{}_{}'.format(cur_t, exp_id)
    tb = CrayonClient(hostname=hostname, port=port)
    try:
        tb_experiment = tb.create_experiment(exp_filename)
    except ValueError:
        # Name already taken: flush the old data anew.  Catching only
        # ValueError keeps connection errors from being masked and then
        # re-raised as a confusing failure inside remove_experiment.
        tb.remove_experiment(exp_filename)
        tb_experiment = tb.create_experiment(exp_filename)
    return tb_experiment, tb
def test_remove_experiment(self):
    """Removing an experiment frees its name and forgets its data."""
    client = CrayonClient(port=self.test_server_port)
    # Can't open what was never created.
    self.assertRaises(ValueError, client.open_experiment, "foo")
    exp = client.create_experiment("foo")
    exp.add_scalar_value("bar", 1, step=2, wall_time=0)
    # Duplicate creation is rejected, but opening works.
    self.assertRaises(ValueError, client.create_experiment, "foo")
    client.open_experiment("foo")
    client.remove_experiment(exp.xp_name)
    # Double removal fails; afterwards the name is free again.
    self.assertRaises(ValueError, client.remove_experiment, exp.xp_name)
    exp = client.create_experiment("foo")
def test_add_histogram_value_wrong_variable(self):
    """An empty variable name is rejected with ValueError."""
    client = CrayonClient(port=self.test_server_port)
    exp = client.create_experiment("foo")
    histogram = {
        "min": 0,
        "max": 100,
        "num": 3,
        "bucket_limit": [10, 50, 30],
        "bucket": [5, 45, 25]
    }
    self.assertRaises(ValueError, exp.add_histogram_value, "", histogram)
def test_get_scalar_dict(self):
    """Scalars added via dict are retrievable per-name with correct steps."""
    client = CrayonClient(port=self.test_server_port)
    exp = client.create_experiment("foo")
    exp.add_scalar_dict({"fizz": 3, "buzz": 5}, wall_time=0, step=5)
    exp.add_scalar_dict({"fizz": 6, "buzz": 10}, wall_time=1)
    self.assertEqual(exp.get_scalar_values("fizz"),
                     [[0.0, 5, 3.0], [1.0, 6, 6.0]])
    self.assertEqual(exp.get_scalar_values("buzz"),
                     [[0.0, 5, 5.0], [1.0, 6, 10.0]])
def test_add_histogram_value_with_sumsq(self):
    """A histogram carrying an explicit sum_squares field is accepted."""
    client = CrayonClient(port=self.test_server_port)
    exp = client.create_experiment("foo")
    histogram = {
        "min": 0,
        "max": 100,
        "num": 3,
        "bucket_limit": [10, 50, 30],
        "bucket": [5, 45, 25],
        "sum_squares": 5625
    }
    exp.add_histogram_value("bar", histogram)
def test_add_histogram_value(self):
    """Histograms can be added with explicit and with default time/step."""
    client = CrayonClient(port=self.test_server_port)
    exp = client.create_experiment("foo")
    histogram = {
        "min": 0,
        "max": 100,
        "num": 3,
        "bucket_limit": [10, 50, 30],
        "bucket": [5, 45, 25]
    }
    exp.add_histogram_value("bar", histogram, wall_time=0, step=0)
    exp.add_histogram_value("bar", histogram)
def test_get_histogram_values_wrong_variable(self):
    """Requesting histogram values with an empty name raises ValueError."""
    client = CrayonClient(port=self.test_server_port)
    exp = client.create_experiment("foo")
    histogram = {
        "min": 0,
        "max": 100,
        "num": 3,
        "bucket_limit": [10, 50, 30],
        "bucket": [5, 45, 25]
    }
    exp.add_histogram_value("bar", histogram, wall_time=0, step=0)
    self.assertRaises(ValueError, exp.get_histogram_values, "")
def parse_args():
    """Parse command-line options, seed RNGs, configure GPU and Crayon.

    Returns
    -------
    The parsed options namespace.

    Side effects: seeds Python's ``random`` and torch (CPU and, when a
    GPU is selected and ``seed > 0``, CUDA), selects the GPU, and removes
    any stale Crayon experiment matching ``opt.exp``.
    """
    parser = argparse.ArgumentParser(
        description='umt.py',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    opts.add_md_help_argument(parser)
    opts.model_opts(parser)
    opts.preprocess_opts(parser)
    opts.train_opts(parser)
    opt = parser.parse_args()

    # Seed both RNGs exactly once (the original seeded torch twice and
    # carried a dead, commented-out `if opt.seed > 0:` guard).
    random.seed(opt.seed)
    torch.manual_seed(opt.seed)

    if opt.word_vec_size != -1:
        opt.src_word_vec_size = opt.word_vec_size
        opt.tgt_word_vec_size = opt.word_vec_size
    if opt.layers != -1:
        opt.enc_layers = opt.layers
        opt.dec_layers = opt.layers
    opt.brnn = (opt.encoder_type == "brnn")

    if torch.cuda.is_available() and not opt.gpuid:
        print("WARNING: You have a CUDA device, should run with -gpuid 0")
    if opt.gpuid:
        cuda.set_device(opt.gpuid[0])
        if opt.seed > 0:
            torch.cuda.manual_seed(opt.seed)
        if len(opt.gpuid) > 1:
            sys.stderr.write("Sorry, multigpu isn't supported yet, coming soon!\n")
            sys.exit(1)

    # Set up the Crayon logging server.
    if opt.exp_host != "":
        from pycrayon import CrayonClient
        cc = CrayonClient(hostname=opt.exp_host)
        experiments = cc.get_experiment_names()
        print(experiments)
        if opt.exp in experiments:
            cc.remove_experiment(opt.exp)
    return opt
def test_get_histogram_names(self):
    """All histogram names added to an experiment are listed."""
    client = CrayonClient(port=self.test_server_port)
    exp = client.create_experiment("foo")
    histogram = {
        "min": 0,
        "max": 100,
        "num": 3,
        "bucket_limit": [10, 50, 30],
        "bucket": [5, 45, 25]
    }
    exp.add_histogram_value("fizz", histogram, wall_time=0, step=0)
    exp.add_histogram_value("buzz", histogram, wall_time=1, step=1)
    self.assertEqual(sorted(exp.get_histogram_names()),
                     sorted(["fizz", "buzz"]))
def create_crayon_logger(exp_name, port=8889):
    """Return a fresh Crayon experiment, removing any previous one.

    Connects the Crayon logger (TensorBoard "wrapper") to the local
    server and creates an experiment named ``exp_name``; an existing
    experiment with that name is removed first.
    """
    cc = CrayonClient(hostname="localhost", port=port)
    tb_log_exp_name = exp_name
    try:
        cc.remove_experiment(tb_log_exp_name)
    except ValueError:
        # Experiment doesn't already exist -- nothing to be done here.
        print("Experiment '{}' didn't exist already (nothing to be done).".format(\
tb_log_exp_name))
    return cc.create_experiment(tb_log_exp_name)
def train_tracking_model(config):
    r"""
    Training function for :class:`eurus.track.pytorch.train.TrackingModel`.

    Parameters
    ----------
    config : :class:`eurus.track.pytorch.train.TrackingModelConfig`
        The configuration to run the training.
    """
    # Optional Crayon/TensorBoard experiment logging.
    crayon_logger = None
    if config.crayon_config is not None:
        crayon_config = config.crayon_config
        cc = CrayonClient(hostname=crayon_config.server_address)
        crayon_logger = cc.create_experiment(crayon_config.experiment_name)

    dataset = create_dataset(config.dataset_config)
    dataloader = DataLoader(dataset, **config.dataloader_config.configuration)
    model = create_tracking_model(config.tracking_model_config)
    criterion = nn.MSELoss()
    if torch.cuda.is_available():
        model = model.cuda()
        criterion = criterion.cuda()

    # TODO: Optimizer config?
    optimizer = optim.SGD(model.parameters(), lr=config.lr)

    for epoch in range(1, config.n_epochs + 1):
        loss_meter, time_meter = training_loop(
            epoch, dataloader, model, criterion, optimizer, crayon_logger)
        logger.info(
            'Epoch: {0:05d} completed \t'
            'Average Loss: {1:4.4f} \t'
            'Total time: {2:4.4f}'.format(
                epoch, loss_meter.avg, time_meter.sum))
        if crayon_logger is not None:
            crayon_logger.add_scalar_value('loss_epochs', loss_meter.avg)
            crayon_logger.add_scalar_value('time_epochs', time_meter.sum)

    torch.save(model.state_dict(), config.weights_file)
def test_backup(self):
    """Zip backups restore scalar data into the same or a new experiment."""
    client = CrayonClient(port=self.test_server_port)
    exp = client.create_experiment("foo")
    exp.add_scalar_value("bar", 2, wall_time=time.time(), step=1)
    exp.add_scalar_value("bar", 2, wall_time=time.time(), step=2)
    original_data = exp.get_scalar_values("bar")
    backup = exp.to_zip()
    client.remove_experiment("foo")
    # Restore under the original name...
    exp = client.create_experiment("foo", zip_file=backup)
    self.assertEqual(original_data, exp.get_scalar_values("bar"))
    # ...and under a brand-new name.
    restored = client.create_experiment("new", zip_file=backup)
    self.assertEqual(original_data, restored.get_scalar_values("bar"))
    os.remove(backup)
def __init__(self, experiment_name: str, hostname=QB_TB_HOSTNAME, port=QB_TB_PORT):
    """Create a Crayon experiment if a TensorBoard server is reachable.

    When the host is down, experiment logging is disabled and all
    attributes are left as ``None``.
    """
    if not host_is_up(hostname, port):
        log.info(
            f'Tensorboard not found on http://{hostname}:{port}, experiment logging disabled'
        )
        self.client = None
        self.experiment_name = None
        self.experiment = None
        return
    from pycrayon import CrayonClient
    self.client = CrayonClient(hostname=hostname, port=port)
    self.experiment_name = experiment_name
    try:
        # Drop any stale experiment so this run starts clean.
        self.client.remove_experiment(experiment_name)
    except ValueError:
        pass
    self.experiment = self.client.create_experiment(experiment_name)
def make_crayon_experiments(experiment_name, new=True):
    """Return (train, valid) Crayon experiments for ``experiment_name``.

    With ``new=True`` any existing experiments under these names are
    removed and fresh ones created (the train experiment's 'lr' scalar
    starts at step 1); otherwise the existing experiments are reopened.
    """
    client = CrayonClient(hostname=config.CRAYON_SERVER_HOSTNAME)
    train_experiment_name = f'{experiment_name}_train'
    valid_experiment_name = f'{experiment_name}_valid'
    if not new:
        return (client.open_experiment(train_experiment_name),
                client.open_experiment(valid_experiment_name))
    for name in (train_experiment_name, valid_experiment_name):
        try:
            client.remove_experiment(name)
        except ValueError:
            pass
    train_experiment = client.create_experiment(train_experiment_name)
    train_experiment.scalar_steps['lr'] = 1
    valid_experiment = client.create_experiment(valid_experiment_name)
    return train_experiment, valid_experiment
def main():
    """Evaluate Faster R-CNN checkpoints at each epoch and log mAP to Crayon."""
    args = parse_args()
    ctx = mx.gpu(args.gpu)
    print(args)
    cc = CrayonClient(hostname='10.132.90.242')
    if args.exp_name is None:
        # Default experiment name encodes today's date.
        args.exp_name = datetime.now().strftime('frcnnEval_%m-%d')
    try:
        exp = cc.create_experiment(args.exp_name)
    except ValueError:
        # Experiment already exists: append to it instead.  Catching only
        # ValueError (pycrayon's duplicate-name error) keeps connection
        # failures from being silently misrouted to open_experiment.
        exp = cc.open_experiment(args.exp_name)
    for x in args.epoch.split(","):
        mAp = test_rcnn(args.network, args.dataset, args.image_set,
                        args.root_path, args.dataset_path, ctx, args.prefix,
                        int(x), args.vis, args.shuffle, args.has_rpn,
                        args.proposal, args.thresh, args.use_global_context,
                        args.use_roi_align)
        exp.add_scalar_value('mAp', mAp)
    return
def __init__(self, agent, capacity, batch_size, gamma, tau, init_lr,
             weight_decay, crayon_vis):
    """Set up replay memory, optimizers, and optional Crayon stats logging."""
    super(DDPGOptimizer, self).__init__()
    self.agent = agent
    self.gamma = gamma
    self.tau = tau
    self.memory = ReplayMemory(capacity, batch_size)
    self.critic_criterion = nn.MSELoss()
    # Separate learning rates for critic and actor; only the critic uses
    # weight decay.
    self.critic_optimizer = optim.Adam(self.agent.critic.parameters(),
                                       lr=init_lr['critic'],
                                       weight_decay=weight_decay)
    self.actor_optimizer = optim.Adam(self.agent.actor.parameters(),
                                      lr=init_lr['actor'])
    self.crayon_vis = crayon_vis
    if self.crayon_vis:
        self.cc = CrayonClient()
        try:
            self.stats = self.cc.create_experiment('stats')
        except ValueError:
            # 'stats' is left over from a previous run: recreate it.
            self.cc.remove_experiment('stats')
            self.stats = self.cc.create_experiment('stats')