def Proto(default: T, help=None, dtype=None, aliases=None, **kwargs) -> T:
    """Declare a parameter: wrap *default* plus its metadata in a DefaultBear.

    The help text, dtype, aliases, and any extra keyword arguments are
    carried alongside the default value; extras are packed under the
    single ``kwargs`` key.
    """
    metadata = dict(
        default=default,
        help_str=help,
        dtype=dtype,
        aliases=aliases,
        kwargs=kwargs,
    )
    return DefaultBear(None, **metadata)
def load_config(config_file_path):
    """should raise ScannerError if there is a problem in the yaml file."""
    # NOTE(review): typ='unsafe' lets YAML tags construct arbitrary Python
    # objects — only feed this trusted, local config files.
    yaml = YAML(typ='unsafe', pure=True)
    config_path = Path(config_file_path)
    documents = yaml.load_all(config_path)
    # Only the first YAML document in the file is used.
    first_doc = next(documents)
    return DefaultBear(None, **first_doc)
def adapt_and_test():
    """Load saved MAML weights, adapt a fresh FunctionalMLP to each task with
    SGD, and log/plot the per-task and averaged learning curves.

    Relies on module-level `Args`, `amp_tasks`, `logger`, `t` (torch),
    `np`, and `device` — presumably configured by the surrounding script
    (TODO confirm).
    """
    import os
    import dill
    from playground.maml.maml_torch.maml_multi_step import FunctionalMLP
    logger.configure(log_directory=Args.log_dir, prefix=Args.log_prefix)
    logger.log_params(Args=vars(Args))
    # load weights
    with open(os.path.join(Args.log_dir, Args.log_prefix, Args.weight_path), 'rb') as f:
        weights = dill.load(f)
    model = FunctionalMLP(1, 1)
    # missing keys default to an empty list, so we can append per-amp losses
    losses = DefaultBear(list)
    for amp, task in amp_tasks:
        # reset the model to the saved meta-learned weights before adapting
        model.params.update({
            k: t.tensor(v, requires_grad=True, dtype=t.double).to(device)
            for k, v in weights[0].items()
        })
        sgd = t.optim.SGD(model.parameters(), lr=Args.learning_rate)
        proper = t.tensor(task.proper()).to(device)
        samples = t.tensor(task.samples(Args.k_shot)).to(device)
        for grad_ind in range(Args.grad_steps):
            # evaluate on the full task under no_grad (metrics only)
            with t.no_grad():
                xs, labels = proper
                ys = model(xs.unsqueeze(-1))
                loss = model.criteria(ys, labels.unsqueeze(-1))
                logger.log(grad_ind, loss=loss.item(), silent=grad_ind != Args.grad_steps - 1)
                losses[f"amp-{amp:.2f}-loss"].append(loss.item())
            # adaptation step on the k-shot samples (gradients enabled)
            xs, labels = samples
            ys = model(xs.unsqueeze(-1))
            loss = model.criteria(ys, labels.unsqueeze(-1))
            sgd.zero_grad()
            loss.backward()
            sgd.step()
    # losses = np.array([v for k, v in losses.items()])
    import matplotlib.pyplot as plt
    # one learning curve per task amplitude
    fig = plt.figure()
    plt.title(f'Learning Curves')
    for amp, task in amp_tasks:
        plt.plot(losses[f"amp-{amp:.2f}-loss"], label=f"amp {amp:.2f}")
    plt.legend()
    logger.log_pyplot(None, key=f"losses/learning_curves_amp.png", fig=fig)
    plt.close()
    # curve averaged over all amplitudes
    average_losses = np.array(
        [losses[f"amp-{amp:.2f}-loss"] for amp, task in amp_tasks])
    fig = plt.figure()
    plt.title(f'Learning Curves Averaged amp ~ [5 - 10]')
    plt.plot(average_losses.mean(0))
    plt.ylim(0, 28)
    logger.log_pyplot(None, key=f"losses/learning_curves_amp_all.png", fig=fig)
    plt.close()
class RunConfig(Bear):
    """config namespace for the run script"""
    # nested runner configuration; missing attributes resolve to None
    config = DefaultBear(None)  # type: RunnerConfig
    # environment variables to set for the run
    env = {}  # type: dict
    # command template; '{args}' is filled in with the rendered arguments
    run = 'python main.py {args}'  # type: str
    # arguments applied to every run unless overridden
    default_args = {}  # type: dict
    # per-run argument overrides
    args = {}  # type: dict
    # list of argument sets for batched runs
    batch_args = []  # type: list
def main(config_file):
    """Escher-cli, a command line tool that helps with your ML experiments.

    :param config_file: path to the yaml run configuration; required.
    :raises EnvironmentError: when the option is missing or the config
        file cannot be parsed (original error attached and chained).
    """
    # logging.getLogger().setLevel(logging.DEBUG if debug else logging.INFO)
    # 0. set cwd if exist
    # if wd:
    #     os.chdir(wd)
    # 1. take in yaml file, go through files and run one by one
    if config_file is None:
        raise EnvironmentError('need --config-file option')
    try:
        # noinspection PyUnresolvedReferences
        run_config = load_config(config_file)
        # hydrate the class-level defaults of RunConfig (public names only),
        # then overlay everything parsed from the config file
        hydrated = DefaultBear(None, **{
            k: v for k, v in vars(RunConfig).items() if not is_hidden(k)
        })  # type: RunConfig
        hydrated.update(**run_config)
    except Exception as e:
        # chain the cause so the yaml/parse traceback is preserved
        raise EnvironmentError('config-file parse error', e) from e
    else:
        # run outside the try: a failure inside the job must not be
        # mislabeled as a config-file parse error
        job(hydrated)
def plot_learning_curve():
    """Collect scalar logs from ``Args.run_dir`` into time series and plot
    the train/test learning curves (mean over 10 runs, smoothed).

    :return: the matplotlib figure containing the plot.
    """
    # fix: removed unused locals (amps, phi0s, xs, _timestamp) — they were
    # computed but never read anywhere in this function.
    start = 0
    keys = set()
    # missing keys default to a fresh TimeSeries
    data = DefaultBear(TimeSeries)
    for i, entry in enumerate(tqdm(load(Args.run_dir))):
        if i < start:
            continue
        _step = entry['_step']
        for k, v in entry.items():
            keys.add(k)
            # bookkeeping fields are not metrics
            if k in ['_step', '_timestamp']:
                continue
            data[k].append(_step, v)
    print(f"data includes the following keys:\n{keys}")
    import matplotlib.pyplot as plt
    fig = plt.figure(figsize=(11, 4))
    plt.title('Sinusoidal')
    step = 1
    # average the train curves of runs 0..9 at gradient step `step`
    for g in [step]:
        ys = np.array([data[f'{_}-grad-{g}-loss'].y for _ in range(10)])
        plt.plot(data[f'0-grad-{g}-loss'].x, smooth(ys.mean(axis=0)), label='train')
    # same for the held-out test curves
    for g in [step]:
        ys = np.array([data[f'{_}-grad-{g}-test-loss'].y for _ in range(10)])
        plt.plot(data[f'0-grad-{g}-test-loss'].x, smooth(ys.mean(axis=0)), label='test')
    plt.ylim(0, 5)
    plt.legend()
    plt.show()
    return fig
if __name__ == '__main__':
    from collections import defaultdict
    from waterbear import DefaultBear
    import matplotlib.pyplot as plt
    from multiprocessing.pool import Pool
    from ml_logger import logger
    # sample trajectories from Args.n_envs environments in parallel
    p = Pool(10)
    traj_batch = p.map(sample_trajs, range(Args.n_envs))
    # build the maze graph from all trajectories, then wrap it so that
    # edge queries are recorded in `queries` (presumably — see patch_graph)
    G = maze_graph(np.concatenate(traj_batch))
    queries = patch_graph(G)
    cache = DefaultBear(dict)
    # pick the graph nodes nearest to the fixed start/goal coordinates
    start, goal = get_neighbor(G, (-0.16, 0.16)), get_neighbor(G, (-0.16, -0.16))
    fig = plt.figure(figsize=(4, 4), dpi=300)
    for i, (key, search) in enumerate(methods.items()):
        # reset the query counter so each search method is measured alone
        queries.clear()
        name = search.__name__
        title, *_ = search.__doc__.split('\n')
        short_name = short_names[key]
        path, ds = search(G, start, goal, partial(heuristic, G=G, scale=Args.h_scale))
        # record search cost (#queries) and path length per method
        cache.cost[short_name] = len(queries.keys())
        cache.len[short_name] = sum(ds)
        print(f"{key:>10} len: {len(path)}", f"cost: {len(queries.keys())}")
import os

from waterbear import DefaultBear

# Snapshot of the process environment; missing variables resolve to None
# (the lambda default), which makes the `or`-fallbacks below work.
ENV = DefaultBear(lambda: None, **os.environ)

# Each path falls back to a repo-relative default when the corresponding
# environment variable is unset.
data_dir = ENV.DATA_DIR or "../../datasets/miniscapes"
output_dir = ENV.OUTPUT_DIR or "../../datasets/miniscapes-processed"
run_dir = ENV.RUN_DIR or "./training_logs"
demo_dir = ENV.DEMO_DIR or "../../runs/image-segmentation/demo"
def Proto(default: T, help=None, dtype=None) -> T:
    """Declare a parameter: bundle *default* with its help text and dtype."""
    metadata = dict(
        default=default,
        help_str=help,
        dtype=dtype,
    )
    return DefaultBear(None, **metadata)
import matplotlib.pyplot as plt
from ml_logger import logger
# the local metric in this case really does not work well.
# The data contains some bad samples and it is throwing off my
from plan2vec_experiments import instr

# chunk_prefix
# build the rope graph; patch_graph wraps it so edge queries are recorded
# in `queries` (presumably — confirm against patch_graph)
G, pairwise, all_images = instr(rope_graph)()
queries = patch_graph(G)
# def plan_experiment(start, goal, G, queries):
# start, goal = 1310, 1320
start, goal = 1111, 2121
# goal -= 120  # 10 worked well
cache = DefaultBear(dict)
logger.configure()
# ground-truth plan: consecutive indices from start to goal (inclusive)
path = [*range(start, goal + 1)]
ds = [pairwise[i, j] for i, j in zip(path[:-1], path[1:])]
plot_trajectory_rope(path, ds, all_images, title=f'Ground-Truth', key=f"../figures/rope_plans/bfs.png")
# measure bfs: reset the query counter, search, record cost and length
queries.clear()
path = bfs(G, start, goal)
ds = [pairwise[i, j] for i, j in zip(path[:-1], path[1:])]
cache.cost['bfs'] = len(queries.keys())
cache.len['bfs'] = len(path)
from waterbear import DefaultBear

# Smoke test: DefaultBear accepts keyword seeds and in-place updates.
_c = DefaultBear(None, a=10)
_c.update(b=100)
assert _c.b == 100
# @cli_parse
# class G(ParamsProto):
#     npts: int = 0