def main():
    import numpy as np

    xt = torch.linspace(-3 * np.pi, 3 * np.pi, 101)
    yt = torch.sin(xt)

    bp = {"xt": xt, "yt": yt, "width": 20, "num_epochs": 4}
    # bs = list(bee_trainer(**bp))
    res = core.swarm_train(bee_trainer, bp, num_bees=5, fields="ypred,weights,biases", seed=20)
    # from pprint import pprint
    # pprint(bs)
    # print(res["weights"].shape)
    # print(res["biases"])
    # print(res["biases"].max(), res["biases"].min())
    # make_hist_animation(res["biases"], "biases")

    bw = res["biases"] / res["weights"]
    # print(bw.min(), bw.max())
    # print(np.percentile(bw, [1, 5, 90, 95]))
    # print(bw)
    bw = bw.clip(-10, 10)

    ls = animator.LineSwarm.standard(
        xt.detach().numpy(), yt.detach().numpy(), res["ypred"][::10], set_xlim=(-10, 10)
    )
    hist = animator.HistogramSwarm.from_swarm(
        bw, 100, set_title="Biases/Weights", set_ylabel="Count", set_xlim=(-10, 10)
    )
    animator.swarm_animate([ls, hist], "weight_distr.mp4")
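# A hedged sketch of the generator contract that swarm_train appears to rely on above: the bee
# is called with the params dict (bp) and yields one tuple per epoch, whose elements are
# collected into the requested fields ("ypred,weights,biases"). This is an illustrative
# stand-in, not the project's actual bee_trainer.
def example_bee_trainer(xt, yt, width=20, num_epochs=4):
    import torch
    from torch import nn

    net = nn.Sequential(nn.Linear(1, width), nn.Tanh(), nn.Linear(width, 1))
    optimiser = torch.optim.SGD(net.parameters(), lr=0.01)
    loss_func = nn.MSELoss()
    for _ in range(num_epochs):
        optimiser.zero_grad()
        ypred = net(xt.unsqueeze(-1)).squeeze(-1)
        loss = loss_func(ypred, yt)
        loss.backward()
        optimiser.step()
        # one value per requested field, per epoch: predictions plus the first layer's parameters
        yield ypred.detach(), net[0].weight.detach().flatten(), net[0].bias.detach().clone()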
def main():
    x = torch.linspace(-10, 10, 100)
    beeparams = {
        "x": x,
        "num_epochs": 200,
        "lr": 0.005,
        "momentum": 0.5,
        "width": 50,
        "hidden": 3,
        "activation": activations.Tanh,
    }
    logging.info("Starting training")
    results = core.swarm_train(solo_train, beeparams, num_bees=20, fields="ypred,loss")
    print(results["loss"])

    logging.info("Making animation")
    # target curve: zero everywhere except the endpoints at -0.5 and 0.5
    yd = np.zeros(len(x))
    yd[0] = -0.5
    yd[-1] = 0.5
    animator.make_animation(
        x.detach().numpy(), yd=yd, data=results["ypred"], title="secondderiv", destfile="sd.mp4"
    )
def get_simple_results():
    x = torch.linspace(-3.5, 3.5, 21)
    y = torch.sin(x)
    h, w = 1, 10
    nepoch = 60
    bee_trainer = regimes.make_bee(
        regimes.default_train, x, y, h, w, num_epochs=nepoch, lr=0.01, momentum=0.94
    )
    results = core.swarm_train(bee_trainer, num_bees=10, fields="ypred,loss", seed=10)
    return results
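# Hypothetical usage sketch for get_simple_results. The expected shapes assume the same
# (num_bees, num_epochs) and (num_bees, num_epochs, num_points) layout that test_simple
# asserts further down; here that would be 10 bees, 60 epochs and 21 x values.
def inspect_simple_results():
    results = get_simple_results()
    print(results["loss"].shape)   # expected (10, 60)
    print(results["ypred"].shape)  # expected (10, 60, 21)
    print(results["loss"][:, -1])  # final loss reached by each bee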
def main(width_list=None, momentum_list=None, lr_list=None, num_epochs=100, num_bees=10, seed=10):
    """
    A simple experiment that tests multiple values of width, momentum and learning rate.

    The hidden-layer depth and the set of activations are not parameters at the moment, but
    they can be varied inside the function body. Calling this from Reticulate with more than
    one hidden layer has been problematic, although it works fine from Python.
    """
    # Insert default values here, since mutable default arguments are dangerous.
    if width_list is None:
        width_list = [10]
    if momentum_list is None:
        momentum_list = [0.9]
    if lr_list is None:
        lr_list = [0.02]

    x = torch.linspace(-5, 5, 61)
    y = torch.sin(x)
    static_params = {"x": x.numpy(), "y": y.numpy(), "seed": seed}

    # Run an experiment for every combination of hidden/width/activation/momentum/lr and save it.
    reslist = []
    hidden_list = [1]
    activations_list = [nn.Tanhshrink, nn.ReLU, nn.Tanh]
    variations_list = [hidden_list, width_list, activations_list, momentum_list, lr_list]
    param_list = itertools.product(*variations_list)
    for params in param_list:
        swarm_dict = {}
        bee, params = utils.make_bee(
            regimes.default_train,
            x,
            y,
            hidden=params[0],
            width=params[1],
            num_epochs=num_epochs,
            activation=params[2],
            momentum=params[3],
            lr=params[4],
        )
        res = core.swarm_train(bee, num_bees=num_bees, fields="ypred,loss", seed=seed)
        swarm_dict["results"] = res
        swarm_dict["params"] = params
        reslist.append(swarm_dict.copy())
    return reslist, static_params
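# Hypothetical consumer of the grid search above, for illustration only: each reslist entry
# pairs a swarm_train results dict with the resolved params returned by utils.make_bee.
def summarise_grid(width_list=None, lr_list=None):
    reslist, static_params = main(width_list=width_list, lr_list=lr_list)
    for swarm_dict in reslist:
        res, params = swarm_dict["results"], swarm_dict["params"]
        # mean final loss across the swarm for this parameter combination
        print(params, res["loss"][:, -1].mean())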
def test_simple():
    results = core.swarm_train(
        sin_experiment, bee_params=None, num_bees=2, seed=10, fields="ypred,loss"
    )
    # print(results)
    assert results.keys() == {"ypred", "loss"}
    assert len(results["loss"]) == 2
    assert results["loss"].shape == (2, 10)
    assert results["ypred"].shape == (2, 10, 100)
    # this tests the seed
    assert results["loss"][0][-1] == pytest.approx(0.5803773999214172)
def main():
    xt = torch.linspace(-6, 6, 101)
    yt = torch.sin(xt)
    h, w = 2, 10
    dropout = 0.1
    nepoch = 1000
    bee = regimes.make_bee(bee_trainer, xt, yt, h, w, dropout, nepoch)

    print("Starting training")
    # y_nodrop/loss_nodrop are the eval-mode (dropout disabled) counterparts collected by the bee
    results = core.swarm_train(bee, num_bees=20, fields="ypred,loss,y_nodrop,loss_nodrop")
    print("Done training, starting animation")

    basef = f"{dropout}_dropout_{h}h{w}w_{nepoch}e"
    animator.make_animation(xt, yt, results["ypred"], basef, f"data_out/{basef}.mp4")
    animator.make_animation(
        xt, yt, results["y_nodrop"], f"Eval {basef}", f"data_out/eval_{basef}.mp4"
    )
    print("Animation done")
def main(hidden, width, activation, nepoch, lr, funcname, xdomain, swarmsize, destdir, show):
    print(hidden, width, activation, nepoch, lr, funcname, xdomain, swarmsize, destdir, show)
    xdomain = [float(x) for x in xdomain.split(":")]
    xt = torch.linspace(xdomain[0], xdomain[1], 101)
    yt = get_function(funcname)(xt)
    afunc = swarm.get_activation(activation)

    bee_trainer = regimes.make_bee(
        regimes.default_train,
        xt,
        yt,
        activation=afunc,
        hidden=hidden,
        width=width,
        lr=lr,
        num_epochs=nepoch,
    )
    results = core.swarm_train(bee_trainer, num_bees=swarmsize, fields="ypred,loss")

    xdstr = f"[{xdomain[0]}:{xdomain[1]}]"
    fname = f"{funcname}_{xdstr}_{hidden}h{width}w_{activation}_{nepoch}e.mp4"
    destfile = os.path.join(destdir, fname)
    print(f"Creating animation and saving to {destfile}")
    ls1 = animator.LineSwarm.standard(
        xt.detach().numpy(),
        yt.detach().numpy(),
        results["ypred"],
        set_title=f"NN with {hidden} layers {width} wide and {activation} activation approximates {funcname}",
    )
    animator.swarm_animate([ls1], destfile)

    if show:
        import webbrowser

        print(f"Opening {os.path.abspath(destfile)} in browser")
        webbrowser.open_new_tab(os.path.abspath(destfile))
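# get_function is used above but not defined in this file. A minimal stand-in (an assumption,
# not the project's implementation) would simply map a function name to a torch callable:
def get_function_sketch(funcname):
    funcs = {"sin": torch.sin, "cos": torch.cos, "exp": torch.exp, "abs": torch.abs}
    return funcs[funcname]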
def main():
    # this is the simplest path to finish
    results = core.swarm_train(sin_experiment, num_bees=4, fields="ypred,loss,etime", seed=10)
    return results
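# Quick inspection of the collected fields, assuming "etime" (elapsed time per epoch) is
# gathered into a (num_bees, num_epochs) array like the other fields; this is a sketch, not
# part of the original script.
def show_timings():
    results = main()
    print(results["loss"].shape)
    print(results["etime"].sum(axis=1))  # total recorded time per bee, under the assumption above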