Example #1
def test_set_gradients():
    model = Model(TestModule(), SamplingStrategy.BOTTOM_UP)
    weight_grads = torch.rand(size=(5, 10))
    bias_grads = torch.rand(size=(5,))
    flattened_layer_grads = torch.cat((weight_grads.flatten(), bias_grads.flatten())).reshape(-1, 1)
    model.set_gradients("layer_1", flattened_layer_grads)
    assert torch.allclose(model.net.layer_1.weight.grad, weight_grads)
    assert torch.allclose(model.net.layer_1.bias.grad, bias_grads)
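The flattened column vector packs all weight gradients first and the bias gradients last. A minimal sketch of the inverse mapping (an illustration of the assumed layout, not the library's actual set_gradients implementation):

import torch

weight_shape, bias_shape = (5, 10), (5,)
n_weight = weight_shape[0] * weight_shape[1]         # 50 weight entries
flat = torch.rand(n_weight + bias_shape[0], 1)       # (55, 1) column vector

weight_grad = flat[:n_weight].reshape(weight_shape)  # first 50 values -> (5, 10)
bias_grad = flat[n_weight:].reshape(bias_shape)      # last 5 values   -> (5,)
assert weight_grad.shape == (5, 10) and bias_grad.shape == (5,)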
Example #2
def test_set_layer_values():
    layer_weight = torch.rand(size=(5, 10))
    layer_bias = torch.rand(size=(5,))
    flattened_layer_weight = torch.cat((layer_weight.flatten(), layer_bias.flatten())).reshape(-1, 1)
    model = Model(TestModule())
    model.set_layer_value("layer_1", flattened_layer_weight)
    assert torch.allclose(model.net.layer_1.weight, layer_weight)
    assert torch.allclose(model.net.layer_1.bias, layer_bias)
def test_model_evaluator():
    samples = TestSamples(x=torch.ones(1, 3) * 5)
    model = Model(TestModule())
    objective = TestObjective()
    layer_name, _ = model.sample()
    layer_value = torch.ones(1, 3)
    evaluator = ModelEvaluator(model, objective, layer_name, samples)
    obj_value = evaluator(layer_value, "cpu")
    assert len(obj_value) == 1
    assert obj_value[0] == 15.0
 def __call__(self, model: Model, samples: Samples, device: str):
     # objective is the inverse of the squared loss: L = (y - w.x)^-2
     # dL/dw = 2x (y - w.x)^-3
     output = model.forward(samples.x)
     squared_loss = (samples.y - output)**2
     obj_value = 1 / squared_loss
     return [obj_value]
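The gradient noted in the comment above can be sanity-checked numerically with autograd (a standalone sketch; the scalar values are arbitrary):

import torch

x = torch.tensor(2.0)
y = torch.tensor(3.0)
w = torch.tensor(0.5, requires_grad=True)

obj = (y - w * x) ** -2                          # inverse of the squared loss
obj.backward()

analytic = (2 * x * (y - w * x) ** -3).detach()  # dL/dw = 2x(y - w.x)^-3, as in the comment above
assert torch.allclose(w.grad, analytic)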
Example #5
 def __call__(self, model: Model, samples: Samples,
              device: str) -> List[float]:
     device = torch.device(device)
     outputs = model.forward(samples.inputs.to(device))
     predicted_labels = torch.argmax(outputs, dim=1).cpu()
     accuracy = accuracy_score(samples.targets.tolist(),
                               predicted_labels.tolist())
     return [accuracy]
def test_estimator_step():
    samples = TestSamples(x=torch.ones(1, 1), y=torch.ones(1, 1) * 2)
    regressor = TestModule()
    model = Model(regressor)
    objective = TestObjective()
    optimizer = SGD(regressor.parameters(), 1e-2)
    estimator = ESOptimizer(model, optimizer, objective, [1.0], 1e-4, 50,
                            ["cpu"])
    gradient = estimator.gradient_step(samples)
    assert gradient.shape == (2, 1)
Example #7
def evaluate_on_dataset(model: Model, dataloader: DataLoader):
    all_predicted = []
    all_targets = []
    for inputs, targets in dataloader:
        samples = BatchSamples(inputs=inputs, targets=targets)
        outputs = model.forward(samples.inputs)
        predicted_labels = torch.argmax(outputs, dim=1).cpu()
        all_predicted.extend(predicted_labels.tolist())
        all_targets.extend(targets.tolist())

    accuracy = accuracy_score(all_targets, all_predicted)
    return accuracy
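With a DataLoader such as the test loader built in Example #16, evaluation then reduces to a single call (hypothetical usage):

test_accuracy = evaluate_on_dataset(wrapped_model, testloader)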
Example #8
 def __call__(self, model: Model, samples: Samples, device: str = "cpu") -> List[float]:
     # play an episode
     env = gym.make(samples.env_name)
     env._max_episode_steps = 500
     state = env.reset()
     done = False
     episodic_return = 0
     while not done:
         state = torch.from_numpy(state).reshape((1, -1)).float()
         action = int(model.forward(state))
         state, reward, done, info = env.step(action)
         episodic_return += reward
     return [episodic_return]
 def __call__(self, model: Model, samples: Samples, device: str):
     output = model.forward(samples.x)
     obj_value = [output.sum().item()]
     return obj_value
Example #10
        env._max_episode_steps = 500
        state = env.reset()
        done = False
        episodic_return = 0
        while not done:
            state = torch.from_numpy(state).reshape((1, -1)).float()
            action = int(model.forward(state))
            state, reward, done, info = env.step(action)
            episodic_return += reward
        return [episodic_return]


if __name__ == "__main__":
    # model
    policy = Net(n_inputs=4, n_hidden=50, n_outputs=2)
    wrapped_model = Model(policy, strategy=SamplingStrategy.BOTTOM_UP)

    # objective function (loss function)
    reward_measure = EpisodicReturn()

    # optimizer
    es_optimizer = ESOptimizer(model=wrapped_model,
                               sgd_optimizer=SGD(policy.parameters(), lr=1e-2),
                               objective_fn=reward_measure,
                               obj_weights=[1.0],
                               sigma=1e-1,
                               n_samples=10,
                               devices=["cpu"])

    # create env samples
    samples = EnvSamples(env_name="CartPole-v0")

    # train for number of epochs
    running_return = 0
    show_every = 50
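The training loop itself is cut off in this excerpt. A plausible continuation, using only the gradient_step API shown in test_estimator_step (the epoch count, the assumption that gradient_step also applies the SGD update, and the logging are guesses):

    for epoch in range(1000):
        # ES gradient estimate + parameter update (assumed to be applied inside gradient_step)
        es_optimizer.gradient_step(samples)
        episodic_return = reward_measure(wrapped_model, samples)[0]
        running_return = 0.9 * running_return + 0.1 * episodic_return
        if epoch % show_every == 0:
            print(f"epoch {epoch}: running return {running_return:.1f}")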
Example #11
def test_all_at_once():
    model = Model(TestModule(), SamplingStrategy.ALL)
    layer_name, layer_value = model.sample()
    assert layer_name == "all"
    assert layer_value.shape[0] == 77
Example #12
def test_random_sampling():
    model = Model(TestModule(), SamplingStrategy.RANDOM)
    assert model.sample()[0] in ["layer_3", "layer_2", "layer_1"]
    assert model.sample()[0] in ["layer_3", "layer_2", "layer_1"]
Example #13
def test_top_down_sampling():
    model = Model(TestModule(), SamplingStrategy.TOP_DOWN)
    assert model.sample()[0] == "layer_3"
    assert model.sample()[0] == "layer_2"
    assert model.sample()[0] == "layer_1"
    assert model.sample()[0] == "layer_3"
Example #14
def test_bottom_up_sampling():
    model = Model(TestModule(), SamplingStrategy.BOTTOM_UP)
    assert model.sample()[0] == "layer_1"
    assert model.sample()[0] == "layer_2"
    assert model.sample()[0] == "layer_3"
    assert model.sample()[0] == "layer_1"
Example #15
def test_forward():
    model = Model(TestModule())
    input = torch.rand(size=(1, 10))
    output = model.forward(input)
    assert output.shape[0] == 1
    assert output.shape[1] == 1
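The TestModule fixture itself is not shown. A definition consistent with the shapes asserted in Examples #1, #2, #11 and #15 (a 55-parameter layer_1, 77 parameters in total, 10-dimensional input, scalar output) could look as follows; this is an assumption, and other snippets (e.g. test_estimator_step) evidently use a different fixture:

import torch
import torch.nn as nn

class TestModule(nn.Module):
    def __init__(self):
        super().__init__()
        self.layer_1 = nn.Linear(10, 5)   # 50 + 5 = 55 parameters
        self.layer_2 = nn.Linear(5, 3)    # 15 + 3 = 18 parameters
        self.layer_3 = nn.Linear(3, 1)    #  3 + 1 =  4 parameters

    def forward(self, x):
        x = torch.relu(self.layer_1(x))
        x = torch.relu(self.layer_2(x))
        return self.layer_3(x)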
Example #16
    trainset = torchvision.datasets.MNIST(root='./data',
                                          train=True,
                                          download=True,
                                          transform=transform)
    testset = torchvision.datasets.MNIST(root='./data',
                                         train=False,
                                         download=True,
                                         transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset,
                                              50,
                                              shuffle=True,
                                              num_workers=1)
    testloader = torch.utils.data.DataLoader(testset, 50, num_workers=1)

    # model
    classifier = Net(n_outputs=10)
    wrapped_model = Model(classifier, strategy=SamplingStrategy.ALL)

    # objective function (loss function)
    obj_measure = Accuracy()

    # optimizer
    es_optimizer = ESOptimizer(model=wrapped_model,
                               sgd_optimizer=Adadelta(classifier.parameters()),
                               objective_fn=obj_measure,
                               obj_weights=[1.0],
                               sigma=1e-2,
                               n_samples=100,
                               devices=["cpu"],
                               n_workers=10)

    for epoch in range(1):