Example #1
    ),
    # "static-second-layer-varying-sparsity": dict(
    #     model="DSCNN",
    #     network="gsc_sparse_dscnn",
    #     prune_methods=["none", "static"],
    #     sparsity=tune.grid_search([0.98, 0.99, 0.999]),
    # ),
}
# Pair each selected experiment name with its merged config; fall back to the
# base config when no experiment overrides are selected.
exp_configs = (
    [(name, new_experiment(base_exp_config, c)) for name, c in experiments.items()]
    if experiments
    else [(experiment_name, base_exp_config)]
)
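
# Note: `new_experiment` and `base_exp_config` are defined elsewhere in the
# project and are not part of this snippet. As a rough sketch only (an
# assumption, not the project's actual implementation), `new_experiment` could
# simply overlay the per-experiment settings on a copy of the base config:
#
# def new_experiment(base_config, overrides):
#     config = dict(base_config)
#     config.update(overrides)
#     return config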

# Download dataset.
download_dataset(base_exp_config)

# Register custom serializers so Ray can pass torch tensors between processes.
ray.init()
for t in [
    torch.FloatTensor,
    torch.DoubleTensor,
    torch.HalfTensor,
    torch.ByteTensor,
    torch.CharTensor,
    torch.ShortTensor,
    torch.IntTensor,
    torch.LongTensor,
    torch.Tensor,
]:
    ray.register_custom_serializer(t, serializer=serializer, deserializer=deserializer)
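
# The `serializer` and `deserializer` callables passed above are not shown in
# this snippet. A minimal sketch of such a pair, assuming the intent is simply
# to round-trip tensors through NumPy (hypothetical, not the original code):
#
# def serializer(tensor):
#     return tensor.detach().cpu().numpy()
#
# def deserializer(array):
#     return torch.as_tensor(array)
#
# `ray.register_custom_serializer` comes from older Ray releases; newer
# versions handle serialization differently.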
Example #2
    boost_strength_factor=0.7,
    test_noise=True,
    noise_level=0.1,
    kwinners=tune.grid_search([True, False]),  # kwinners is now a swept (grid-searched) parameter
)

tune_config = dict(
    name="SET_DSNN_BoostingEval",
    num_samples=1,
    local_dir=os.path.expanduser("~/nta/results"),
    config=exp_config,
    checkpoint_freq=0,
    checkpoint_at_end=False,
    stop={"training_iteration": 300},
    resources_per_trial={
        "cpu": 1,
        "gpu": 1
    },
    loggers=DEFAULT_LOGGERS,
    verbose=1,
)

# Override for local test runs when no GPU is available.
if not torch.cuda.is_available():
    exp_config["device"] = "cpu"
    tune_config["resources_per_trial"] = {"cpu": 1}

# Download dataset.
download_dataset(exp_config)

# Start Ray and launch the Tune experiment.
ray.init()
tune.run(Trainable, **tune_config)
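
# `Trainable` is the project's Tune trainable class, imported from code not
# shown here. For orientation only, a minimal stand-in compatible with
# `tune.run(Trainable, **tune_config)` in the older Ray Tune class API (the
# hooks were later renamed to `setup`/`step`) might look like this sketch
# (an assumption, not the project's actual class):
#
# from ray import tune
#
# class Trainable(tune.Trainable):
#     def _setup(self, config):
#         # Build the model and optimizer from the experiment config here.
#         self.loss = 1.0
#
#     def _train(self):
#         # One training iteration; Tune stops at training_iteration == 300
#         # per the stop criterion above.
#         self.loss *= 0.99
#         return {"mean_loss": self.loss}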