def dataset(request):
    # pytest fixture body: build the CIFAR-10 dataset once and register a
    # finalizer so teardown runs after the requesting test completes.
    dt = cnn.Cifar10_data()

    def fin():
        print("Teardown dataset")

    request.addfinalizer(fin)
    return dt
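# --- Illustrative usage sketch (assumption, not in the original example) ---
# If the fixture above carries pytest's @pytest.fixture decorator (cut off in
# this excerpt), pytest injects it into any test that names it as an argument.
# test_dataset_loads is a hypothetical test name:
def test_dataset_loads(dataset):
    # dataset is the cnn.Cifar10_data instance returned by the fixture; the
    # registered finalizer prints "Teardown dataset" once the test finishes.
    assert dataset is not None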
        "optim_args": {
            "lr": LR,
            "weight_decay": 0.00001
        },
    },
}
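# --- Illustrative sketch (assumption, not from the original source) ---
# The group_config dict above is truncated in this excerpt. Based on the keys
# visible across these fragments, a complete entry plausibly looks like:
#
# group_config = {
#     "Control": {
#         "epochs": EPOCHS,
#         "neurogenesis": 0,
#         "early_stop": False,
#         "optim_args": {"lr": LR, "weight_decay": 0.00001},
#     },
#     # groups such as "Dropout" or "Neural Noise" follow the same shape
# }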

results = pd.DataFrame(
    index=range(REPEATS * len(group_config)),
    columns=["Group", "Train Accuracy", "Test Accuracy", "Repeat"],
)

counter = 0
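# --- Illustrative sketch (assumption, not from the original source) ---
# The logging step is cut from this excerpt; given the columns defined above
# and the running counter, each run presumably appends a row along the lines
# of:
#
#     results.loc[counter] = [group, train_accuracy, test_accuracy, i]
#     counter += 1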

# DATASET
data_loader = cnn.Cifar10_data(mode="test", batch_size=BATCH_SIZE)

for i in range(REPEATS):
    print(f"Running trial {i+1}/{REPEATS}")
    net = cnn.NgnCnn(args.neurons, seed=i)
    for group in group_config:
        net_copy = copy.deepcopy(net)
        if "Dropout" in group:
            net_copy.dropout = 0.2
        if "Neural Noise" in group:
            net_copy.neural_noise = (-0.2, 0.5)
        net_copy.to(device)
        parameters = group_config[group]
        log, _ = cnn.train_model(
            model=net_copy,
            dataset=data_loader,
Example #3
            "lr": LR,
        },
    },  
    "Control": {
        "epochs": EPOCHS, 
        "neurogenesis": 0, 
        "early_stop": False, 
        "optim_args":{
            "lr": LR,
        }
    },
}


# DATASET
data_loader = cnn.Cifar10_data(mode="validation", data_folder=DATA_DIR, batch_size=BATCH_SIZE, num_workers=16)
REPEATS = 1
for i in range(REPEATS):
    print(f"Running trial {i+1}/{REPEATS}")
    net = cnn.NgnCnn(args.neurons, channels=3, seed=i, excite=True)
    for group in group_config:
        net_copy = copy.deepcopy(net)
        net_copy.to(device)
        if group == "Dropout":
            net_copy.dropout = 0.2
        parameters = group_config[group]
        log, optimizer = cnn.train_model(
            model=net_copy,
            dataset=data_loader,
            dtype=dtype,
            device=device,
Example #4
counter = 0


def sim_matrix(a, b, eps=1e-8):
    """
    Pairwise cosine-similarity matrix between the rows of a and b;
    eps keeps the division numerically stable for near-zero-norm rows.
    """
    a_n, b_n = a.norm(dim=1)[:, None], b.norm(dim=1)[:, None]
    a_norm = a / torch.max(a_n, eps * torch.ones_like(a_n))
    b_norm = b / torch.max(b_n, eps * torch.ones_like(b_n))
    sim_mt = torch.mm(a_norm, b_norm.transpose(0, 1))
    return sim_mt
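# --- Illustrative usage (assumption, not in the original example) ---
# sim_matrix returns the pairwise cosine similarities between the rows of its
# two inputs: an (n, d) and an (m, d) tensor yield an (n, m) matrix.
_demo = sim_matrix(torch.randn(4, 128), torch.randn(6, 128))
assert _demo.shape == (4, 6)  # one similarity score per (row of a, row of b)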


# DATASET
data_loader = cnn.Cifar10_data(mode="test", batch_size=4)

for i in range(REPEATS):
    print(f"Running trial {i+1}/{REPEATS}")
    net = cnn.NgnCnn(args.neurons, seed=i)
    for group_num, group in enumerate(group_config):
        net_copy = copy.deepcopy(net)
        if "Dropout" in group:
            net_copy.dropout = 0.2
        net_copy.to(device)
        parameters = group_config[group]
        log, _ = cnn.train_model(
            model=net_copy,
            dataset=data_loader,
            dtype=dtype,
            device=device,
Example #5
import numpy as np
import torch  # needed below for manual_seed, the float dtype, and device selection
import neurodl.cnn as cnn
import argparse
import torch.optim as optim
from datetime import datetime
from pathlib import Path
from ax.service.managed_loop import optimize
from ax import save
import joblib

torch.manual_seed(12345)
datatype = torch.float
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
BATCH_SIZE = 1000

data_loader = cnn.Cifar10_data(mode="validation", batch_size=BATCH_SIZE)

parametrization = [
    {
        "name": "lr",
        "type": "range",
        "bounds": [0.00001, 0.001],
        "value_type":
        "float",  # Optional, defaults to inference from type of "bounds".
        "log_scale": True,
    },
    {
        "name": "momentum",
        "type": "range",
        "bounds": [0, 1],
        "value_type":