Example #1
0
 def __init__(self, test_name, transfer_name, network_name, transfer_freeze,
              split_frac, max_epochs, batch_size, learning_rate, momentum,
              criterion_name, test_count, seed, data_collect_freq):
     """Validate the experiment configuration, build the transfer-learning
     network, and load/split the CIFAR-10 training data.

     The constructor seeds NumPy's RNG with `seed` and pre-draws one RNG
     seed per test run into `self.seeds`, so repeated experiments are
     reproducible.

     NOTE(review): `assert` statements are stripped under `python -O`, so
     this validation silently disappears in optimized runs — raising
     ValueError would be safer. Left unchanged to preserve behavior.
     """
     assert network_name in self.__get_supported_networks(
     ), 'Network not supported'
     assert transfer_name in self.__get_supported_transfers(
     ), 'Transfer not supported'
     assert 0 < split_frac <= 1, 'Split frac not in (0, 1]'
     assert 0 < max_epochs, 'Max epochs must be positive'
     # NOTE(review): "critetions" looks like a typo for "criterions"; the
     # name must match the helper defined elsewhere in this class, so it is
     # not renamed here.
     assert criterion_name in self.__get_supported_critetions(
     ), 'Criterion not supported'
     assert 0 < test_count, 'Test number must be positive'
     assert 0 < data_collect_freq, 'Data collection frequency must be positive'
     self.test_name = test_name
     # Build the backbone + transfer head; also yields the input size the
     # dataset loader must resize images to.
     self.network, self.input_size = networks.build(transfer_name,
                                                    network_name,
                                                    transfer_freeze)
     data_set = datasets.cifar10.from_kaggle(train=True,
                                             input_size=self.input_size)
     # split_frac is the fraction kept for training; the rest is validation.
     self.train_set, self.validation_set = datasets.split(data_set,
                                                          frac=split_frac)
     self.max_epochs = max_epochs
     self.batch_size = batch_size
     self.learning_rate = learning_rate
     self.momentum = momentum
     # Look up the loss class by name in torch.nn and instantiate it
     # (e.g. "CrossEntropyLoss" -> nn.CrossEntropyLoss()).
     self.criterion = nn.__dict__[criterion_name]()
     self.test_count = test_count
     # Seed once, then draw a distinct sub-seed for each of the test runs.
     np.random.seed(seed)
     self.seeds = np.random.randint(1000000, size=test_count)
     self.data_collect_freq = data_collect_freq
Example #2
0
def main():
    """Train a mini-Inception network on a CIFAR-10 train/validation split."""
    dataset = DatasetCIFAR10('Train', 'datasets/cifar-10-batches-py/')
    train_set, val_set = split(dataset)

    model = Network(mini_inception_architecture,
                    input_shape=(32, 32, 3),
                    depth=32,
                    stride=1,
                    n_hidden=64,
                    n_classes=10)

    train(model, train_set, val_set)
def main():
    """Display labelled CIFAR-10 samples, then train a mini-Inception net."""
    dataset = DatasetCIFAR10('Train', 'datasets/cifar-10-batches-py/')

    # Show the raw dataset before splitting it.
    enlarge_plot_area()
    show = Displayer(
        load_labels('datasets/cifar-10-batches-py/batches.meta'))
    show(dataset)

    train_set, val_set = split(dataset)

    model = Network(mini_inception_architecture,
                    input_shape=(32, 32, 3),
                    depth=32,
                    stride=1,
                    n_hidden=64,
                    n_classes=10)

    train(model, train_set, val_set, report=plot_report)
Example #4
0
from itertools import chain
from datasets import split, get_MOTS_dicts, get_KITTI_MOTS_dicts
from detectron2.data import DatasetCatalog, MetadataCatalog


# For each dataset, hold out a test split and register 5-fold
# cross-validation splits with detectron2's catalogs.
for dicts in (get_MOTS_dicts(), get_KITTI_MOTS_dicts()):
    # BUG FIX: the original iterated `zip((a, b))`, which yields the
    # 1-tuples (a,) and (b,) rather than the dataset dict-lists themselves,
    # so `split` received a tuple wrapping the data instead of the data.
    all_train_dicts, test_dicts = split(dicts)
    # Truncate so the training dicts divide evenly into 5 folds.
    chunk_size = len(all_train_dicts) // 5
    all_train_dicts = all_train_dicts[: chunk_size * 5]
    split_beg = [i * chunk_size for i in range(6)]
    split_end = split_beg[1:]  # zip() below truncates to the 5 (beg, end) pairs
    folds = [all_train_dicts[i:j] for i, j in zip(split_beg, split_end)]
    for i in range(5):
        # Fold i is validation; the remaining four folds form the train set.
        train_dicts = list(chain(*(folds[j] for j in range(5) if j != i)))
        val_dicts = list(folds[i])
        DatasetCatalog.clear()
        # BUG FIX: bind the current lists as default arguments. A plain
        # `lambda: train_dicts` closure late-binds the loop variable and
        # would return the *last* iteration's lists if detectron2 invokes
        # the loader after this loop advances.
        DatasetCatalog.register("train", lambda d=train_dicts: d)
        DatasetCatalog.register("validation", lambda d=val_dicts: d)
        DatasetCatalog.register("test", lambda d=test_dicts: d)
        thing_classes = ["Car", "Pedestrian"]
        MetadataCatalog.get("train").set(thing_classes=thing_classes)
        MetadataCatalog.get("validation").set(thing_classes=thing_classes)
        MetadataCatalog.get("test").set(thing_classes=thing_classes)

        # training code goes here
Example #5
0
    # Command-line configuration for a small training run.
    parser.add_argument("--PTS", type=int, default=50, help="number of points")
    parser.add_argument("--HIDDEN",
                        type=int,
                        default=10,
                        help="number of hiddens")
    parser.add_argument("--RATE",
                        type=float,
                        default=0.05,
                        help="learning rate")
    parser.add_argument("--BACKEND", default="cpu", help="backend mode")
    parser.add_argument("--DATASET", default="simple", help="dataset")
    # NOTE(review): the help text "dataset" looks copy-pasted from the line
    # above; also, argparse treats any non-empty string passed to --PLOT as
    # truthy since no type/action is given — confirm intended usage.
    parser.add_argument("--PLOT", default=False, help="dataset")

    args = parser.parse_args()

    PTS = args.PTS

    # Pick the synthetic dataset generator by name.
    # NOTE(review): there is no else branch — an unrecognized --DATASET
    # value leaves `data` unbound and raises NameError below.
    if args.DATASET == "xor":
        data = datasets.xor(PTS)
    elif args.DATASET == "simple":
        data = datasets.simple(PTS)
    elif args.DATASET == "split":
        data = datasets.split(PTS)

    HIDDEN = int(args.HIDDEN)
    RATE = args.RATE

    # Train with the GPU backend only when explicitly requested; any other
    # --BACKEND value falls back to the fast CPU tensor backend.
    FastTrain(HIDDEN,
              backend=FastTensorBackend
              if args.BACKEND != "gpu" else GPUBackend).train(data, RATE)
Example #6
0
        y_hat = self.predict(X)
        # Mean squared error between targets y and predictions y_hat.
        mse = np.mean((y - y_hat)**2)
        return mse


def aux_objective_func(algo, X, y):
    """Fraction of samples whose predicted sign matches the target's sign.

    Values >= 0.0 count as the positive class (+1); everything else is -1.
    Returns the mean agreement as a float in [0, 1].
    """
    true_sign = np.where(y < 0.0, -1, 1)
    pred_sign = np.where(algo.predict(X) < 0.0, -1, 1)
    return np.mean(true_sign == pred_sign)


# Binary classification on iris: keep only classes 0 and 1, relabel class 0
# as -1 so targets are in {-1, +1}.
X, y = datasets.iris(return_splits=False)
X = X[y != 2]
y = y[y != 2]
y[y == 0] = -1
X_train, X_test, y_train, y_test = datasets.split(X, y)

# Hyperparameter search space: learning rate (continuous) and epoch count
# (discrete).
alpha = ht.ContinuousParameter('alpha', lower_bound=10**-10, upper_bound=0.01)
epochs = ht.DiscreteParameter('epochs', lower_bound=200, upper_bound=10**4)

hypers = [alpha, epochs]

# Minimize Perceptron MSE on the held-out test split over 50 evaluations.
# NOTE(review): the tuner is presumably run further down in the file; only
# its construction is visible here.
tuner = ht.HyperTune(algorithm=Perceptron,
                     parameters=hypers,
                     train_func=Perceptron.fit,
                     objective_func=Perceptron.mse,
                     train_func_args=(X_train, y_train),
                     objective_func_args=(X_test, y_test),
                     max_evals=50,
                     maximize=False)