Example #1
def main():
    # hparams = [(low, high),  # one (low, high) pair per variable
    #            (low, high)]
    hparams = [
        (0, 20),  # positive_reward_for_divert
        (0, 20),  # wrong_sup_at_goal
        (0, 20),  # flooding_reward
        (0, 20),  # neg_reward_ia
        (0, 20),  # negative_reward_for_empty_queue
        (0, 20)   # negative_reward_for_cycle
    ]

    # define the path for the results
    hyperdrive_results = join(path, 'rl', 'hyper_parameter', env_name)

    # make the results folder if it does not exist
    os.makedirs(hyperdrive_results, exist_ok=True)

    # run the hyperdrive optimization
    hyperdrive(objective=objective,
               hyperparameters=hparams,
               results_path=hyperdrive_results,
               checkpoints_path=hyperdrive_results,
               model="GP",
               n_iterations=50,
               verbose=True,
               random_state=42)
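All of these examples pass an `objective` callable defined elsewhere in the file: hyperdrive samples one point from the bounds, calls the objective with it, and minimizes the returned scalar. A minimal hypothetical sketch for this example (`run_episode` stands in for the actual environment rollout):

def objective(hparams):
    # hyperdrive passes one sampled point, ordered like hparams above
    (divert, wrong_sup, flooding,
     neg_ia, empty_queue, cycle) = hparams
    reward = run_episode(divert, wrong_sup, flooding,
                         neg_ia, empty_queue, cycle)  # hypothetical helper
    return -reward  # negate so that minimizing maximizes reward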
Example #2
def run(results_dir, n_calls=200, n_runs=10):
    """Run benchmark for Branin function."""
    models = ['GP', 'RF', 'GBRT', 'Rand']
    bounds = [(-5.0, 10.0), (0.0, 15.0)]

    for model in models:
        model_dir = os.path.join(results_dir, model)
        os.makedirs(model_dir, exist_ok=True)

        for random_state in range(n_runs):
            directory = os.path.join(model_dir, 'run' + str(random_state))

            os.makedirs(directory, exist_ok=True)

            # reload any evaluations saved by a previous run in this directory
            checkpoint = load_results(directory)

            hyperdrive(branin,
                       bounds,
                       directory,
                       model=model,  # without this, the models loop has no effect
                       n_iterations=n_calls,
                       verbose=True,
                       random_state=random_state,
                       checkpoints=True,
                       restart=checkpoint)
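The benchmark's `branin` objective is the standard two-dimensional Branin test function over the bounds above; a self-contained sketch:

import numpy as np

def branin(x):
    # Branin test function on (-5, 10) x (0, 15);
    # the global minimum value is about 0.397887
    x1, x2 = x
    b = 5.1 / (4.0 * np.pi ** 2)
    c = 5.0 / np.pi
    t = 1.0 / (8.0 * np.pi)
    return (x2 - b * x1 ** 2 + c * x1 - 6.0) ** 2 \
        + 10.0 * (1.0 - t) * np.cos(x1) + 10.0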
Example #3
def run(results_dir, n_calls=200, n_runs=10):
    """Run benchmark for Branin function."""
    models = ['Rand']
    bounds = np.tile((-5., 5.), (4, 1))

    for model in models:
        model_dir = os.path.join(results_dir, model)
        os.makedirs(model_dir, exist_ok=True)

        for random_state in range(n_runs):
            directory = os.path.join(model_dir, 'run' + str(random_state))

            os.makedirs(directory, exist_ok=True)

            # checkpoint = load_results(directory)

            hyperdrive(stybtang,
                       bounds,
                       directory,
                       model=model,  # pass the sampler selected in the loop
                       n_iterations=n_calls,
                       verbose=True,
                       random_state=random_state,
                       checkpoints=True)
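Likewise, `stybtang` is the Styblinski-Tang test function, evaluated here over the four-dimensional bounds built with np.tile; a self-contained sketch:

import numpy as np

def stybtang(x):
    # Styblinski-Tang function; minimized at x_i ~ -2.903534,
    # where each dimension contributes about -39.166
    x = np.asarray(x)
    return 0.5 * np.sum(x ** 4 - 16.0 * x ** 2 + 5.0 * x)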
Example #4
def main():
    parser = argparse.ArgumentParser(description='Setup experiment.')
    parser.add_argument('--results_dir', type=str, help='Path to results directory.')
    parser.add_argument('--log_dir', type=str, default='./logs', help='Path to save logs')
    args = parser.parse_args()

    # Logging for Visual Comparison
    global logger
    log_cols = ["Classifier", "Train Accuracy", "Val Accuracy", "Log Loss"]
    logger = Log(colnames=log_cols, savepath=args.log_dir, rank=rank)

    global X_train, X_val, X_test, y_train, y_val, y_test
    X_train, X_val, X_test, y_train, y_val, y_test = load_data(0.25, 0.25)

    hparams = [(25, 100),  # n_estimators
               (2, 10)]   # max_depth

    hyperdrive(objective=objective,
               hyperparameters=hparams,
               results_path=args.results_dir,
               model="GP",
               n_iterations=50,
               verbose=True,
               random_state=0,
               checkpoints=True)

    # Save the log data frame
    logger.save()
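The bounds' comments (n_estimators, max_depth) and the log columns suggest a random-forest objective along these lines. The sketch below is hypothetical and assumes the train/validation globals set up in main(), with logging omitted:

from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import log_loss

def objective(hparams):
    # fit one random-forest configuration and return the
    # validation log loss for hyperdrive to minimize
    n_estimators, max_depth = hparams
    clf = RandomForestClassifier(n_estimators=int(n_estimators),
                                 max_depth=int(max_depth),
                                 random_state=0)
    clf.fit(X_train, y_train)
    return log_loss(y_val, clf.predict_proba(X_val))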
Example #5
def main():
    parser = argparse.ArgumentParser(description='Setup experiment.')
    parser.add_argument('--results_dir',
                        type=str,
                        help='Path to results directory.')
    args = parser.parse_args()

    hparams = [
        (2, 10),  # max_depth
        (10.0**-2, 10.0**0),  # learning_rate
        (1, 10),  # max_features
        (2, 100),  # min_samples_split
        (1, 100)   # min_samples_leaf
    ]

    hyperdrive(objective=objective,
               hyperparameters=hparams,
               results_path=args.results_dir,
               model="GP",
               n_iterations=50,
               verbose=True,
               random_state=0,
               sampler="lhs",
               n_samples=5,
               checkpoints=True)
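Here sampler="lhs" with n_samples=5 presumably seeds the surrogate model with five Latin-hypercube points before the guided search starts. The comments map the bounds onto gradient-boosting parameters; a hypothetical objective sketch, assuming train/validation globals elsewhere in the file:

from sklearn.ensemble import GradientBoostingClassifier

def objective(hparams):
    # evaluate one gradient-boosting configuration;
    # return the validation error for minimization
    max_depth, learning_rate, max_features, \
        min_samples_split, min_samples_leaf = hparams
    clf = GradientBoostingClassifier(max_depth=int(max_depth),
                                     learning_rate=learning_rate,
                                     max_features=int(max_features),
                                     min_samples_split=int(min_samples_split),
                                     min_samples_leaf=int(min_samples_leaf),
                                     random_state=0)
    clf.fit(X_train, y_train)
    return 1.0 - clf.score(X_val, y_val)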
Example #6
def main():
    parser = argparse.ArgumentParser(description='Setup experiment.')
    parser.add_argument('--results_dir',
                        type=str,
                        help='Path to results directory.')
    args = parser.parse_args()

    global batch_size, num_classes, epochs
    batch_size = 128
    num_classes = 10
    epochs = 12

    # input image dimensions
    img_rows, img_cols = 28, 28

    global x_train, y_train, x_test, y_test
    # the data, split between train and test sets
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    global input_shape
    if K.image_data_format() == 'channels_first':
        x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
        x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
        input_shape = (1, img_rows, img_cols)
    else:
        x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
        x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
        input_shape = (img_rows, img_cols, 1)

    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    x_train /= 255
    x_test /= 255

    # convert class vectors to binary class matrices
    y_train = keras.utils.to_categorical(y_train, num_classes)
    y_test = keras.utils.to_categorical(y_test, num_classes)

    hparams = [
        (2, 5),  # kernel1
        (2, 5),  # kernel2
        (0.25, 0.75),  # dropout1
        (0.25, 0.75)   # dropout2
    ]

    hyperdrive(objective=objective,
               hyperparameters=hparams,
               results_path=args.results_dir,
               model="GP",
               n_iterations=20,
               verbose=True,
               random_state=0)
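The MNIST globals above feed an objective that, judging by the (kernel1, kernel2, dropout1, dropout2) bounds, builds a small CNN; a hypothetical sketch in the style of the Keras MNIST example:

from keras.models import Sequential
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense

def objective(hparams):
    # train one CNN configuration and return the test loss
    kernel1, kernel2, dropout1, dropout2 = hparams
    model = Sequential([
        Conv2D(32, kernel_size=int(kernel1), activation='relu',
               input_shape=input_shape),
        Conv2D(64, kernel_size=int(kernel2), activation='relu'),
        MaxPooling2D(pool_size=2),
        Dropout(dropout1),
        Flatten(),
        Dense(128, activation='relu'),
        Dropout(dropout2),
        Dense(num_classes, activation='softmax')
    ])
    model.compile(loss='categorical_crossentropy', optimizer='adadelta')
    model.fit(x_train, y_train, batch_size=batch_size,
              epochs=epochs, verbose=0)
    return model.evaluate(x_test, y_test, verbose=0)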
Example #7
def main():
    parser = argparse.ArgumentParser(description='CHEERS hyperparameter optimization')
    parser.add_argument('--results', type=str, help='Path to results directory.')
    args = parser.parse_args()

    hparams = [(0.1, 1000),      # C
               (0.0001, 10)]     # epsilon

    hyperdrive(objective=objective,
               hyperparameters=hparams,
               results_path=args.results,
               checkpoints_path=args.results,
               model="GP",
               n_iterations=100,
               verbose=True,
               random_state=0)
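The (C, epsilon) bounds point to a support vector regressor; a hypothetical objective sketch, assuming train/validation splits defined elsewhere in the file:

from sklearn.svm import SVR
from sklearn.metrics import mean_squared_error

def objective(hparams):
    # score one SVR configuration by validation MSE
    C, epsilon = hparams
    model = SVR(C=C, epsilon=epsilon)
    model.fit(X_train, y_train)
    return mean_squared_error(y_val, model.predict(X_val))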
Example #8
def main():
    global args
    args = parse_args()
    args.cuda = not args.no_cuda and torch.cuda.is_available()

    torch.manual_seed(args.seed)
    if args.cuda:
        torch.cuda.manual_seed(args.seed)

    train_data = Deidentified(
        data_path=args.data_dir + '/data/train',
        label_path=args.data_dir + '/labels/train'
    )

    global train_size
    train_size = len(train_data)

    test_data = Deidentified(
        data_path=args.data_dir + '/data/test',
        label_path=args.data_dir + '/labels/test'
    )

    global train_loader
    train_loader = DataLoader(train_data, batch_size=args.batch_size, shuffle=True)
    global test_loader
    test_loader = DataLoader(test_data, batch_size=args.batch_size, shuffle=False)
    global wv_matrix
    wv_matrix = load_wv_matrix(args.data_dir + '/wv_matrix/wv_matrix.npy')

    global criterion
    criterion = nn.CrossEntropyLoss()

    hparams = [(2, 10),      # kernel1
               (2, 10),      # kernel2
               (2, 10),      # kernel3
               (100, 200),   # num_filters1
               (100, 200),   # num_filters2
               (100, 200)]   # num_filters3

    hyperdrive(objective=objective,
               hyperparameters=hparams,
               results_path=args.results_dir,
               model="GP",
               n_iterations=15,
               verbose=True,
               random_state=0)
Example #9
def main():
    hparams = [
        (2, 10),  # encoder kernel1
        (2, 10),  # encoder kernel2
        (2, 10),  # encoder kernel3
        (2, 10),  # decoder kernel1
        (2, 10),  # decoder kernel2
        (2, 10)   # decoder kernel3
    ]

    hyperdrive(objective=objective,
               hyperparameters=hparams,
               results_path=args.results_dir,
               model="GP",
               n_iterations=50,
               verbose=True,
               random_state=0,
               deadline=args.deadline)
Example #10
def main():
    parser = argparse.ArgumentParser(description='Setup experiment.')
    parser.add_argument('--results_dir', type=str, help='Path to results directory.')
    args = parser.parse_args()

    hparams = [(2, 10),             # max_depth
               (10.0**-2, 10.0**0)] # learning_rate

    # Load results from previous runs
    checkpoint = load_results(args.results_dir)

    hyperdrive(objective=objective,
               hyperparameters=hparams,
               results_path=args.results_dir,
               model="GP",
               n_iterations=100,
               verbose=True,
               random_state=0,
               checkpoints=True,
               restart=checkpoint)
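This is the resume pattern: a previous run saved its evaluations because checkpoints=True, load_results reads them back from the same results directory, and restart=checkpoint lets the optimizer continue from those points instead of starting over.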
Example #11
def main():
    parser = argparse.ArgumentParser(description='Setup experiment.')
    parser.add_argument('--results_dir', type=str, help='Path to results directory.')
    args = parser.parse_args()

    hparams = [(0.001, 0.1),
               (0.0, 0.90),
               (0.001, 0.1),
               (0.0, 0.90)]

    hyperdrive(objective=objective,
               hyperparameters=hparams,
               results_path=args.results_dir,
               model="GP",
               n_iterations=15,
               verbose=True,
               random_state=0,
               sampler="lhs",
               n_samples=5,
               checkpoints=True)
Example #12
def main():
    hparams = [
        (2, 10),  # kernel1
        (2, 10),  # kernel2
        (2, 10),  # kernel3
        (2, 10),  # kernel4
        (0.25, 0.95),  # dropout5
        (0.25, 0.95)   # dropout6
    ]

    hyperdrive(objective=objective,
               hyperparameters=hparams,
               results_path=args.results_dir,
               model="GP",
               n_iterations=20,
               verbose=True,
               sampler="lhs",
               n_samples=4,
               random_state=0,
               deadline=args.deadline)
Example #13
def main():
    parser = argparse.ArgumentParser(description='Setup experiment.')
    parser.add_argument('--results_dir',
                        type=str,
                        help='Path to results directory.')
    args = parser.parse_args()

    hparams = [
        (2, 10),  # max_depth
        (10.0**-2, 10.0**0)  # learning_rate
    ]

    hyperdrive(objective=objective,
               hyperparameters=hparams,
               results_path=args.results_dir,
               model="GP",
               n_iterations=100,
               verbose=True,
               random_state=0,
               deadline=120)
Example #14
def main():
    global args
    args = parse_args()
    torch.manual_seed(args.seed)

    global device
    device = torch.device("cuda" if args.cuda else "cpu")

    global corpus
    corpus = data.Corpus(args.data)
    
    global eval_batch_size
    eval_batch_size = 10

    global train_data
    train_data = batchify(corpus.train, args.batch_size)

    global val_data
    val_data = batchify(corpus.valid, eval_batch_size)

    global criterion
    criterion = nn.CrossEntropyLoss()

    hparams = [
        (2, 5),                                  # nlayers
        (100, 300),                              # word embedding dim
        (50, 250),                               # nhid
        ('RNN_TANH', 'RNN_RELU', 'LSTM', 'GRU')  # model
    ]

    hyperdrive(objective=objective,
               hyperparameters=hparams,
               results_path=args.results_dir,
               model="GP",
               n_iterations=20,
               verbose=True,
               random_state=0,
               deadline=7000,
               checkpoints=True)
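Note the fourth dimension: unlike the numeric (low, high) pairs elsewhere, it is a tuple of strings, which is presumably treated as a categorical choice (as in scikit-optimize's Categorical dimension) rather than a numeric range.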
Example #15
def main():
    parser = argparse.ArgumentParser(description='Setup experiment.')
    parser.add_argument(
        '--ndims',
        type=int,
        help='Number of dimensions for Styblinski-Tang function')
    parser.add_argument('--results_dir',
                        type=str,
                        help='Path to results directory.')
    parser.add_argument('--checkpoints', type=str, help='Path to checkpoints')
    args = parser.parse_args()

    stybtang = StyblinskiTang(args.ndims)
    bounds = np.tile((-5., 5.), (args.ndims, 1))

    hyperdrive(objective=stybtang,
               hyperparameters=bounds,
               results_path=args.results_dir,
               checkpoints_path=args.checkpoints,
               model="GP",
               n_iterations=50,
               verbose=True,
               random_state=0)
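For reference, np.tile((-5., 5.), (args.ndims, 1)) simply stacks one (low, high) pair per dimension:

>>> import numpy as np
>>> np.tile((-5., 5.), (3, 1))
array([[-5.,  5.],
       [-5.,  5.],
       [-5.,  5.]])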
Example #16
def main():
    hparams = [
        (2, 10),  # kernel1
        (2, 10),  # kernel2
        (2, 10),  # kernel3
        (2, 10),  # kernel4
        (2, 10),  # kernel5
        (2, 10),  # kernel6
        (2, 10),  # kernel7
        (2, 10),  # kernel8
        (2, 10),  # kernel9
        (2, 10),  # kernel10
        (0.25, 0.95),  # dropout5
        (0.25, 0.95)   # dropout6
    ]

    hyperdrive(objective=objective,
               hyperparameters=hparams,
               results_path=args.results_dir,
               model="GP",
               n_iterations=50,
               verbose=True,
               random_state=0,
               deadline=args.deadline)
Example #17
def main():
    parser = argparse.ArgumentParser(description='Setup experiment.')
    parser.add_argument('--results_dir',
                        type=str,
                        help='Path to results directory.')
    args = parser.parse_args()

    hparams = [
        (2, 8),  # kernel1
        (2, 8)   # kernel2
    ]
    # (2, 8),     # kernel3
    # (2, 8),     # kernel4
    # (2, 8),     # kernel5
    # (2, 8),     # kernel6
    # (32, 64)]   # batch_size

    hyperdrive(objective=objective,
               hyperparameters=hparams,
               results_path=args.results_dir,
               model="GP",
               n_iterations=11,
               verbose=True,
               random_state=0)
Example #18
def main():
    params = [
        # (the leading entries of this list are missing from the source)
        [True, False],  # use cyclic
        (0, 8),  # nheads
        (64, 1024),  # intermediate
        (1, 6)   # linear layers
    ]
    results_path = ('/gpfs/alpine/med106/proj-shared/aclyde/MolecularAttention'
                    '/qm8/hyperopt/cv' + str(config['cv']) + '/results/')
    checkpoints_path = ('/gpfs/alpine/med106/proj-shared/aclyde/MolecularAttention'
                        '/qm8/hyperopt/cv' + str(config['cv']) + '/checkpoints/')
    os.makedirs(results_path, exist_ok=True)
    os.makedirs(checkpoints_path, exist_ok=True)

    hyperdrive(objective=train_qm8,
               hyperparameters=params,
               results_path=results_path,
               checkpoints_path=checkpoints_path,
               model="GP",
               n_iterations=50,
               verbose=True,
               random_state=0)
Example #19
def main():
    parser = argparse.ArgumentParser(description='Setup experiment.')
    parser.add_argument('--results_dir',
                        type=str,
                        default='./results',
                        help='Path to results directory.')
    parser.add_argument('--log_dir',
                        type=str,
                        default='./logs',
                        help='Path to save logs')
    parser.add_argument(
        '--data',
        type=str,
        default='/lustre/atlas/proj-shared/csc249/yngtodd/data/fashion',
        help='Path to data')
    args = parser.parse_args()

    # k-fold configuration
    n_splits = 2

    # get data
    trainpath = os.path.join(args.data, 'fashion-mnist_train.csv')
    testpath = os.path.join(args.data, 'fashion-mnist_test.csv')

    train = pd.read_csv(trainpath)
    test = pd.read_csv(testpath)

    global logger
    log_cols = ["Classifier", "Train Accuracy (Mean)", "Val Accuracy", "Loss"]
    logger = Log(colnames=log_cols, savepath=args.log_dir, rank=rank)

    global y_train_CNN, X_train_CNN
    y_train_CNN = train.iloc[:, 0].values.astype('int32')  # labels only, i.e. the target digits
    X_train_CNN = np.array(train.iloc[:, 1:].values).reshape(
        train.shape[0], 1, 28,
        28).astype(np.uint8)  # reshape to [samples][channels][width][height]

    global y_test_CNN, X_test_CNN
    y_test_CNN = test.iloc[:, 0].values.astype('int32')  # labels only, i.e. the target digits
    X_test_CNN = np.array(test.iloc[:, 1:].values).reshape(
        (test.shape[0], 1, 28, 28)).astype(np.uint8)

    # normalize inputs from 0-255 to 0-1
    X_train_CNN = X_train_CNN / 255
    X_test_CNN = X_test_CNN / 255

    global X_train, X_val, y_train, y_val
    X_train, X_val, y_train, y_val = \
            train_test_split(X_train_CNN, y_train_CNN, test_size=0.33, random_state=42)

    # one hot encode outputs
    y_train = to_categorical(y_train)
    y_val = to_categorical(y_val)
    y_test = to_categorical(y_test_CNN)
    num_classes = y_train.shape[1]

    global kf
    kf = KFold(n_splits=n_splits, random_state=seed, shuffle=True)
    kf.get_n_splits(X_train)

    hparams = [(0.0001, 0.1)]  # lr

    hyperdrive(objective=objective,
               hyperparameters=hparams,
               results_path=args.results_dir,
               model="GP",
               n_iterations=100,
               verbose=True,
               random_state=0,
               checkpoints=True)

    # Save the log data frame
    logger.save()
Example #20
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N',
                        help='input batch size for testing (default: 1000)')
    parser.add_argument('--epochs', type=int, default=10, metavar='N',
                        help='number of epochs to train (default: 10)')
    parser.add_argument('--lr', type=float, default=0.01, metavar='LR',
                        help='learning rate (default: 0.01)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--seed', type=int, default=42, metavar='S',
                        help='random seed (default: 42)')
    parser.add_argument('--log-interval', type=int, default=10, metavar='N',
                        help='how many batches to wait before logging training status')
    parser.add_argument('--fp16-allreduce', action='store_true', default=False,
                        help='use fp16 compression during allreduce')
    parser.add_argument('--results_path', type=str, help="Path to store results")
    args = parser.parse_args()
    args.cuda = not args.no_cuda and torch.cuda.is_available()

    # Horovod: initialize library.
    hvd.init()
    torch.manual_seed(args.seed)

    if args.cuda:
        # Horovod: pin GPU to local rank.
        torch.cuda.set_device(hvd.local_rank())
        torch.cuda.manual_seed(args.seed)

    kwargs = {'num_workers': 1, 'pin_memory': True} if args.cuda else {}
    train_dataset = \
        datasets.MNIST('data-%d' % hvd.rank(), train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor(),
                           transforms.Normalize((0.1307,), (0.3081,))
                       ]))

    # Horovod: use DistributedSampler to partition the training data.
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        train_dataset, num_replicas=hvd.size(), rank=hvd.rank())
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, sampler=train_sampler, **kwargs)

    test_dataset = \
        datasets.MNIST('data-%d' % hvd.rank(), train=False, transform=transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.1307,), (0.3081,))
        ]))

    # Horovod: use DistributedSampler to partition the test data.
    test_sampler = torch.utils.data.distributed.DistributedSampler(
        test_dataset, num_replicas=hvd.size(), rank=hvd.rank())
    test_loader = torch.utils.data.DataLoader(test_dataset,
                                              batch_size=args.test_batch_size,
                                              sampler=test_sampler, **kwargs)

    model = Net()

    if args.cuda:
        # Move model to GPU.
        model.cuda()

    # Horovod: broadcast parameters.
    hvd.broadcast_parameters(model.state_dict(), root_rank=0)

    global optimizer
    # Horovod: scale learning rate by the number of GPUs.
    optimizer = optim.SGD(model.parameters(), lr=args.lr * hvd.size(),
                          momentum=args.momentum)

    # Horovod: (optional) compression algorithm.
    compression = hvd.Compression.fp16 if args.fp16_allreduce else hvd.Compression.none

    # Horovod: wrap optimizer with DistributedOptimizer.
    optimizer = hvd.DistributedOptimizer(optimizer,
                                         named_parameters=model.named_parameters(),
                                         compression=compression)

    #for epoch in range(1, args.epochs + 1):
    #    train(epoch, model, optimizer, train_sampler, train_loader, args)
    #    test(model, test_sampler, test_loader, args)

    space = [(2, 8)]

    #if hvd.rank() == 0:
    hyperdrive(
        lambda hparams: objective(
            hparams,
            model,
            train_sampler,
            train_loader,
            args
        ),
        hyperparameters=space,
        results_path=args.results_path,
        checkpoints_path=args.results_path,
        model="GP",
        n_iterations=50,
        verbose=True,
        random_state=0
    )
Example #21
def main():
    global train_x
    global train_y
    global test_x
    global test_y
    train_x = np.load('data/train_X.npy')
    train_y = np.load('data/train_Y.npy')
    test_x = np.load('data/test_X.npy')
    test_y = np.load('data/test_Y.npy')

    for task in range(4):
        le = preprocessing.LabelEncoder()
        le.fit(train_y[:, task])
        train_y[:, task] = le.transform(train_y[:, task])
        test_y[:, task] = le.transform(test_y[:, task])

    max_vocab = max(np.max(train_x), np.max(test_x))

    print('max_vocab:', max_vocab)

    np.random.seed(0)

    global wv_len
    wv_len = 50
    global wv_mat
    wv_mat = np.random.randn(max_vocab + 1, wv_len).astype('float32') * 0.1
    # num_classes = np.max(train_y) + 1

    global num_classes
    num_classes = [np.max(train_y[:, task]) + 1 for task in range(4)]

    global validation_data
    validation_data = ({
        'Input': test_x
    }, {
        'Dense0': test_y[:, 0],
        'Dense1': test_y[:, 1],
        'Dense2': test_y[:, 2],
        'Dense3': test_y[:, 3]
    })

    space = [(1, 10), (1, 10), (1, 10), (5, 500), (0.00001, 0.1)]

    savepoint = load_results(args.results_dir)

    hyperdrive(objective=objective,
               hyperparameters=space,
               results_path=args.results_dir,
               model="GP",
               n_iterations=25,
               checkpoints=True,
               verbose=True,
               random_state=0,
               restart=savepoint)