Example #1
def main():
    args = parse_args()
    experiment = Run()
    params = load_values(args.param_file)
    if params:
        experiment.log_inputs(**params)
    metrics = load_values(args.metric_file)
    if metrics:
        experiment.log_metrics(**metrics)
    if args.tag:
        experiment.log_tags(args.tag)
    for dataset in load_datasets(args.data_file):
        experiment.log_data_ref(**dataset)
    if args.capture_png:
        imgs = discover_png(experiment.get_outputs_path())
        for img in imgs:
            if isinstance(img, str):
                experiment.log_image(img)
            elif isinstance(img, SerialImages):
                for idx, path in enumerate(img.paths):
                    experiment.log_image(path, name=img.name, step=idx)
            else:
                raise NotImplementedError('We should never get here.')
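# The helpers used above (parse_args, load_values, load_datasets,
# discover_png) are not shown in this snippet. A minimal sketch of
# load_values, assuming the param/metric files are JSON:
def load_values(path):
    import json
    import os

    # Hypothetical helper: returns a dict parsed from a JSON file, or
    # None when no path was given or the file does not exist.
    if not path or not os.path.isfile(path):
        return None
    with open(path) as f:
        return json.load(f)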
Example #2
    return accuracy_score(pred, y_test)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--log_learning_rate', type=int, default=-3)
    parser.add_argument('--max_depth', type=int, default=3)
    parser.add_argument('--num_rounds', type=int, default=10)
    parser.add_argument('--min_child_weight', type=int, default=5)
    args = parser.parse_args()

    # Polyaxon
    experiment = Run(project='iris')
    experiment.create(tags=['examples', 'xgboost'])
    experiment.log_inputs(log_learning_rate=args.log_learning_rate,
                          max_depth=args.max_depth,
                          num_rounds=args.num_rounds,
                          min_child_weight=args.min_child_weight)

    iris = load_iris()
    X = iris.data
    Y = iris.target

    X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)

    # Polyaxon
    experiment.log_data_ref(content=X_train, name='x_train')
    experiment.log_data_ref(content=y_train, name='y_train')
    experiment.log_data_ref(content=X_test, name='x_test')
    experiment.log_data_ref(content=y_test, name='y_test')

    logger.info('Train model...')
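    # Assumed continuation, not the original code. Note that
    # --log_learning_rate is on a log10 scale, so the actual learning
    # rate (eta) is recovered as 10 ** args.log_learning_rate.
    import xgboost as xgb
    from sklearn.metrics import accuracy_score

    params = {
        'eta': 10 ** args.log_learning_rate,
        'max_depth': args.max_depth,
        'min_child_weight': args.min_child_weight,
        'objective': 'multi:softmax',
        'num_class': 3,
    }
    dtrain = xgb.DMatrix(X_train, label=y_train)
    dtest = xgb.DMatrix(X_test, label=y_test)
    booster = xgb.train(params, dtrain, num_boost_round=args.num_rounds)
    pred = booster.predict(dtest)

    # Polyaxon
    experiment.log_metrics(accuracy=accuracy_score(y_test, pred))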
Example #3
        '--max_iter',
        type=int,
        default=1000)
    parser.add_argument(
        '--tol',
        type=float,
        default=0.001
    )
    args = parser.parse_args()

    # Polyaxon
    experiment = Run(project='sgd-classifier')
    experiment.create(tags=['examples', 'scikit-learn'])
    experiment.log_inputs(loss=args.loss,
                          penalty=args.penalty,
                          l1_ratio=args.l1_ratio,
                          max_iter=args.max_iter,
                          tol=args.tol)

    (X, y) = load_data()

    # Polyaxon
    experiment.log_data_ref(content=X, name='dataset_X')
    experiment.log_data_ref(content=y, name='dataset_y')

    accuracies = model(X=X,
                       y=y,
                       loss=args.loss,
                       penalty=args.penalty,
                       l1_ratio=args.l1_ratio,
                       max_iter=args.max_iter,
                       tol=args.tol)
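# The model helper is not shown in this snippet; given the hyperparameters
# passed in and the cross_val_score pattern in Example #8 below, a
# hypothetical reconstruction:
def model(X, y, loss, penalty, l1_ratio, max_iter, tol):
    from sklearn.linear_model import SGDClassifier
    from sklearn.model_selection import cross_val_score

    # 5-fold cross-validated SGD classifier built from the logged inputs.
    classifier = SGDClassifier(loss=loss,
                               penalty=penalty,
                               l1_ratio=l1_ratio,
                               max_iter=max_iter,
                               tol=tol)
    return cross_val_score(classifier, X, y, cv=5)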
Example #4
    parser.add_argument('--conv1_size', type=int, default=32)
    parser.add_argument('--conv2_size', type=int, default=64)
    parser.add_argument('--dropout', type=float, default=0.8)
    parser.add_argument('--hidden1_size', type=int, default=500)
    parser.add_argument('--optimizer', type=str, default='adam')
    parser.add_argument('--log_learning_rate', type=int, default=-3)
    parser.add_argument('--epochs', type=int, default=1)
    args = parser.parse_args()

    # Polyaxon
    experiment = Run(project='mnist')
    experiment.create(tags=['keras'])
    experiment.log_inputs(conv1_size=args.conv1_size,
                          conv2_size=args.conv2_size,
                          dropout=args.dropout,
                          hidden1_size=args.hidden1_size,
                          optimizer=args.optimizer,
                          log_learning_rate=args.log_learning_rate,
                          epochs=args.epochs)

    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    # Polyaxon
    experiment.log_data_ref(content=x_train, name='x_train')
    experiment.log_data_ref(content=y_train, name='y_train')
    experiment.log_data_ref(content=x_test, name='x_test')
    experiment.log_data_ref(content=y_test, name='y_test')

    x_train, y_train, x_test, y_test = transform_data(x_train, y_train, x_test,
                                                      y_test)
    accuracy = train(conv1_size=args.conv1_size,
Example #5
    parser.add_argument('--fc1_hidden', type=int, default=10)
    parser.add_argument('--fc1_activation', type=str, default='relu')
    parser.add_argument('--optimizer', type=str, default='adam')
    parser.add_argument('--log_learning_rate', type=int, default=-3)
    parser.add_argument('--batch_size', type=int, default=100)
    parser.add_argument('--epochs', type=int, default=1)
    args = parser.parse_args()

    experiment = Run(project='mnist')
    experiment.create(tags=['examples', 'mxnet'])
    experiment.log_inputs(conv1_kernel=args.conv1_kernel,
                          conv1_filters=args.conv1_filters,
                          conv1_activation=args.conv1_activation,
                          conv2_kernel=args.conv2_kernel,
                          conv2_filters=args.conv2_filters,
                          conv2_activation=args.conv2_activation,
                          fc1_hidden=args.fc1_hidden,
                          fc1_activation=args.fc1_activation,
                          optimizer=args.optimizer,
                          log_learning_rate=args.log_learning_rate,
                          epochs=args.epochs)

    logger.info('Downloading data ...')
    mnist = mx.test_utils.get_mnist()
    train_iter = mx.io.NDArrayIter(mnist['train_data'],
                                   mnist['train_label'],
                                   args.batch_size,
                                   shuffle=True)
    val_iter = mx.io.NDArrayIter(mnist['test_data'], mnist['test_label'],
                                 args.batch_size)
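    # Assumed continuation, not the original code: a small LeNet-style
    # network built from the logged hyperparameters and trained with the
    # Module API. The learning rate undoes the log10 scale of
    # --log_learning_rate.
    data = mx.sym.var('data')
    net = mx.sym.Convolution(data=data, kernel=(args.conv1_kernel,) * 2,
                             num_filter=args.conv1_filters)
    net = mx.sym.Activation(data=net, act_type=args.conv1_activation)
    net = mx.sym.Convolution(data=net, kernel=(args.conv2_kernel,) * 2,
                             num_filter=args.conv2_filters)
    net = mx.sym.Activation(data=net, act_type=args.conv2_activation)
    net = mx.sym.FullyConnected(data=mx.sym.flatten(net),
                                num_hidden=args.fc1_hidden)
    net = mx.sym.Activation(data=net, act_type=args.fc1_activation)
    net = mx.sym.SoftmaxOutput(data=mx.sym.FullyConnected(data=net,
                                                          num_hidden=10),
                               name='softmax')

    module = mx.mod.Module(symbol=net, context=mx.cpu())
    module.fit(train_iter,
               eval_data=val_iter,
               optimizer=args.optimizer,
               optimizer_params={'learning_rate': 10 ** args.log_learning_rate},
               num_epoch=args.epochs)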
Example #6
    parser.add_argument('--log_learning_rate', type=int, default=-3)
    parser.add_argument('--batch_size', type=int, default=100)
    parser.add_argument('--epochs', type=int, default=1)
    args = parser.parse_args()

    # Polyaxon
    experiment = Run(project='mnist', artifacts_path='/tmp/mnist')
    experiment.create(tags=['examples', 'tensorflow'])
    experiment.log_inputs(conv1_size=args.conv1_size,
                          conv1_out=args.conv1_out,
                          conv1_activation=args.conv1_activation,
                          pool1_size=args.pool1_size,
                          conv2_size=args.conv2_size,
                          conv2_out=args.conv2_out,
                          conv2_activation=args.conv2_activation,
                          pool2_size=args.pool2_size,
                          fc1_activation=args.fc1_activation,
                          fc1_size=args.fc1_size,
                          optimizer=args.optimizer,
                          log_learning_rate=args.log_learning_rate,
                          batch_size=args.batch_size,
                          dropout=args.dropout,
                          epochs=args.epochs)

    (x_train, y_train), (x_test, y_test) = load_mnist_data()

    # Polyaxon
    experiment.log_data_ref(content=x_train, name='x_train')
    experiment.log_data_ref(content=y_train, name='y_train')
    experiment.log_data_ref(content=x_test, name='x_test')
    experiment.log_data_ref(content=y_test, name='y_test')
Example #7
# Polyaxon
if hvd.rank() == 0:
    experiment = Run()

# Horovod: pin GPU to be used to process local rank (one GPU per process)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.gpu_options.visible_device_list = str(hvd.local_rank())
K.set_session(tf.Session(config=config))

batch_size = 128
num_classes = 10
# Polyaxon
if hvd.rank() == 0:
    experiment.log_inputs(batch_size=batch_size, num_classes=num_classes)

# Horovod: adjust number of epochs based on number of GPUs.
epochs = int(math.ceil(12.0 / hvd.size()))

# Input image dimensions
img_rows, img_cols = 28, 28

# The data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Polyaxon
if hvd.rank() == 0:
    experiment.log_data_ref(content=x_train, name='x_train')
    experiment.log_data_ref(content=y_train, name='y_train')
    experiment.log_data_ref(content=x_test, name='x_test')
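# Assumed continuation (not the original code): the standard Horovod/Keras
# recipe scales the learning rate by the worker count, wraps the optimizer
# so gradients are averaged across ranks, and broadcasts initial weights
# from rank 0. Assumes `import keras` alongside the imports above.
opt = keras.optimizers.Adadelta(1.0 * hvd.size())
opt = hvd.DistributedOptimizer(opt)

callbacks = [
    # Ensure all workers start from the same random weights.
    hvd.callbacks.BroadcastGlobalVariablesCallback(0),
]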
Example #8
    )
    return cross_val_score(classifier, X, y, cv=5)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--n_estimators', type=int, default=3)
    parser.add_argument('--max_features', type=int, default=3)
    parser.add_argument('--min_samples_leaf', type=int, default=80)
    args = parser.parse_args()

    # Polyaxon
    experiment = Run(project='random-forest')
    experiment.create(tags=['examples', 'scikit-learn'])
    experiment.log_inputs(n_estimators=args.n_estimators,
                          max_features=args.max_features,
                          min_samples_leaf=args.min_samples_leaf)

    (X, y) = load_data()

    # Polyaxon
    experiment.log_data_ref(content=X, name='dataset_X')
    experiment.log_data_ref(content=y, name='dataset_y')

    accuracies = model(X=X,
                       y=y,
                       n_estimators=args.n_estimators,
                       max_features=args.max_features,
                       min_samples_leaf=args.min_samples_leaf)
    accuracy_mean, accuracy_std = (np.mean(accuracies), np.std(accuracies))
    print('Accuracy: {} +/- {}'.format(accuracy_mean, accuracy_std))
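# Only the tail of the model helper survives at the top of this example;
# a hypothetical reconstruction consistent with it:
def model(X, y, n_estimators, max_features, min_samples_leaf):
    from sklearn.ensemble import RandomForestClassifier
    from sklearn.model_selection import cross_val_score

    # 5-fold cross-validated random forest, mirroring the logged inputs.
    classifier = RandomForestClassifier(n_estimators=n_estimators,
                                        max_features=max_features,
                                        min_samples_leaf=min_samples_leaf)
    return cross_val_score(classifier, X, y, cv=5)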
Example #9
    parser.add_argument('--num_nodes', type=int, default=8)
    parser.add_argument('--optimizer', type=str, default='adam')
    parser.add_argument('--log_learning_rate', type=int, default=-3)
    parser.add_argument('--dropout', type=float, default=0.8)
    parser.add_argument('--epochs', type=int, default=1)
    parser.add_argument('--seed', type=int, default=234)
    args = parser.parse_args()

    # Polyaxon
    experiment = Run(project='bidirectional-lstm')
    experiment.create(tags=['examples', 'keras'])
    experiment.log_inputs(max_features=args.max_features,
                          skip_top=args.skip_top,
                          maxlen=args.maxlen,
                          batch_size=args.batch_size,
                          num_nodes=args.num_nodes,
                          optimizer=args.optimizer,
                          log_learning_rate=args.log_learning_rate,
                          dropout=args.dropout,
                          epochs=args.epochs,
                          seed=args.seed)

    logger.info('Loading data...')
    (x_train, y_train), (x_test, y_test) = imdb.load_data(
        num_words=args.max_features, skip_top=args.skip_top, seed=args.seed)
    logger.info('train sequences %s', len(x_train))
    logger.info('test sequences %s', len(x_test))

    # Polyaxon
    experiment.log_data_ref(content=x_train, name='x_train')
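    # Assumed continuation, not the original code: the canonical next
    # step in the Keras IMDB examples is to pad each review to a fixed
    # length so the sequences can be batched into one tensor.
    from keras.preprocessing import sequence

    x_train = sequence.pad_sequences(x_train, maxlen=args.maxlen)
    x_test = sequence.pad_sequences(x_test, maxlen=args.maxlen)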
Example #10
    parser.add_argument('--lstm_output_size', type=int, default=70)
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--optimizer', type=str, default='adam')
    parser.add_argument('--log_learning_rate', type=int, default=-3)
    parser.add_argument('--epochs', type=int, default=1)
    args = parser.parse_args()

    # Polyaxon
    experiment = Run(project='cnn-lstm')
    experiment.create(framework='keras', tags=['examples'])
    experiment.log_inputs(max_features=args.max_features,
                          skip_top=args.skip_top,
                          maxlen=args.maxlen,
                          epochs=args.epochs,
                          embedding_size=args.embedding_size,
                          pool_size=args.pool_size,
                          kernel_size=args.kernel_size,
                          filters=args.filters,
                          lstm_output_size=args.lstm_output_size,
                          batch_size=args.batch_size,
                          optimizer=args.optimizer,
                          log_learning_rate=args.log_learning_rate)

    logger.info('Loading data...')
    (x_train, y_train), (x_test, y_test) = imdb.load_data(
        num_words=args.max_features, skip_top=args.skip_top)

    logger.info('train sequences %s', len(x_train))
    logger.info('test sequences %s', len(x_test))

    # Polyaxon
Example #11
    parser.add_argument('--max_df',
                        type=float,
                        default=1.0,
                        help='the maximum document frequency.')
    parser.add_argument(
        '--C',
        type=float,
        default=1.0,
        help='Inverse of regularization strength of LogisticRegression')
    args = parser.parse_args()

    # Polyaxon
    experiment = Run(project='newsgroup')
    experiment.create(tags=['examples', 'scikit-learn'])
    experiment.log_inputs(ngram_range=(args.ngram, args.ngram),
                          max_features=args.max_features,
                          max_df=args.max_df,
                          C=args.C)

    # Train and eval the model with given parameters.
    metrics = train_and_eval(ngram_range=(args.ngram, args.ngram),
                             max_features=args.max_features,
                             max_df=args.max_df,
                             C=args.C)

    # Print the testing metrics
    print('Testing metrics: {}'.format(metrics))
    # Polyaxon
    experiment.log_outputs(**metrics)
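# train_and_eval is not shown in this snippet; a hypothetical
# reconstruction (using the 20 Newsgroups dataset is a guess based on
# the project name 'newsgroup'):
def train_and_eval(ngram_range, max_features, max_df, C):
    from sklearn.datasets import fetch_20newsgroups
    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.linear_model import LogisticRegression
    from sklearn.metrics import accuracy_score

    train = fetch_20newsgroups(subset='train')
    test = fetch_20newsgroups(subset='test')
    vectorizer = TfidfVectorizer(ngram_range=ngram_range,
                                 max_features=max_features,
                                 max_df=max_df)
    X_train = vectorizer.fit_transform(train.data)
    X_test = vectorizer.transform(test.data)
    classifier = LogisticRegression(C=C).fit(X_train, train.target)
    predictions = classifier.predict(X_test)
    return {'accuracy': accuracy_score(test.target, predictions)}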