コード例 #1
0
def _run(ctx, name, owner, project_name, description, tags, specification,
         log):
    """Execute an operation locally inside a Docker container.

    Creates a tracking run for the image build, builds the image, creates
    a second tracking run for the experiment, then launches the experiment
    with ``docker run --rm`` streaming its output.

    Raises:
        PolyaxonException: if Docker is not available on this machine.

    Exits the process with status 1 if the docker execution fails.
    """
    docker = DockerOperator()
    if not docker.check():
        raise PolyaxonException("Docker is required to run this command.")

    # Create the build job under the fully-qualified "<owner>.<project>" name.
    project = "{}.{}".format(owner, project_name)
    build_job = Run(project=project)

    specification = CompiledOperationSpecification.apply_operation_contexts(
        specification)
    content = specification.to_dict(dump=True)
    build_job.create(name=name,
                     description=description,
                     tags=tags,
                     content=content)
    # Build the docker image for this job; returns the image tag to run.
    image = _create_docker_build(build_job, specification, project)

    # Separate tracking run for the experiment itself.
    experiment = Run(project=project)
    experiment.create(name=name,
                      tags=tags,
                      description=description,
                      content=content)

    # Assemble the `docker run` command line: env vars, volumes, image.
    cmd_args = ["run", "--rm"]
    data_paths, bind_mounts = _get_data_bind_mounts(specification.data_refs)
    for key, value in _get_env_vars(
            project=project,
            experiment_id=experiment.experiment_id,
            params=specification.params,
            data_paths=data_paths,
    ):
        cmd_args += ["-e", "{key}={value}".format(key=key, value=value)]
    cmd_args += _get_config_volume()
    cmd_args += _get_data_volumes(bind_mounts)
    cmd_args += [image]

    # Append the container's command arguments.
    # NOTE(review): each `arg` is assumed to be a list of strings; if it
    # were a plain string, `cmd_args += arg` would splice in individual
    # characters — confirm against get_container_command_args().
    _, args = specification.container.get_container_command_args()
    for arg in args:
        cmd_args += arg
    try:
        print(cmd_args)  # echo the full docker command for visibility
        docker.execute(cmd_args, stream=True)
    except Exception as e:
        # Fixed user-facing typo: "Could start" -> "Could not start".
        handle_cli_error(e, message="Could not start local run.")
        sys.exit(1)
コード例 #2
0
ファイル: model.py プロジェクト: zhaohb/polyaxon-examples
    model.fit(X_train, y_train)
    pred = model.predict(X_test)
    return accuracy_score(pred, y_test)


if __name__ == '__main__':
    # CLI hyperparameters for the XGBoost iris example.
    parser = argparse.ArgumentParser()
    # Learning rate given as a base-10 exponent — presumably the training
    # code uses 10 ** log_learning_rate; confirm against the model code.
    parser.add_argument('--log_learning_rate', type=int, default=-3)
    parser.add_argument('--max_depth', type=int, default=3)
    parser.add_argument('--num_rounds', type=int, default=10)
    parser.add_argument('--min_child_weight', type=int, default=5)
    args = parser.parse_args()

    # Polyaxon: create a tracking run and record the hyperparameters.
    experiment = Run(project='iris')
    experiment.create(tags=['examples', 'xgboost'])
    experiment.log_inputs(log_learning_rate=args.log_learning_rate,
                          max_depth=args.max_depth,
                          num_rounds=args.num_rounds,
                          min_child_weight=args.min_child_weight)

    # Load the iris dataset and split 80/20 into train/test.
    iris = load_iris()
    X = iris.data
    Y = iris.target

    X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2)

    # Polyaxon: record references to the data splits used.
    # NOTE(review): ref naming is inconsistent ('x_train' vs 'X_test'), and
    # a y_test reference may follow below this excerpt — verify upstream.
    experiment.log_data_ref(content=X_train, name='x_train')
    experiment.log_data_ref(content=y_train, name='y_train')
    experiment.log_data_ref(content=X_test, name='X_test')
コード例 #3
0
ファイル: model.py プロジェクト: zhaohb/polyaxon-examples

if __name__ == '__main__':
    # CLI hyperparameters for the Keras MNIST example.
    parser = argparse.ArgumentParser()
    parser.add_argument('--conv1_size', type=int, default=32)
    parser.add_argument('--conv2_size', type=int, default=64)
    # NOTE(review): 0.8 is unusually high for a drop rate — it may be a
    # keep-probability; confirm how the model interprets this value.
    parser.add_argument('--dropout', type=float, default=0.8)
    parser.add_argument('--hidden1_size', type=int, default=500)
    parser.add_argument('--optimizer', type=str, default='adam')
    # Learning rate given as a base-10 exponent — presumably
    # 10 ** log_learning_rate; confirm against the training code.
    parser.add_argument('--log_learning_rate', type=int, default=-3)
    parser.add_argument('--epochs', type=int, default=1)
    args = parser.parse_args()

    # Polyaxon: create a tracking run and record all hyperparameters.
    experiment = Run(project='mnist')
    experiment.create(tags=['keras'])
    experiment.log_inputs(conv1_size=args.conv1_size,
                          conv2_size=args.conv2_size,
                          dropout=args.dropout,
                          hidden1_size=args.hidden1_size,
                          optimizer=args.optimizer,
                          log_learning_rate=args.log_learning_rate,
                          epochs=args.epochs)

    # Load the raw MNIST arrays (train and test splits).
    (x_train, y_train), (x_test, y_test) = mnist.load_data()

    # Polyaxon: record references to the data splits used.
    experiment.log_data_ref(content=x_train, name='x_train')
    experiment.log_data_ref(content=y_train, name='y_train')
    experiment.log_data_ref(content=x_test, name='x_test')
    experiment.log_data_ref(content=y_test, name='y_test')
コード例 #4
0
ファイル: model.py プロジェクト: zhaohb/polyaxon-examples
        type=float,
        default=1.0)
    # Remaining CLI hyperparameters for the SGD classifier example.
    parser.add_argument(
        '--max_iter',
        type=int,
        default=1000)
    parser.add_argument(
        '--tol',
        type=float,
        default=0.001
    )
    args = parser.parse_args()

    # Polyaxon: create a tracking run and record the hyperparameters.
    experiment = Run(project='sgd-classifier')
    experiment.create(tags=['examples', 'scikit-learn'])
    experiment.log_inputs(loss=args.loss,
                          penalty=args.penalty,
                          l1_ratio=args.l1_ratio,
                          max_iter=args.max_iter,
                          tol=args.tol)

    # Load the full feature matrix and labels.
    (X, y) = load_data()

    # Polyaxon: record references to the dataset used.
    experiment.log_data_ref(content=X, name='dataset_X')
    experiment.log_data_ref(content=y, name='dataset_y')

    accuracies = model(X=X,
                       y=y,
                       loss=args.loss,
コード例 #5
0
ファイル: model.py プロジェクト: zhaohb/polyaxon-examples
import tensorflow as tf
from polyaxon.tracking import Run

mnist = tf.keras.datasets.mnist

# Load MNIST and scale pixel values from [0, 255] down to [0, 1].
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

# Polyaxon: tracking run; artifacts are written under /tmp/mnist/.
experiment = Run(project='mnist', artifacts_path='/tmp/mnist/')
experiment.create(tags=['examples', 'tensorflow'])

def create_model():
    """Build an uncompiled feed-forward MNIST classifier.

    Architecture: flatten the 28x28 input, one 512-unit ReLU dense
    layer, 20% dropout, then a 10-way softmax output layer.
    """
    layers = [
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(512, activation='relu'),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(10, activation='softmax'),
    ]
    return tf.keras.models.Sequential(layers)


model = create_model()
# Labels are integer class ids, hence sparse categorical crossentropy.
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])


# Train for 5 epochs, evaluating on the held-out test split each epoch.
model.fit(x=x_train,
          y=y_train,
          epochs=5,
          validation_data=(x_test, y_test))
コード例 #6
0
    # CLI hyperparameters for the MXNet MNIST example: two conv layers,
    # one fully-connected layer, plus optimizer/training settings.
    parser.add_argument('--conv1_kernel', type=int, default=5)
    parser.add_argument('--conv1_filters', type=int, default=10)
    parser.add_argument('--conv1_activation', type=str, default='relu')
    parser.add_argument('--conv2_kernel', type=int, default=5)
    parser.add_argument('--conv2_filters', type=int, default=10)
    parser.add_argument('--conv2_activation', type=str, default='relu')
    parser.add_argument('--fc1_hidden', type=int, default=10)
    parser.add_argument('--fc1_activation', type=str, default='relu')
    parser.add_argument('--optimizer', type=str, default='adam')
    # Learning rate given as a base-10 exponent — presumably
    # 10 ** log_learning_rate; confirm against the training code.
    parser.add_argument('--log_learning_rate', type=int, default=-3)
    parser.add_argument('--batch_size', type=int, default=100)
    parser.add_argument('--epochs', type=int, default=1)
    args = parser.parse_args()

    # Polyaxon: create a tracking run and record all hyperparameters.
    experiment = Run(project='mnist')
    experiment.create(tags=['examples', 'mxnet'])
    # BUG FIX: the conv2_* inputs were previously logged from args.conv1_*
    # (copy-paste error), so the recorded hyperparameters did not match the
    # values actually parsed for the second conv layer. Also log batch_size,
    # which is parsed above but was never recorded.
    experiment.log_inputs(conv1_kernel=args.conv1_kernel,
                          conv1_filters=args.conv1_filters,
                          conv1_activation=args.conv1_activation,
                          conv2_kernel=args.conv2_kernel,
                          conv2_filters=args.conv2_filters,
                          conv2_activation=args.conv2_activation,
                          fc1_hidden=args.fc1_hidden,
                          fc1_activation=args.fc1_activation,
                          optimizer=args.optimizer,
                          log_learning_rate=args.log_learning_rate,
                          batch_size=args.batch_size,
                          epochs=args.epochs)

    logger.info('Downloading data ...')
    mnist = mx.test_utils.get_mnist()
    train_iter = mx.io.NDArrayIter(mnist['train_data'],
コード例 #7
0
ファイル: model.py プロジェクト: zhaohb/polyaxon-examples
                        type=int,
                        default=30,
                        help='Top occurring words to skip')
    # Remaining CLI hyperparameters for the bidirectional-LSTM IMDB example.
    parser.add_argument('--maxlen', type=int, default=100)
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--num_nodes', type=int, default=8)
    parser.add_argument('--optimizer', type=str, default='adam')
    # Learning rate given as a base-10 exponent — presumably
    # 10 ** log_learning_rate; confirm against the training code.
    parser.add_argument('--log_learning_rate', type=int, default=-3)
    # NOTE(review): 0.8 is unusually high for a drop rate — it may be a
    # keep-probability; confirm how the model interprets this value.
    parser.add_argument('--dropout', type=float, default=0.8)
    parser.add_argument('--epochs', type=int, default=1)
    parser.add_argument('--seed', type=int, default=234)
    args = parser.parse_args()

    # Polyaxon: create a tracking run and record all hyperparameters.
    experiment = Run(project='bidirectional-lstm')
    experiment.create(tags=['examples', 'keras'])
    experiment.log_inputs(max_features=args.max_features,
                          skip_top=args.skip_top,
                          maxlen=args.maxlen,
                          batch_size=args.batch_size,
                          num_nodes=args.num_nodes,
                          optimizer=args.optimizer,
                          log_learning_rate=args.log_learning_rate,
                          dropout=args.dropout,
                          epochs=args.epochs,
                          seed=args.seed)

    logger.info('Loading data...')
    (x_train, y_train), (x_test,
                         y_test) = imdb.load_data(num_words=args.max_features,
                                                  skip_top=args.skip_top,
コード例 #8
0
                        help='Top occurring words to skip')
    # Remaining CLI hyperparameters for the CNN-LSTM IMDB example.
    parser.add_argument('--maxlen', type=int, default=100)
    parser.add_argument('--embedding_size', type=int, default=128)
    parser.add_argument('--pool_size', type=int, default=4)
    parser.add_argument('--kernel_size', type=int, default=5)
    parser.add_argument('--filters', type=int, default=64)
    parser.add_argument('--lstm_output_size', type=int, default=70)
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--optimizer', type=str, default='adam')
    # Learning rate given as a base-10 exponent — presumably
    # 10 ** log_learning_rate; confirm against the training code.
    parser.add_argument('--log_learning_rate', type=int, default=-3)
    parser.add_argument('--epochs', type=int, default=1)
    args = parser.parse_args()

    # Polyaxon: create a tracking run and record all hyperparameters.
    # NOTE(review): this example passes framework='keras' to create()
    # while sibling examples put the framework in `tags` — verify the
    # intended convention against the Run.create API.
    experiment = Run(project='cnn-lstm')
    experiment.create(framework='keras', tags=['examples'])
    experiment.log_inputs(max_features=args.max_features,
                          skip_top=args.skip_top,
                          maxlen=args.maxlen,
                          epochs=args.epochs,
                          embedding_size=args.embedding_size,
                          pool_size=args.pool_size,
                          kernel_size=args.kernel_size,
                          filters=args.filters,
                          lstm_output_size=args.lstm_output_size,
                          batch_size=args.batch_size,
                          optimizer=args.optimizer,
                          log_learning_rate=args.log_learning_rate)

    logger.info('Loading data...')
    (x_train, y_train), (x_test,