def create_experiment(self, name=None, framework=None, tags=None, description=None, config=None):
    """Build a new ``Experiment`` bound to this object's tracking settings and call
    ``create`` on it.

    The experiment inherits ``project``, ``group_id``, ``client``, the three
    ``track_*`` flags, and ``outputs_store`` from ``self``; the ``create`` call
    additionally receives ``self.base_outputs_path``.

    Args:
        name: optional experiment name, forwarded to ``create``.
        framework: optional framework label, forwarded to ``create``.
        tags: optional tag list, forwarded to ``create``.
        description: optional description, forwarded to ``create``.
        config: optional config payload, forwarded to ``create``.

    Returns:
        The newly created ``Experiment`` instance.
    """
    # Everything the Experiment needs from this object, gathered in one place.
    tracking_kwargs = dict(
        project=self.project,
        group_id=self.group_id,
        client=self.client,
        track_logs=self.track_logs,
        track_code=self.track_code,
        track_env=self.track_env,
        outputs_store=self.outputs_store,
    )
    new_experiment = Experiment(**tracking_kwargs)
    new_experiment.create(
        name=name,
        framework=framework,
        tags=tags,
        description=description,
        config=config,
        base_outputs_path=self.base_outputs_path,
    )
    return new_experiment
# Fragment of a TensorFlow MNIST example script (whitespace-collapsed onto one line):
# finishes the argparse flag definitions (--batch_size, --epochs), parses the CLI,
# creates a Polyaxon Experiment named 'mnist' (framework='tensorflow', tags=['examples']),
# and logs every parsed hyperparameter via log_params.
# NOTE(review): this chunk begins with a dangling ')' closing an add_argument call that
# is not visible here, and ends inside the log_params(...) argument list — the
# surrounding lines live outside this view; do not edit without the full file.
) parser.add_argument( '--batch_size', type=int, default=100 ) parser.add_argument( '--epochs', type=int, default=1 ) args = parser.parse_args() # Polyaxon experiment = Experiment('mnist') experiment.create(framework='tensorflow', tags=['examples']) experiment.log_params( conv1_size=args.conv1_size, conv1_out=args.conv1_out, conv1_activation=args.conv1_activation, pool1_size=args.pool1_size, conv2_size=args.conv2_size, conv2_out=args.conv2_out, conv2_activation=args.conv2_activation, pool2_size=args.pool2_size, fc1_activation=args.fc1_activation, fc1_size=args.fc1_size, optimizer=args.optimizer, log_learning_rate=args.log_learning_rate, batch_size=args.batch_size, dropout=args.dropout,
# Fragment of a Keras bidirectional-LSTM IMDB example (whitespace-collapsed):
# defines the remaining argparse flags (maxlen, batch_size, num_nodes, optimizer,
# log_learning_rate, dropout, epochs, seed), creates a Polyaxon Experiment
# 'bidirectional-lstm' (framework='keras'), logs all hyperparameters, then starts
# loading the IMDB dataset filtered by max_features / skip_top.
# NOTE(review): the chunk starts mid add_argument call (the flag name for
# "type=int, default=30" is outside this view) and ends inside imdb.load_data(...).
type=int, default=30, help='Top occurring words to skip') parser.add_argument('--maxlen', type=int, default=100) parser.add_argument('--batch_size', type=int, default=32) parser.add_argument('--num_nodes', type=int, default=8) parser.add_argument('--optimizer', type=str, default='adam') parser.add_argument('--log_learning_rate', type=int, default=-3) parser.add_argument('--dropout', type=float, default=0.8) parser.add_argument('--epochs', type=int, default=1) parser.add_argument('--seed', type=int, default=234) args = parser.parse_args() # Polyaxon experiment = Experiment('bidirectional-lstm') experiment.create(framework='keras', tags=['examples']) experiment.log_params(max_features=args.max_features, skip_top=args.skip_top, maxlen=args.maxlen, batch_size=args.batch_size, num_nodes=args.num_nodes, optimizer=args.optimizer, log_learning_rate=args.log_learning_rate, dropout=args.dropout, epochs=args.epochs, seed=args.seed) logger.info('Loading data...') (x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=args.max_features, skip_top=args.skip_top,
# Fragment of a scikit-learn k-NN decision-boundary example (whitespace-collapsed):
# sets the mesh step h, builds light/bold colormaps, then loops over the two
# KNeighborsClassifier weighting schemes, fitting on (X, y) and plotting the
# predicted regions over a meshgrid.
# NOTE(review): Experiment()/create() sit inside the "for weights" loop, so a new
# Polyaxon experiment is created per weighting scheme.
# NOTE(review): model_accuracy is computed by predicting on the same X the model
# was fit on — this is training-set accuracy, not a held-out score.
# NOTE(review): the loop body's original indentation was destroyed by the collapse;
# its exact extent cannot be recovered from this view.
h = .02 # step size in the mesh # Create color maps cmap_light = ListedColormap(['#FFAAAA', '#AAFFAA', '#AAAAFF']) cmap_bold = ListedColormap(['#FF0000', '#00FF00', '#0000FF']) for weights in ['uniform', 'distance']: # we create an instance of Neighbours Classifier and fit the data. clf = neighbors.KNeighborsClassifier(n_neighbors, weights=weights) clf.fit(X, y) y_model = clf.predict(X) model_accuracy = accuracy_score(y, y_model) experiment = Experiment() experiment.create() experiment.log_metrics(model_accuracy=model_accuracy) experiment.log_params(weights=weights, n_neighbors=n_neighbors) # Plot the decision boundary. For that, we will assign a color to each # point in the mesh [x_min, x_max]x[y_min, y_max]. x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1 y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1 xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h)) Z = clf.predict(np.c_[xx.ravel(), yy.ravel()]) # Put the result into a color plot Z = Z.reshape(xx.shape) plt.figure() plt.pcolormesh(xx, yy, Z, cmap=cmap_light)
# Fragment of a scikit-learn random-forest example (whitespace-collapsed): the tail
# of a model-building function returning 5-fold cross_val_score, followed by the
# __main__ block — argparse flags (n_estimators, max_features, min_samples_leaf),
# a Polyaxon Experiment on project 'random-forest', hyperparameter logging,
# data loading with log_data_ref bookkeeping, and the cross-validation run.
# NOTE(review): the chunk starts mid classifier-constructor call inside a def whose
# signature is outside this view; load_data() and model() are defined elsewhere.
max_features=max_features, min_samples_leaf=min_samples_leaf, ) return cross_val_score(classifier, X, y, cv=5) if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument('--n_estimators', type=int, default=3) parser.add_argument('--max_features', type=int, default=3) parser.add_argument('--min_samples_leaf', type=int, default=80) args = parser.parse_args() # Polyaxon experiment = Experiment(project='random-forest') experiment.create(framework='scikit-learn', tags=['examples']) experiment.log_params(n_estimators=args.n_estimators, max_features=args.max_features, min_samples_leaf=args.min_samples_leaf) (X, y) = load_data() # Polyaxon experiment.log_data_ref(data=X, data_name='dataset_X') experiment.log_data_ref(data=y, data_name='dataset_y') accuracies = model(X=X, y=y, n_estimators=args.n_estimators, max_features=args.max_features, min_samples_leaf=args.min_samples_leaf)
# Fragment of a Keras MNIST example (whitespace-collapsed): finishes the argparse
# flags (--log_learning_rate, --epochs), creates a Polyaxon Experiment on project
# 'mnist' tagged 'keras', logs the hyperparameters, loads the Keras MNIST arrays,
# and registers all four splits with log_data_ref.
# NOTE(review): unlike sibling examples, create() here passes only tags=['keras']
# and no framework= argument — confirm whether that is intentional.
# NOTE(review): the chunk begins with a dangling ')' closing an add_argument call
# that is outside this view.
) parser.add_argument( '--log_learning_rate', type=int, default=-3 ) parser.add_argument( '--epochs', type=int, default=1 ) args = parser.parse_args() # Polyaxon experiment = Experiment(project='mnist') experiment.create(tags=['keras']) experiment.log_params(conv1_size=args.conv1_size, conv2_size=args.conv2_size, dropout=args.dropout, hidden1_size=args.hidden1_size, optimizer=args.optimizer, log_learning_rate=args.log_learning_rate, epochs=args.epochs) (x_train, y_train), (x_test, y_test) = mnist.load_data() # Polyaxon experiment.log_data_ref(data=x_train, data_name='x_train') experiment.log_data_ref(data=y_train, data_name='y_train') experiment.log_data_ref(data=x_test, data_name='x_test') experiment.log_data_ref(data=y_test, data_name='y_test')
# Fragment of an MXNet MNIST example (whitespace-collapsed): argparse flags for two
# conv blocks, the dense head, and training; a Polyaxon Experiment 'mnist' tagged
# 'mxnet'; hyperparameter logging; then the MNIST download and NDArrayIter setup.
# BUG(review): log_params passes conv2_kernel=args.conv1_kernel,
# conv2_filters=args.conv1_filters, conv2_activation=args.conv1_activation — the
# conv2_* entries log the conv1 values (copy-paste error); they should read
# args.conv2_kernel / args.conv2_filters / args.conv2_activation.
# NOTE(review): --batch_size is parsed but never included in log_params.
# NOTE(review): "mnist = mx.test_utils.get_mnist()" rebinds the name used for the
# Experiment's project/name string context elsewhere — harmless here but fragile.
# The chunk ends inside the mx.io.NDArrayIter(...) call; the rest is outside view.
parser.add_argument('--conv1_kernel', type=int, default=5) parser.add_argument('--conv1_filters', type=int, default=10) parser.add_argument('--conv1_activation', type=str, default='relu') parser.add_argument('--conv2_kernel', type=int, default=5) parser.add_argument('--conv2_filters', type=int, default=10) parser.add_argument('--conv2_activation', type=str, default='relu') parser.add_argument('--fc1_hidden', type=int, default=10) parser.add_argument('--fc1_activation', type=str, default='relu') parser.add_argument('--optimizer', type=str, default='adam') parser.add_argument('--log_learning_rate', type=int, default=-3) parser.add_argument('--batch_size', type=int, default=100) parser.add_argument('--epochs', type=int, default=1) args = parser.parse_args() experiment = Experiment('mnist') experiment.create(tags=['mxnet']) experiment.log_params(conv1_kernel=args.conv1_kernel, conv1_filters=args.conv1_filters, conv1_activation=args.conv1_activation, conv2_kernel=args.conv1_kernel, conv2_filters=args.conv1_filters, conv2_activation=args.conv1_activation, fc1_hidden=args.fc1_hidden, fc1_activation=args.fc1_activation, optimizer=args.optimizer, log_learning_rate=args.log_learning_rate, epochs=args.epochs) logger.info('Downloading data ...') mnist = mx.test_utils.get_mnist() train_iter = mx.io.NDArrayIter(mnist['train_data'],
# Fragment of an MXNet MNIST example variant (whitespace-collapsed): identical flag
# set to the sibling MXNet fragment, but create() here passes framework='mxnet'
# and tags=['examples'].
# BUG(review): log_params passes conv2_kernel=args.conv1_kernel,
# conv2_filters=args.conv1_filters, conv2_activation=args.conv1_activation — the
# conv2_* entries log the conv1 values (copy-paste error); they should read
# args.conv2_kernel / args.conv2_filters / args.conv2_activation.
# NOTE(review): --batch_size is parsed but never included in log_params.
# The chunk ends inside the mx.io.NDArrayIter(...) call; the rest is outside view.
parser.add_argument('--conv1_kernel', type=int, default=5) parser.add_argument('--conv1_filters', type=int, default=10) parser.add_argument('--conv1_activation', type=str, default='relu') parser.add_argument('--conv2_kernel', type=int, default=5) parser.add_argument('--conv2_filters', type=int, default=10) parser.add_argument('--conv2_activation', type=str, default='relu') parser.add_argument('--fc1_hidden', type=int, default=10) parser.add_argument('--fc1_activation', type=str, default='relu') parser.add_argument('--optimizer', type=str, default='adam') parser.add_argument('--log_learning_rate', type=int, default=-3) parser.add_argument('--batch_size', type=int, default=100) parser.add_argument('--epochs', type=int, default=1) args = parser.parse_args() experiment = Experiment('mnist') experiment.create(framework='mxnet', tags=['examples']) experiment.log_params(conv1_kernel=args.conv1_kernel, conv1_filters=args.conv1_filters, conv1_activation=args.conv1_activation, conv2_kernel=args.conv1_kernel, conv2_filters=args.conv1_filters, conv2_activation=args.conv1_activation, fc1_hidden=args.fc1_hidden, fc1_activation=args.fc1_activation, optimizer=args.optimizer, log_learning_rate=args.log_learning_rate, epochs=args.epochs) logger.info('Downloading data ...') mnist = mx.test_utils.get_mnist() train_iter = mx.io.NDArrayIter(mnist['train_data'],
# Fragment of a TensorFlow MNIST example, reformatted from a whitespace-collapsed
# line. Defines the remaining CLI hyperparameter flags, parses them, and records
# every value with a Polyaxon experiment. `parser` is created earlier in the file;
# `experiment` and `args` are used by the code that follows it.

# Second conv block, regularization, dense head, and training-loop flags.
parser.add_argument('--conv2_size', type=int, default=5)
parser.add_argument('--conv2_out', type=int, default=64)
parser.add_argument('--conv2_activation', type=str, default='relu')
parser.add_argument('--pool2_size', type=int, default=2)
parser.add_argument('--dropout', type=float, default=0.2)
parser.add_argument('--fc1_size', type=int, default=1024)
parser.add_argument('--fc1_activation', type=str, default='sigmoid')
parser.add_argument('--optimizer', type=str, default='adam')
parser.add_argument('--log_learning_rate', type=int, default=-3)
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--epochs', type=int, default=1)
args = parser.parse_args()

# Polyaxon
# Declare the experiment and log every parsed hyperparameter against it.
experiment = Experiment('mnist')
experiment.create(tags=['tensorflow'])
experiment.log_params(
    conv1_size=args.conv1_size,
    conv1_out=args.conv1_out,
    conv1_activation=args.conv1_activation,
    pool1_size=args.pool1_size,
    conv2_size=args.conv2_size,
    conv2_out=args.conv2_out,
    conv2_activation=args.conv2_activation,
    pool2_size=args.pool2_size,
    fc1_activation=args.fc1_activation,
    fc1_size=args.fc1_size,
    optimizer=args.optimizer,
    log_learning_rate=args.log_learning_rate,
    batch_size=args.batch_size,
    dropout=args.dropout,
    epochs=args.epochs,
)
# Fragment of an XGBoost iris example (whitespace-collapsed): the tail of a
# train-and-score function (fit, predict, return accuracy), then the __main__
# block — argparse flags, a Polyaxon Experiment 'iris' (framework='xgboost'),
# hyperparameter logging, iris load, 80/20 train_test_split, and log_data_ref
# bookkeeping for the splits.
# NOTE(review): data_name casing is inconsistent — 'x_train'/'y_train' lowercase
# but 'X_test' uppercase; downstream lookups by name should be checked.
# NOTE(review): the chunk begins inside a def whose signature is outside this view
# (the bare "return" belongs to it) and ends before the remaining log_data_ref /
# training calls.
model.fit(X_train, y_train) pred = model.predict(X_test) return accuracy_score(pred, y_test) if __name__ == '__main__': parser = argparse.ArgumentParser() parser.add_argument('--log_learning_rate', type=int, default=-3) parser.add_argument('--max_depth', type=int, default=3) parser.add_argument('--num_rounds', type=int, default=10) parser.add_argument('--min_child_weight', type=int, default=5) args = parser.parse_args() # Polyaxon experiment = Experiment('iris') experiment.create(framework='xgboost', tags=['examples']) experiment.log_params(log_learning_rate=args.log_learning_rate, max_depth=args.max_depth, num_rounds=args.num_rounds, min_child_weight=args.min_child_weight) iris = load_iris() X = iris.data Y = iris.target X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2) # Polyaxon experiment.log_data_ref(data=X_train, data_name='x_train') experiment.log_data_ref(data=y_train, data_name='y_train') experiment.log_data_ref(data=X_test, data_name='X_test')
# Fragment of an XGBoost iris example variant (whitespace-collapsed): finishes the
# argparse flags (--num_rounds, --min_child_weight), creates a Polyaxon Experiment
# 'iris' tagged 'xgboost' (no framework= argument here, unlike the sibling
# variant — confirm intent), logs hyperparameters, loads iris, splits 80/20, and
# registers the splits with log_data_ref.
# NOTE(review): data_name casing is inconsistent — 'x_train'/'y_train' lowercase
# but 'X_test' uppercase.
# NOTE(review): the chunk begins with a dangling ')' closing an add_argument call
# that is outside this view.
) parser.add_argument( '--num_rounds', type=int, default=10 ) parser.add_argument( '--min_child_weight', type=int, default=5 ) args = parser.parse_args() # Polyaxon experiment = Experiment('iris') experiment.create(tags=['xgboost']) experiment.log_params(log_learning_rate=args.log_learning_rate, max_depth=args.max_depth, num_rounds=args.num_rounds, min_child_weight=args.min_child_weight) iris = load_iris() X = iris.data Y = iris.target X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.2) # Polyaxon experiment.log_data_ref(data=X_train, data_name='x_train') experiment.log_data_ref(data=y_train, data_name='y_train') experiment.log_data_ref(data=X_test, data_name='X_test')