Example #1
from verta import Client


def downloadArtifact(proj, exp_name, exp_run, serialization):
    """Download a run's model artifact to the local file model.pkl."""
    client = Client("http://localhost:3000")
    proj = client.set_project(proj)
    expt = client.set_experiment(exp_name)
    run = client.set_experiment_run(exp_run)
    if serialization.lower() == 'pickle':
        run.download_model('model.pkl')
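A hypothetical invocation of the helper above; the project, experiment, and run names are placeholders, not values from the original example:

# Placeholder names; substitute your own project/experiment/run.
downloadArtifact("Demo Project", "Baseline Experiment", "First Run", "pickle")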
Example #2
from verta import Client
import cloudpickle

client = Client('https://dev.verta.ai')
proj = client.set_project('Demo - Jenkins+Prometheus')
run = proj.expt_runs[0]  # first experiment run in the project

# Fetch the run's logged model and re-serialize it locally.
model = run.get_model()
with open('model.pkl', 'wb') as f:
    cloudpickle.dump(model, f)
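For completeness, a minimal sketch of reloading the artifact written above, assuming the pickle was produced by a compatible cloudpickle and Python version:

import cloudpickle

with open('model.pkl', 'rb') as f:
    model = cloudpickle.load(f)  # restores the original model object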
Example #3
import os
import random
import multiprocessing

from verta import Client
from verta.utils import ModelAPI

os.environ['VERTA_EMAIL'] = '*****@*****.**'
os.environ['VERTA_DEV_KEY'] = '3e078522-e479-4cd2-b78c-04ffcacae3f4'

HOST = "dev.verta.ai"
EXPERIMENT_NAME = "Scaling"

client = Client(HOST)
proj = client.set_project('Scaling Test 100 jobs of 500k models')
expt = client.set_experiment(EXPERIMENT_NAME)

# Candidate hyperparameter values to sample from at random
c_list = [0.0001, 0.0002, 0.0004]
solver_list = ['lgfgs', 'grad']
max_iter_list = [7, 15, 28]

# With 10 keys per category, each run logs roughly 30 metric/hyperparameter keys
paramKeyLimit = 10


def getMetrics(key_limit):
    """Generate key_limit random validation-accuracy and loss metrics."""
    metric_obj = {}
    for i in range(key_limit):
        metric_obj['val_acc' + str(i)] = random.uniform(0.5, 0.9)
        metric_obj['loss' + str(i)] = random.uniform(0.6, 0.8)
    return metric_obj
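The listing is cut off here. A minimal sketch of how such metrics might be logged to runs for the scaling test, using the client set up above (the helper and run names are hypothetical, not from the original; the multiprocessing import suggests the original fanned these out across worker processes):

def logOneRun(idx):
    # Hypothetical helper: create one run, log a random hyperparameter
    # choice, and attach the generated metrics.
    run = client.set_experiment_run("run-" + str(idx))
    run.log_hyperparameters({
        'C': random.choice(c_list),
        'solver': random.choice(solver_list),
        'max_iter': random.choice(max_iter_list),
    })
    run.log_metrics(getMetrics(paramKeyLimit))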
Example #4
import argparse
import os

from verta import Client
from verta.utils import ModelAPI

HOST = "http://localhost:3009"
PROJECT_NAME = "readmission_shared_data_preprocess_v0"
EXPERIMENT_NAME = "readmission_shared_data_preprocess_v0_first_run"

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Template Main Function')
    parser.add_argument('--input', type=str, help='input folder')
    parser.add_argument('--output', type=str, help='output folder')
    parser.add_argument('--vis', type=str, help='vis folder')
    args = parser.parse_args()

    client = Client(HOST)
    proj = client.set_project(PROJECT_NAME)
    expt = client.set_experiment(EXPERIMENT_NAME)
    run = client.set_experiment_run()

    cmd = "readmission_shared_data_preprocess_trainvalidationtest.sh"  # modify
    lib_param = {}
    lib_param["--input"] = args.input
    lib_param["--output"] = args.output
    lib_param["--vis"] = args.vis

    for k, v in lib_param.items():
        cmd = cmd + " " + str(k) + " " + str(v)

    cmd = "bash " + cmd
    print("executing cmd: \n", cmd)
    os.system(cmd)  # modify
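One might also record what was executed on the run itself; a sketch, assuming verta's ExperimentRun.log_hyperparameters and log_attribute methods and the run, lib_param, and cmd variables defined above:

# Sketch (not in the original): log the invocation on the run and fail
# loudly on a non-zero exit status, rather than ignoring os.system's
# return value.
run.log_hyperparameters(lib_param)
run.log_attribute("command", cmd)
if os.system(cmd) != 0:
    raise RuntimeError("preprocessing script failed: " + cmd)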
Example #5
from typing import Any, Dict

import torch
from torch import nn
from torch.nn import Sequential

from nntoolbox.metrics import Accuracy, Loss
from nntoolbox.losses import SmoothedCrossEntropy
# Assumed import path for the layer helpers used below:
from nntoolbox.vision.components import ConvolutionalLayer, ResidualBlockPreActivation

from verta import Client
from verta.client import ExperimentRun
from experii.verta import ModelDBCB
from experii.ax import AxTuner

torch.backends.cudnn.benchmark = True
EXPERIMENT_NAME = "Hyperparameter Tuning"

# Set up ModelDB:
client = Client(
    CLIENT_PARA
)  # supply your own ModelDB client parameters here (see Verta AI's notebooks)
proj = client.set_project("My second ModelDB project")
exp = client.set_experiment(EXPERIMENT_NAME)


# Define model generating function:
def model_fn(parameterization: Dict[str, Any]) -> nn.Module:
    model = Sequential(
        ConvolutionalLayer(in_channels=3,
                           out_channels=16,
                           kernel_size=3,
                           activation=nn.ReLU),
        ResidualBlockPreActivation(in_channels=16, activation=nn.ReLU),
        ConvolutionalLayer(in_channels=16,
                           out_channels=32,
                           kernel_size=3,
                           activation=nn.ReLU),
        # ... remaining layers omitted in the original listing ...
    )
    return model