Example #1
0
import flor
log = flor.log


@flor.track
def fib(idx):
    """Compute a Fibonacci-style series up to index *idx*, logging with flor.

    Each index is logged as a parameter and each value as a metric via
    ``log`` (flor.log). Relies on ``log.param``/``log.metric`` returning
    their argument unchanged (as the recurrence at the bottom requires).

    NOTE(review): the seeds are 0, 1, 2 — standard Fibonacci would seed
    index 2 with 1, not 2. Kept as-is to preserve the logged metrics;
    confirm whether this shifted series is intentional.

    Args:
        idx: Highest index to compute; expects idx >= 2.

    Returns:
        dict mapping index -> value. (The original computed the series
        but discarded it; returning it lets callers use the result and
        is backward compatible since the old return was ``None``.)
    """
    # Renamed from `fib`: the original local dict shadowed the function name.
    series = {}
    series[log.param(0)] = log.metric(0)
    series[log.param(1)] = log.metric(1)
    series[log.param(2)] = log.metric(2)

    for i in range(3, idx + 1):
        series[log.param(i)] = log.metric(series[i - 1] + series[i - 2])

    return series


# Run the tracked computation inside a named flor experiment context so
# its logged params/metrics are recorded under the 'fib' experiment.
with flor.Context('fib'):
    fib(5)
Example #2
0
            # Backpropagate and update weights for the current mini-batch.
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # Every 100 mini-batches, log training progress via flor.
            if (i + 1) % 100 == 0:
                log.metric(epoch)
                log.metric(i)
                log.metric(loss.item())

    # Test the model
    # In test phase, we don't need to compute gradients (for memory efficiency)
    with torch.no_grad():
        correct = 0
        total = 0
        for images, labels in test_loader:
            # Flatten each image into a length-784 vector (28*28) and
            # move tensors to the training device.
            images = images.reshape(-1, 28 * 28).to(device)
            labels = labels.to(device)
            outputs = model(images)
            # Predicted class = index of the max logit along dim 1.
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

            # Running accuracy over all batches seen so far, logged per batch.
            acc = 100 * (correct / total)
            log.metric(acc)

    # NOTE(review): `acc` is only bound inside the loop above; if
    # test_loader yields no batches this line raises NameError — confirm
    # the loader is never empty.
    print('Accuracy of the network on the 10000 test images: {} %'.format(acc))


# Run the tracked training entry point inside a named flor experiment
# context so its logged params/metrics are recorded under 'pytorch_demo_nn'.
with flor.Context('pytorch_demo_nn'):
    main()
Example #3
0
import flor
from sklearn import datasets
from sklearn import svm
from sklearn.model_selection import train_test_split

log = flor.log


@flor.track
def fit_and_score_model(gamma, C, test_size, random_state):
    """Train an SVM on the iris dataset and log its held-out accuracy.

    Hyperparameters and split settings are logged as flor params; the
    test-set score is logged as a flor metric.

    Args:
        gamma: RBF kernel coefficient for ``svm.SVC``.
        C: Regularization strength for ``svm.SVC``.
        test_size: Fraction of the data held out for testing.
        random_state: Seed for the train/test split.

    Returns:
        float: mean accuracy on the held-out set. (The original computed
        and logged this but returned ``None``; returning it is backward
        compatible and lets callers use the score.)
    """
    iris = datasets.load_iris()
    X_tr, X_te, y_tr, y_te = train_test_split(iris.data, iris.target,
                                              test_size=log.param(test_size),
                                              random_state=log.param(random_state))

    clf = svm.SVC(gamma=log.param(gamma), C=log.param(C))
    clf.fit(X_tr, y_tr)

    # log.metric returns its argument, so `score` holds the accuracy.
    score = log.metric(clf.score(X_te, y_te))
    return score

# Run the tracked model fit inside a named flor experiment context so
# its logged params/metrics are recorded under the 'iris' experiment.
with flor.Context('iris'):
    fit_and_score_model(gamma=0.001, C=100.0, test_size=0.15, random_state=430)
Example #4
0
@flor.track
def main(x, y, z):
    """Train and evaluate sentiment models on movie-review data.

    Args:
        x: Test-set fraction for the train/test split (logged as a param).
        y: Random seed for the split (logged as a param).
        z: Rating threshold — ratings below z become label 0, else 1.
    """
    # Load the data; log.read records 'data.json' as a tracked input.
    movie_reviews = pd.read_json(log.read('data.json'))

    # Binarize ratings against the threshold z. The lambda argument is
    # named `rating` — the original reused `x`, shadowing the test_size
    # parameter and obscuring which value was compared to z.
    movie_reviews['rating'] = movie_reviews['rating'].map(lambda rating: 0
                                                          if rating < z else 1)

    # Do train/test split-
    X_tr, X_te, y_tr, y_te = train_test_split(movie_reviews['text'],
                                              movie_reviews['rating'],
                                              test_size=log.param(x),
                                              random_state=log.param(y))

    # Vectorize the English sentences
    vectorizer = TfidfVectorizer()
    vectorizer.fit(X_tr)
    X_tr = vectorizer.transform(X_tr)
    X_te = vectorizer.transform(X_te)

    # Fit and evaluate one model per hyperparameter setting.
    # (train_model/test_model are defined elsewhere in the project.)
    for i in [1, 5]:
        clf = train_model(i, X_tr, y_tr)
        test_model(clf, X_te, y_te)

    # Logged so the experiment record captures this constant.
    the_answer_to_everything = log.param(42)


# Run the tracked pipeline inside a named flor experiment context so its
# logged params/metrics are recorded under the 'basic' experiment.
# Arguments: test_size=0.2, random_state=92, rating threshold=5.
with flor.Context('basic'):
    main(0.2, 92, 5)