def __init__(self, image_nr, image_resize, model_name):
    super().__init__()
    self.model_name = model_name
    self.ctx = neptune.Context()
    self.epoch_loss_averager = Averager()
    self.image_nr = image_nr
    self.image_resize = image_resize
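A minimal, self-contained sketch of how a callback like this could report the averaged epoch loss to Neptune. The Averager defined here, the hook names, and the 'epoch_loss' channel are assumptions for illustration; only neptune.Context() and channel_send come from the snippets themselves.

import neptune


class Averager:
    """Stand-in running mean of the values added during one epoch."""

    def __init__(self):
        self.total, self.count = 0.0, 0

    def add(self, value):
        self.total += value
        self.count += 1

    @property
    def mean(self):
        return self.total / max(self.count, 1)


class EpochLossLogger:
    def __init__(self):
        self.ctx = neptune.Context()
        self.epoch_loss_averager = Averager()

    def on_batch_end(self, batch_loss):
        # Accumulate the per-batch loss so the epoch average can be reported later.
        self.epoch_loss_averager.add(batch_loss)

    def on_epoch_end(self, epoch_id):
        # Report the averaged training loss for the finished epoch, then reset.
        self.ctx.channel_send(name='epoch_loss', y=self.epoch_loss_averager.mean)
        self.epoch_loss_averager = Averager()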
Example #2
def __init__(self, min_lr=1e-8, multipy_factor=1.05, add_factor=0.0):
    super().__init__()
    self.ctx = neptune.Context()
    self.ctx.channel_reset('Learning Rate Finder')
    self.min_lr = min_lr
    self.multipy_factor = multipy_factor
    self.add_factor = add_factor
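The fields above suggest the usual learning-rate-finder loop: grow the rate a little after every batch and log (rate, loss) pairs. A sketch of that per-batch step, assuming a PyTorch-style optimizer and an on_batch_end hook (both assumptions, not the original code):

def on_batch_end(self, batch_id, batch_loss, optimizer):
    # Current learning rate, never allowed to drop below min_lr.
    lr = max(optimizer.param_groups[0]['lr'], self.min_lr)
    # Log (lr, loss) pairs so the loss-vs-learning-rate curve shows up in Neptune.
    self.ctx.channel_send(name='Learning Rate Finder', x=lr, y=batch_loss)
    # Increase the learning rate for the next batch.
    new_lr = lr * self.multipy_factor + self.add_factor
    for group in optimizer.param_groups:
        group['lr'] = new_lr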
Example #3
def __init__(self, metric_name, minimize, reduce_factor, reduce_patience, min_lr):
    super().__init__()
    self.ctx = neptune.Context()
    self.ctx.channel_reset('Learning Rate')
    self.metric_name = metric_name
    self.minimize = minimize
    self.reduce_factor = reduce_factor
    self.reduce_patience = reduce_patience
    self.min_lr = min_lr
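These fields describe a reduce-on-plateau policy. A sketch of the epoch-level logic they imply, assuming extra self.best / self.wait state initialized elsewhere, a PyTorch-style optimizer, and an on_epoch_end hook (all assumptions):

def on_epoch_end(self, epoch_id, metric_value, optimizer):
    # Did the monitored metric improve in the direction we care about?
    improved = (metric_value < self.best) if self.minimize else (metric_value > self.best)
    if improved:
        self.best, self.wait = metric_value, 0
    else:
        self.wait += 1
        if self.wait >= self.reduce_patience:
            # Shrink the learning rate, but never below min_lr.
            for group in optimizer.param_groups:
                group['lr'] = max(group['lr'] * self.reduce_factor, self.min_lr)
            self.wait = 0
    self.ctx.channel_send(name='Learning Rate', y=optimizer.param_groups[0]['lr'])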
Example #4
def __init__(self, image_nr, image_resize, image_every, model_name,
             use_depth):
    super().__init__()
    self.model_name = model_name
    self.ctx = neptune.Context()
    self.epoch_loss_averager = Averager()
    self.image_resize = image_resize
    self.image_every = image_every
    self.image_nr = image_nr
    self.use_depth = use_depth
Example #5
def __init__(self, number_of_batches_per_full_cycle, max_lr, enabled=1,
             momentum_range=(0.95, 0.8), prcnt_annihilate=10, div=10):
    super().__init__()

    self.enabled = enabled
    self.number_of_batches_per_full_cycle = number_of_batches_per_full_cycle
    self.max_lr = max_lr
    self.momentum_range = momentum_range
    self.prcnt_annihilate = prcnt_annihilate
    self.div = div
    self.ctx = neptune.Context()
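The parameters above describe a one-cycle policy: the learning rate climbs from max_lr / div to max_lr over the first half of the cycle and falls back over the second half, momentum moves the opposite way across momentum_range, and prcnt_annihilate controls a final phase where the rate drops well below its starting value. A rough sketch of that schedule; the phase boundaries and the final decay factor are assumptions:

def one_cycle(step, total, max_lr, div=10, momentum_range=(0.95, 0.8), prcnt_annihilate=10):
    """Return (learning_rate, momentum) for batch index `step` of a cycle of `total` batches."""
    base_lr = max_lr / div
    half = total // 2
    m_high, m_low = momentum_range
    if step <= half:                                   # phase 1: lr up, momentum down
        frac = step / half
        return base_lr + (max_lr - base_lr) * frac, m_high - (m_high - m_low) * frac
    if step <= total:                                  # phase 2: lr down, momentum up
        frac = (step - half) / (total - half)
        return max_lr - (max_lr - base_lr) * frac, m_low + (m_high - m_low) * frac
    # phase 3: annihilation, decay the rate far below base_lr at high momentum
    annihilation_steps = max(int(total * prcnt_annihilate / 100), 1)
    frac = min((step - total) / annihilation_steps, 1.0)
    return base_lr * (1.0 - 0.99 * frac), m_high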
Example #6
def main():
    """
    Load data and train a model on it.
    """
    context = neptune.Context()
    context.integrate_with_tensorflow()
    final_train_channel = context.create_channel('final_train_accuracy',
                                                 neptune.ChannelType.NUMERIC)
    final_test_channel = context.create_channel('final_test_accuracy',
                                                neptune.ChannelType.NUMERIC)
    args = neptune_args(context)
    print('args:\n', args)
    random.seed(args.seed)

    train_set, test_set = split_dataset(read_dataset(args.omniglot_src))
    train_set = list(augment_dataset(train_set))
    test_set = list(test_set)

    model = ProgressiveOmniglotModel(args.classes, **model_kwargs(args))

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = args.allow_growth
    with tf.Session(config=config) as sess:
        if not args.pretrained:
            print('Training...')
            train(sess, model, train_set, test_set, args.checkpoint,
                  **train_kwargs(args))
        else:
            print('Restoring from checkpoint...')
            tf.train.Saver().restore(
                sess, tf.train.latest_checkpoint(args.checkpoint))

        print('Evaluating...')
        eval_kwargs = evaluate_kwargs(args)

        final_train_accuracy = evaluate(sess, model, train_set, **eval_kwargs)
        print('final_train_accuracy:', final_train_accuracy)
        final_train_channel.send(final_train_accuracy)

        final_test_accuracy = evaluate(sess, model, test_set, **eval_kwargs)
        print('final_test_accuracy:', final_test_accuracy)
        final_test_channel.send(final_test_accuracy)
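neptune_args is not shown in this snippet. A hypothetical sketch of its shape, assuming the old neptune.Context exposes experiment parameters on context.params; only the parameters visible in this snippet are covered, and every default value is made up for illustration:

from types import SimpleNamespace


def neptune_args(context):
    # Read experiment parameters off the old-style Neptune context, falling back
    # to illustrative defaults when a parameter is not defined.
    params = context.params
    return SimpleNamespace(
        seed=getattr(params, 'seed', 0),
        classes=getattr(params, 'classes', 5),
        omniglot_src=getattr(params, 'omniglot_src', 'data/omniglot'),
        checkpoint=getattr(params, 'checkpoint', 'model_checkpoint'),
        pretrained=getattr(params, 'pretrained', False),
        allow_growth=getattr(params, 'allow_growth', True),
    )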
Example #7
from attrdict import AttrDict
import neptune
import numpy as np
import pandas as pd
from sklearn.externals import joblib
from steppy.base import Step, IdentityOperation
from steppy.adapter import Adapter, E

from common_blocks import augmentation as aug
from common_blocks import metrics
from common_blocks import models
from common_blocks import pipelines
from common_blocks import utils
from common_blocks import postprocessing

CTX = neptune.Context()
LOGGER = utils.init_logger()

#    ______   ______   .__   __.  _______  __    _______      _______.
#   /      | /  __  \  |  \ |  | |   ____||  |  /  _____|    /       |
#  |  ,----'|  |  |  | |   \|  | |  |__   |  | |  |  __     |   (----`
#  |  |     |  |  |  | |  . `  | |   __|  |  | |  | |_ |     \   \
#  |  `----.|  `--'  | |  |\   | |  |     |  | |  |__| | .----)   |
#   \______| \______/  |__| \__| |__|     |__|  \______| |_______/
#

EXPERIMENT_DIR = '/output/experiment'
CLONE_EXPERIMENT_DIR_FROM = ''  # When running eval in the cloud, set this to the output dir of a previous experiment, e.g. /input/SAL-14/output/experiment
OVERWRITE_EXPERIMENT_DIR = False
DEV_MODE = False
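A sketch of how these flags are typically consumed a little further down in such a script (the exact logic is an assumption, not copied from the repository):

import os
import shutil

# Optionally wipe the output dir, or seed it from a previous experiment
# when only running evaluation.
if OVERWRITE_EXPERIMENT_DIR and os.path.isdir(EXPERIMENT_DIR):
    shutil.rmtree(EXPERIMENT_DIR)
if CLONE_EXPERIMENT_DIR_FROM:
    shutil.copytree(CLONE_EXPERIMENT_DIR_FROM, EXPERIMENT_DIR)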
Example #8
def __init__(self):
    super().__init__()
    self.ctx = neptune.Context()
    self.best_loss = None
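A hypothetical on_epoch_end hook showing one way best_loss could be used; the hook name, the argument, and the channel name are assumptions:

def on_epoch_end(self, epoch_id, validation_loss):
    # Track the best validation loss seen so far and report improvements.
    if self.best_loss is None or validation_loss < self.best_loss:
        self.best_loss = validation_loss
        self.ctx.channel_send(name='best_validation_loss', y=self.best_loss)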
Example #9
import neptune

ctx = neptune.Context()


def neptune_send_plot(logs):
    # Forward the metrics from the most recent epoch to Neptune, one channel per metric.
    epoch_data = logs[-1]
    for metrics, value in epoch_data.items():
        ctx.channel_send(name=metrics, y=value)
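For example, with a Keras-style history of per-epoch metric dicts, only the last entry is forwarded, one Neptune channel per metric (the values below are purely illustrative):

logs = [
    {'loss': 0.41, 'val_loss': 0.47},
    {'loss': 0.35, 'val_loss': 0.44},
]
neptune_send_plot(logs)  # sends loss=0.35 and val_loss=0.44 to their channels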