Code example #1
def configure_neptune(specification):
    if 'NEPTUNE_API_TOKEN' not in os.environ:
        raise NeptuneAPITokenException()

    git_info = specification.get('git_info', None)
    if git_info:
        git_info.commit_date = datetime.datetime.now()

    neptune.init(project_qualified_name=specification['project'])

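    # Set pwd property with path to experiment.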
    properties = {'pwd': os.getcwd()}
    neptune.create_experiment(name=specification['name'],
                              tags=specification['tags'],
                              params=specification['parameters'],
                              properties=properties,
                              git_info=git_info)
    atexit.register(neptune.stop)

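    # Add a hook for workers so they connect to the appropriate Neptune
    # experiment and register the Neptune logger.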
    def connect_to_neptune_experiment_add_logger(project_id, experiment_id):
        neptune.init(project_id)
        exp = neptune.project.get_experiments(id=experiment_id)[0]
        metric_logging.register_logger(NeptuneLogger(exp))

    worker_utils.register_init_hook(
        functools.partial(
            connect_to_neptune_experiment_add_logger,
            project_id=neptune.project.full_id,
            experiment_id=neptune.get_experiment().id,
        ))

    return NeptuneLogger(neptune.get_experiment())
Code example #2
def configure_neptune(experiment: Experiment, cluster_config=None):
    """Configures the Neptune experiment, then returns the Neptune logger."""
    if 'NEPTUNE_API_TOKEN' not in os.environ:
        raise NeptuneAPITokenException()

    neptune.init(project_qualified_name=experiment.project_qualified_name)
    # Set pwd property with path to experiment.
    properties = {'pwd': os.getcwd()}
    with Capturing() as neptune_link:
        neptune.create_experiment(name=experiment.name,
                                  tags=experiment.tags,
                                  params=experiment.params,
                                  description=experiment.description,
                                  properties=properties,
                                  upload_stdout=False)
    atexit.register(neptune.stop)

    # Add a hook for Ray workers so they connect to the appropriate Neptune
    # experiment and register the Neptune logger.
    def connect_to_neptune_experiment_add_logger(project_id, experiment_id):
        neptune.init(project_id)
        exp = neptune.project.get_experiments(id=experiment_id)[0]
        metric_logging.register_logger(NeptuneLogger(exp))

    ray.register_worker_init_hook(
        functools.partial(
            connect_to_neptune_experiment_add_logger,
            project_id=neptune.project.full_id,
            experiment_id=neptune.get_experiment().id,
        ))

    return NeptuneLogger(neptune.get_experiment()), neptune_link[0]
Code example #3
File: neptuneplus.py Project: denised/LandCover
    def on_train_begin(self, metrics_names, **kwargs):  # pylint: disable=arguments-differ
        # check to see if the user has turned neptune off
        if getattr(self.learn, 'do_neptune', True) is False:
            return

        try:  # if the caller has already created an experiment, use that instead.
            self.exp = neptune.get_experiment()
        except neptune.exceptions.NoExperimentContext:
            # we normally expect to end up here.
            # Get the parameters of this training run to pass to Neptune (depends on TrainTracker having set them).
            p = self.learn.parameters
            name = p.get('train_id', "none")
            params = {}
            # pass a subset of the parameters to Neptune
            for k in ['arch', 'loss_func', 'parameters', 'machine']:
                if k in p:
                    params[k] = p[k]
            description = p.get('description', "")
            try:
                self.exp = neptune.create_experiment(name=name,
                                                     description=description,
                                                     params=params,
                                                     upload_source_files=[],
                                                     upload_stdout=False,
                                                     upload_stderr=False)
                self.own_exp = True
            except neptune.exceptions.Uninitialized:
                _logger.warning(
                    "Neptune not initialized; no tracing will be done")

        # This would not work in regular fastai, because metrics_names is not updated with additional names
        # But in our case, we use this inside a CycleHandler, which does update them.
        self.metrics_names = ['train_loss'] + metrics_names
Code example #4
    def before_fit(self):
        try:
            self.experiment = neptune.get_experiment()
        except ValueError:
            print(
                'No active experiment. Please invoke `neptune.create_experiment()` before this callback.'
            )

        try:
            self.experiment.set_property('n_epoch', str(self.learn.n_epoch))
            self.experiment.set_property('model_class',
                                         str(type(self.learn.model)))
        except Exception:
            print(
                f'Did not log all properties. Check properties in the {neptune.get_experiment()}.'
            )

        try:
            with tempfile.NamedTemporaryFile(mode='w') as f:
                with open(f.name, 'w') as g:
                    g.write(repr(self.learn.model))
                self.experiment.log_artifact(f.name, 'model_summary.txt')
        except Exception:
            print(
                'Did not log model summary. Check if your model is PyTorch model.'
            )

        if self.log_model_weights and not hasattr(self.learn, 'save_model'):
            print(
                'Unable to log model to Neptune.\n',
                'Use "SaveModelCallback" to save model checkpoints that will be logged to Neptune.'
            )
Code example #5
File: designer.py Project: befeltingu/VikingZero
    def load_exp_id(self):
        if self._run:
            return self._run.id
        else:
            try:
                return neptune.get_experiment().id
            except Exception:
                return None
Code example #6
File: designer.py Project: befeltingu/VikingZero
    def init_neptune(self):
        print("INIT NEPTUNE")
        neptune_api_token = self._exp_config["neptune_api_token"]
        neptune_name = self._exp_config["neptune_name"]
        exp_name = self._exp_config["exp_name"]

        data = {
            "agent_config": self._agent_config,
            "exp_config": self._exp_config
        }

        neptune.init(neptune_name, api_token=neptune_api_token)

        neptune.create_experiment(exp_name, params=data)

        exp = neptune.get_experiment()
        self.exp_id = exp.id

        return exp
Code example #7
    def modify_tags(self):
        neptune.append_tags("tag1")
        neptune.append_tag(["tag2_to_remove", "tag3"])
        neptune.remove_tag("tag2_to_remove")
        neptune.remove_tag("tag4_remove_non_existing")

        exp = neptune.get_experiment()
        assert set(exp.get_tags()) == {
            "initial tag 1", "initial tag 2", "tag1", "tag3"
        }
Code example #8
    def handle_directories(self):
        exp = neptune.get_experiment()

        # download_artifacts
        neptune.send_artifact(self.data_dir)
        if self._api_version == 1:
            with self.with_check_if_file_appears("output.zip"):
                exp.download_artifacts()
        else:
            with self.with_assert_raises(
                    DownloadArtifactsUnsupportedException):
                exp.download_artifacts()

        # create some nested artifacts
        neptune.log_artifact(self.img_path,
                             destination="main dir/sub dir/art1")
        neptune.log_artifact(self.img_path,
                             destination="main dir/sub dir/art2")
        neptune.log_artifact(self.img_path,
                             destination="main dir/sub dir/art3")

        # downloading a single artifact - download_artifact
        # non-existing artifact
        if self._api_version == 1:
            with self.with_assert_raises(FileNotFound):
                exp.download_artifact("main dir/sub dir/art100")
        else:
            with self.with_assert_raises(DownloadArtifactUnsupportedException):
                exp.download_artifact("main dir/sub dir/art100")
        # artifact directories
        if self._api_version == 1:
            with self.with_assert_raises(HTTPError):
                exp.download_artifact("main dir/sub dir")
        else:
            with self.with_assert_raises(DownloadArtifactUnsupportedException):
                exp.download_artifact("main dir/sub dir")

        # deleting artifacts
        neptune.delete_artifacts("main dir/sub dir/art1")

        # delete a non-existing artifact
        if self._api_version == 1:
            neptune.delete_artifacts("main dir/sub dir/art100")
        else:
            with self.with_assert_raises(
                    DeleteArtifactUnsupportedInAlphaException):
                neptune.delete_artifacts("main dir/sub dir/art100")

        # delete dir
        if self._api_version == 1:
            neptune.delete_artifacts("main dir/sub dir")
        else:
            with self.with_assert_raises(
                    DeleteArtifactUnsupportedInAlphaException):
                neptune.delete_artifacts("main dir/sub dir")
Code example #9
def start_experiment(params: dict) -> Tuple[str, str]:
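    # NOTE: hardcoding an API token in source code is unsafe; prefer reading it
    # from the NEPTUNE_API_TOKEN environment variable.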
    api_token = 'eyJhcGlfYWRkcmVzcyI6Imh0dHBzOi8vYXBwLm5lcHR1bmUuYWkiLCJhcGlfdXJsIjoiaHR0cHM6Ly9hcHAubmVwdHVuZS5haSIsImFwaV9rZXkiOiI4ZTBhNWNhZS0xNTNkLTQ2NjktODI0ZC1kOTAyMzhmNzllNDAifQ=='
    neptune.init('flianza/laboratorio-1', api_token=api_token)
    neptune.create_experiment('tuning', params=params)

    experiment_number = neptune.get_experiment().id

    experiments_folder = '../experimentos'
    experiment_path = f'{experiments_folder}/{experiment_number}'
    if not Path(experiments_folder).exists():
        os.mkdir(experiments_folder)
    os.mkdir(experiment_path)
    return experiment_number, f'{experiment_path}/{experiment_number}'
Code example #10
def _validate_experiment(experiment):
    if experiment is not None:
        if not isinstance(experiment, neptune.experiments.Experiment):
            raise ValueError(
                'Passed experiment is not a Neptune experiment. Create one by using "create_experiment()"'
            )
    else:
        try:
            experiment = neptune.get_experiment()
        except neptune.exceptions.NeptuneNoExperimentContextException:
            raise

    return experiment
Code example #11
def configure_neptune(specification):
    """Configures the Neptune experiment, then returns the Neptune logger."""
    if 'NEPTUNE_API_TOKEN' not in os.environ:
        raise NeptuneAPITokenException()

    git_info = specification.get('git_info', None)
    if git_info:
        git_info.commit_date = datetime.datetime.now()

    neptune.init(project_qualified_name=specification['project'])
    # Set pwd property with path to experiment.
    properties = {'pwd': os.getcwd()}
    neptune.create_experiment(name=specification['name'],
                              tags=specification['tags'],
                              params=specification['parameters'],
                              properties=properties,
                              git_info=git_info)
    atexit.register(neptune.stop)

    # Add a hook for Ray workers so they connect to the appropriate Neptune
    # experiment and register the Neptune logger.
    def connect_to_neptune_experiment_add_logger(project_id, experiment_id):
        neptune.init(project_id)
        exp = neptune.project.get_experiments(
            id=experiment_id
        )[0]
        metric_logging.register_logger(NeptuneLogger(exp))

    ray.register_worker_init_hook(
        functools.partial(
            connect_to_neptune_experiment_add_logger,
            project_id=neptune.project.full_id,
            experiment_id=neptune.get_experiment().id,
        )
    )

    return NeptuneLogger(neptune.get_experiment())
Code example #12
    def handle_files_and_images(self):
        # image
        # `image_name` and `description` will be lost (`send_image` is the same as `log_image`)
        neptune.send_image("image",
                           self.img_path,
                           name="name",
                           description="desc")

        # artifact with default dest
        neptune.send_artifact(self.text_file_path)
        exp = neptune.get_experiment()
        with self.with_check_if_file_appears("text.txt"):
            exp.download_artifact("text.txt")
        with self.with_check_if_file_appears("custom_dest/text.txt"):
            exp.download_artifact("text.txt", "custom_dest")

        # artifact with custom dest
        neptune.send_artifact(self.text_file_path, destination="something.txt")
        exp = neptune.get_experiment()
        with self.with_check_if_file_appears("something.txt"):
            exp.download_artifact("something.txt")
        with self.with_check_if_file_appears("custom_dest/something.txt"):
            exp.download_artifact("something.txt", "custom_dest")

        # destination dirs
        neptune.log_artifact(self.text_file_path,
                             destination="dir/text file artifact")
        neptune.log_artifact(self.text_file_path,
                             destination="dir/artifact_to_delete")

        # deleting
        neptune.delete_artifacts("dir/artifact_to_delete")

        # streams
        with open(self.text_file_path, mode="r") as f:
            neptune.send_artifact(f, destination="file stream.txt")
Code example #13
    def __init__(self):
        super().__init__()
        neptune.init()
        neptune.create_experiment(
            name="const project name",
            description="exp description",
            params=self.params,
            properties=self.properties,
            tags=["initial tag 1", "initial tag 2"],
            abort_callback=None,
            run_monitoring_thread=False,
            hostname="hostname value",
            # notebook_id='test1',  # TODO: Error 500 when wrong value
            upload_source_files="alpha_integration_dev/*.py",
        )

        exp = neptune.get_experiment()

        self._api_version = get_api_version(exp)

        properties = exp.get_properties()
        assert properties["init_text_property"] == "some text"
        assert properties["init_number property"] == "42"
        assert properties["init_list"] == "[1, 2, 3]"

        assert set(exp.get_tags()) == {"initial tag 1", "initial tag 2"}

        # download sources
        if self._api_version == 1:
            # old domain

            with self.with_check_if_file_appears("old_client.py.zip"):
                exp.download_sources("alpha_integration_dev/old_client.py")
            with self.with_check_if_file_appears("alpha_integration_dev.zip"):
                exp.download_sources("alpha_integration_dev")

            with self.with_assert_raises(FileNotFound):
                exp.download_sources("non_existing")
        else:
            # new api

            with self.with_check_if_file_appears("files.zip"):
                exp.download_sources()
            with self.with_assert_raises(DownloadSourcesException):
                exp.download_sources("whatever")
            with self.with_check_if_file_appears("file_set_sources/files.zip"):
                exp.download_sources(destination_dir="file_set_sources")
Code example #14
File: designer.py Project: befeltingu/VikingZero
    def load_logger(self):

        logger_type = self._exp_config["logger_type"]

        if isinstance(logger_type, str):
            if logger_type == "neptune":
                self.init_neptune()
                self._run = neptune.get_experiment()

            elif logger_type == "tensorboard":
                self._run = self.init_tensorboard()

            else:
                raise Exception(f"Unknown logger type {logger_type} given")

        else:
            print(f"Running without logger {logger_type} passed")
            return
Code example #15
def configure_neptune(specification):
    """Configures the Neptune experiment, then returns the Neptune logger."""
    if 'NEPTUNE_API_TOKEN' not in os.environ:
        raise KeyError('NEPTUNE_API_TOKEN environment variable is not set!')

    git_info = specification.get('git_info', None)
    if git_info:
        git_info.commit_date = datetime.datetime.now()

    neptune.init(project_qualified_name=specification['project'])
    # Set pwd property with path to experiment.
    properties = {'pwd': os.getcwd()}
    neptune.create_experiment(name=specification['name'],
                              tags=specification['tags'],
                              params=specification['parameters'],
                              properties=properties,
                              git_info=git_info)
    atexit.register(neptune.stop)

    return NeptuneLogger(neptune.get_experiment())
Code example #16
    def modify_properties(self):
        neptune.set_property("prop", "some text")
        neptune.set_property("prop_number", 42)
        neptune.set_property("nested/prop", 42)
        neptune.set_property("prop_to_del", 42)
        neptune.set_property("prop_list", [1, 2, 3])
        with open(self.text_file_path, mode="r") as f:
            neptune.set_property("prop_IO", f)
        neptune.set_property("prop_datetime", datetime.now())
        neptune.remove_property("prop_to_del")

        exp = neptune.get_experiment()
        properties = exp.get_properties()
        assert properties["prop"] == "some text"
        assert properties["prop_number"] == "42"
        assert properties["nested/prop"] == "42"
        assert properties["prop_list"] == "[1, 2, 3]"
        assert "prop_to_del" not in properties
        assert (properties["prop_IO"] ==
                "<_io.TextIOWrapper name='alpha_integration_dev/data/text.txt'"
                " mode='r' encoding='UTF-8'>")
        print(f"Properties: {properties}")
Code example #17
def main(argv):
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')
    if FLAGS.run_mode == 'actor':
        if not FLAGS.is_local:
            get_configuration(config_file=FLAGS.mrunner_config,
                              inject_parameters_to_FLAGS=True)
        actor.actor_loop(env.create_environment)
    elif FLAGS.run_mode == 'learner':
        if not FLAGS.is_local:
            get_configuration(config_file=FLAGS.mrunner_config,
                              print_diagnostics=True,
                              with_neptune=True,
                              inject_parameters_to_FLAGS=True)
            experiment = neptune.get_experiment()
            experiment.append_tag(tag=FLAGS.nonce)
            neptune_tensorboard.integrate_with_tensorflow()
        learner.learner_loop(env.create_environment, create_agent,
                             create_optimizer)
    elif FLAGS.run_mode == 'visualize':
        visualize.visualize(env.create_environment, create_agent,
                            create_optimizer)
    else:
        raise ValueError('Unsupported run mode {}'.format(FLAGS.run_mode))
Code example #18
    def log_series(self):
        # floats
        neptune.log_metric("m1", 1)
        neptune.log_metric("m1", 2)
        neptune.log_metric("m1", 3)
        neptune.log_metric("m1", 2)
        neptune.log_metric("nested/m1", 1)

        # texts
        neptune.log_text("m2", "a")
        neptune.log_text("m2", "b")
        neptune.log_text("m2", "c")

        # images
        # `image_name` and `description` will be lost
        neptune.log_image("g_img",
                          self.img_path,
                          image_name="name",
                          description="desc")
        neptune.log_image("g_img", self.img_path)

        # see what we've logged
        logs = neptune.get_experiment().get_logs()
        print(f"Logs: {logs}")
Code example #19
# Step 1 - Initialize Neptune

import neptune

neptune.init(project_qualified_name='shared/onboarding', # change this to your `workspace_name/project_name`
             api_token='ANONYMOUS', # change this to your api token
            )

# Step 2 - Create an experiment

neptune.create_experiment()

# Step 3 - Log metrics during training

import numpy as np
from time import sleep

neptune.log_metric('single_metric', 0.62)

for i in range(100):
    sleep(0.2) # to see logging live
    neptune.log_metric('random_training_metric', i * np.random.random())
    neptune.log_metric('other_random_training_metric', 0.5 * i * np.random.random())

# tests
current_exp = neptune.get_experiment()

correct_logs = ['single_metric', 'random_training_metric', 'other_random_training_metric']

if set(current_exp.get_logs().keys()) != set(correct_logs):
    raise ValueError('Logged metrics do not match the expected set.')
Code example #20
neptune.init('shared/sklearn-integration', api_token='ANONYMOUS')

## Step 3: Create an Experiment

neptune.create_experiment(params=parameters,
                          name='regression-example',
                          tags=['RandomForestRegressor', 'regression'])

## Step 4: Log regressor summary

from neptunecontrib.monitoring.sklearn import log_regressor_summary

log_regressor_summary(rfr, X_train, X_test, y_train, y_test)

# tests
exp = neptune.get_experiment()

# check logs
correct_logs_set = {
    'evs_test_sklearn', 'me_test_sklearn', 'mae_test_sklearn',
    'r2_test_sklearn', 'charts_sklearn'
}
from_exp_logs = set(exp.get_logs().keys())
assert correct_logs_set == from_exp_logs, '{} - incorrect logs'.format(exp)

# check sklearn parameters
assert set(exp.get_properties().keys()) == set(
    rfr.get_params().keys()), '{} parameters do not match'.format(exp)

# check neptune parameters
assert set(exp.get_parameters().keys()) == set(
    parameters.keys()), '{} parameters do not match'.format(exp)
Code example #21
File: xgboost.py Project: neptune-ai/neptune-contrib
def neptune_callback(log_model=True,
                     log_importance=True,
                     max_num_features=None,
                     log_tree=None,
                     experiment=None,
                     **kwargs):
    """XGBoost callback for Neptune experiments.

    This is an XGBoost callback that automatically logs training and evaluation metrics, the feature importance
    chart, visualized trees, and the trained Booster to Neptune.

    Check Neptune documentation for the `full example <https://docs.neptune.ai/integrations/xgboost.html>`_.

    Make sure you have created an experiment before you start XGBoost training, using ``neptune.create_experiment()``
    (`check our docs <https://docs.neptune.ai/api-reference/neptune/projects/index.html
    #neptune.projects.Project.create_experiment>`_).

    You need to install Graphviz and the Graphviz Python interface for the ``log_tree`` feature to work.
    Check `Graphviz <https://graphviz.org/download/>`_ and
    `Graphviz Python interface <https://graphviz.readthedocs.io/en/stable/manual.html#installation>`_
    for installation info.

    Integration works with ``xgboost>=1.2.0``.

    Tip:
        Use this `Google Colab <https://colab.research.google.com//github/neptune-ai/neptune-examples/blob/master/
        integrations/xgboost/docs/Neptune-XGBoost.ipynb>`_
        and run it as the "`neptuner`" user - zero setup, it just works.

    Note:
        If you use early stopping, make sure to log the model, feature importance and trees on your own.
        Neptune logs these artifacts only after the last iteration, which you may not reach because of early stopping.

    Args:
        log_model (:obj:`bool`, optional, default is ``True``):
            | Log the booster to Neptune after the last boosting iteration.
            | If you run ``xgb.cv``, the booster is logged for all folds.
        log_importance (:obj:`bool`, optional, default is ``True``):
            | Log the feature importance to Neptune as an image after the last boosting iteration.
            | Specify the number of features using the ``max_num_features`` parameter below.
            | If you run ``xgb.cv``, the feature importance is logged for each fold's booster.
        max_num_features (:obj:`int`, optional, default is ``None``):
            | Plot top ``max_num_features`` features on the importance plot.
            | If ``None``, plot all features.
        log_tree (:obj:`list` of :obj:`int`, optional, default is ``None``):
            | Log the specified trees to Neptune as images after the last boosting iteration.
            | If you run ``xgb.cv``, the specified trees are logged for each fold's booster.
            | Default is ``None`` - do not log any trees.
        experiment (:obj:`neptune.experiments.Experiment`, optional, default is ``None``):
            | For advanced users only. Pass a Neptune ``Experiment``
              object if you want to control which experiment the data is logged to.
            | If ``None``, log to the currently active (most recent) experiment.
        kwargs:
            Parametrize XGBoost functions used in this callback:
            `xgboost.plot_importance <https://xgboost.readthedocs.io/en/latest/python/python_api.html
            ?highlight=plot_tree#xgboost.plot_importance>`_
            and `xgboost.to_graphviz <https://xgboost.readthedocs.io/en/latest/python/python_api.html
            ?highlight=plot_tree#xgboost.to_graphviz>`_.

    Returns:
        :obj:`callback`, function that you can pass directly to the XGBoost callbacks list, for example to the
        ``xgboost.cv()``
        (`see docs <https://xgboost.readthedocs.io/en/latest/python/python_api.html?highlight=plot_tree#xgboost.cv>`_)
        or ``XGBClassifier.fit()``
        (`check docs <https://xgboost.readthedocs.io/en/latest/python/python_api.html?highlight=plot_tree
        #xgboost.XGBClassifier.fit>`_).

    Examples:
        ``xgb.train`` examples

        .. code:: python3

            # basic usage
            xgb.train(param, dtrain, num_round, watchlist,
                      callbacks=[neptune_callback()])

            # do not log model
            xgb.train(param, dtrain, num_round, watchlist,
                      callbacks=[neptune_callback(log_model=False)])

            # log top 5 features' importance chart
            xgb.train(param, dtrain, num_round, watchlist,
                      callbacks=[neptune_callback(max_num_features=5)])

        ``xgb.cv`` examples

        .. code:: python3

            # log 5 trees per each folds' booster
            xgb.cv(param, dtrain, num_boost_round=num_round, nfold=7,
                   callbacks=neptune_callback(log_tree=[0,1,2,3,4]))

            # log only metrics
            xgb.cv(param, dtrain, num_boost_round=num_round, nfold=7,
                   callbacks=[neptune_callback(log_model=False,
                                               log_importance=False,
                                               max_num_features=None,
                                               log_tree=None)])

            # log top 3 features per each folds' booster and first tree
            xgb.cv(param, dtrain, num_boost_round=num_round, nfold=7,
                   callbacks=[neptune_callback(log_model=False,
                                               max_num_features=3,
                                               log_tree=[0,])])

        ``sklearn`` API examples

        .. code:: python3

            # basic usage with early stopping
            xgb.XGBRegressor().fit(X_train, y_train,
                                   early_stopping_rounds=10,
                                   eval_metric=['mae', 'rmse', 'rmsle'],
                                   eval_set=[(X_test, y_test)],
                                   callbacks=[neptune_callback()])

            # do not log model
            clf = xgb.XGBRegressor()
            clf.fit(X_train, y_train,
                    eval_metric=['mae', 'rmse', 'rmsle'],
                    eval_set=[(X_test, y_test)],
                    callbacks=[neptune_callback(log_model=False)])
            y_pred = clf.predict(X_test)

            # log 8 trees
            reg = xgb.XGBRegressor(**params)
            reg.fit(X_train, y_train,
                    eval_metric=['mae', 'rmse', 'rmsle'],
                    eval_set=[(X_test, y_test)],
                    callbacks=[neptune_callback(log_tree=[0,1,2,3,4,5,6,7])])
    """
    if experiment:
        _exp = experiment
    else:
        try:
            neptune.get_experiment()
            _exp = neptune
        except neptune.exceptions.NeptuneNoExperimentContextException:
            raise

    expect_not_a_run(_exp)

    assert isinstance(log_model, bool),\
        'log_model must be bool, got {} instead. Check log_model parameter.'.format(type(log_model))
    assert isinstance(log_importance, bool),\
        'log_importance must be bool, got {} instead. Check log_importance parameter.'.format(type(log_importance))
    if max_num_features is not None:
        assert isinstance(max_num_features, int),\
            'max_num_features must be int, got {} instead. ' \
            'Check max_num_features parameter.'.format(type(max_num_features))
    if log_tree is not None:
        if isinstance(log_tree, tuple):
            log_tree = list(log_tree)
        assert isinstance(log_tree, list),\
            'log_tree must be list of int, got {} instead. Check log_tree parameter.'.format(type(log_tree))

    def callback(env):
        # Log metrics after iteration
        for item in env.evaluation_result_list:
            if len(item) == 2:  # train case
                _exp.log_metric(item[0], item[1])
            if len(item) == 3:  # cv case
                _exp.log_metric('{}-mean'.format(item[0]), item[1])
                _exp.log_metric('{}-std'.format(item[0]), item[2])

        # Log booster, end of training
        if env.iteration + 1 == env.end_iteration and log_model:
            if env.cvfolds:  # cv case
                for i, cvpack in enumerate(env.cvfolds):
                    _log_model(cvpack.bst, 'cv-fold-{}-bst.model'.format(i),
                               _exp)
            else:  # train case
                _log_model(env.model, 'bst.model', _exp)

        # Log feature importance, end of training
        if env.iteration + 1 == env.end_iteration and log_importance:
            if env.cvfolds:  # cv case
                for i, cvpack in enumerate(env.cvfolds):
                    _log_importance(cvpack.bst,
                                    max_num_features,
                                    _exp,
                                    title='cv-fold-{}'.format(i),
                                    **kwargs)
            else:  # train case
                _log_importance(env.model, max_num_features, _exp, **kwargs)

        # Log trees, end of training
        if env.iteration + 1 == env.end_iteration and log_tree:
            if env.cvfolds:
                for j, cvpack in enumerate(env.cvfolds):
                    _log_trees(cvpack.bst, log_tree,
                               'trees-cv-fold-{}'.format(j), _exp, **kwargs)
            else:
                _log_trees(env.model, log_tree, 'trees', _exp, **kwargs)

    return callback
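
For context, here is a minimal end-to-end sketch of how this callback is typically wired up, following the docstring's guidance to create an experiment before training. The project name, parameters, and data below are placeholders, not from the source.

# Hypothetical usage sketch - placeholder project name, params, and toy data.
# Assumes NEPTUNE_API_TOKEN is set in the environment.
import neptune
import numpy as np
import xgboost as xgb
from neptunecontrib.monitoring.xgboost import neptune_callback

neptune.init('my_workspace/my_project')  # placeholder project
neptune.create_experiment(name='xgb-example', params={'max_depth': 3})

# Toy regression data, just to make the sketch runnable.
X, y = np.random.rand(100, 5), np.random.rand(100)
dtrain = xgb.DMatrix(X[:80], label=y[:80])
dtest = xgb.DMatrix(X[80:], label=y[80:])

# Metrics are logged after each iteration; the booster and the feature
# importance chart are logged after the last iteration (the defaults).
xgb.train({'max_depth': 3}, dtrain, num_boost_round=10,
          evals=[(dtest, 'test')],
          callbacks=[neptune_callback()])
neptune.stop()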