Example #1
    def log_hyperparams(self, params: Union[Dict[str, Any],
                                             Namespace]) -> None:
        try:
            params = self._convert_params(params)
            params = self._flatten_dict(params)
            for key, val in params.items():
                self.experiment.set_property(f'param__{key}', val)
        except Exception:
            # On failure, re-initialize the Neptune session and retry
            # setting the hyperparameters as experiment properties.
            neptune.init(api_token=self.api_key,
                         project_qualified_name=self.project_name)
            params = self._convert_params(params)
            params = self._flatten_dict(params)
            for key, val in params.items():
                self.experiment.set_property(f'param__{key}', val)
Example #2
    def __init__(
        self,
        metric_names: List[str] = None,
        log_on_batch_end: bool = True,
        log_on_epoch_end: bool = True,
        offline_mode: bool = False,
        **logging_params,
    ):
        """
        Args:
            metric_names (List[str]): list of metric names to log;
                if None, logs everything
            log_on_batch_end (bool): logs per-batch metrics if set True
            log_on_epoch_end (bool): logs per-epoch metrics if set True
            offline_mode (bool): whether logging to Neptune server should
                 be turned off. It is useful for debugging.
        """
        super().__init__(
            order=CallbackOrder.Logging,
            node=CallbackNode.Master,
            scope=CallbackScope.Experiment,
        )
        self.metrics_to_log = metric_names
        self.log_on_batch_end = log_on_batch_end
        self.log_on_epoch_end = log_on_epoch_end

        if not (self.log_on_batch_end or self.log_on_epoch_end):
            raise ValueError("You have to log something!")

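        # Add "_batch"/"_epoch" suffixes only when both per-batch and per-epoch
        # logging are enabled, so the two metric streams get distinct names.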
        if (self.log_on_batch_end and not self.log_on_epoch_end) \
                or (not self.log_on_batch_end and self.log_on_epoch_end):
            self.batch_log_suffix = ""
            self.epoch_log_suffix = ""
        else:
            self.batch_log_suffix = "_batch"
            self.epoch_log_suffix = "_epoch"

        if offline_mode:
            neptune.init(project_qualified_name="dry-run/project",
                         backend=neptune.OfflineBackend())
        else:
            neptune.init(
                api_token=logging_params["api_token"],
                project_qualified_name=logging_params["project_name"],
            )

        logging_params.pop("api_token", None)
        logging_params.pop("project_name", None)

        self.experiment = neptune.create_experiment(**logging_params)
Example #3
def setup_neptune(cfg) -> None:
    neptune.init(project_qualified_name="mayu-ot/VGP")
    neptune.create_experiment(
        name=f"train {cfg.MODEL.GATE}",
        properties={
            "user": getpass.getuser(),
            "host": socket.gethostname(),
            "wd": os.getcwd(),
            "cmd": " ".join(sys.argv),
        },
        tags=["train"],
    )
    filename = os.path.join(cfg.LOG.OUTDIR, "config.yaml")
    neptune.log_artifact(filename, "config.yaml")
Example #4
def build_experiment(conf, logpath, resultname, csvfile):

    with open(csvfile) as stats_f:
        params = stats_f.readline().strip().split(",")
        stats = stats_f.readlines()

    for s in stats:
        elems = s.strip().split(",")
        counter, scalar_fit, priority_fit = [elems[0], elems[4], elems[5]]
        print(counter, scalar_fit, priority_fit)

    exp_config = read_toml(conf)
    exp_number = 1

    champ = read_berb_log(logpath)
    exp_name = champ['chromosome']['name']
    exp_desc = str(champ['tag'])

    exp_params = {
        "some_param": 0.1,
        "other_param": 128,
        "yet_another_param": 31337
    }

    exp_log_artifact = ["data/champion_statistics.csv", "mean_statistics.csv"]

    #Neptune init
    neptune.init('special-circumstances/sandbox', api_token=None)

    neptune.create_experiment(name=exp_name, params=exp_params)

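    # Log one value per CSV row, using the column names from the header line as metric names.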
    for s in stats:
        elems = s.strip().split(",")
        counter, scalar_fit, priority_fit = [elems[0], elems[4], elems[5]]
        neptune.log_metric(params[0], int(counter))
        neptune.log_metric(params[4], float(scalar_fit))
        neptune.log_metric(params[5], float(priority_fit))

    neptune.log_image(
        'pleasures_1',
        "/home/armadilo/projects/neptune/data/clamp-liked-zeros-count-pleasures.png"
    )
    neptune.log_image(
        'pleasures_2',
        "/home/armadilo/projects/neptune/data/lamas-koala-zero-count-pleasures.png"
    )
    neptune.send_artifact(
        '/home/armadilo/projects/neptune/data/champion_statistics.csv')
    neptune.send_artifact(
        '/home/armadilo/projects/neptune/data/mean_statistics.csv')
Example #5
def run_train(opt):
    print(opt)

    if opt.sendNeptune:
        neptune.init('andrzejzdobywca/pretrainingpp')
        exp = neptune.create_experiment(name=opt.sessionName,
                                        params=vars(opt),
                                        tags=[opt.main_tag, opt.tag, opt.tag2])
    dataset = Dataset(opt)
    net = Network(opt)
    utils.train_model(opt, net, dataset)

    if opt.sendNeptune:
        neptune.stop()
Example #6
def init_experiment(exp_name: str, exp_description: str, tags: list,
                    params: dict = None, upload_files: list = None,
                    logger=None):
    '''Initializes and creates a Neptune experiment.'''

    neptune.init('richt3211/thesis', api_token=NEPTUNE_TOKEN)

    exp:Experiment = neptune.create_experiment(
        name=exp_name,
        description=exp_description,
        params=params,
        tags=tags,
        logger=logger,
        upload_source_files=upload_files
    )
    return exp
Example #7
 def train(self):
     for i_iter in range(self.config.num_steps):
         losses = self.iter(i_iter)
         if i_iter == 0 and self.config.neptune:
             neptune.init(project_qualified_name='solacex/segmentation-DA')
             neptune.create_experiment(params=self.config,
                                       name=self.config['note'])
         if i_iter % self.config.print_freq == 0:
             self.print_loss(i_iter)
         if i_iter % self.config.save_freq == 0 and i_iter != 0:
             self.save_model(i_iter)
         if self.config.val and i_iter % self.config.val_freq == 0 and i_iter != 0:
             self.validate()
     neptune.stop()
Example #8
def obj2(args):
    nf, act_fn, scale_by_channel, scale_by_sample, scale_type = args
    scale_range = (-1, 1)
    bs = 32
    data = (ItemLists(Path("data"),
                      TSList(x_train), TSList(x_val)).label_from_lists(
                          y_train,
                          y_val).databunch(bs=bs, val_bs=bs * 2).scale(
                              scale_type=scale_type,
                              scale_by_channel=scale_by_channel,
                              scale_by_sample=scale_by_sample,
                              scale_range=scale_range))
    model = ResNet(data.features, data.c, act_fn=act_fn, nf=nf)
    neptune.init(project_qualified_name=
                 'andrijdavid/ClinicalBrainComputerInterfacesChallenge2020')
    neptune.create_experiment(name='ResNet Hyperparameter Search',
                              description="Optimizing accuracy",
                              params={
                                  'nf': nf,
                                  'act_fn': act_fn,
                                  'scale_by_channel': scale_by_channel,
                                  'scale_by_sample': scale_by_sample,
                                  'scale_type': scale_type,
                                  'bs': bs,
                                  'model': 'resnet',
                                  'epoch': 100
                              },
                              tags=['hyperopt'])
    name = names.get_first_name()
    #     kappa = KappaScore()
    loss_func = LabelSmoothingCrossEntropy()
    learn = Learner(data,
                    model,
                    metrics=[accuracy],
                    loss_func=loss_func,
                    opt_func=Ranger)
    with progress_disabled_ctx(learn) as learn:
        learn.fit_one_cycle(100, callbacks=[NeptuneMonitor()])
    learn.save(f"{name}")
    val = learn.validate()
    learn.destroy()
    data = None
    neptune.log_artifact(f'data/models/{name}.pth')
    neptune.stop()
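    # hyperopt minimizes the returned 'loss', so 1 - validation accuracy is reported here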
    return {
        'loss': 1 - (val[1].item()),
        'status': STATUS_OK,
        'kappa': val[-1].item()
    }
Example #9
    def __init__(self, log_backend, project_name):
        '''

        Parameters
        ----------
        log_backend : STR
            One of 'comet', 'neptune', 'all'
        project_name : STR
            one of the available projects ('yeast', 'jersey', 'wheat', 'debug', etc.)
            
        Returns
        -------
        None.

        '''
        self.proj_name = project_name
        self.backend = log_backend
        # Bool indicating whether Neptune logging is enabled
        self.neptune = log_backend == 'neptune' or log_backend == 'all'
        # Bool indicating whether Comet logging is enabled
        self.comet = log_backend == 'comet' or log_backend == 'all'
        if self.neptune:
            if fing:
                neptune.init(
                    "dna-i/" + project_name,
                    api_token=
                    'eyJhcGlfYWRkcmVzcyI6Imh0dHBzOi8vdWkubmVwdHVuZS5haSIsImFwaV91cmwiOiJodHRwczovL3VpLm5lcHR1bmUuYWkiLCJhcGlfa2V5IjoiMWYzMzhjMjItYjczNC00NzZhLWFlZTYtOTI2NzE5MzUwZmNkIn0=',
                    proxies={
                        'http': "http://httpproxy.fing.edu.uy:3128/",
                        'https': "http://httpproxy.fing.edu.uy:3128/",
                    })
            else:
                neptune.init(
                    "dna-i/" + project_name,
                    api_token=
                    'eyJhcGlfYWRkcmVzcyI6Imh0dHBzOi8vdWkubmVwdHVuZS5haSIsImFwaV91cmwiOiJodHRwczovL3VpLm5lcHR1bmUuYWkiLCJhcGlfa2V5IjoiMWYzMzhjMjItYjczNC00NzZhLWFlZTYtOTI2NzE5MzUwZmNkIn0='
                )

            print("logging experiments on neptune project " + project_name)
            neptune.create_experiment()
        if self.comet:
            self.comet_experiment = Experiment(
                api_key="V0OXnWOi4KVNS4OkwLjdnxSgK",
                project_name=project_name,
                workspace="dna-i")
            print("logging experiments on comet project " + project_name)
        if not (self.neptune or self.comet):
            raise ValueError('Logging Backend NOT Available')
Example #10
def get_registered_models():
    """ Get metadata about production models from Neptune """
    # connect to neptune
    try:
        project = neptune.init()
    except (MissingProjectQualifiedName):
        return 'NEPTUNE_PROJECT environment variable is missing'
    except (MissingApiToken):
        return 'NEPTUNE_API_TOKEN environment variable is missing'
    except (Exception) as e:
        return e.args[0]

    # get all the experiments that are tagged as "prod"
    experiments = project.get_experiments(tag='prod')

    # pull the id, name, and test dataset scores metrics
    experiment_metadata = []
    for experiment in experiments:
        logs = experiment.get_logs()
        metadata = {
            'id': experiment.id,
            'name': experiment.name,
        }
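        # each log channel exposes its last point; value.y is the most recent value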
        for key, value in logs.items():
            if key.startswith('test'):
                metadata[key] = value.y
        experiment_metadata.append(metadata)

    return jsonify(experiment_metadata)
Example #11
def _create_or_get_experiment2(self):
    """
  Super bad !!! Dont do this
  """
    proxies = {
        'http': 'http://proxy.ethz.ch:3128',
        'https': 'http://proxy.ethz.ch:3128',
    }
    if self.offline_mode:
        project = neptune.Session(
            backend=neptune.OfflineBackend()).get_project('dry-run/project')
    else:
        #project_qualified_name='jonasfrey96/ASL', api_token=os.environ["NEPTUNE_API_TOKEN"], proxies=proxies
        session = neptune.init(project_qualified_name='jonasfrey96/ASL',
                               api_token=self.api_key,
                               proxies=proxies)  # add your credential
        print(type(session))
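        # the neptune.init() call above is effectively discarded; the explicit
        # Session created below is what is used to look up the project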
        session = neptune.Session(api_token=self.api_key, proxies=proxies)
        project = session.get_project(self.project_name)

    if self.experiment_id is None:
        e = project.create_experiment(name=self.experiment_name,
                                      **self._kwargs)
        self.experiment_id = e.id
    else:
        e = project.get_experiments(id=self.experiment_id)[0]
        self.experiment_name = e.get_system_properties()['name']
        self.params = e.get_parameters()
        self.properties = e.get_properties()
        self.tags = e.get_tags()
    return e
Example #12
    def __init__(self):
        super().__init__()
        neptune.init()
        neptune.create_experiment(
            name="const project name",
            description="exp description",
            params=self.params,
            properties=self.properties,
            tags=["initial tag 1", "initial tag 2"],
            abort_callback=None,
            run_monitoring_thread=False,
            hostname="hostname value",
            # notebook_id='test1',  # TODO: Error 500 when wrong value
            upload_source_files="alpha_integration_dev/*.py",
        )

        exp = neptune.get_experiment()

        self._api_version = get_api_version(exp)

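        # property values come back as strings (e.g. "42", "[1, 2, 3]"),
        # which is what the assertions below check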
        properties = exp.get_properties()
        assert properties["init_text_property"] == "some text"
        assert properties["init_number property"] == "42"
        assert properties["init_list"] == "[1, 2, 3]"

        assert set(exp.get_tags()) == {"initial tag 1", "initial tag 2"}

        # download sources
        if self._api_version == 1:
            # old domain

            with self.with_check_if_file_appears("old_client.py.zip"):
                exp.download_sources("alpha_integration_dev/old_client.py")
            with self.with_check_if_file_appears("alpha_integration_dev.zip"):
                exp.download_sources("alpha_integration_dev")

            with self.with_assert_raises(FileNotFound):
                exp.download_sources("non_existing")
        else:
            # new api

            with self.with_check_if_file_appears("files.zip"):
                exp.download_sources()
            with self.with_assert_raises(DownloadSourcesException):
                exp.download_sources("whatever")
            with self.with_check_if_file_appears("file_set_sources/files.zip"):
                exp.download_sources(destination_dir="file_set_sources")
Example #13
def train_model_with_base(basenet, k_fold):
    import neptune
    neptune.init(
        'buco24/cancer-cnn',
        api_token='eyJhcGlfYWRkcmVzcyI6Imh0dHBzOi8vdWkubmVwdHVuZS5haSIsImFwa'
        'V91cmwiOiJodHRwczovL3VpLm5lcHR1bmUuYWkiLCJhcGlfa2V5IjoiNzY'
        '5OTFmNDQtNjRkMS00NDgzLWJjYjUtYTc5Zjk1NzA0MDNhIn0=')
    PARAMS = {
        'batch_size': config.BATCH_SIZE,
        'epochs': config.EPOCHS,
        'augmentation': config.AUGMENTATION
    }
    neptune.create_experiment(name=f"{basenet}-{k_fold}-fold", params=PARAMS)
    ################ INITIALIZATION ###############################
    trainGen, valGen, totalTrain, totalVal = load_sets(config.TRAIN_SET_PATH,
                                                       config.VALID_SET_PATH)
    if basenet == 'vgg-like':
        model_base = DM.create_model_vgg_like()
    elif basenet == 'resnet-like':
        model_base = DM.resnet_like(20)
    else:
        model_base, _, _ = DM.create_with_pretrained_model(basenet)
    # model = multi_gpu_model(model_base, gpus=2)
    model = model_base

    callbacks_train = callbacks.get_callbacks(config.SAVE_MODEL_PATH)
    # model = model_base
    print("[INFO] compiling model...")

    opt = Adam(lr=1e-4)
    model.compile(loss="categorical_crossentropy",
                  optimizer=opt,
                  metrics=["acc"])
    print("[INFO] training...")

    history = model.fit_generator(
        trainGen,
        steps_per_epoch=totalTrain // config.BATCH_SIZE,
        validation_data=valGen,
        validation_steps=totalVal // config.BATCH_SIZE,
        epochs=config.EPOCHS,
        callbacks=callbacks_train,
        use_multiprocessing=True,
        workers=8)
    K.clear_session()
    del model
    gc.collect()
Example #14
def construct_neptune_experiment(
        project_name: str, exp_name: str,
        tags: List[str]) -> neptune.experiments.Experiment:
    neptune.init(project_name)
    neptune_exp = neptune.create_experiment(
        name=exp_name,
        properties={
            "user": getpass.getuser(),
            "host": socket.gethostname(),
            "wd": os.getcwd(),
            "cmd": " ".join(sys.argv),
        },
        tags=tags,
        upload_stdout=True,
    )

    return neptune_exp
Example #15
def main(argv):
    if len(argv) > 1:
        raise app.UsageError('Too many command-line arguments.')
    if FLAGS.run_mode == 'actor':
        actor.actor_loop(env.create_environment)
    elif FLAGS.run_mode == 'learner':
        neptune.init('do-not-be-hasty/matrace')
        neptune.create_experiment(tags=[FLAGS.nonce])
        neptune_tensorboard.integrate_with_tensorflow()

        learner.learner_loop(env.create_environment, create_agent,
                             create_optimizer)
    elif FLAGS.run_mode == 'visualize':
        visualize.visualize(env.create_environment, create_agent,
                            create_optimizer)
    else:
        raise ValueError('Unsupported run mode {}'.format(FLAGS.run_mode))
Example #16
    def __init__(self, learning_rate):
        neptune.init(
            api_token="eyJhcGlfYWRkcmVzcyI6Imh0dHBzOi8vdWkubmVwdHVuZS5tbCIsImFwaV9rZXkiOiJjYjdhMGI5Ny02YTNmLTRlN2MtOTkyYi1jNDM0YjRmMjM5MDQifQ==",
            project_qualified_name="martinjms/examples",
        )
        neptune.create_experiment(
            name="caisim-example", params=dict(learning_rate=learning_rate)
        )
        neptune.append_tag("minimal-example")

        self.learning_rate = learning_rate

        # Then pytorch stuff..
        self.lin = torch.nn.Linear(50, 10)
        self.opt = torch.optim.SGD(self.lin.parameters(), learning_rate)
        self.step = 0
        self.lin.to(device)
Example #17
def objfcn(args):
    d1, d2, act_fn, scale_by_channel, scale_by_sample, scale_type, randaugment = args
    scale_range = (-1, 1)
    bs = 128
    data = (ItemLists(Path("data"),
                      TSList(x_train), TSList(x_val)).label_from_lists(
                          y_train,
                          y_val).databunch(bs=bs, val_bs=bs * 2).scale(
                              scale_type=scale_type,
                              scale_by_channel=scale_by_channel,
                              scale_by_sample=scale_by_sample,
                              scale_range=scale_range))
    model = FCN(data.features, data.c, act_fn=act_fn, dilations=[d1, d2])
    neptune.init(project_qualified_name=
                 'andrijdavid/ClinicalBrainComputerInterfacesChallenge2020')
    neptune.create_experiment(
        name='FCN Hyperparameter Search',
        description="Optimizing accuracy by searching proper dilation",
        params={
            'pool': 'AdaptiveAvgPool1d',
            'dilation1': d1,
            'dilation2': d2,
            'act_fn': act_fn,
            'scale_by_channel': scale_by_channel,
            'scale_by_sample': scale_by_sample,
            'scale_type': scale_type,
            'randaugment': randaugment,
            'bs': bs,
            'model': 'fcn',
            'epoch': 100
        },
        tags=['hyperopt'])
    kappa = KappaScore()
    learn = Learner(data, model, metrics=[accuracy, kappa])
    if randaugment:
        learn = learn.randaugment()
    learn.fit_one_cycle(100, callbacks=[NeptuneMonitor()])
    val = learn.validate()
    learn.destroy()
    data = None
    neptune.stop()
    return {
        'loss': 1 - (val[1].item()),
        'status': STATUS_OK,
        'kappa': val[-1].item()
    }
Example #18
def main(config: DictConfig) -> None:

    # set data
    os.environ['GOOGLE_CLOUD_PROJECT'] = config['project_id']
    params = dict(config['model']['parameters'])

    # get base directory
    base_dir = os.path.dirname(hydra.utils.get_original_cwd())

    # load training API
    module = importlib.import_module(config['model']['file'])

    # load data
    X_train_all, y_train_all, X_test = load_data(config, base_dir)

    # start logging
    neptune.init(api_token=API_TOKEN,
                 project_qualified_name='tokuma09/Example')
    neptune.create_experiment(params=params,
                              name='sklearn-quick',
                              tags=[config['model']['name']])

    # train model using CV
    print('***** Train model *****')
    y_test_preds, oof_preds, models, scores = train(X_train_all, y_train_all,
                                                    X_test, module, config)

    #  CV score
    print('***** log CV score *****')
    score = np.mean(scores)
    neptune.log_metric('CV score', score)

    for i in range(NUM_FOLDS):
        neptune.log_metric('fold score', scores[i])

    # save model
    save_models(models, config, base_dir)

    # save oof result
    save_oof(oof_preds, config, base_dir)

    # prepare submission
    prepare_submission(y_test_preds, config, base_dir)

    neptune.stop()
Example #19
def init_neptune(args,
                 api_key,
                 project_name,
                 experiment_name,
                 experiment_tags=[]):
    import neptune
    from pytorch_lightning.loggers.neptune import NeptuneLogger

    params = vars(args)

    neptune.init(project_qualified_name=project_name, api_token=api_key)

    neptune_logger = NeptuneLogger(api_key=api_key,
                                   project_name=project_name,
                                   experiment_name=experiment_name,
                                   tags=experiment_tags,
                                   params=params)
    return neptune_logger
Example #20
    def create_experiment(self):
        # dvclive.init()

        if self.dry_run:
            neptune.init(project_qualified_name="dry-run/debug",
                         backend=neptune.OfflineBackend())
        else:
            neptune.init(
                api_token=self.api_token,
                project_qualified_name=self.project_name,
            )

        self.tags = list({get_current_branch()} | set(self.tags))

        self.experiment = neptune.create_experiment(
            tags=self.tags,
            **self.kwargs,
        )
Example #21
def covid_with_neptune(params_):
    neptune.init('eyalasulin/covid',
                 api_token='token_hidden_before_submitting')
    # ## configuration

    neptune.create_experiment(name='contrastive_covid', params=params_)

    loss, accuracy, history, auc = united_training.main(params_)

    if history is not None:
        neptune.log_metric('loss', loss)
        neptune.log_metric('accuracy', accuracy)
        for key in history.history.keys():
            for item in history.history[key]:
                neptune.log_metric(f'h_{key}', item)

    neptune.stop()
    return accuracy
Example #22
def main():
    neptune.init(api_token=os.getenv('NEPTUNE_API_TOKEN'), project_qualified_name=os.getenv('NEPTUNE_PROJECT'))

    bureau_raw_path = os.path.join(RAW_DATA_DIRPATH,'bureau.csv.zip')
    bureau_raw = pd.read_csv(bureau_raw_path, nrows=NROWS)

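    # create_experiment() is used as a context manager here, so the experiment
    # is stopped automatically when the block exits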
    with neptune.create_experiment(name='feature_extraction',
                                   tags=['interim',
                                         'bureau',
                                         'feature_extraction'],
                                   upload_source_files=get_filepaths()):

        bureau_features, numeric_cols = extract(bureau_raw)
        bureau_features.to_csv(INTERIM_FEATURES_FILEPATH, index=None)

        neptune.set_property('numeric_features', str(numeric_cols))
        neptune.set_property('features_version', md5_hash(INTERIM_FEATURES_FILEPATH))
        neptune.set_property('features_path', INTERIM_FEATURES_FILEPATH)
Example #23
def run(args):
    """"""
    # setting up neptune experiment
    neptune_project = neptune.init(api_token=os.environ["NEPTUNE_API_TOKEN"],
                                   project_qualified_name='{}/{}'.format(
                                       args["neptune_username"],
                                       args["neptune_project"]))

    # updating config with the provided flags
    config.update(args)
    logging.info("Used config: {}".format(config))

    model_config = config.model_config
    model_class = config.model_class
    tokenizer_class = config.tokenizer_class

    model_config = model_config.from_pretrained(
        config["model_name"],
        num_labels=config["num_labels"],
        finetuning_task=config["task_name"])
    model_config.update(config)

    tokenizer = tokenizer_class.from_pretrained(config["tokenizer_name"])
    model = model_class(model_config).to(device)

    # resume training
    if config["model_path"]:
        model.load_state_dict(
            torch.load(config["model_path"], map_location=device))

    if config["task_name"] == "multi-label":
        dataset = datasets.load_dataset("data/toxic_dataset.py")
        dataset = features_loader_toxicity(dataset,
                                           tokenizer,
                                           max_length=config["max_length"])
    elif config["task_name"] == "multi-class":
        dataset = datasets.load_dataset("data/conference_dataset.py")
        dataset = features_loader_conference(dataset,
                                             tokenizer,
                                             max_length=config["max_length"])
    else:
        raise ValueError(f"Task name '{config['task_name']}' not supported")
    train_data, test_data = dataset["train"], dataset["test"]
    train_dataset, test_dataset = get_featurized_dataset(
        tokenizer, train_data, test_data)

    if config["do_train"]:
        global_step, tr_loss = train(train_dataset, test_dataset, model,
                                     config, neptune_project)
        logging.info(" global_step = %s, average loss = %s", global_step,
                     tr_loss)

    if config["do_eval"]:
        report = evaluate(test_dataset, model, config)
        logging.info(
            "---------------------- Evaluation report ----------------------\n{}"
            .format(report))
Example #24
def get_exp_params(exp_name, project_name):
    project = neptune.init(project_name)
    exp_list = project.get_experiments(exp_name)
    if len(exp_list) == 1:
        with tempfile.TemporaryDirectory(dir="/tmp") as temp:
            exp_list[0].download_artifact(params_pickle_name, temp)
            # open the pickled params explicitly so the file handle is closed promptly
            with open(os.path.join(temp, params_pickle_name), "rb") as f:
                return pickle.load(f)
    else:
        raise Exception("Wrong exp id!")
Example #25
    def init_neptune(self):
        print("INIT NEPTUNE")
        neptune_api_token = self._exp_config["neptune_api_token"]
        neptune_name = self._exp_config["neptune_name"]
        exp_name = self._exp_config["exp_name"]

        data = {
            "agent_config": self._agent_config,
            "exp_config": self._exp_config
        }

        neptune.init(neptune_name, api_token=neptune_api_token)

        neptune.create_experiment(exp_name, params=data)

        self.exp_id = neptune.get_experiment().id

        return neptune.get_experiment()
Example #26
def run_neptune(head, tail):
    neptune.init(project_qualified_name="OneOneFour/Ising-Model")
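    # route TensorBoard summaries to Neptune as well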
    neptune_tb.integrate_with_tensorflow()
    ttf = IsingData(train_ratio=1, test_ratio=0.5, validation_ratio=0.20)
    ttf.load_json(tail)
    (train_image, train_label), (test_image,
                                 test_label), (val_image,
                                               val_label) = ttf.get_data()

    # normalise and reshape

    train_image = train_image.reshape(
        (len(train_image), ttf.size, ttf.size, 1))
    test_image = test_image.reshape((len(test_image), ttf.size, ttf.size, 1))
    val_image = val_image.reshape((len(val_image), ttf.size, ttf.size, 1))

    exp_name = f"Convolutional {tail} {datetime.now().strftime('%Y_%m_%d')}"
    with neptune.create_experiment(name=exp_name, params=PARAMS) as exp:
        logdir = "..\\logs\\fit\\" + datetime.now().strftime("%Y%m%d-%H%M%S")
        callback = TensorBoard(
            log_dir=logdir)  # Make sure to save callback as a regular variable
        model = get_convolutional_network(
            ttf.size,
            exp.get_parameters()['periodic_padding'])
        model.compile(optimizer=exp.get_parameters()['optimizer'],
                      loss=exp.get_parameters()['loss'],
                      metrics=ast.literal_eval(
                          exp.get_parameters()['metrics']))

        history = model.fit(train_image,
                            train_label,
                            epochs=PARAMS['epochs'],
                            validation_data=(val_image, val_label),
                            callbacks=[callback],
                            batch_size=PARAMS['batch_size'])
        print(model.summary())
        loss, acc = model.evaluate(test_image, test_label)
        print(f"Model accuracy: {acc}")
        exp.send_text("test-accuracy", str(acc))
        exp.send_text("test-loss", str(loss))
        weights_name = f"convolutional_weights {datetime.now().strftime('%Y_%m_%d %H_%M')}.h5"
        model.save_weights(weights_name)
        exp.send_artifact(weights_name)
    return acc
Example #27
def update_models():
    """ Download models that are tagged with 'prod' from neptune """
    # remove the models dir if it exists
    try:
        shutil.rmtree(ARTIFACTS_DIR)
        sleep(1)
        mkdir(ARTIFACTS_DIR)
    except (FileNotFoundError):
        pass

    # connect to neptune
    try:
        project = neptune.init()
    except (MissingProjectQualifiedName):
        return 'NEPTUNE_PROJECT environment variable is missing'
    except (MissingApiToken):
        return 'NEPTUNE_API_TOKEN environment variable is missing'
    except (Exception) as e:
        return e.args[0]

    # for each model that is in prod, download the artifacts (which should just be a pkl)
    # grab the experiments
    experiments = project.get_experiments(tag='prod')

    # get the artifact for each experiment, and unzip it
    for experiment in experiments:
        artifact_dir = path.join(ARTIFACTS_DIR, experiment.id)
        experiment.download_artifacts(destination_dir=artifact_dir)

        # the downloaded artifact will be in a zip file, so lets extract it
        with ZipFile(artifact_dir + sep + 'output.zip') as zip_ref:
            zip_ref.extractall(path=artifact_dir)
            sleep(
                0.25
            )  # delay a little as sometimes ZipFile.extractall doesn't report well on windows

        # cleanup after the zip extraction
        for filename in listdir(path.join(artifact_dir, 'output')):
            if fnmatch.fnmatch(filename, '*.pkl'):
                rename(path.join(artifact_dir, 'output', filename),
                       path.join(artifact_dir, filename))
                shutil.rmtree(path.join(artifact_dir, 'output'))

        remove(path.join(artifact_dir, 'output.zip'))

        # grab the metrics
        logs = experiment.get_logs()
        metrics = {}
        for key, value in logs.items():
            metrics[key] = value['y']

        with open(artifact_dir + sep + 'metrics.json', 'w') as f:
            json.dump(metrics, f)

    return redirect(url_for('case.newCase'))
Example #28
def remove_image_logs(idx):
    """ Remove all image logs from experiment"""
    proj = neptune.init("reformer-tts/reformer-tts")
    exp = proj.get_experiments(idx)[0]
    logs = exp.get_channels()

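    # reset only the image-type channels; all other logs are left untouched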
    for name, channel in logs.items():
        if channel.channelType == 'image':
            exp.reset_log(name)

    exp.set_property('cleaned_image_logs', True)
Example #29
def configure_neptune(specification):
    """Configures the Neptune experiment, then returns the Neptune logger."""
    if 'NEPTUNE_API_TOKEN' not in os.environ:
        raise KeyError('NEPTUNE_API_TOKEN environment variable is not set!')

    git_info = specification.get('git_info', None)
    if git_info:
        git_info.commit_date = datetime.datetime.now()

    neptune.init(project_qualified_name=specification['project'])
    # Set pwd property with path to experiment.
    properties = {'pwd': os.getcwd()}
    neptune.create_experiment(name=specification['name'],
                              tags=specification['tags'],
                              params=specification['parameters'],
                              properties=properties,
                              git_info=git_info)
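    # make sure the Neptune experiment is closed when the process exits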
    atexit.register(neptune.stop)

    return NeptuneLogger(neptune.get_experiment())
Example #30
    def build_experiment(self):
        _exp_config = self.config_toml
        exp_number = 1
        # This is a bit messy, but more literal
        _exp_params = {
            'num_islands': _exp_config['num_islands'],
            'mutation_rate': _exp_config['mutation_rate'],
            'mutation_exponent': _exp_config['mutation_exponent'],
            'crossover_period': _exp_config['crossover_period'],
            'crossover_rate': _exp_config['crossover_rate'],
            'max_init_len': _exp_config['max_init_len'],
            'min_init_len': _exp_config['min_init_len'],
            'pop_size': _exp_config['pop_size'],
            'max_length': _exp_config['max_length'],
        }

        # _exp_name = _exp_config['population_name']
        _exp_name = "scute-halux-orbit-naris"

        neptune.init(self.exp_pqn, api_token=None)
        self.experiment = neptune.create_experiment(name=_exp_name,
                                                    params=_exp_params)
        print(f" Created experiment {_exp_name} in {self.exp_pqn}")