Example #1
    def save_experiment(self):

        with open(f"./output/{self._experiment._id}.ckp", "wb") as fh:
            pickle.dump(self._agent, fh)
        neptune.log_artifact(f"./output/{self._experiment._id}.ckp")
        neptune.append_tag(self._args.agent)
        neptune.append_tag(self._args.environment)
Example #2
def plot_oof(output: torch.Tensor, tile_ids: list, img: torch.Tensor,
             target: torch.Tensor, predictions_dir: str) -> None:
    output = torch.sigmoid(output)
    output = output.cpu().numpy().copy()
    target = target.cpu().numpy().copy()
    img = img.cpu().numpy().transpose(0, 2, 3, 1)
    for num, (pred, im, tar) in enumerate(zip(output, img, target), start=0):
        tile_name = tile_ids[num]
        if pred.ndim == 3:
            pred = np.squeeze(pred, axis=0)
        prob_mask = np.rint(pred * 255).astype(np.uint8)
        prob_mask_rgb = np.repeat(prob_mask[..., None], 3,
                                  2)  # repeat array for three channels
        # image
        input_image = np.rint(im * 255).astype(np.uint8)
        overlayed_im = np.rint(input_image * 0.5 + prob_mask_rgb * 0.5).clip(
            0, 255).astype(np.uint8)
        # target
        if tar.ndim == 3:
            tar = np.squeeze(tar, axis=0)
        tar = np.rint(tar * 255).astype(np.uint8)
        target_rgb = np.repeat(tar[..., None], 3, axis=2)
        plot_im = np.vstack(
            [input_image, overlayed_im, prob_mask_rgb, target_rgb])
        cv2.imwrite(f"{predictions_dir}/{tile_name}.png", plot_im)
        # send to Neptune (log_image accepts arrays; log_artifact needs a file path)
        neptune.log_image(f'oof_{tile_name}', plot_im)
        neptune.log_artifact(f"{predictions_dir}/{tile_name}.png",
                             destination='oof_img')
Example #3
def test(net, testloader, IMG_CHANNELS, IMG_SIZE, OUTPUT_LABEL_SIZE, device):
    predictions = []
    with torch.no_grad():
        for i, sample in enumerate(testloader):
            predict = []
            x, y = sample

            filter_input(x)
            x = x.view(-1, IMG_CHANNELS, IMG_SIZE, IMG_SIZE)

            x, y = x.to(device), y.to(device)

            d_, A_, B_, t_ = net(x)

            final_result = torch.cat([d_, A_, B_, t_], dim=1)
            final_result = final_result.to("cpu")

            predictions.append(final_result.numpy())

    print(predictions)

    predictions = np.concatenate(predictions, axis=0).reshape(-1, OUTPUT_LABEL_SIZE)
    df = DataFrame(predictions)
    df.to_excel('predictions.xlsx', header=False, index=False)
    neptune.log_artifact('predictions.xlsx')
Example #4
def main(arguments):
    with open(arguments.filepath, 'r') as fp:
        json_exp = json.load(fp)

    neptune.init(api_token=arguments.api_token,
                 project_qualified_name=arguments.project_name)

    with neptune.create_experiment(
            name=json_exp['name'],
            description=json_exp['description'],
            params=json_exp['params'],
            properties=json_exp['properties'],
            tags=json_exp['tags'],
            upload_source_files=json_exp['upload_source_files']):

        for name, channel_xy in json_exp['log_metric'].items():
            for x, y in zip(channel_xy['x'], channel_xy['y']):
                neptune.log_metric(name, x=x, y=y)

        for name, channel_xy in json_exp['log_text'].items():
            for x, y in zip(channel_xy['x'], channel_xy['y']):
                neptune.log_text(name, x=x, y=y)

        for name, channel_xy in json_exp['log_image'].items():
            for x, y in zip(channel_xy['x'], channel_xy['y']):
                neptune.log_image(name, x=x, y=y)

        for filename in json_exp['log_artifact']:
            neptune.log_artifact(filename)
Example #5
    def _upload_images_to_neptune(self, images_dict: Dict):
        def fig2img(fig):
            buf = io.BytesIO()
            fig.savefig(buf)
            buf.seek(0)
            return buf

        for k, v in images_dict.items():
            neptune.log_artifact(fig2img(v), f"{k}.png")
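A minimal usage sketch for the helper above, assuming an active legacy neptune-client experiment; the figure name and the uploader instance are hypothetical:

import matplotlib.pyplot as plt

# hypothetical figure; any {name: matplotlib Figure} mapping works here
fig, ax = plt.subplots()
ax.plot([1, 2, 3], [0.9, 0.5, 0.3])
uploader._upload_images_to_neptune({"loss_curve": fig})  # uploads loss_curve.png as an artifact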
Example #6
def run_dqn(experiment_name):
    current_dir = pathlib.Path().absolute()
    directories = Save_paths(data_dir=f'{current_dir}/data', experiment_name=experiment_name)

    game = Winter_is_coming(setup=PARAMS['setup'])
    environment = wrappers.SinglePrecisionWrapper(game)
    spec = specs.make_environment_spec(environment)

    # Build the network.
    def _make_network(spec) -> snt.Module:
        network = snt.Sequential([
            snt.Flatten(),
            snt.nets.MLP([50, 50, spec.actions.num_values]),
        ])
        tf2_utils.create_variables(network, [spec.observations])
        return network

    network = _make_network(spec)

    # Setup the logger
    if neptune_enabled:
        agent_logger = NeptuneLogger(label='DQN agent', time_delta=0.1)
        loop_logger = NeptuneLogger(label='Environment loop', time_delta=0.1)
        PARAMS['network'] = f'{network}'
        neptune.init('cvasquez/sandbox')
        neptune.create_experiment(name=experiment_name, params=PARAMS)
    else:
        agent_logger = loggers.TerminalLogger('DQN agent', time_delta=1.)
        loop_logger = loggers.TerminalLogger('Environment loop', time_delta=1.)

    # Build the agent
    agent = DQN(
        environment_spec=spec,
        network=network,
        params=PARAMS,
        checkpoint=True,
        paths=directories,
        logger=agent_logger
    )
    # Try running the environment loop. We have no assertions here because all
    # we care about is that the agent runs without raising any errors.
    loop = acme.EnvironmentLoop(environment, agent, logger=loop_logger)
    loop.run(num_episodes=PARAMS['num_episodes'])

    last_checkpoint_path = agent.save()

    # Upload last checkpoint
    if neptune_upload_checkpoint and last_checkpoint_path:
        files = os.listdir(last_checkpoint_path)
        for f in files:
            neptune.log_artifact(os.path.join(last_checkpoint_path, f))

    if neptune_enabled:
        neptune.stop()

    do_example_run(game, agent)
Example #7
    def train(self, n_epochs):
        bar = progressbar.ProgressBar()
        for epoch in bar(range(n_epochs)):
            train_stats = self.run_model(train=True)
            test_stats = self.test()

            self.update_logger(train_stats, test_stats, epoch)

        torch.save(self.net, f'trained models_{self.run_name}.pt')
        neptune.log_artifact(f'trained models_{self.run_name}.pt')
Example #8
def setup_neptune(cfg) -> None:
    neptune.init(project_qualified_name="mayu-ot/VGP")
    neptune.create_experiment(
        name=f"train {cfg.MODEL.GATE}",
        properties={
            "user": getpass.getuser(),
            "host": socket.gethostname(),
            "wd": os.getcwd(),
            "cmd": " ".join(sys.argv),
        },
        tags=["train"],
    )
    filename = os.path.join(cfg.LOG.OUTDIR, "config.yaml")
    neptune.log_artifact(filename, "config.yaml")
Example #9
def obj2(args):
    nf, act_fn, scale_by_channel, scale_by_sample, scale_type = args
    scale_range = (-1, 1)
    bs = 32
    data = (ItemLists(Path("data"),
                      TSList(x_train), TSList(x_val)).label_from_lists(
                          y_train,
                          y_val).databunch(bs=bs, val_bs=bs * 2).scale(
                              scale_type=scale_type,
                              scale_by_channel=scale_by_channel,
                              scale_by_sample=scale_by_sample,
                              scale_range=scale_range))
    model = ResNet(data.features, data.c, act_fn=act_fn, nf=nf)
    neptune.init(project_qualified_name=
                 'andrijdavid/ClinicalBrainComputerInterfacesChallenge2020')
    neptune.create_experiment(name='ResNet Hyperparameter Search',
                              description="Optimizing accuracy",
                              params={
                                  'nf': nf,
                                  'act_fn': act_fn,
                                  'scale_by_channel': scale_by_channel,
                                  'scale_by_sample': scale_by_sample,
                                  'scale_type': scale_type,
                                  'bs': bs,
                                  'model': 'resnet',
                                  'epoch': 100
                              },
                              tags=['hyperopt'])
    name = names.get_first_name()
    #     kappa = KappaScore()
    loss_func = LabelSmoothingCrossEntropy()
    learn = Learner(data,
                    model,
                    metrics=[accuracy],
                    loss_func=loss_func,
                    opt_func=Ranger)
    with progress_disabled_ctx(learn) as learn:
        learn.fit_one_cycle(100, callbacks=[NeptuneMonitor()])
    learn.save(f"{name}")
    val = learn.validate()
    learn.destroy()
    data = None
    neptune.log_artifact(f'data/models/{name}.pth')
    neptune.stop()
    return {
        'loss': 1 - (val[1].item()),
        'status': STATUS_OK,
        'kappa': val[-1].item()
    }
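The objective above follows the hyperopt convention of returning a dict with a 'loss' value and STATUS_OK, so it can be driven by hyperopt's fmin. A minimal sketch under assumptions (the search-space choices below are illustrative, not taken from the original run):

from hyperopt import Trials, fmin, hp, tpe

# illustrative search space matching the (nf, act_fn, scale_by_channel,
# scale_by_sample, scale_type) tuple that obj2 unpacks
space = [
    hp.choice('nf', [32, 64, 128]),
    hp.choice('act_fn', ['relu', 'mish']),
    hp.choice('scale_by_channel', [True, False]),
    hp.choice('scale_by_sample', [True, False]),
    hp.choice('scale_type', ['standardize', 'normalize']),
]
best = fmin(fn=obj2, space=space, algo=tpe.suggest, max_evals=20, trials=Trials())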
Example #10
    def model_checkpoint(self, train_loss, val_loss, epoch):

        new_best_train, new_best_val = False, False

        # criteria_map = {'train_rec': train_loss[1], 'val_rec': val_loss[1]}
        criteria_map = {'train_rec': train_loss, 'val_rec': val_loss}

        loss = criteria_map.get(self.criteria)

        if loss < self.best_loss:
            for m, f in zip(self.models, self.filenames):
                if self.keep_all:
                    f = f.replace('.h5', '') + '_t' + str(train_loss).replace('0.', '') + \
                        '_v' + str(val_loss).replace('0.', '') + '.h5'
                tf.keras.models.save_model(m, os.path.join(self.ckpt_dir, f))
                # m.save_weights(os.path.join(self.ckpt_dir, f))
                if self.neptune_ckpt:
                    neptune.log_artifact(os.path.join(self.ckpt_dir, f))
            self.best_loss = loss

        if train_loss < self.best_train_loss:
            self.best_train_loss = train_loss
            self.best_train_epoch = epoch + 1
            new_best_train = True

        if val_loss < self.best_val_loss:
            self.best_val_loss = val_loss
            self.best_val_epoch = epoch + 1
            new_best_val = True

        if new_best_train:
            print(
                colored(
                    'Best train loss: %.7f, epoch  %d' %
                    (self.best_train_loss, self.best_train_epoch), 'magenta'))
        if new_best_val:
            print(
                colored(
                    'Best val loss: %.7f, epoch %d' %
                    (self.best_val_loss, self.best_val_epoch), 'green'))
        return
Example #11
def bias_benchmark(task, feature, fraction_range, plot=True):

    if task == 'classification':
        get_datasets = get_classification_datasets
    else:
        get_datasets = get_regression_datasets

    SEED = 42

    for d in get_datasets():
        X_train, X_test, y_train, y_test, dataset_name = d
        print(dataset_name)
        X = np.concatenate([X_train, X_test], axis=0)
        y = np.concatenate([y_train, y_test], axis=0)

        features = [f'f{i + 1}' for i in range(X.shape[1])]
        df = pd.DataFrame(X, columns=features)
        image_path = f'./logs/{task}_{dataset_name}_{feature}'

        correlations, rf_scores, sf_scores, permutation_importances = bias_experiment(
            df, y, task, feature, fraction_range, SEED)
        if plot:
            plot_bias(fraction_range, correlations, rf_scores, sf_scores,
                      permutation_importances, dataset_name,
                      image_path + '.png')

            # Log chart and raw results into Neptune
            neptune.send_image(f'{dataset_name}', image_path + '.png')

        results_dict = {
            'correlations': correlations.tolist(),
            'rf_scores': rf_scores.tolist(),
            'sf_scores': sf_scores.tolist(),
            'permutation_importances': permutation_importances
        }
        result_file_path = f'./logs/{task}_{dataset_name}_{feature}_results.json'
        with open(result_file_path, 'w+') as f:
            json.dump(results_dict, f)
            print(f'Saved results to: {result_file_path}')
        neptune.log_artifact(result_file_path)
Example #12
def run_experiments(datasets, parameters):

    # Populate tags with some info.
    tags = parameters['tags']
    if parameters['run_mlp']:
        tags.append("MLP")
    if parameters['run_fcn']:
        tags.append("FCN")
    if parameters['run_resnet']:
        tags.append("ResNet")

    # Create Neptune client.
    neptune.init(project_qualified_name=parameters['neptune_project'])
    neptune.create_experiment(
        upload_source_files=[],
        params=parameters,
        tags=tags
    )
    neptune.log_artifact("ConvNet.py")
    neptune.log_artifact("MultiLayerPerceptron.py")
    neptune.log_artifact("ResNet.py")

    try:
        run_train_models(datasets, parameters)
    except KeyboardInterrupt:
        pass
    finally:
        neptune.stop()
Example #13
    def handle_files_and_images(self):
        # image
        # `image_name` and `description` will be lost (`send_image` the same as `log_image`)
        neptune.send_image("image",
                           self.img_path,
                           name="name",
                           description="desc")

        # artifact with default dest
        neptune.send_artifact(self.text_file_path)
        exp = neptune.get_experiment()
        with self.with_check_if_file_appears("text.txt"):
            exp.download_artifact("text.txt")
        with self.with_check_if_file_appears("custom_dest/text.txt"):
            exp.download_artifact("text.txt", "custom_dest")

        # artifact with custom dest
        neptune.send_artifact(self.text_file_path, destination="something.txt")
        exp = neptune.get_experiment()
        with self.with_check_if_file_appears("something.txt"):
            exp.download_artifact("something.txt")
        with self.with_check_if_file_appears("custom_dest/something.txt"):
            exp.download_artifact("something.txt", "custom_dest")

        # destination dirs
        neptune.log_artifact(self.text_file_path,
                             destination="dir/text file artifact")
        neptune.log_artifact(self.text_file_path,
                             destination="dir/artifact_to_delete")

        # deleting
        neptune.delete_artifacts("dir/artifact_to_delete")

        # streams
        with open(self.text_file_path, mode="r") as f:
            neptune.send_artifact(f, destination="file stream.txt")
Example #14
def main(argv):
    gin.parse_config_files_and_bindings(FLAGS.gin_file, FLAGS.gin_param, skip_unknown=True)
    print("Gin parameter bindings:\n{}".format(gin.config_str()))

    use_neptune = "NEPTUNE_API_TOKEN" in os.environ
    exp_id = ''

    if use_neptune:
        neptune.init(project_qualified_name='bbeatrix/curl')
        exp = neptune.create_experiment(params=gin_config_to_dict(gin.config_str()),
                                        name=FLAGS.gin_file[0].split('/')[-1][:-4],
                                        upload_source_files=['./*.py'])
        exp_id = exp.id
    else:
        neptune.init('shared/onboarding', 'ANONYMOUS', backend=neptune.OfflineBackend())

    neptune.log_text('gin_config', gin.config_str())
    neptune.log_artifact(*FLAGS.gin_file, 'gin_config_{}.gin'.format(exp_id))

    exp_manager = ExperimentManager(prefix=exp_id)
    exp_manager.run_experiment()

    neptune.stop()
    print("Fin")
Example #15
    def handle_directories(self):
        exp = neptune.get_experiment()

        # download_artifacts
        neptune.send_artifact(self.data_dir)
        if self._api_version == 1:
            with self.with_check_if_file_appears("output.zip"):
                exp.download_artifacts()
        else:
            with self.with_assert_raises(
                    DownloadArtifactsUnsupportedException):
                exp.download_artifacts()

        # create some nested artifacts
        neptune.log_artifact(self.img_path,
                             destination="main dir/sub dir/art1")
        neptune.log_artifact(self.img_path,
                             destination="main dir/sub dir/art2")
        neptune.log_artifact(self.img_path,
                             destination="main dir/sub dir/art3")

        # downloading artifact - download_artifact
        # non existing artifact
        if self._api_version == 1:
            with self.with_assert_raises(FileNotFound):
                exp.download_artifact("main dir/sub dir/art100")
        else:
            with self.with_assert_raises(DownloadArtifactUnsupportedException):
                exp.download_artifact("main dir/sub dir/art100")
        # artifact directories
        if self._api_version == 1:
            with self.with_assert_raises(HTTPError):
                exp.download_artifact("main dir/sub dir")
        else:
            with self.with_assert_raises(DownloadArtifactUnsupportedException):
                exp.download_artifact("main dir/sub dir")

        # deleting artifacts
        neptune.delete_artifacts("main dir/sub dir/art1")

        # delete non existing artifact
        if self._api_version == 1:
            neptune.delete_artifacts("main dir/sub dir/art100")
        else:
            with self.with_assert_raises(
                    DeleteArtifactUnsupportedInAlphaException):
                neptune.delete_artifacts("main dir/sub dir/art100")

        # delete dir
        if self._api_version == 1:
            neptune.delete_artifacts("main dir/sub dir")
        else:
            with self.with_assert_raises(
                    DeleteArtifactUnsupportedInAlphaException):
                neptune.delete_artifacts("main dir/sub dir")
Example #16
gbm = lgb.train(
    params,
    lgb_train,
    num_boost_round=500,
    valid_sets=[lgb_train, lgb_eval],
    valid_names=['train', 'valid'],
    callbacks=[neptune_monitor()],
)

## Save Model Artifacts.

gbm.save_model('lightgbm.pkl')

# Log model
neptune.log_artifact('lightgbm.pkl')

## Log Interactive Charts.

### 1. Install dependencies

get_ipython().system(
    ' pip install --quiet scikit-plot matplotlib==3.2.0 plotly==4.12.0')

### 2. Create an ROC AUC curve

import matplotlib.pyplot as plt
from scikitplot.metrics import plot_roc

y_test_pred = gbm.predict(X_test)
from tqdm import tqdm
import numpy as np
import neptune
from pathlib import Path

from src.trainer import create_trainer, create_model
from src.validation import validate

tokenizer = load_tokenizer("artifacts")

model = create_model()

neptune.init("oversbyg/cook-mlm")
neptune.create_experiment(name="example")

trainer = create_trainer(tokenizer, model)

for epoch in range(40):
    validation_result = validate(model, tokenizer)[0]

    neptune.log_metric("top1", validation_result)
    trainer.save_model("./artifacts")

    trainer.train()

    shuffle_data()
    trainer = create_trainer(tokenizer, model)

for file in Path("./artifacts").iterdir():
    neptune.log_artifact(str(file))
neptune.log_metric('test_f1', f1)

import matplotlib.pyplot as plt
from scikitplot.metrics import plot_confusion_matrix, plot_roc

fig, ax = plt.subplots(figsize=(16, 12))
plot_confusion_matrix(y_test, y_test_pred_class, ax=ax)
neptune.log_image('diagnostic_charts', fig)

fig, ax = plt.subplots(figsize=(16, 12))
plot_roc(y_test, y_test_pred, ax=ax)
neptune.log_image('diagnostic_charts', fig)

model.save('my_model.h5')
neptune.log_artifact('my_model.h5')

# tests
current_exp = neptune.get_experiment()

correct_logs = [
    'batch_loss', 'batch_accuracy', 'epoch_loss', 'epoch_accuracy',
    'epoch_val_loss', 'epoch_val_accuracy', 'test_f1', 'diagnostic_charts'
]

if set(current_exp.get_logs().keys()) != set(correct_logs):
    raise ValueError()

neptune.stop()

# Access data you logged programmatically
        for image, prediction in zip(data, outputs):
            description = '\n'.join([
                'class {}: {}'.format(i, pred)
                for i, pred in enumerate(F.softmax(prediction, dim=0))
            ])
            neptune.log_image('predictions',
                              image.squeeze(),
                              description=description)

    if batch_idx == PARAMS['iterations']:
        break

## Log model weight to experiment

torch.save(model.state_dict(), 'model_dict.pth')
neptune.log_artifact('model_dict.pth')

# tests
exp = neptune.get_experiment()

## Stop Neptune experiment after training

neptune.stop()

# tests
# check logs
correct_logs_set = {'batch_loss', 'batch_acc', 'predictions'}
from_exp_logs = set(exp.get_logs().keys())

assert correct_logs_set == from_exp_logs, '{} - incorrect logs'.format(exp)
Example #20
def log_csv(data: dt.Frame, filename: str):
    data.to_csv(filename)
    neptune.log_artifact(filename)
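A short sketch of calling this helper, assuming the datatable package and an open legacy-Neptune experiment; the frame contents are made up for illustration:

import datatable as dt

# hypothetical metrics table; log_csv writes it to disk, then uploads the CSV as an artifact
frame = dt.Frame({"epoch": [1, 2, 3], "val_loss": [0.9, 0.5, 0.3]})
log_csv(frame, "val_metrics.csv")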
Example #21
            plt.clf()
            plt.close()


    # log results
    sf_mean_silhouette = np.mean(sf_silhouette)
    sf_mean_db = np.mean(sf_db)
    neptune.log_metric(f'{dataset} SF silhouette', sf_mean_silhouette)
    neptune.log_metric(f'{dataset} SF Davies Bouldin', sf_mean_db)

    other_mean_silhouette = np.mean(other_silhouette)
    other_mean_db = np.mean(other_db)
    neptune.log_metric(f'{dataset} {other_algorithm} silhouette', other_mean_silhouette)
    neptune.log_metric(f'{dataset} {other_algorithm} Davies Bouldin', other_mean_db)

    # compare
    ts, ps = ttest_ind(sf_silhouette, other_silhouette)
    neptune.log_metric(f'{dataset} t-stat silhouette', ts)
    neptune.log_metric(f'{dataset} p-val silhouette', ps)

    tdb, pdb = ttest_ind(sf_db, other_db)
    neptune.log_metric(f'{dataset} t-stat db', tdb)
    neptune.log_metric(f'{dataset} p-val db', pdb)

    # log
    df.loc[d_idx] = [dataset, sf_mean_silhouette, other_mean_silhouette, ps, sf_mean_db, other_mean_db, pdb]
    df.to_csv(log_name, index=False)

neptune.log_artifact(log_name)
neptune.stop()
Example #22
                'class {}: {}'.format(i, pred)
                for i, pred in enumerate(F.softmax(prediction, dim=0))
            ])
            neptune.log_image('predictions',
                              image.squeeze(),
                              description=description)

    if batch_idx == PARAMS['iterations']:
        break

## Log model weights

torch.save(model.state_dict(), 'model_dict.ckpt')

# log model
neptune.log_artifact('model_dict.ckpt')

# Explore results in the Neptune UI

# tests

exp = neptune.get_experiment()
all_logs = exp.get_logs()

## check logs
correct_logs = ['batch_loss', 'predictions']

assert set(
    all_logs.keys()) == set(correct_logs), 'Expected: {}. Actual: {}'.format(
        set(correct_logs), set(all_logs.keys()))
Example #23
y_pred_val = pd.DataFrame(y_pred_val,
                          index=X_val.index,
                          columns=['prediction'])
y_pred_val_filename = f'data/preds/h{forecast_horizon}_y_pred_val.parquet'
y_pred_val.to_parquet(y_pred_val_filename)
# save test predictions
y_pred_test = model.predict(X_test, num_iteration=model.best_iteration)
y_pred_test = pd.DataFrame(y_pred_test,
                           index=X_test.index,
                           columns=['prediction'])
y_pred_test_filename = f'data/preds/h{forecast_horizon}_y_pred_test.parquet'
y_pred_test.to_parquet(y_pred_test_filename)

if NEPTUNE:
    neptune.log_metric(f"h{forecast_horizon}_val_rmse", val_rmse)
    neptune.log_artifact(model_filename)
    neptune.log_image(importance_filename, fig)
    neptune.log_artifact(y_pred_val_filename)
    neptune.log_artifact(y_pred_test_filename)
    neptune.stop()


def get_y_weights(y: pd.Series, normalize=False):
    """
    For each series, compute the denominator in the MSSE loss function, i.e. the
    day-to-day variations squared, averaged by number of training observations.
    The weights can be normalized so that they add up to 1.
    This is provided to the lgb.Dataset for computing loss function and evaluation metric
    """
    scales = (y.unstack(level='date').diff(axis=1)**2).mean(axis=1)
    scales = scales.replace(0, pd.NA)
    model.summary(print_fn=lambda x: neptune.log_text('model_summary', x))

    # train model
    model.fit(train_images,
              train_labels,
              batch_size=PARAMS['batch_size'],
              epochs=PARAMS['n_epochs'],
              shuffle=PARAMS['shuffle'],
              callbacks=[
                  keras.callbacks.LambdaCallback(
                      on_epoch_end=lambda epoch, logs: log_data(logs)),
                  keras.callbacks.EarlyStopping(
                      patience=PARAMS['early_stopping'],
                      monitor='accuracy',
                      restore_best_weights=True),
                  keras.callbacks.LearningRateScheduler(lr_scheduler)
              ])

    # log model weights
    with tempfile.TemporaryDirectory(dir='.') as d:
        prefix = os.path.join(d, 'model_weights')
        model.save_weights(os.path.join(prefix, 'model'))
        for item in os.listdir(prefix):
            neptune.log_artifact(os.path.join(prefix, item),
                                 os.path.join('model_weights', item))

    # evaluate model
    eval_metrics = model.evaluate(test_images, test_labels, verbose=0)
    for j, metric in enumerate(eval_metrics):
        neptune.log_metric('eval_' + model.metrics_names[j], metric)
Example #25
    def __call__(self, trainer: BaseTrainer):
        tmp = BytesIO()
        torch.save(trainer.model.state_dict(), tmp)
        tmp.seek(0)
        neptune.log_artifact(tmp, destination='last_model.ckpt')
        neptune.stop()
          batch_size=parameters['batch_size'],
          epochs=parameters['n_epochs'],
          validation_split=0.2,
          callbacks=[NeptuneMonitor()])

# Log model evaluation metrics

eval_metrics = model.evaluate(x_test, y_test, verbose=0)

for j, metric in enumerate(eval_metrics):
    neptune.log_metric('test_{}'.format(model.metrics_names[j]), metric)

# Log model weights after training

model.save('model')
neptune.log_artifact('model')

# Log predictions as table

import numpy as np
import pandas as pd
from neptunecontrib.api import log_table

y_pred_proba = model.predict(x_test)
y_pred = np.argmax(y_pred_proba, axis=1)
df = pd.DataFrame(
    data={
        'y_test': y_test,
        'y_pred': y_pred,
        'y_pred_probability': y_pred_proba.max(axis=1)
    })
log_table('predictions', df)

def send_best_checkpoint(best_checkpoint, best_step):
    if best_checkpoint and best_step != 0:
        neptune.log_artifact(best_checkpoint)
Example #28
    def log_artifact(self, path: Path, name: str = None):
        if not self.disabled:
            path = str(path)
            neptune.log_artifact(path)
Example #29
    def artifact_event(self, name, filename, metadata=None, content_type=None):
        neptune.log_artifact(filename)
        neptune.log_metric(f'std {dataset} RF accuracy', std_rf_acc)
        neptune.log_metric(f'std {dataset} SF accuracy', std_sf_acc)

    t_f1, p_f1 = ttest_ind(rf_f1, sf_f1)
    t_roc, p_roc = ttest_ind(rf_roc, sf_roc)
    t_acc, p_acc = ttest_ind(rf_acc, sf_acc)

    if use_neptune:
        neptune.log_metric(f'{dataset} t-stat', t_f1)
        neptune.log_metric(f'{dataset} p-val', p_f1)

        neptune.log_metric(f'{dataset} t-stat', t_roc)
        neptune.log_metric(f'{dataset} p-val', p_roc)

        neptune.log_metric(f'{dataset} t-stat', t_acc)
        neptune.log_metric(f'{dataset} p-val', p_acc)

    df.loc[d_idx] = [dataset,
                     mean_sf_f1, mean_sf_roc, mean_sf_acc, std_sf_f1, std_sf_roc, std_sf_acc,
                     mean_rf_f1, mean_rf_roc, mean_rf_acc, std_rf_f1, std_rf_roc, std_rf_acc,
                     p_f1, p_roc, p_acc,
                     sf.get_params(), rf.get_params()]

    print(df.loc[d_idx])
    df.to_csv(log_name + '.csv', index=False)

if use_neptune:
    neptune.log_artifact(log_name + '.csv')
    neptune.log_artifact(log_name + '_cv_std_score.csv')
    neptune.stop()