Example 1
    def log_figure(self, figure=None, figure_name=None, step=None):
        '''
        Logs a pyplot figure.

        Parameters
        ----------
        figure : pyplot figure, optional in Comet, mandatory in Neptune.
            The default is None, which uses the current global pyplot figure.
        figure_name : str, optional in Comet, mandatory in Neptune.
            The default is None.
        step : int, optional
            An index. The default is None.

        Returns
        -------
        None.

        '''
        if self.neptune:
            if figure is not None:
                if figure_name is None:
                    print("Figure name must be given to neptune logger")
                    print("Using dummy name: figure")
                    figure_name = 'figure'
                if step is None:
                    neptune.log_image(figure_name, figure)
                else:
                    neptune.log_image(figure_name, step, y=figure)
            else:
                print("A figure must be passed to neptune logger")
        if self.comet:
            self.comet_experiment.log_figure(figure_name=figure_name,
                                             figure=figure,
                                             step=step)
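
For reference, a minimal sketch of the raw calls the wrapper above reduces to for each backend (illustrative figure and names; assumes an experiment is already open, i.e. neptune.init()/neptune.create_experiment() have run and comet_experiment is a comet_ml Experiment):

import neptune
import matplotlib.pyplot as plt

fig, ax = plt.subplots()
ax.plot([0, 1, 2], [1, 4, 9])

# Neptune (old neptune-client channel API): the log name is required
# and the step, when given, is passed as the x value.
neptune.log_image('loss_curve', fig)       # no step
neptune.log_image('loss_curve', 3, y=fig)  # with step=3

# Comet: figure_name and figure are both optional; Comet falls back to
# the current global pyplot figure when figure is None.
comet_experiment.log_figure(figure_name='loss_curve', figure=fig, step=3)
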
Example 2
def show_metrics(metrics, all_fitness, all_populations, config):
    """
    Method for showing best result and best individual
    :param metrics: values of metrics
    :param all_fitness: values of all fitnesses.
    :param all_populations: all populations.
    :param config: experiment configuration
    :return:
    """
    fit_idx = np.argsort(all_fitness[-1])[::-1]
    best_fit = all_populations[-1][fit_idx[0]]

    config['experiment_time'] = datetime.now().strftime("%d-%m-%Y_%H:%M:%S")
    config['saving_path'] = resolve_saving_path(config=config)

    plot_saving_path = os.path.join(
        config['saving_path'], f'plot_fitness_{config["experiment_time"]}.png')

    plt.figure(figsize=(14, 7))
    plt.plot(metrics['generation'], metrics['best_fit'], label='Best fit')
    plt.plot(metrics['generation'], metrics['avg_fit'], label='Avg fit')
    plt.xlabel('Iteration')
    plt.ylabel('Fitness value')
    plt.title(
        f"SELECTION : {config['selection']['type']}; MUTATIONS : {', '.join(config['mutations'].keys())}"
    )
    plt.suptitle(f"Experiment date and time: {config['experiment_time']}")
    plt.legend()
    plt.grid()
    plt.savefig(plot_saving_path)

    logger.info(f'best result: {max(all_fitness[-1])}')
    logger.info(f'best individual: {best_fit}')

    neptune.log_image('fitness_figure', plot_saving_path)
Example 3
    def on_epoch_end(self, epoch, logs=None):
        self._verbose_print("Calculating metrics...")
        last_weights_path = self._load_weights_for_model()

        images, gt_boxes, gt_class_ids, gt_masks, results = detect(
            self.inference_model, self.dataset)
        metrics = compute_metrics(images, gt_boxes, gt_class_ids, gt_masks,
                                  results)

        pprint.pprint(metrics)

        # Images
        for i, img in enumerate(images):
            if img.shape[2] != 3:
                img = cv.cvtColor(img, cv.COLOR_GRAY2BGR)
            visualize_result(img, results[i], gt_masks[i], scores=True)
            neptune.log_image(f'image_epoch_{epoch}', img[..., ::-1])

        # Metrics
        for key, value in metrics:
            neptune.log_metric(key, epoch, value)

        # Save best result
        name, mAP = metrics[0]
        if mAP > self.best_mAP:
            self.best_mAP = mAP
            self.best_epoch = epoch
            self.best_model = last_weights_path
Example 4
def test_accuracy(target_vars, saver, sess, logger, dataloader):
    X = target_vars['X']
    Y = target_vars['Y']
    LABEL = target_vars['LABEL']
    label_prediction = target_vars['predicted_label']

    set_seed(0)
    np.random.seed(0)
    random.seed(0)

    dataloader_iterator = iter(dataloader)

    output = [label_prediction]
    total, correct = 0, 0
    for i in tqdm(range(50000 // FLAGS.test_batch_size + 1)):
        try:
            data_corrupt, data, label = next(dataloader_iterator)
        except StopIteration:
            dataloader_iterator = iter(dataloader)
            data_corrupt, data, label = next(dataloader_iterator)

        data_corrupt, data, label = (data_corrupt.numpy(), data.numpy(),
                                     label.numpy())

        feed_dict = {X: data_corrupt, Y: label}
        if FLAGS.cclass:
            feed_dict[LABEL] = label
        predicted_label = sess.run(output, feed_dict)[0]
        true_class = np.argmax(label, axis=1)
        pred_class = np.argmax(predicted_label, axis=1)
        correct += np.count_nonzero(true_class == pred_class)
        total += len(predicted_label)
        print('true class: ', true_class, '\npred_class: ', pred_class)
        print('#correct pred: ', correct, '\n#total pred: ', total)
    if FLAGS.dataset == 'cifar10':
        cifar10_map = {
            0: 'airplane',
            1: 'automobile',
            2: 'bird',
            3: 'cat',
            4: 'deer',
            5: 'dog',
            6: 'frog',
            7: 'horse',
            8: 'ship',
            9: 'truck'
        }
        imgs = data
        for idx, img in enumerate(imgs[:20, :, :, :]):
            neptune.log_image(
                'test_input_images',
                rescale_im(imgs[idx]),
                description='true label: {}({}) \npredicted label: {}'.format(
                    str(int(true_class[idx])),
                    cifar10_map[int(true_class[idx])],
                    str(int(pred_class[idx]))))

    accuracy = (correct / total)
    print('Accuracy: ', accuracy)
    return accuracy
Example 5
def main(arguments):
    with open(arguments.filepath, 'r') as fp:
        json_exp = json.load(fp)

    neptune.init(api_token=arguments.api_token,
                 project_qualified_name=arguments.project_name)

    with neptune.create_experiment(
            name=json_exp['name'],
            description=json_exp['description'],
            params=json_exp['params'],
            properties=json_exp['properties'],
            tags=json_exp['tags'],
            upload_source_files=json_exp['upload_source_files']):

        for name, channel_xy in json_exp['log_metric'].items():
            for x, y in zip(channel_xy['x'], channel_xy['y']):
                neptune.log_metric(name, x=x, y=y)

        for name, channel_xy in json_exp['log_text'].items():
            for x, y in zip(channel_xy['x'], channel_xy['y']):
                neptune.log_text(name, x=x, y=y)

        for name, channel_xy in json_exp['log_image'].items():
            for x, y in zip(channel_xy['x'], channel_xy['y']):
                neptune.log_image(name, x=x, y=y)

        for filename in json_exp['log_artifact']:
            neptune.log_artifact(filename)
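
A minimal sketch of the JSON layout this loader assumes, with every key that main() reads (names and values are purely illustrative):

import json

json_exp = {
    'name': 'demo-experiment',
    'description': 'experiment replayed from JSON',
    'params': {'lr': 0.01},
    'properties': {'data_version': 'v1'},
    'tags': ['replay'],
    'upload_source_files': [],
    'log_metric': {'loss': {'x': [0, 1, 2], 'y': [1.0, 0.5, 0.25]}},
    'log_text': {'note': {'x': [0], 'y': ['first epoch done']}},
    'log_image': {'chart': {'x': [0], 'y': ['chart.png']}},
    'log_artifact': ['model.pkl'],
}

with open('experiment.json', 'w') as fp:
    json.dump(json_exp, fp)
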
Example 6
def display(preds,
            imgs,
            obj_list,
            imshow=True,
            imwrite=False,
            send=False,
            step=0,
            tag=''):
    for i in range(len(imgs)):
        if len(preds[i]['rois']) == 0:
            continue

        imgs[i] = imgs[i].copy()

        for j in range(len(preds[i]['rois'])):
            (x1, y1, x2, y2) = preds[i]['rois'][j].astype(int)
            obj = obj_list[preds[i]['class_ids'][j]]
            score = float(preds[i]['scores'][j])

            plot_one_box(imgs[i], [x1, y1, x2, y2],
                         label=obj,
                         score=score,
                         color=color_list[get_index_label(obj, obj_list)])
        if imshow:
            cv2.imshow('img', imgs[i])
            cv2.waitKey(0)

        if imwrite:
            name = uuid.uuid4().hex
            os.makedirs('test/', exist_ok=True)
            cv2.imwrite(f'test/{name}.jpg', imgs[i])

        if send:
            neptune.log_image(f'image_{tag}_step_{step}', imgs[i][..., ::-1])
Example 7
def plot_oof(output: torch.Tensor, tile_ids: list, img: torch.Tensor,
             target: torch.Tensor, predictions_dir: str) -> None:
    output = torch.sigmoid(output)
    output = output.cpu().numpy().copy()
    target = target.cpu().numpy().copy()
    img = img.cpu().numpy().transpose(0, 2, 3, 1)
    for num, (pred, im, tar) in enumerate(zip(output, img, target), start=0):
        tile_name = tile_ids[num]
        if pred.ndim == 3:
            pred = np.squeeze(pred, axis=0)
        prob_mask = np.rint(pred * 255).astype(np.uint8)
        prob_mask_rgb = np.repeat(prob_mask[..., None], 3,
                                  2)  # repeat array for three channels
        # image
        input_image = np.rint(im * 255).astype(np.uint8)
        overlayed_im = np.rint(input_image * 0.5 + prob_mask_rgb * 0.5).clip(
            0, 255).astype(np.uint8)
        # target
        if tar.ndim == 3:
            tar = np.squeeze(tar, axis=0)
        tar = np.rint(tar * 255).astype(np.uint8)
        target_rgb = np.repeat(tar[..., None], 3, axis=2)
        plot_im = np.vstack(
            [input_image, overlayed_im, prob_mask_rgb, target_rgb])
        cv2.imwrite(f"{predictions_dir}/{tile_name}.png", plot_im)
        # send image (pass path to file)
        neptune.log_image(f'oof_{tile_name}', plot_im)
        neptune.log_artifact(plot_im, destination='oof_img')
Example 8
def logging_classification(y_true, y_pred, name=None):
    """ logging_classification logging metrics for classification

    - metrics: accuracy, precision, recall
    - figure: confusion matrix
    - score by metrics
    Parameters
    ----------
    y_true : 1d array-like
        Ground truth target values
    y_pred : 1d or 2d array-like
        Estimated targets

    Returns
    -------
    None
    """

    is_multiclass = False
    if len(set(y_true)) > 2:
        is_multiclass = True

    # if the predictions are probabilities, take the argmax per row.
    if y_pred.ndim == 2:
        y_pred = np.argmax(y_pred, axis=1)

    # accuracy
    acc = accuracy_score(y_true, y_pred)
    log_metric('Accuracy', acc)

    # recall
    if is_multiclass:
        recall = recall_score(y_true, y_pred, average='micro')
        log_metric('Recall(micro)', recall)
    else:
        recall = recall_score(y_true, y_pred)
        log_metric('Recall', recall)

    # precision
    if is_multiclass:
        precision = precision_score(y_true, y_pred, average='micro')
        log_metric('Precision(micro)', precision)
    else:
        precision = precision_score(y_true, y_pred)
        log_metric('Precision', precision)

    # confusion matrix
    cm = confusion_matrix(y_true, y_pred)
    fig = plot_confusion_matrix(cm)
    log_image('performance charts', fig)

    # other metrics
    for metric_name, values in zip(
        ['precision', 'recall', 'fbeta_score', 'support'],
            precision_recall_fscore_support(y_true, y_pred)):
        for i, value in enumerate(values):
            log_metric('{}_class_{}_sklearn'.format(metric_name, i), value)
    return None
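
A toy invocation, as a sketch (assumes an active neptune experiment and the sklearn/log_metric/log_image helpers the function body uses):

import numpy as np

y_true = np.array([0, 1, 2, 2, 1, 0])     # three classes, so the micro-average branch runs
y_scores = np.random.rand(6, 3)           # 2d scores: the argmax is taken internally
logging_classification(y_true, y_scores)  # logs accuracy, recall, precision,
                                          # the confusion matrix and per-class scores
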
Example 9
 def on_eval_begin(self, trainer):
     if self.vis_function:
         vis = self.vis_function(trainer.out['inputs'],
                                 trainer.out['outputs'],
                                 trainer.out['targets'])
         for name, value in vis.items():
             if value.shape[0] > 512:
                 value = Image.fromarray(value)
                 value.thumbnail((512, 512))
             neptune.log_image(name, value.transpose(1, 2, 0))
Example 10
def plot_pred(training_data_loader, testing_data_loader, net_g, device, epoch):
    plt.ioff()

    fig, axes = plt.subplots(2, 4, figsize=(10, 5))
    data, nrooms, name = get_data_to_plot(iter(training_data_loader), net_g)
    axes[0] = plot_sample(axes[0], data, 'train', nrooms, name, epoch)
    data, nrooms, name = get_data_to_plot(iter(testing_data_loader), net_g)
    axes[1] = plot_sample(axes[1], data, 'test', nrooms, name, epoch)

    neptune.log_image('pred', fig)
    del fig, axes
Example 11
 def __call__(self, trainer: BaseTrainer):
     print('Samples drawn from model', flush=True)
     samples = trainer.model.sample(self.n_samples)
     figure, axs = plt.subplots(
         *self.sample_grid_shape,
         figsize=(self.figsize_mult * self.sample_grid_shape[1],
                  self.figsize_mult * self.sample_grid_shape[0]))
     for i, ax in enumerate(axs.flat):
         ax.imshow(samples[i].squeeze().detach().cpu())
     neptune.log_image('samples', figure)
     figure.show()
     plt.show()
Example 12
        def add_log(self, img, counter=None, name=None):
            '''
            Intention is to generalize this to an abstract class for logging to any experiment management platform (e.g. neptune, mlflow, etc.)

            Currently takes an image array, rescales it to 0-255, and logs it to the current neptune experiment.
            '''
            scaled_img = (img - np.min(img)) / (np.max(img) - np.min(img)) * 255.0
            scaled_img = scaled_img.astype(np.uint8)  # 8-bit so the array is accepted as an image

            neptune.log_image(log_name= name or self.name,
                              x=counter,
                              y=scaled_img)
            return scaled_img
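
A quick check of the min-max rescaling performed above: whatever the input range, the output spans the full 0-255 scale.

import numpy as np

img = np.random.rand(4, 4)
scaled = (img - np.min(img)) / (np.max(img) - np.min(img)) * 255.0
print(scaled.min(), scaled.max())  # 0.0 255.0
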
Example 13
def build_experiment(conf, logpath, resultname, csvfile):

    with open(csvfile) as stats_f:
        params = stats_f.readline().strip().split(",")
        stats = stats_f.readlines()

    for s in stats:
        elems = s.strip().split(",")
        counter, scalar_fit, priority_fit = [elems[0], elems[4], elems[5]]
        print(counter, scalar_fit, priority_fit)

    exp_config = read_toml(conf)
    exp_number = 1

    champ = read_berb_log(logpath)
    exp_name = champ['chromosome']['name']
    exp_desc = str(champ['tag'])

    exp_params = {
        "some_param": 0.1,
        "other_param": 128,
        "yet_another_param": 31337
    }

    exp_log_artifact = ["data/champion_statistics.csv", "mean_statistics.csv"]

    # Neptune init
    neptune.init('special-circumstances/sandbox', api_token=None)

    neptune.create_experiment(name=exp_name, params=exp_params)

    for s in stats:
        elems = s.strip().split(",")
        counter, scalar_fit, priority_fit = [elems[0], elems[4], elems[5]]
        neptune.log_metric(params[0], int(counter))
        neptune.log_metric(params[4], float(scalar_fit))
        neptune.log_metric(params[5], float(priority_fit))

    neptune.log_image(
        'pleasures_1',
        "/home/armadilo/projects/neptune/data/clamp-liked-zeros-count-pleasures.png"
    )
    neptune.log_image(
        'pleasures_2',
        "/home/armadilo/projects/neptune/data/lamas-koala-zero-count-pleasures.png"
    )
    neptune.send_artifact(
        '/home/armadilo/projects/neptune/data/champion_statistics.csv')
    neptune.send_artifact(
        '/home/armadilo/projects/neptune/data/mean_statistics.csv')
Example 14
 def __call__(self, trainer: BaseTrainer):
     print('Reconstructed images', flush=True)
     batch_x, batch_y = next(iter(trainer.val_dataloader))
     images = batch_x[:self.num_recos].to(trainer.device)
     figure, axs = plt.subplots(2,
                                self.num_recos,
                                figsize=(self.figsize_mult * self.num_recos,
                                         self.figsize_mult * 2))
     for i in range(self.num_recos):
         axs[0, i].imshow(images[i].squeeze().detach().cpu())
         axs[1, i].imshow(
             trainer.model(images[i:i + 1]).squeeze().detach().cpu())
     neptune.log_image('reconstructions', figure)
     figure.show()
     plt.show()
Example 15
def plot_training(models, model_type, score):

    if not isinstance(models, list):
        models = [models]
    data = pd.DataFrame()

    if model_type == "keras":
        for i, m in enumerate(models):
            aux_train = pd.DataFrame()
            aux_val = pd.DataFrame()
            aux_train["model_" + str(i) + "_train"] = m.history["val_loss"]
            aux_val["model_" + str(i) + "_val"] = m.history["loss"]
            data = pd.concat([data, aux_train, aux_val], axis=1)

    if model_type == "lgb":
        for i, m in enumerate(models):
            aux_train = pd.DataFrame()
            aux_val = pd.DataFrame()
            aux_train["model_" + str(i) +
                      "_train"] = m.evals_result_["valid_1"][score]
            aux_val["model_" + str(i) +
                    "_val"] = m.evals_result_["valid_0"][score]
            data = pd.concat([data, aux_train, aux_val], axis=1)

    val_cols = [col for col in data.columns if "val" in col]
    train_cols = [col for col in data.columns if "train" in col]
    data["val_" + score] = data[val_cols].apply("mean", axis=1)
    data["train_" + score] = data[train_cols].apply("mean", axis=1)

    fig = plt.figure(figsize=(12, 8))

    for i in val_cols:
        plt.plot(data[i], label=i, color="red")
    for i in train_cols:
        plt.plot(data[i], label=i, color="blue")

    plt.plot(data["val_" + score], color="red", linewidth=3)
    plt.plot(data["train_" + score], color="blue", linewidth=3)

    plt.legend(loc="best")
    plt.title("Train and validation " + score)

    for n in range(len(data["val_" + score])):
        neptune.log_metric("val_" + score, data["val_" + score][n])
        neptune.log_metric("train_" + score, data["train_" + score][n])

    neptune.log_image("charts", fig)
    gc.collect()
Example 16
    def on_epoch_end(self, epoch, logs=None):
        for log_name, log_value in (logs or {}).items():
            neptune.log_metric(f'epoch_{log_name}', log_value)

        y_pred = np.asarray(self.model.predict(self.validation_data[0]))
        y_true = self.validation_data[1]

        y_pred_class = np.argmax(y_pred, axis=1)

        fig, ax = plt.subplots(figsize=(16, 12))
        plot_confusion_matrix(y_true, y_pred_class, ax=ax)
        neptune.log_image('confusion_matrix', fig)

        fig, ax = plt.subplots(figsize=(16, 12))
        plot_roc(y_true, y_pred, ax=ax)
        neptune.log_image('roc_curve', fig)
Example 17
def logging_regression(y_true, y_pred):
    """logging_regression logging metrics for regression problem

    - rmse
    - mae
    - r2
    - explained variance
    - yyplot

    Parameters
    ----------
    y_true : 1d array like
        ground truth target value
    y_pred : 1d array like
        estimated target value

    Returns
    -------
    None

    """
    # rmse
    rmse = np.sqrt(mean_squared_error(y_true, y_pred))
    log_metric('RMSE', rmse)

    # mae
    mae = mean_absolute_error(y_true, y_pred)
    log_metric('MAE', mae)

    # r2
    r2 = r2_score(y_true, y_pred)
    log_metric('R2', r2)

    # explained variance
    evs = explained_variance_score(y_true, y_pred)
    log_metric('Explained Variance', evs)

    # correlation
    corr = np.corrcoef(y_true, y_pred)[0, 1]
    log_metric('corr', corr)

    # scatter plot
    fig = yyplot(y_true, y_pred)
    log_image('performance charts', fig)

    return None
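
A toy invocation, as a sketch (assumes an active neptune experiment plus the sklearn metric imports and the yyplot helper used inside):

import numpy as np

y_true = np.array([1.0, 2.0, 3.0, 4.0])
y_pred = np.array([1.1, 1.9, 3.2, 3.8])
logging_regression(y_true, y_pred)  # logs RMSE, MAE, R2, explained variance, corr and the yy-plot
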
Example 18
    def log_confusion_matrix(self, model, imgs, labels, epoch, norm_cm=False):

        pred_labels = model.predict_classes(
            imgs
        )  # = tf.reshape(imgs, (-1,PARAMS['image_size'], PARAMS['num_channels'])))
        pred_labels = pred_labels[:, None]

        con_mat = tf.math.confusion_matrix(labels=labels,
                                           predictions=pred_labels,
                                           num_classes=len(
                                               self.classes)).numpy()
        if norm_cm:
            con_mat = np.around(con_mat.astype('float') /
                                con_mat.sum(axis=1)[:, np.newaxis],
                                decimals=2)
        con_mat_df = pd.DataFrame(con_mat,
                                  index=self.classes,
                                  columns=self.classes)

        figure = plt.figure(figsize=(16, 16))
        sns.heatmap(con_mat_df, annot=True, cmap=plt.cm.Blues)
        plt.tight_layout()
        plt.ylabel('True label')
        plt.xlabel('Predicted label')

        buf = io.BytesIO()
        plt.savefig(buf, format='png')
        buf.seek(0)

        image = tf.image.decode_png(buf.getvalue(), channels=4)
        image = tf.expand_dims(image, 0)

        with self.file_writer.as_default(
        ), tf.contrib.summary.always_record_summaries():
            tf.contrib.summary.image(name='val_confusion_matrix',
                                     tensor=image,
                                     step=self._counter)

        neptune.log_image(log_name='val_confusion_matrix',
                          x=self._counter,
                          y=figure)
        plt.close(figure)

        self._counter += 1

        return image
Example 19
        def add_log(self, img, counter=None, name=None):
            '''
            Intention is to generalize this to an abstract class for logging to any experiment management platform (e.g. neptune, mlflow, etc.)

            Currently takes an image array, rescales it to 0-255, and logs it to the current neptune experiment.
            '''

            # scaled_images = (img - tf.math.reduce_min(img))/(tf.math.reduce_max(img) - tf.math.reduce_min(img))
            # keep = 0
            # scaled_images = tf.image.convert_image_dtype(tf.squeeze(scaled_images[keep,:,:,:]), dtype=tf.uint8)
            # scaled_images = tf.expand_dims(scaled_images, 0)
            # tf.summary.image(name=self.name, data=scaled_images, step=self._counter, max_outputs=self.max_images)


            scaled_img = (img - np.min(img)) / (np.max(img) - np.min(img)) * 255.0
            scaled_img = scaled_img.astype(np.uint8)  # 8-bit so the array is accepted as an image

            neptune.log_image(log_name= name or self.name,
                              x=counter,
                              y=scaled_img)
            return scaled_img
Example 20
    def on_epoch_end(self, trainer):
        for metric, value in trainer.metrics.items():
            if 'val' in metric:
                neptune.log_metric(metric,
                                   value,
                                   timestamp=trainer.global_step)

        if self.vis_function:
            vis = self.vis_function(trainer.out['inputs'],
                                    trainer.out['outputs'],
                                    trainer.out['targets'])
            for name, value in vis.items():
                if value.shape[0] > 512:
                    value = Image.fromarray(value)
                    value.thumbnail((512, 512))
                neptune.log_image(name, value.transpose(1, 2, 0))

        cb = self.get_callback(trainer.callbacks, ConfusionMatrix)
        if cb:
            train_vis = plot_confusion_matrix(cb.train_matrix,
                                              cb.class_names,
                                              as_array=True)
            val_vis = plot_confusion_matrix(cb.val_matrix,
                                            cb.class_names,
                                            as_array=True)
            neptune.log_image('train_confusion_matrix',
                              train_vis.transpose(1, 2, 0),
                              timestamp=trainer.global_step)
            neptune.log_image('val_confusion_matrix',
                              val_vis.transpose(1, 2, 0),
                              timestamp=trainer.global_step)
Example 21
    def log_series(self):
        # floats
        neptune.log_metric("m1", 1)
        neptune.log_metric("m1", 2)
        neptune.log_metric("m1", 3)
        neptune.log_metric("m1", 2)
        neptune.log_metric("nested/m1", 1)

        # texts
        neptune.log_text("m2", "a")
        neptune.log_text("m2", "b")
        neptune.log_text("m2", "c")

        # images
        # `image_name` and `description` will be lost
        neptune.log_image("g_img",
                          self.img_path,
                          image_name="name",
                          description="desc")
        neptune.log_image("g_img", self.img_path)

        # see what we've logged
        logs = neptune.get_experiment().get_logs()
        print(f"Logs: {logs}")
Example 22
    def on_epoch_end(self, epoch, logs=None):
        self.exp.send_metric('epoch end loss', logs['loss'])

        msg_loss = 'End of epoch {}, categorical crossentropy loss is {:.4f}'.format(
            epoch, logs['loss'])
        self.exp.send_text(channel_name='loss information',
                           x=epoch,
                           y=msg_loss)

        if self.current_epoch % 5 == 0:
            # Reconstruction
            n_imgs = 10  # how many images we will display
            x_test_decoded = self.model.predict(self.X_images[:n_imgs])
            fig = plt.figure(figsize=(20, 4))
            for i in range(n_imgs):
                # display original
                ax = plt.subplot(2, n_imgs, i + 1)
                plt.imshow(self.X_images[i].reshape(self.img_size,
                                                    self.img_size))
                plt.gray()
                ax.get_xaxis().set_visible(False)
                ax.get_yaxis().set_visible(False)

                # display reconstruction
                ax = plt.subplot(2, n_imgs, i + 1 + n_imgs)
                plt.imshow(x_test_decoded[i])
                plt.gray()
                ax.get_xaxis().set_visible(False)
                ax.get_yaxis().set_visible(False)

            plt.title("epoch #{}".format(epoch))
            neptune.log_image('predictions', fig)

            plt.close('all')

        self.current_epoch += 1
Example 23
y_test_pred = np.asarray(model.predict(x_test))
y_test_pred_class = np.argmax(y_test_pred, axis=1)

from sklearn.metrics import f1_score

f1 = f1_score(y_test, y_test_pred_class, average='micro')

neptune.log_metric('test_f1', f1)

import matplotlib.pyplot as plt
from scikitplot.metrics import plot_confusion_matrix, plot_roc

fig, ax = plt.subplots(figsize=(16, 12))
plot_confusion_matrix(y_test, y_test_pred_class, ax=ax)
neptune.log_image('diagnostic_charts', fig)

fig, ax = plt.subplots(figsize=(16, 12))
plot_roc(y_test, y_test_pred, ax=ax)
neptune.log_image('diagnostic_charts', fig)

model.save('my_model.h5')
neptune.log_artifact('my_model.h5')

# tests
current_exp = neptune.get_experiment()

correct_logs = [
    'batch_loss', 'batch_accuracy', 'epoch_loss', 'epoch_accuracy',
    'epoch_val_loss', 'epoch_val_accuracy', 'test_f1', 'diagnostic_charts'
]
Example 24
        sf_db[i] = davies_bouldin_score(X, sf_clusters)

        # fit_predict second algorithm
        other_clusters = hdbscan.HDBSCAN().fit_predict(X)
        other_silhouette[i] = silhouette_score(X, other_clusters)
        other_db[i] = davies_bouldin_score(X, other_clusters)

        if plot:
            if X.shape[1] > 2:
                X = PCA(random_state=42, n_components=2).fit_transform(X)
            figure, axs = plt.subplots(1, 2, figsize=(10, 5))
            axs[0].scatter(X[:, 0], X[:, 1], c=sf_clusters, cmap='Set1', alpha=0.6)
            axs[0].set_title('SimilarityForestCluster')
            axs[1].scatter(X[:, 0], X[:, 1], c=other_clusters, cmap='Set1', alpha=0.6)
            axs[1].set_title(f'{other_algorithm}')
            neptune.log_image(f'{dataset} Plot', plt.gcf())
            plt.clf()
            plt.close()


    # log results
    sf_mean_silhouette = np.mean(sf_silhouette)
    sf_mean_db = np.mean(sf_db)
    neptune.log_metric(f'{dataset} SF silhouette', sf_mean_silhouette)
    neptune.log_metric(f'{dataset} SF Davies Bouldin', sf_mean_db)

    other_mean_silhouette = np.mean(other_silhouette)
    other_mean_db = np.mean(other_db)
    neptune.log_metric(f'{dataset} {other_algorithm} silhouette', other_mean_silhouette)
    neptune.log_metric(f'{dataset} {other_algorithm} Davies Bouldin', other_mean_db)
Example 25
import neptune
import numpy as np

# Select project
neptune.init('neptune-workshops/AII-Optimali')

# Define parameters
PARAMS = {'decay_factor': 0.5, 'n_iterations': 117}

# Create experiment
neptune.create_experiment(name='minimal-extended', params=PARAMS)

# Log some metrics
for i in range(1, PARAMS['n_iterations']):
    neptune.log_metric('iteration', i)
    neptune.log_metric('loss', PARAMS['decay_factor'] / i**0.5)
    neptune.log_text('text_info', 'some value {}'.format(0.95 * i**2))

# Add tag to the experiment
neptune.append_tag('quick_start')

# Log some images
for j in range(5):
    array = np.random.rand(10, 10, 3) * 255
    array = np.repeat(array, 30, 0)
    array = np.repeat(array, 30, 1)
    neptune.log_image('mosaics', array)
Example 26
y_pred_val = pd.DataFrame(y_pred_val,
                          index=X_val.index,
                          columns=['prediction'])
y_pred_val_filename = f'data/preds/h{forecast_horizon}_y_pred_val.parquet'
y_pred_val.to_parquet(y_pred_val_filename)
# save test predictions
y_pred_test = model.predict(X_test, num_iteration=model.best_iteration)
y_pred_test = pd.DataFrame(y_pred_test,
                           index=X_test.index,
                           columns=['prediction'])
y_pred_test_filename = f'data/preds/h{forecast_horizon}_y_pred_test.parquet'
y_pred_test.to_parquet(y_pred_test_filename)

if NEPTUNE:
    neptune.log_metric(f"h{forecast_horizon}_val_rmse", val_rmse)
    neptune.log_artifact(model_filename)
    neptune.log_image(importance_filename, fig)
    neptune.log_artifact(y_pred_val_filename)
    neptune.log_artifact(y_pred_test_filename)
    neptune.stop()


def get_y_weights(y: pd.Series, normalize=False):
    """
    For each series, compute the denominator in the MSSE loss function, i.e. the
    day-to-day variations squared, averaged by number of training observations.
    The weights can be normalized so that they add up to 1.
    This is provided to the lgb.Dataset for computing loss function and evaluation metric
    """
    scales = (y.unstack(level='date').diff(axis=1)**2).mean(axis=1)
    scales = scales.replace(0, pd.NA)
    weights = 1 / scales
    if normalize:
        # per the docstring: normalize so the weights add up to 1
        weights = weights / weights.sum()
    return weights
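
In plain numpy, the per-series scale described in the docstring is just the mean squared day-to-day difference; a toy check:

import numpy as np

y = np.array([3.0, 5.0, 4.0, 8.0])
scale = np.mean(np.diff(y) ** 2)  # (2**2 + (-1)**2 + 4**2) / 3 = 7.0
weight = 1.0 / scale              # ~0.143; smoother series get larger weights
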
    loss.backward()
    optimizer.step()

    # log loss
    neptune.log_metric('batch_loss', loss)

    # log predicted images
    if batch_idx % 50 == 1:
        for image, prediction in zip(data, outputs):
            description = '\n'.join([
                'class {}: {}'.format(i, pred)
                for i, pred in enumerate(F.softmax(prediction))
            ])
            neptune.log_image('predictions',
                              image.squeeze(),
                              description=description)

    if batch_idx == PARAMS['iterations']:
        break

## Log model weights

torch.save(model.state_dict(), 'model_dict.ckpt')

# log model
neptune.log_artifact('model_dict.ckpt')

# Explore results in the Neptune UI

# tests
neptune.create_experiment('tensorflow-keras-advanced', params=PARAMS)

model.fit(x_train, y_train,
          epochs=PARAMS['epochs'],
          batch_size=PARAMS['batch_size'],
          callbacks=[NeptuneMonitor()])

## Log image predictions

x_test_sample = x_test[:100]
y_test_sample_pred = model.predict(x_test_sample)

for image, y_pred in zip(x_test_sample, y_test_sample_pred):
    description = '\n'.join(['class {}: {}'.format(i, pred)
                                for i, pred in enumerate(y_pred)])
    neptune.log_image('predictions',
                      image,
                      description=description)

## Log model weights

model.save('my_model')

# log model
neptune.log_artifact('my_model')

# Explore results in the Neptune UI

## Stop logging

neptune.stop()
    class_names = [
        'T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat', 'Sandal',
        'Shirt', 'Sneaker', 'Bag', 'Ankle boot'
    ]

    neptune.set_property('class_names', class_names)

    for j, class_name in enumerate(class_names):
        plt.figure(figsize=(10, 10))
        label_ = np.where(train_labels == j)
        for i in range(9):
            plt.subplot(3, 3, i + 1)
            plt.xticks([])
            plt.yticks([])
            plt.grid(False)
            plt.imshow(train_images[label_[0][i]], cmap=plt.cm.binary)
            plt.xlabel(class_names[j])
        neptune.log_image('example_images', plt.gcf())

    # model
    model = keras.Sequential([
        keras.layers.Flatten(input_shape=(28, 28)),
        keras.layers.Dense(PARAMS['dense_units'],
                           activation=PARAMS['activation']),
        keras.layers.Dropout(PARAMS['dropout']),
        keras.layers.Dense(PARAMS['dense_units'],
                           activation=PARAMS['activation']),
        keras.layers.Dropout(PARAMS['dropout']),
        keras.layers.Dense(PARAMS['dense_units'],
                           activation=PARAMS['activation']),
        keras.layers.Dropout(PARAMS['dropout']),
        keras.layers.Dense(10, activation='softmax')
    ])
df = pd.DataFrame(
    data={
        'y_test': y_test,
        'y_pred': y_pred,
        'y_pred_probability': y_pred_proba.max(axis=1)
    })
log_table('predictions', df)

# Log model performance visualizations

import matplotlib.pyplot as plt
from scikitplot.metrics import plot_roc, plot_precision_recall

fig, ax = plt.subplots()
plot_roc(y_test, y_pred_proba, ax=ax)
neptune.log_image('model-performance-visualizations', fig, image_name='ROC')

fig, ax = plt.subplots()
plot_precision_recall(y_test, y_pred_proba, ax=ax)
neptune.log_image('model-performance-visualizations',
                  fig,
                  image_name='precision recall')
plt.close('all')

# Log train data sample (images per class)

for j, class_name in enumerate(class_names):
    plt.figure(figsize=(10, 10))
    label_ = np.where(y_train == j)
    for i in range(9):
        plt.subplot(3, 3, i + 1)