Code Example #1
    def send_image_with_proposals(self,
                                  time_step,
                                  im,
                                  proposals,
                                  shape,
                                  rois=False):
        width = 340
        height = 150
        im_ = cv2.resize(im, (width, height))
        im_ = np.uint8(im_ * 255.)
        for proposal in proposals:
            x1 = int(width * proposal[0] / float(shape[1]))
            y1 = int(height * proposal[1] / float(shape[0]))
            x2 = int(width * proposal[2] / float(shape[1]))
            y2 = int(height * proposal[3] / float(shape[0]))
            cv2.rectangle(im_, (x1, y1), (x2, y2), (255, 0, 0), 1)
        pil_im = Image.fromarray(im_)
        if rois:
            neptune_im = neptune.Image(name='all the RoIs',
                                       description='region proposals',
                                       data=pil_im)
            self.im_channels[0].send(x=time_step, y=neptune_im)
        else:
            neptune_im = neptune.Image(name='chosen RoIs',
                                       description='object detections',
                                       data=pil_im)
            self.im_channels[1].send(x=time_step, y=neptune_im)
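The method above assumes that `self.im_channels` already holds two Neptune image channels: index 0 for all region proposals and index 1 for the chosen detections. A minimal, hypothetical call site might look like the sketch below; the names `logger`, `frame`, and `boxes` are illustrative and not part of the original snippet.

# Hypothetical usage of send_image_with_proposals; `logger` is assumed to be
# an instance of the class that owns the two image channels.
import numpy as np

frame = np.random.rand(480, 640, 3)                 # float image in [0, 1]
boxes = [(10, 20, 200, 180), (300, 40, 600, 400)]   # (x1, y1, x2, y2) in original image coordinates
logger.send_image_with_proposals(time_step=0,
                                 im=frame,
                                 proposals=boxes,
                                 shape=frame.shape,
                                 rois=True)          # True -> the 'all the RoIs' channel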
Code Example #2
    def _send_image_channels(self):
        self.model.eval()
        pred_masks = self.get_prediction_masks()
        self.model.train()

        for name, pred_mask in pred_masks.items():
            for i, image_duplet in enumerate(pred_mask):
                h, w = image_duplet.shape[1:]
                image_glued = np.zeros((h, 2 * w + 10))

                image_glued[:, :w] = image_duplet[0, :, :]
                image_glued[:, (w + 10):] = image_duplet[1, :, :]

                pill_image = Image.fromarray(
                    (image_glued * 255.).astype(np.uint8))
                h_, w_ = image_glued.shape
                pill_image = pill_image.resize(
                    (int(self.image_resize * w_), int(self.image_resize * h_)),
                    Image.ANTIALIAS)

                self.ctx.channel_send(
                    '{} {}'.format(self.model_name, name),
                    neptune.Image(name='epoch{}_batch{}_idx{}'.format(
                        self.epoch_id, self.batch_id, i),
                                  description="true and prediction masks",
                                  data=pill_image))

                if i == self.image_nr:
                    break
Code Example #3
    def on_epoch_end(self, epoch, logs={}):
        self.epoch_id += 1

        # logging numeric channels
        ctx.channel_send('Log-loss training', self.epoch_id, logs['loss'])
        ctx.channel_send('Log-loss validation', self.epoch_id,
                         logs['val_loss'])
        ctx.channel_send('Accuracy training', self.epoch_id, logs['acc'])
        ctx.channel_send('Accuracy validation', self.epoch_id, logs['val_acc'])

        # Predict the digits for images of the test set.
        validation_predictions = model.predict_classes(X_test)
        scores = model.predict(X_test)

        # Identify the incorrectly classified images and send them to Neptune Dashboard.
        image_per_epoch = 0
        for index, (prediction, actual) in enumerate(
                zip(validation_predictions, Y_test.argmax(axis=1))):
            if prediction != actual:
                if image_per_epoch == self.images_per_epoch:
                    break
                image_per_epoch += 1

                ctx.channel_send(
                    'false_predictions',
                    neptune.Image(name='[{}] pred: {} true: {}'.format(
                        self.epoch_id, letters[prediction], letters[actual]),
                                  description="\n".join([
                                      "{} {:5.1f}% {}".format(
                                          letters[i], 100 * score,
                                          "!!!" if i == actual else "")
                                      for i, score in enumerate(scores[index])
                                  ]),
                                  data=array_2d_to_image(X_test[index, :, :,
                                                                0])))
Code Example #4
def get_neptune_image(raw_image, epoch_number):
    neptune_image = Image.fromarray(raw_image)
    image_name = '(epoch {})'.format(epoch_number)
    return neptune.Image(
        name=image_name,
        description=u"",
        data=neptune_image)
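`get_neptune_image` only wraps the array in a `neptune.Image`; it still has to be sent through an image channel. A short sketch of one way to use it, following the `channel.send(x=..., y=...)` pattern from the other examples on this page; `image_channel` and `frame` are assumptions and not part of the original snippet.

# Hypothetical call site: `image_channel` is an already-created Neptune image
# channel and `frame` a uint8 HxWx3 numpy array produced during training.
for epoch_number in range(3):
    image_channel.send(x=epoch_number,
                       y=get_neptune_image(frame, epoch_number))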
Code Example #5
    def send_neptune_image(self, raw_image, name, description):
        image = Image.fromarray(raw_image)
        neptune_image = neptune.Image(
            name=name,
            description=description,
            data=image)
        self.image_channel.send(x=time.time(), y=neptune_image)
Code Example #6
    def on_epoch_end(self, epoch, logs={}):
        self.epoch_id += 1

        # Predict the digits for images of the test set.
        batches = 0
        image_per_epoch = 0
        for x_batch, y_batch in self.validation_gen:
            scores = self.model.predict_on_batch(x_batch)
            validation_predictions = scores.argmax(axis=-1)

            # Identify the incorrectly classified images and send them to Neptune Dashboard.
            for index, (prediction, actual) in enumerate(
                    zip(validation_predictions, y_batch.argmax(axis=1))):
                if prediction != actual:
                    if image_per_epoch == self.images_per_epoch:
                        break
                    image_per_epoch += 1

                    ctx.job.channel_send(
                        'false_predictions',
                        neptune.Image(
                            name='[{}] pred: {} true: {}'.format(
                                self.epoch_id, prefiction_names[prediction],
                                prefiction_names[actual]),
                            description="{} {:5.1f}%".format(
                                prefiction_names[0], 100 * scores[index][0]),
                            data=array_3d_to_image(x_batch[index, :, :, :])))

            batches += 1
            if batches >= self.num_samples // ctx.params.batch_size:
                break
Code Example #7
    def on_epoch_end(self, epoch, logs={}):
        self.epoch_id += 1

        # Predict the digits for images of the test set.
        #validation_predictions = self.model.predict_classes(self.X_test)
        #print(validation_predictions)
        scores = self.model.predict(self.X_test)
        validation_predictions = scores.argmax(axis=-1)
        #print(scores)

        # Identify the incorrectly classified images and send them to Neptune Dashboard.
        image_per_epoch = 0
        for index, (prediction, actual) in enumerate(
                zip(validation_predictions, self.Y_test.argmax(axis=1))):
            if prediction != actual:
                if image_per_epoch == self.images_per_epoch:
                    break
                image_per_epoch += 1

                ctx.job.channel_send(
                    'false_predictions',
                    neptune.Image(
                        name='[{}] pred: {} true: {}'.format(
                            self.epoch_id, prefiction_names[prediction],
                            prefiction_names[actual]),
                        description="{} {:5.1f}%".format(
                            prefiction_names[0], 100 * scores[index][0]),
                        data=array_3d_to_image(self.X_test[index, :, :, :])))
Code Example #8
def false_prediction_neptune_image(raw_image, index, epoch_number, prediction,
                                   actual):
    false_prediction_image = Image.fromarray(raw_image)
    image_name = '(epoch {}) #{}'.format(epoch_number, index)
    image_description = 'Predicted: {}, actual: {}.'.format(prediction, actual)
    return neptune.Image(name=image_name,
                         description=image_description,
                         data=false_prediction_image)
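`false_prediction_neptune_image` builds the image object but does not send it. A sketch of how it could be wired into the misclassification loops shown in the Keras callbacks above; `ctx`, `X_test`, `y_true`, `y_pred`, and `epoch_id` are assumptions borrowed from those examples.

# Hypothetical wiring, mirroring the false-prediction loops above.
# Image.fromarray expects uint8 data, so the float image is rescaled first.
for index, (prediction, actual) in enumerate(zip(y_pred, y_true)):
    if prediction != actual:
        raw = (X_test[index, :, :, 0] * 255.).astype(np.uint8)
        ctx.channel_send(
            'false_predictions',
            false_prediction_neptune_image(raw, index, epoch_id,
                                           prediction, actual))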
Code Example #9
    def _send_image(self, channel_name: str, img: Image.Image, label: str):
        img = img.resize((100, 100))
        neptune_img = neptune.Image(
            name=channel_name,
            description='Label: {}'.format(label),
            data=img
        )
        self.ctx.channel_send(channel_name, neptune_img)
Code Example #10
def send_sequence(ctx, channel_name, sequence):
    game = morpion.Game()
    for move in sequence:
        game.make_move(move)
    grid = game.get_grid()
    grid_image = grid.get_PILImage(800, 800)
    neptune_image = neptune.Image(name="Morpion Grid",
                                  description="A sequence of length " +
                                  str(len(sequence)),
                                  data=grid_image)
    ctx.channel_send(channel_name, neptune_image)
Code Example #11
    def send_to_image_channel(self):
        X_test, y_test = self.test_data
        y_pred = self.image_model.predict(X_test)
        y_pred = np.argmax(y_pred, axis=1)
        y_test = np.argmax(y_test, axis=1)

        for index, (prediction, actual) in enumerate(zip(y_pred, y_test)):
            if prediction != actual:
                self.false_predictions += 1
                pill_image = Image.fromarray(np.uint8(X_test[index] * 255.))

                self.ctx.channel_send('misclassification image channel', neptune.Image(
                    name='misclassification',
                    description="pred {} true {}".format(TARGET_NAMES[prediction], TARGET_NAMES[actual]),
                    data=pill_image))
Code Example #12
    def trigger_step(self):
        obj = self
        # print "Neptune instance:{} of type:{} and channels: {} with getter:{} in " \
        #       "thread: {}, pid: {}".format(id(obj), type(obj), obj.image_channel, obj.get_image_channel(), threading.current_thread, os.getpid())

        if not self.channels_created:
            self._create_channels()
            self._create_charts(
                (10, 10, 6, 6, 6),
                ("rewards", "levels", "lives0", "lives1", "lives2"))
            self.channels_created = True

        step = get_global_step()
        if step % self.logging_step == 0 and step >= 100:  #We skip some preliminary steps when the averages warm up
            summaryObject = tf.Summary.FromString(
                self.trainer.summary_op.eval())
            for val in summaryObject.value:
                if val.WhichOneof(
                        'value'
                ) == 'simple_value' and val.tag in self.summariesDict:
                    channel = self.summariesDict[val.tag]
                    channel.send(x=step, y=val.simple_value)
                    # if val.WhichOneof('value') == 'histo' and val.tag in self.summariesDict:
                    #     channel = self.summariesDict[val.tag]
                    image = val.image
                    # channel.send(
                    #     x=1,
                    #     y=neptune.Image(
                    #         name='#1 image name',
                    #         description='#1 image description',
                    #         data=Image.open("/home/ubuntu/image1.jpg")))

        images_list = glob.glob("/tmp/screen*.png")
        for f in images_list:
            try:
                self.image_index += 1
                pil_image = Image.open(f)
                self.image_channel.send(x=self.image_index,
                                        y=neptune.Image(
                                            name='screenshot',
                                            description=self.image_index,
                                            data=pil_image))
                os.remove(f)
            except IOError:
                print("Something went wrong with:{}".format(f))
Code Example #13
def neptune_log_images(channel_name, epoch, Xs, Ys, model):
    outputs = F.softmax(model(Xs), dim=1)
    _, predictions = torch.max(outputs, 1)
    for i in range(len(Xs)):
        prediction = predictions[i]
        actual = Ys[i]
        if prediction != actual:
            ctx.channel_send(
                channel_name,
                neptune.Image(
                    name='[{}] {} X {} V'.format(epoch, categories[prediction],
                                                 categories[actual]),
                    description="\n".join([
                        "{:5.1f}% {} {}".format(100 * score, categories[idx],
                                                "!!!" if idx == actual else "")
                        for idx, score in enumerate(outputs[i])
                    ]),
                    data=array_2d_to_image(Xs[i].cpu().numpy())))
Code Example #14
    def _send_image_channels(self, pred_masks):
        for i, image_triplet in enumerate(pred_masks):
            h, w = image_triplet.shape[1:]
            image_glued = np.zeros((h, 3 * w + 20))

            image_glued[:, :w] = image_triplet[0, :, :]
            image_glued[:, w + 10:2 * w + 10] = image_triplet[1, :, :]
            image_glued[:, 2 * w + 20:] = image_triplet[2, :, :]

            pill_image = Image.fromarray((image_glued * 255.).astype(np.uint8))
            h_, w_ = image_glued.shape
            pill_image = pill_image.resize((int(self.image_resize * w_), int(self.image_resize * h_)), Image.ANTIALIAS)

            self.ctx.channel_send("masks", neptune.Image(
                name='epoch{}_batch{}_idx{}'.format(self.epoch_id, self.batch_id, i),
                description="true and prediction masks",
                data=pill_image))

            if i == self.image_nr: break
Code Example #15
File: callbacks.py  Project: mjl68/minerva-2
    def on_epoch_end(self, *args, **kwargs):
        epoch_avg_loss = self.epoch_loss_averager.value
        epoch_avg_acc = self.epoch_acc_averager.value
        self.epoch_loss_averager.reset()
        self.epoch_acc_averager.reset()

        self.model.eval()
        val_loss, val_acc = score_model_multi_output(self.model,
                                                     self.loss_function,
                                                     self.validation_datagen)
        self.model.train()

        logs = {
            'epoch_id': self.epoch_id,
            'batch_id': self.batch_id,
            'epoch_loss': epoch_avg_loss,
            'epoch_acc': epoch_avg_acc,
            'epoch_val_loss': val_loss,
            'epoch_val_acc': val_acc
        }

        self._send_numeric_channels(logs)

        for i, (image, y_pred, y_true) in enumerate(
                predict_on_batch_multi_output(self.model,
                                              self.validation_datagen)):
            image_with_keypoints = overlay_keypoints(image, y_pred, y_true,
                                                     self.bins_nr)
            pill_image = Image.fromarray(
                (image_with_keypoints * 255.).astype(np.uint8))
            self.ctx.channel_send(
                "plotted key points",
                neptune.Image(name='epoch{}_batch{}_idx{}'.format(
                    self.epoch_id, self.batch_id, i),
                              description="true and prediction key points",
                              data=pill_image))

            if i == self.img_nr:
                break
        self.epoch_id += 1
Code Example #16
def send_histogram(ctx, channel_name, histogram):
    data = copy.copy(histogram)

    while data[-1] == 0:
        data.pop()

    x = [i for i in range(0, len(data))]
    y = data

    fig = plt.figure(figsize=(10, 10), dpi=80)
    ax = fig.add_subplot(111)
    ax.bar(x, y, label='Histogram')
    fig.canvas.draw()
    data = fig.canvas.tostring_rgb()

    histogram_image = Image.frombuffer("RGB", (800, 800), data, "raw", "RGB",
                                       0, 1)

    neptune_image = neptune.Image(name="Histogram",
                                  description="Histogram",
                                  data=histogram_image)

    ctx.channel_send(channel_name, neptune_image)
Code Example #17
def stylish_neptune_image(raw_image):
    stylish_image = Image.fromarray(raw_image)
    return neptune.Image(
        name="Kuba in GDG style",
        description="style transfered image",
        data=stylish_image)
Code Example #18
def neptune_image(raw_image, description):
    stylish_image = Image.fromarray(raw_image)
    return neptune.Image(name="neptune neural art",
                         description=description,
                         data=stylish_image)
Code Example #19
File: main.py  Project: pitercl/neptune-examples
def transfer_style(stats, img):
    tf.reset_default_graph()

    if params.from_content:
        initial_image = img
    else:
        initial_image = tf.truncated_normal(img.shape,
                                            mean=0.,
                                            stddev=1,
                                            dtype=tf.float32,
                                            seed=None)
    image = tf.Variable(initial_image, name='image')

    conv1_1 = conv2d(image, 64, scope='conv1_1')
    conv1_2 = conv2d(conv1_1, 64, scope='conv1_2')
    pool1 = avg_pool(conv1_2, name='pool1')

    conv2_1 = conv2d(pool1, 128, scope='conv2_1')
    conv2_2 = conv2d(conv2_1, 128, scope='conv2_2')
    pool2 = avg_pool(conv2_2, name='pool2')

    conv3_1 = conv2d(pool2, 256, scope='conv3_1')
    conv3_2 = conv2d(conv3_1, 256, scope='conv3_2')
    conv3_3 = conv2d(conv3_2, 256, scope='conv3_3')
    pool3 = avg_pool(conv3_3, name='pool3')

    conv4_1 = conv2d(pool3, 512, scope='conv4_1')
    conv4_2 = conv2d(conv4_1, 512, scope='conv4_2')
    conv4_3 = conv2d(conv4_2, 512, scope='conv4_3')
    pool4 = avg_pool(conv4_3, name='pool4')

    conv5_1 = conv2d(pool4, 512, scope='conv5_1')
    conv5_2 = conv2d(conv5_1, 512, scope='conv5_2')
    conv5_3 = conv2d(conv5_2, 512, scope='conv5_3')
    pool5 = avg_pool(conv5_3, name='pool5')

    content_style_balance_param = tf.placeholder(dtype=tf.float32, shape=[])

    loss_content = 3e-4 * tf.reduce_sum(
        (stats['content_stats'] - conv4_2)**2) / 2

    loss_style1 = tf.reduce_sum(
        (stats['style_stats1'] - gram_matrix(conv1_1))**2) / 2
    loss_style2 = tf.reduce_sum(
        (stats['style_stats2'] - gram_matrix(conv2_1))**2) / 2
    loss_style3 = tf.reduce_sum(
        (stats['style_stats3'] - gram_matrix(conv3_1))**2) / 2
    loss_style4 = tf.reduce_sum(
        (stats['style_stats4'] - gram_matrix(conv4_1))**2) / 2
    loss_style5 = tf.reduce_sum(
        (stats['style_stats5'] - gram_matrix(conv5_1))**2) / 2

    loss_style = tf.add_n(
        [loss_style1, loss_style2, loss_style3, loss_style4, loss_style5]) / 5

    loss = 2 * (content_style_balance_param * loss_content +
                loss_style) / (1.0 + content_style_balance_param)

    train_op = tf.train.AdamOptimizer(params.learning_rate).minimize(
        loss, var_list=[image])
    vgg16 = tf.train.Saver(tf.global_variables()[1:27])

    cfg = tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)

    with tf.Session(config=cfg) as sess:
        sess.run(tf.global_variables_initializer())
        vgg16.restore(sess, params.path_to_model)

        for step in xrange(params.number_of_iterations):
            _, l, ls, lc = sess.run(
                [train_op, loss, loss_style, loss_content],
                feed_dict={content_style_balance_param: content_style_balance})

            if step % 10 == 0:
                ctx.channel_send('loss', step, l)
                ctx.channel_send('loss style', step, ls)
                ctx.channel_send('loss content', step, lc)
                ctx.channel_send('content/style balance', step,
                                 content_style_balance)

                retrieved = retrieve(sess.run(image))
                retrieved.thumbnail((300, 300), Image.ANTIALIAS)
                ctx.channel_send(
                    'image_channel', step,
                    neptune.Image(name=step, description=step, data=retrieved))

        final = retrieve(sess.run(image))
        final.save('/output/final.jpg')
Code Example #20
mse = np.mean((regr.predict(diabetes_X_test) - diabetes_y_test)**2)
mse_channel.send(x=time.time(), y=mse)
logs_channel.send(x=2, y="Mean squared error: %.2f" % mse)

# Explained variance score: 1 is perfect prediction
logs_channel.send(x=3,
                  y='Variance score: %.2f' %
                  regr.score(diabetes_X_test, diabetes_y_test))

# Plot outputs
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test,
         regr.predict(diabetes_X_test),
         color='blue',
         linewidth=3)

# Convert the chart to an image.
image_buffer = io.BytesIO()
plt.savefig(image_buffer, format='png')
image_buffer.seek(0)

# Send the chart to Neptune through an image channel.
regression_chart_channel.send(
    x=time.time(),
    y=neptune.Image(
        name='Regression chart',
        description='A chart containing predictions and target values '
        'for diabetes progression regression. '
        'Feature used: ' + used_feature_name,
        data=Image.open(image_buffer)))
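The buffer round-trip above works for any matplotlib figure. A small helper sketch, assuming the same imports as this example (`io`, `PIL.Image`, `neptune`) and that a figure object is available; the helper name is illustrative, not part of the original code.

def figure_to_neptune_image(fig, name, description=''):
    # Render the figure into an in-memory PNG and wrap it in a neptune.Image,
    # mirroring the savefig/BytesIO round-trip used above.
    buffer = io.BytesIO()
    fig.savefig(buffer, format='png')
    buffer.seek(0)
    return neptune.Image(name=name,
                         description=description,
                         data=Image.open(buffer))

# Hypothetical usage with the channel from this example:
# regression_chart_channel.send(x=time.time(),
#                               y=figure_to_neptune_image(plt.gcf(), 'Regression chart'))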