Example #1
import tensorflow as tf
import numpy as np
import IPython.display as display

from PIL import Image

cat_in_snow = tf.keras.utils.get_file(
    '320px-Felis_catus-cat_on_snow.jpg',
    'https://storage.googleapis.com/download.tensorflow.org/example_images/320px-Felis_catus-cat_on_snow.jpg'
)
williamsburg_bridge = tf.keras.utils.get_file(
    '194px-New_East_River_Bridge_from_Brooklyn_det.4a09796u.jpg',
    'https://storage.googleapis.com/download.tensorflow.org/example_images/194px-New_East_River_Bridge_from_Brooklyn_det.4a09796u.jpg'
)

display.display(display.Image(filename=cat_in_snow))
display.display(
    display.HTML(
        'Image cc-by: <a href="https://commons.wikimedia.org/wiki/File:Felis_catus-cat_on_snow.jpg">Von.grzanka</a>'
    ))
img = Image.open(cat_in_snow)
print(cat_in_snow)
#img.show()

# The following functions can be used to convert a value to a type compatible
# with tf.Example.


def _bytes_feature(value):
    """Returns a bytes_list from a string / byte."""
    if isinstance(value, type(tf.constant(0))):
        value = value.numpy()  # BytesList won't unpack a string from an EagerTensor.
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
### mkdir tmp # a temporary folder to contain plot jpegs
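
# draw_3d_plot is not defined in this fragment; below is a minimal sketch,
# assuming the Lyft Level 5 devkit (`lyftdata`, LidarPointCloud) and
# matplotlib are available. The original helper's figure styling may differ.
from lyft_dataset_sdk.utils.data_classes import LidarPointCloud
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401, enables the 3d projection
import matplotlib.pyplot as plt


def draw_3d_plot(idx, lidar_token):
    """Render the LIDAR point cloud of one sample and save it as a jpeg in tmp/."""
    pc = LidarPointCloud.from_file(lyftdata.get_sample_data_path(lidar_token))
    fig = plt.figure(figsize=(12, 12))
    ax = fig.add_subplot(111, projection='3d')
    ax.scatter(pc.points[0], pc.points[1], pc.points[2], s=0.2)
    filename = 'tmp/frame_{:03d}.jpg'.format(idx)
    fig.savefig(filename)
    plt.close(fig)
    return filename
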
# let's take a quick look at the 3d Plot
first_sample_token = my_scene['first_sample_token']
sample = lyftdata.get('sample', first_sample_token)
lidar_token = sample['data']['LIDAR_TOP']
filename = draw_3d_plot(0, lidar_token)
Image.open(filename)
from tqdm import tqdm
from moviepy.editor import ImageSequenceClip
from IPython.display import HTML

frames = []
first_sample_token = my_scene['first_sample_token']
token = first_sample_token
for i in tqdm(range(my_scene['nbr_samples'])):
    sample = lyftdata.get('sample', token)
    lidar_token = sample['data']['LIDAR_TOP']
    filename = draw_3d_plot(i, lidar_token)
    frames += [filename]
    token = sample['next']
#     break
clip = ImageSequenceClip(frames, fps=5)
clip.write_gif('pcl_data.gif', fps=5);
from IPython.core.interactiveshell import InteractiveShell
InteractiveShell.ast_node_interactivity = "all"
from IPython import display
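# Reading the gif bytes and declaring format='png' is a known notebook hack:
# older IPython builds did not accept format='gif', and browsers render the
# bytes correctly regardless of the declared format.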
with open('pcl_data.gif','rb') as f:
    display.display(display.Image(data=f.read(), format='png'))
### rm -r tmp/*
# The rendering command below is commented out because it tends to crash in notebooks,
# lyftdata.render_scene(my_scene['token'])
HTML('<iframe width="700" height="500" src="https://www.youtube.com/embed/Vs8H8Fs-zTs" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>')
HTML('<iframe width="700" height="430" src="https://www.youtube.com/embed/ivmiN4zvRTo" frameborder="0" allow="accelerometer; autoplay; encrypted-media; gyroscope; picture-in-picture" allowfullscreen></iframe>')
Example #3
def plot_model(model,
               to_file='model.png',
               show_shapes=False,
               show_dtype=False,
               show_layer_names=True,
               rankdir='TB',
               expand_nested=False,
               dpi=96):
    """Converts a Keras model to dot format and save to a file.

  Example:

  ```python
  input = tf.keras.Input(shape=(100,), dtype='int32', name='input')
  x = tf.keras.layers.Embedding(
      output_dim=512, input_dim=10000, input_length=100)(input)
  x = tf.keras.layers.LSTM(32)(x)
  x = tf.keras.layers.Dense(64, activation='relu')(x)
  x = tf.keras.layers.Dense(64, activation='relu')(x)
  x = tf.keras.layers.Dense(64, activation='relu')(x)
  output = tf.keras.layers.Dense(1, activation='sigmoid', name='output')(x)
  model = tf.keras.Model(inputs=[input], outputs=[output])
  dot_img_file = '/tmp/model_1.png'
  tf.keras.utils.plot_model(model, to_file=dot_img_file, show_shapes=True)
  ```

  Args:
    model: A Keras model instance
    to_file: File name of the plot image.
    show_shapes: whether to display shape information.
    show_dtype: whether to display layer dtypes.
    show_layer_names: whether to display layer names.
    rankdir: `rankdir` argument passed to PyDot,
        a string specifying the format of the plot:
        'TB' creates a vertical plot;
        'LR' creates a horizontal plot.
    expand_nested: Whether to expand nested models into clusters.
    dpi: Dots per inch.

  Returns:
    A Jupyter notebook Image object if Jupyter is installed.
    This enables in-line display of the model plots in notebooks.
  """
    dot = model_to_dot(model,
                       show_shapes=show_shapes,
                       show_dtype=show_dtype,
                       show_layer_names=show_layer_names,
                       rankdir=rankdir,
                       expand_nested=expand_nested,
                       dpi=dpi)
    to_file = path_to_string(to_file)
    if dot is None:
        return
    _, extension = os.path.splitext(to_file)
    if not extension:
        extension = 'png'
    else:
        extension = extension[1:]
    # Save image to disk.
    dot.write(to_file, format=extension)
    # Return the image as a Jupyter Image object, to be displayed in-line.
    # Note that we cannot easily detect whether the code is running in a
    # notebook, and thus we always return the Image if Jupyter is available.
    if extension != 'pdf':
        try:
            from IPython import display
            return display.Image(filename=to_file)
        except ImportError:
            pass
Example #4
if __name__ == "__main__":
    trainer_init = baseline_registry.get_trainer(config.TRAINER_NAME)
    trainer = trainer_init(config)
    trainer.train()

# %%
# @markdown (double click to see the code)

# example tensorboard visualization
# for more details refer to [link](https://github.com/facebookresearch/habitat-lab/tree/master/habitat_baselines#additional-utilities).

try:
    from IPython import display

    with open("./res/img/tensorboard_video_demo.gif", "rb") as f:
        display.display(display.Image(data=f.read(), format="png"))
except ImportError:
    pass
# %% [markdown]
# ## Key Concepts
#
# All the concepts link to their definitions:
#
# 1. [`habitat.sims.habitat_simulator.HabitatSim`](https://github.com/facebookresearch/habitat-lab/blob/master/habitat/sims/habitat_simulator/habitat_simulator.py#L159)
# Thin wrapper over `habitat_sim` providing seamless integration with the experimentation framework.
#
#
# 2. [`habitat.core.env.Env`](https://github.com/facebookresearch/habitat-lab/blob/master/habitat/core/env.py)
# Abstraction for the universe of agent, task and simulator. Agents that you train and evaluate operate inside the environment.
#
#
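# %%
# A minimal interaction sketch for `habitat.core.env.Env`, following the
# habitat-lab example script (the config path is an assumption; any registered
# task config works):
import habitat

env = habitat.Env(config=habitat.get_config("configs/tasks/pointnav.yaml"))
observations = env.reset()
while not env.episode_over:
    observations = env.step(env.action_space.sample())
env.close()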
Example #5
def test_base64image():
    display.Image(
        "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABAQMAAAAl21bKAAAAA1BMVEUAAACnej3aAAAAAWJLR0QAiAUdSAAAAAlwSFlzAAALEwAACxMBAJqcGAAAAAd0SU1FB94BCRQnOqNu0b4AAAAKSURBVAjXY2AAAAACAAHiIbwzAAAAAElFTkSuQmCC"
    )
Example #6
File: usage.py Project: zkm98/test

import glob
import os

import imageio
import PIL
import tensorflow as tf
from IPython import display

train(train_dataset, EPOCHS)

checkpoint.restore(tf.train.latest_checkpoint(checkpoint_dir))
def display_image(epoch_no):
  return PIL.Image.open('image_at_epoch_{:04d}.png'.format(epoch_no))


display_image(EPOCHS)

with imageio.get_writer('dcgan.gif', mode='I') as writer:
  filenames = glob.glob('image*.png')
  filenames = sorted(filenames)
  last = -1
  for i,filename in enumerate(filenames):
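    # Thin out the frames: keep frame i only when round(2*sqrt(i)) advances, so
    # later epochs are sampled more sparsely than early ones.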
    frame = 2*(i**0.5)
    if round(frame) > round(last):
      last = frame
    else:
      continue
    image = imageio.imread(filename)
    writer.append_data(image)
  image = imageio.imread(filename)
  writer.append_data(image)
    
# A hack to display the GIF inside this notebook
os.rename('dcgan.gif', 'dcgan.gif.png')

display.Image(filename="dcgan.gif.png")
Example #7
attributions = (data_root / "LICENSE.txt").read_text().splitlines()[4:]
attributions = [line.split(' CC-BY') for line in attributions]
attributions = dict(attributions)

import IPython.display as display


def caption_image(image_path):
    image_rel = pathlib.Path(image_path).relative_to(data_root)
    return "Image (CC BY 2.0) " + ' - '.join(
        attributions[str(image_rel)].split(' - ')[:-1])


for n in range(3):
    image_path = random.choice(all_image_paths)
    display.display(display.Image(image_path))
    print(caption_image(image_path))
    print()

##############################
img_path = all_image_paths[0]


def preprocess_image(path):
    image = tf.read_file(path)
    image = tf.image.decode_jpeg(image, channels=3)
    image = tf.image.resize_images(image, [192, 192])
    image /= 255.0  # normalize to [0,1] range
    return image


import matplotlib.pyplot as plt
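
# A quick sanity check of the pipeline (a sketch assuming eager execution is
# enabled, so the TF1 ops above return concrete values):
plt.imshow(preprocess_image(img_path))
plt.show()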
Example #8
with tf.python_io.TFRecordWriter('assets/images.tfrecords') as writer:
    for filename, label in image_labels.items():
        image_string = open(filename, 'rb').read()
        tf_example = image_example(image_string, label)
        writer.write(tf_example.SerializeToString())

# Read tfrecords using tf.data.
raw_image_dataset = tf.data.TFRecordDataset('assets/images.tfrecords')

# Create a dictionary describing the features.
image_feature_description = {
    'height': tf.FixedLenFeature([], tf.int64),
    'width': tf.FixedLenFeature([], tf.int64),
    'depth': tf.FixedLenFeature([], tf.int64),
    'label': tf.FixedLenFeature([], tf.int64),
    'image_raw': tf.FixedLenFeature([], tf.string),
}


# Parse the input tf.Example proto using the dictionary above.
def _parse_image_function(example_proto):
    return tf.parse_single_example(example_proto, image_feature_description)


parsed_image_dataset = raw_image_dataset.map(_parse_image_function)
parsed_image_dataset

for image_features in parsed_image_dataset:
    image_raw = image_features['image_raw'].numpy()
    display.display(display.Image(data=image_raw))
# In[5]:

iframe(refs['torus'])

# In[6]:

get_ipython().run_cell_magic(
    'display', 'Markdown _',
    '{{pandas.read_html(\n    "<table>%s</table>" % \n    requests.get(refs[\'kernels\']).text.split(\'<table>\')[1].split(\'</table>\')[0]\n)[0].to_html()}}'
)

# In[7]:

# Model-View-Controller wiki
display.Image(refs['mvc'])

# In[8]:

display.Image(refs['workflow'])

# In[9]:

iframe(refs['typography'])

# In[10]:

iframe(refs['pn_styleguide'])

# In[11]:
Example #10
anim_file = 'dcgan.gif'

folder_exist = os.path.isdir(model_save_dir)
if not folder_exist:
    os.makedirs(model_save_dir)
    print("Created folder ", img_dir)

generator.save(model_save_dir + '/' + model_name + '.h5')
print('Model saved in: ' + model_save_dir + '/' + model_name + '.h5')

with imageio.get_writer(img_dir + '/' + anim_file, mode='I') as writer:
    filenames = glob.glob(img_dir + '/image*.png')
    filenames = sorted(filenames)
    last = -1
    for i, filename in enumerate(filenames):
        frame = 2 * (i**0.5)
        if round(frame) > round(last):
            last = frame
        else:
            continue
        image = imageio.imread(filename)
        writer.append_data(image)
    image = imageio.imread(filename)
    writer.append_data(image)

import IPython
from IPython import display

if IPython.version_info > (6, 2, 0, ''):
    display.display(display.Image(filename=img_dir + '/' + anim_file))
Example #11
    def sweep(self, set_param, vals, inter_delay=None):
        if inter_delay is not None:
            d = len(vals) * inter_delay
            h, m, s = int(d / 3600), int(d / 60) % 60, int(d) % 60
            print(f'Minimum duration: {h}h {m}m {s}s')

        fig = plt.figure(figsize=(4 *
                                  (2 + len(self._params) + len(self._sr830s)),
                                  4))
        grid = plt.GridSpec(4,
                            1 + len(self._params) + len(self._sr830s),
                            hspace=0)
        setax = fig.add_subplot(grid[:, 0])
        setax.set_xlabel('Time (s)')
        setax.set_ylabel(f'{set_param.label} ({set_param.unit})')
        setaxline = setax.plot([], [])[0]

        paxs = []
        plines = []
        for i, p in enumerate(self._params):
            ax = fig.add_subplot(grid[:, 1 + i])
            ax.set_xlabel(f'{set_param.label} ({set_param.unit})')
            ax.set_ylabel(f'{p.label} ({p.unit})')
            paxs.append(ax)
            plines.append(ax.plot([], [])[0])

        laxs = []
        llines = []
        for i, (l, name, _) in enumerate(self._sr830s):
            ax0 = fig.add_subplot(grid[:-1, 1 + len(self._params) + i])
            ax0.set_ylabel(f'{name} (V)')
            fmt = ScalarFormatter()
            fmt.set_powerlimits((-3, 3))
            ax0.get_yaxis().set_major_formatter(fmt)
            laxs.append(ax0)
            llines.append(ax0.plot([], [])[0])
            ax1 = fig.add_subplot(grid[-1, 1 + len(self._params) + i],
                                  sharex=ax0)
            ax1.set_ylabel('Phase (°)')
            ax1.set_xlabel(f'{set_param.label} ({set_param.unit})')
            laxs.append(ax1)
            llines.append(ax1.plot([], [])[0])
            plt.setp(ax0.get_xticklabels(), visible=False)

        fig.tight_layout()
        fig.show()

        meas = self._create_measurement(set_param)
        with meas.run() as datasaver:
            t0 = time.monotonic()
            for setpoint in vals:
                t = time.monotonic() - t0
                set_param.set(setpoint)

                setaxline.set_xdata(np.append(setaxline.get_xdata(), t))
                setaxline.set_ydata(np.append(setaxline.get_ydata(), setpoint))
                setax.relim()
                setax.autoscale_view()

                if inter_delay is not None:
                    plt.pause(inter_delay)

                data = [(set_param, setpoint), ('time', t)]
                for i, p in enumerate(self._params):
                    v = p.get()
                    data.append((p, v))
                    plines[i].set_xdata(
                        np.append(plines[i].get_xdata(), setpoint))
                    plines[i].set_ydata(np.append(plines[i].get_ydata(), v))
                    paxs[i].relim()
                    paxs[i].autoscale_view()
                for i, (l, _, gain) in enumerate(self._sr830s):
                    _autorange_srs(l, 3)
                    x, y = l.snap('x', 'y')
                    x, y = x / gain, y / gain
                    data.extend([(l.X, x), (l.Y, y)])
                    llines[i * 2].set_xdata(
                        np.append(llines[i * 2].get_xdata(), setpoint))
                    llines[i * 2].set_ydata(
                        np.append(llines[i * 2].get_ydata(), x))
                    llines[i * 2 + 1].set_xdata(
                        np.append(llines[i * 2 + 1].get_xdata(), setpoint))
                    llines[i * 2 + 1].set_ydata(
                        np.append(llines[i * 2 + 1].get_ydata(),
                                  np.arctan2(y, x) * 180 / np.pi))
                    laxs[i * 2].relim()
                    laxs[i * 2].autoscale_view()
                    laxs[i * 2 + 1].relim()
                    laxs[i * 2 + 1].autoscale_view()

                datasaver.add_result(*data)

                fig.tight_layout()
                fig.canvas.draw()
                #plt.pause(0.001)

            d = time.monotonic() - t0
            h, m, s = int(d / 3600), int(d / 60) % 60, int(d) % 60
            print(f'Completed in: {h}h {m}m {s}s')

            b = io.BytesIO()
            fig.savefig(b, format='png')
            display.display(display.Image(data=b.getbuffer(), format='png'))
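
# Typical usage (names are assumptions, given the qcodes-style Measurement API
# used above):
#   sweeper.sweep(gate_voltage, np.linspace(0.0, 1.0, 101), inter_delay=0.1)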
Example #12
sys.path.append('/content/tpu/models/official/mnasnet')
sys.path.append('/content/tpu/models/common')

"""```
# This is formatted as code
```

# Inference with SavedModel
"""

from IPython import display
import pylab
import PIL
import numpy as np
filename = 'panda.jpg'
display.display(display.Image(filename))
img = np.array(PIL.Image.open(filename).resize((224, 224))).astype(np.float32)

import os
import tensorflow as tf

checkpoint_name = 'mnasnet-a1'
export_dir = os.path.join(checkpoint_name, 'saved_model')
serv_sess = tf.Session(graph=tf.Graph())
meta_graph_def = tf.saved_model.loader.load(serv_sess, [tf.saved_model.tag_constants.SERVING], export_dir)

# Checks the saved model signatures.
signature = 'serving_default'
print('Serving Signature: ', signature)
print(meta_graph_def.signature_def[signature])
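
# A hedged sketch of running the loaded model; the signature's input/output
# keys ('input'/'output') are assumptions, so read the real names from the
# signature_def printed above.
input_name = meta_graph_def.signature_def[signature].inputs['input'].name
output_name = meta_graph_def.signature_def[signature].outputs['output'].name
predictions = serv_sess.run(output_name, feed_dict={input_name: img[None, ...]})
print('Top-1 class:', predictions.argmax(axis=-1))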
Example #13
            images.append(img.copy())
            img.close()
    else:
        for image in glob.glob(
                '../plots/anim/epath_tmp/electron_path_displacements_0*.png'):
            img = PIL_Image.open(image)
            images.append(img.copy())
            img.close()

    if start_in_tracker:
        file_path_name = '/Users/brianpollack/Coding/Mu2E/plots/anim/electron_path_displacements_trk.gif'
    else:
        file_path_name = '/Users/brianpollack/Coding/Mu2E/plots/anim/electron_path_displacements.gif'
    print(file_path_name, images)
    writeGif(file_path_name, images, duration=0.5)
    IPdisplay.display(IPdisplay.Image(filename=file_path_name))

    plt.figure()
    plt.plot(
        zs_trk,
        disp_bm,
        'go-',
        label='meas syst',
        linewidth=2,
    )
    plt.plot(zs_trk, disp_br, 'bo-', label='rot syst', linewidth=2)
    plt.plot(zs_trk, disp_bp, 'ro-', label='pos syst', linewidth=2)
    plt.legend(fontsize=16, loc='best')
    plt.title('Distance from Ideal Electron at Tracker Stations', fontsize=20)
    plt.xlabel('Z (mm)', fontsize=18)
    plt.ylabel(r'$\Delta$D (mm)', fontsize=18)
Example #14
import tensorflow as tf
from IPython import display as ipythondisplay


def display_model(model):
    tf.keras.utils.plot_model(model, to_file='tmp.png', show_shapes=True)
    return ipythondisplay.Image('tmp.png')
Example #15
X = net.GaussianFill([], ["X"], mean=0.0, std=1.0, shape=[2, 3], run_once=0)
print("New network proto:\n\n{}".format(net.Proto()))
print("Type of X is: {}".format(type(X)))
print("The blob name is: {}".format(str(X)))

W = net.GaussianFill([], ["W"], mean=0.0, std=1.0, shape=[5, 3], run_once=0)
b = net.ConstantFill([], ["b"], shape=[
    5,
], value=1.0, run_once=0)

Y = X.FC([W, b], ["Y"])

from caffe2.python import net_drawer
from IPython import display
graph = net_drawer.GetPydotGraph(net, rankdir="LR")
display.Image(graph.create_png(), width=800)

workspace.ResetWorkspace()
print("Current blobs in the workspace: {}".format(workspace.Blobs()))
workspace.RunNetOnce(net)
print("Blobs in the workspace after execution: {}".format(workspace.Blobs()))
# Let's dump the contents of the blobs
for name in workspace.Blobs():
    print("{}:\n{}".format(name, workspace.FetchBlob(name)))

workspace.ResetWorkspace()
print("Current blobs in the workspace: {}".format(workspace.Blobs()))
workspace.CreateNet(net)
workspace.RunNet(net.Proto().name)
print("Blobs in the workspace after execution: {}".format(workspace.Blobs()))
for name in workspace.Blobs():
    print("{}:\n{}".format(name, workspace.FetchBlob(name)))
Example #16
weightsPath = lpath + "yolov3.weights"
configPath = lpath + "yolov3.cfg"

# loading model
net = cv2.dnn.readNetFromDarknet(configPath, weightsPath)

# determine only the *output* layer names that we need from YOLO
ln = net.getLayerNames()
ln = [ln[i[0] - 1] for i in net.getUnconnectedOutLayers()]
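# Note: OpenCV >= 4.5.4 returns a flat array from getUnconnectedOutLayers(), in
# which case the line above becomes: ln = [ln[i - 1] for i in net.getUnconnectedOutLayers()]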

# I/O
img_path = "/content/img/"
out_path = "/content/det_yolo/img_yolo/"

#####################################################################

# code RUN

name = 'yolo_'
df = give_frames(img_path, out_path, name)

import pandas as pd
df = pd.DataFrame(df, columns=['image', 'model_time', 'code_time', 'num_ppl'])
df.to_excel("/content/det_yolo/img_yolo/yolo_out.xls")

###########################################################################

# Display a single image with detections in the ROI
from IPython import display
display.display(display.Image(os.path.join(out_path, 'out_yolo_536.jpg')))
Example #17
def plot_model(model,
               to_file='model.png',
               show_shapes=False,
               show_dtype=False,
               show_layer_names=True,
               rankdir='TB',
               expand_nested=False,
               dpi=96,
               layer_range=None,
               show_layer_activations=False):
    """Converts a Keras model to dot format and save to a file.

  Example:

  ```python
  input = tf.keras.Input(shape=(100,), dtype='int32', name='input')
  x = tf.keras.layers.Embedding(
      output_dim=512, input_dim=10000, input_length=100)(input)
  x = tf.keras.layers.LSTM(32)(x)
  x = tf.keras.layers.Dense(64, activation='relu')(x)
  x = tf.keras.layers.Dense(64, activation='relu')(x)
  x = tf.keras.layers.Dense(64, activation='relu')(x)
  output = tf.keras.layers.Dense(1, activation='sigmoid', name='output')(x)
  model = tf.keras.Model(inputs=[input], outputs=[output])
  dot_img_file = '/tmp/model_1.png'
  tf.keras.utils.plot_model(model, to_file=dot_img_file, show_shapes=True)
  ```

  Args:
    model: A Keras model instance
    to_file: File name of the plot image.
    show_shapes: whether to display shape information.
    show_dtype: whether to display layer dtypes.
    show_layer_names: whether to display layer names.
    rankdir: `rankdir` argument passed to PyDot,
        a string specifying the format of the plot: 'TB' creates a vertical
          plot; 'LR' creates a horizontal plot.
    expand_nested: Whether to expand nested models into clusters.
    dpi: Dots per inch.
    layer_range: a `list` of two `str` items: the names of the starting and
      ending layers (both inclusive) delimiting the range of layers to plot.
      Regex patterns are also accepted in place of exact names; in that case
      the start layer is the first match for `layer_range[0]` and the end
      layer is the last match for `layer_range[1]`. Defaults to `None`, which
      plots all layers of the model. Note that the range must be chosen so
      that the resulting subgraph is complete.
    show_layer_activations: Display layer activations (only for layers that
      have an `activation` property).

  Raises:
    ValueError: if `plot_model` is called before the model is built.

  Returns:
    A Jupyter notebook Image object if Jupyter is installed.
    This enables in-line display of the model plots in notebooks.
  """

    if not model.built:
        raise ValueError(
            'This model has not yet been built. '
            'Build the model first by calling `build()` or by calling '
            'the model on a batch of data.')

    dot = model_to_dot(model,
                       show_shapes=show_shapes,
                       show_dtype=show_dtype,
                       show_layer_names=show_layer_names,
                       rankdir=rankdir,
                       expand_nested=expand_nested,
                       dpi=dpi,
                       layer_range=layer_range,
                       show_layer_activations=show_layer_activations)
    to_file = path_to_string(to_file)
    if dot is None:
        return
    _, extension = os.path.splitext(to_file)
    if not extension:
        extension = 'png'
    else:
        extension = extension[1:]
    # Save image to disk.
    dot.write(to_file, format=extension)
    # Return the image as a Jupyter Image object, to be displayed in-line.
    # Note that we cannot easily detect whether the code is running in a
    # notebook, and thus we always return the Image if Jupyter is available.
    if extension != 'pdf':
        try:
            from IPython import display
            return display.Image(filename=to_file)
        except ImportError:
            pass
Example #18
def display_digraph(digraph, format):
    if format == 'svg':
        x = display.SVG(digraph.pipe(format=format))
    else:
        x = display.Image(digraph.pipe(format='png'))
    display.display(x)
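
# A minimal usage sketch, assuming the `graphviz` package provides the digraph:
#   import graphviz
#   dg = graphviz.Digraph()
#   dg.edge('parse', 'render')
#   display_digraph(dg, 'svg')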
Example #19
File: pandas.py Project: rieder/astropy
# -*- coding: utf-8 -*-

ascii_coded = (
    'Ò♙♙♙♙♙♙♙♙♌♐♐♌♙♙♙♙♙♙♌♌♙♙Ò♙♙♙♙♙♙♙♘♐♐♐♈♙♙♙♙♙♌♐♐♐♔Ò♙♙♌♈♙♙♌♐♈♈♙♙♙♙♙♙♙♙♈♐♐♙Ò♙♐♙♙♙♐♐♙♙♙'
    '♙♙♙♙♙♙♙♙♙♙♙♙Ò♐♔♙♙♘♐♐♙♙♌♐♐♔♙♙♌♌♌♙♙♙♌Ò♐♐♙♙♘♐♐♌♙♈♐♈♙♙♙♈♐♐♙♙♘♔Ò♐♐♌♙♘♐♐♐♌♌♙♙♌♌♌♙♈♈♙♌♐'
    '♐Ò♘♐♐♐♌♐♐♐♐♐♐♌♙♈♙♌♐♐♐♐♐♔Ò♘♐♐♐♐♐♐♐♐♐♐♐♐♈♈♐♐♐♐♐♐♙Ò♙♘♐♐♐♐♈♐♐♐♐♐♐♙♙♐♐♐♐♐♙♙Ò♙♙♙♈♈♈♙♙♐'
    '♐♐♐♐♔♙♐♐♐♐♈♙♙Ò♙♙♙♙♙♙♙♙♙♈♈♐♐♐♙♈♈♈♙♙♙♙Ò')
ascii_uncoded = ''.join([chr(ord(c) - 200) for c in ascii_coded])
url = 'https://media.giphy.com/media/e24Q8FKE2mxRS/giphy.gif'
message_coded = 'ĘĩĶĬĩĻ÷ĜĩĪĴĭèıĶļĭĺĩīļıķĶ'
message_uncoded = ''.join([chr(ord(c) - 200) for c in message_coded])

try:
    from IPython import display

    html = display.Image(url=url)._repr_html_()

    class HTMLWithBackup(display.HTML):
        def __init__(self, data, backup_text):
            super().__init__(data)
            self.backup_text = backup_text

        def __repr__(self):
            if self.backup_text is None:
                return super().__repr__()
            else:
                return self.backup_text

    dhtml = HTMLWithBackup(html, ascii_uncoded)
    display.display(dhtml)
except ImportError:
    print(ascii_uncoded)
Example #20
"""
Original file is located at
    https://colab.research.google.com/drive/1vcd0PcYBJ-RtS_iYr262c4yVfofzkuAY
"""

#Install imageio package
#!pip install imageio

# Commented out IPython magic to ensure Python compatibility.
# %matplotlib inline
import imageio
import requests
import matplotlib.pyplot as plt
import IPython.display as dp

# Display the image. `img` here is a local file, so pass it as a filename;
# requests.get(...) would only apply if `img` were a web URI.
img = "img.jpg"
dp.Image(filename=img)

# Read the image into a numpy array using imageio's imread (works with both
# local paths and web URIs).
source_img = imageio.imread(img)

# Convert the image to grayscale using the luma formula Y = 0.299*R + 0.587*G + 0.114*B
import numpy as np


def grayscaleimg(rgb):
    return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])


gryscl_img = grayscaleimg(source_img)

#Invert the image by subtracting it from 255
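# A one-line sketch of the inversion step described above (assumed continuation
# of this fragment):
inverted_img = 255 - gryscl_img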
Example #21
def plot_model(model,
               filename='model.png',
               show_shapes=True,
               rankdir='TB',
               dpi=96,
               **kwargs):
    """
    :param model:
    :param filename:
    :return:
    """
    """ Setup params """
    graph = pydot.Dot(graph_type='digraph', strict=True)
    graph.set('rankdir', rankdir)
    graph.set('concentrate', True)
    graph.set('dpi', dpi)
    graph.set_node_defaults(shape='record')
    """ First, let's initialize our graph """
    #graph = pydot.Dot(graph_type='digraph', strict = True)
    #graph.set_nodesep(2)
    #graph.set_fontpath(tp.__fonts_dir__)
    #graph.set_fontname(tp.__layers_css__['globals']['font_tag']['name'])
    """ Now let's define each node (layer) """
    nodes = {}
    inbounds = {}
    node_layers = {}
    in_shapes = {}
    for ly in model.layers:
        """ Get layer params """
        __name__ = ly.output.name
        """ Create node """
        nodes[__name__] = generate_node(ly, name=__name__)
        node_layers[__name__] = ly
        """ Get inbounds for this layer """
        if __name__ not in inbounds:
            inbounds[__name__] = []
        if __name__ not in in_shapes:
            in_shapes[__name__] = []

        _input_ = ly.input if isinstance(ly.input, list) else [ly.input]
        inbounds[__name__] += [
            ln.name for ln in _input_ if ln.name != __name__
        ]
        in_shapes[__name__] += [f'(?,{",".join([str(lln) for lln in ln.shape.as_list()[1:]])})' \
                                for ln in _input_ if ln.name != __name__]
        """ If inbound not in nodes, make it now (it means it's an input) """
        for iln, ln in enumerate(inbounds[__name__]):
            if ln not in nodes:
                """ create node """
                nodes[ln] = pydot.Node(ln)
                node_layers[ln] = ly.input[iln]

            if ln not in inbounds:
                inbounds[ln] = []
    """ Now we can easily identify the inputs/outputs of the model by looking at inbounds """
    input_nodes = [
        node_name for node_name in inbounds if len(inbounds[node_name]) == 0
    ]
    output_nodes = [
        node_layers[ib].output.name for ib in list(inbounds.keys())
        if ib not in np.hstack(list(inbounds.values()))
    ]
    print(in_shapes)
    """ Add nodes and edges to graph """
    for node_name in inbounds:
        """ If this is an input_node, recompile using right style """
        if node_name in input_nodes:
            style = copy.deepcopy(tp.__layers_css__['layers']['InputLayer'])
            style['tag'] = copy.deepcopy(
                eval(style['tag'])(node_layers[node_name]))
            graph.del_node(nodes[node_name])
            nodes[node_name] = generate_node(None, name=node_name, style=style)

        node = nodes[node_name]
        node.set_fontname(tp.__layers_css__['globals']['font_tag']['name'])
        graph.add_node(node)
        print(f'[INFO] - Adding layer {node_name} to graph.')

        for ib, ish in zip(inbounds[node_name], in_shapes[node_name]):
            kww = {'label': ish} if show_shapes else {}
            edge = pydot.Edge(nodes[ib], node, **kww)
            graph.add_edge(edge)
            edge.set_fontname(tp.__layers_css__['globals']['font_tag']['name'])
        """ If this is an output node, add extra block """
        if node_name in output_nodes:
            style = copy.deepcopy(tp.__layers_css__['layers']['OutputLayer'])
            style['tag'] = copy.deepcopy(
                eval(style['tag'])(node_layers[node_name]))
            graph.del_node('out_' + node_name)
            out_node = generate_node(None,
                                     name='out_' + node_name,
                                     style=style)
            graph.add_node(out_node)
            edge = pydot.Edge(node, out_node)
            graph.add_edge(edge)

    graph.write_png(filename)
    graph.write_svg(filename.replace('png', 'svg'))

    # Return the image as a Jupyter Image object, to be displayed in-line.
    # Note that we cannot easily detect whether the code is running in a
    # notebook, and thus we always return the Image if Jupyter is available.
    try:
        from IPython import display
        return display.Image(filename=filename)
    except ImportError:
        pass
Example #22
        generate_and_save_images(model, epoch, random_vector_for_generation)


# Display an image using its epoch number
def display_image(epoch_no):
    return PIL.Image.open('image_at_epoch_{:04d}.png'.format(epoch_no))


plt.imshow(display_image(epochs))
plt.axis('off')

# Generate a GIF from all of the saved images.
with imageio.get_writer('cvae.gif', mode='I') as writer:
    filenames = glob.glob('image*.png')
    filenames = sorted(filenames)
    last = -1
    for i, filename in enumerate(filenames):
        frame = 2 * (i**0.5)
        if round(frame) > round(last):
            last = frame
        else:
            continue
        image = imageio.imread(filename)
        writer.append_data(image)
    image = imageio.imread(filename)
    writer.append_data(image)
    # A hack to display the gif inside this notebook
    os.system('cp cvae.gif cvae.gif.png')

display.Image(filename='cvae.gif.png')
Example #23
    if finded_photo['label'] == "Square":
        print(finded_photo['source'])
        img = requests.get(finded_photo['source'])
        img_file = open(f'flickr_photo/{i}.jpg', 'wb')
        img_file.write(img.content)
        img_file.close()
print("ok")

for i in range(100):
    image = Image.open(f'flickr_photo/{i}.jpg')

    w, h = image.size
    rr, gg, bb = 0, 0, 0

    for x in range(w):
        for y in range(h):
            r, g, b = image.getpixel((x, y))
            rr += r
            gg += g
            bb += b

    cnt = w * h
    print(rr // cnt, gg // cnt, bb // cnt)
    awg_color = Image.new('RGB', (150, 75),
                          color=(rr // cnt, gg // cnt, bb // cnt))
    awg_color.paste(image, (0, 0, 75, 75))
    awg_color.save(f'flickr_photo/awg_{i}.jpg', "JPEG")
    image = display.Image(f'flickr_photo/{i}.jpg')
    awg_color = display.Image(f'flickr_photo/awg_{i}.jpg')
    display.display_jpeg(awg_color)
Example #24
import imageio
from IPython import display


def create_animation(images):
    imageio.mimsave('./animation.gif', images)
    with open('./animation.gif', 'rb') as f:
        display.display(display.Image(data=f.read(), height=512))
Example #25
import pytest


def test_image_bad_filename_raises_proper_exception():
    with pytest.raises(FileNotFoundError):
        display.Image("/this/file/does/not/exist/")._repr_png_()
Example #26
import numpy as np
import imageio
from IPython import display


def animate(images):
    images = np.array(images)
    converted_images = np.clip(images * 255, 0, 255).astype(np.uint8)
    imageio.mimsave("./animation.gif", converted_images)
    with open("./animation.gif", 'rb') as f:
        display.display(display.Image(data=f.read(), height=300))
First, let's download [this image](https://commons.wikimedia.org/wiki/File:Felis_catus-cat_on_snow.jpg) of a cat in the snow and [this photo](https://upload.wikimedia.org/wikipedia/commons/f/fe/New_East_River_Bridge_from_Brooklyn_det.4a09796u.jpg) of the Williamsburg Bridge, NYC under construction.

### Fetch the images
"""

cat_in_snow = tf.keras.utils.get_file(
    '320px-Felis_catus-cat_on_snow.jpg',
    'https://storage.googleapis.com/download.tensorflow.org/example_images/320px-Felis_catus-cat_on_snow.jpg'
)
williamsburg_bridge = tf.keras.utils.get_file(
    '194px-New_East_River_Bridge_from_Brooklyn_det.4a09796u.jpg',
    'https://storage.googleapis.com/download.tensorflow.org/example_images/194px-New_East_River_Bridge_from_Brooklyn_det.4a09796u.jpg'
)

display.display(display.Image(filename=cat_in_snow))
display.display(
    display.HTML(
        'Image cc-by: <a href="https://commons.wikimedia.org/wiki/File:Felis_catus-cat_on_snow.jpg">Von.grzanka</a>'
    ))

display.display(display.Image(filename=williamsburg_bridge))
display.display(
    display.HTML(
        '<a href="https://commons.wikimedia.org/wiki/File:New_East_River_Bridge_from_Brooklyn_det.4a09796u.jpg">From Wikimedia</a>'
    ))
"""### Write the TFRecord file

As we did earlier, we can now encode the features as types compatible with `tf.Example`. In this case, we will not only store the raw image string as a feature, but we will store the height, width, depth, and an arbitrary `label` feature, which is used when we write the file to distinguish between the cat image and the bridge image. We will use `0` for the cat image, and `1` for the bridge image.
"""
Example #28
from io import BytesIO

import numpy as np
from PIL import Image
from IPython import display


def showarray(img_array):
    buf = BytesIO()
    Image.fromarray(np.uint8(img_array)).save(buf, 'png')
    display.display(display.Image(data=buf.getvalue()))
Example #29
        generate_and_save_images(model, epoch, random_vector_for_generation)


def display_image(epoch_no):
    return PIL.Image.open('image_at_epoch_{:04d}.png'.format(epoch_no))


plt.imshow(display_image(epochs))
plt.axis('off')  # Display images

anim_file = 'cvae.gif'

with imageio.get_writer(anim_file, mode='I') as writer:
    filenames = glob.glob('image*.png')
    filenames = sorted(filenames)
    last = -1
    for i, filename in enumerate(filenames):
        frame = 2 * (i**0.5)
        if round(frame) > round(last):
            last = frame
        else:
            continue
        image = imageio.imread(filename)
        writer.append_data(image)
    image = imageio.imread(filename)
    writer.append_data(image)

import IPython
from IPython import display

if IPython.version_info >= (6, 2, 0, ''):
    display.display(display.Image(filename=anim_file))
Example #30
    def __screenshot(self, view):
        """Insert a screenshot of the given view into the notebook."""
        with tempdir.tempdir():
            screenshot.screenshot(view, 'screenshot.png')
            return display.Image('screenshot.png')