Example #1
import urllib.request as req
import json
import shutil
import requests
import bs4

url = "https://building-management.publicwork.ntpc.gov.tw/bm_query.jsp?rt=3"

#%% Fetch the CAPTCHA image
img_url = "https://building-management.publicwork.ntpc.gov.tw/ImageServlet"
res = requests.get(img_url, stream=True, verify=False)
with open("img/check.png", "wb") as f:
    shutil.copyfileobj(res.raw, f)

from IPython.display import Image
Image("img/check.png")  # 無法正確顯示

#%% Fetch the data

url = "https://building-management.publicwork.ntpc.gov.tw/bm_list.jsp"  # 含表格資料的網址
headers = {
    "User-Agent":
    "Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.183 Safari/537.36",
    "Content-Type": "text/html",
}
payloadData = {
    "rt": "BM",
    "PagePT": "0",
    "A2V": "%A8%CF%A5%CE%B0%F5%B7%D3",  # 職照類型:  使用執照
    "D1V": "%B7s%A5_%A5%AB%AAO%BE%F4%B0%CF",  # 建築地址-行政區
    "D3": "%A4T%A5%C1",  # 建築地址-路街段
Example #2
# coding=utf-8
import sys
import os
import imageio
# from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
from IPython.display import display, Image


# Example: reading and displaying images
fn = os.listdir("notMNIST_small/A/")
for file in fn[:20]:
    path = 'notMNIST_small/A/' + file
    print("path:",path)
    # img = plt.imread(path)
    # plt.imshow(img)
    # plt.show()
    # img = Image.open(path)
    img = Image(filename=path)
    display(img)  # no image is shown outside Jupyter; install with `pip install jupyter`, then launch via `jupyter notebook notebook.ipynb`


Example #3
# performantly, and cost-effectively ingesting, processing, storing, and analyzing it.
# As a result, the business is not able to:
# * analyze un-structured sources like text, video, audio, and images
# * analyze granular (second-level) data across many years of history
# * easily ingest large volumes of data external to the organization
# * build accurate predictive models 
# There is also a requirement from the IT organization to secure this data using the
# latest authentication, encryption and access control frameworks. 

# ### The Solution
# More and more companies are looking to the latest big data technologies to solve these
# challenges. Hadoop-based solutions offer organizations the ability to store, process
# and analyze unlimited amounts of data using a scalable architecture, any kind of data 
# (structured or un-structured) using flexible storage formats, and perform large-scale
# visual and predictive analytics on that data using community-driven analytics frameworks.
Image(filename="img/cloudera.png")
# The Cloudera Enterprise Data Hub is a platform built on open-source technology that 
# provides all the benefits outlined above with the added capabilities of enterprise
# grade security, management, and data analytics tooling that organizations require. 
# The architecture below illustrates how straightforward a use case like predictive
# maintenance becomes on the Cloudera technology stack.
Image(filename="img/architecture.png")

# ### The Frameworks
# #### Apache Spark
Image(filename="img/spark.png")
# Apache Spark is a fast and general engine for large-scale data processing that enables:
# * Fast Analytics - Spark runs programs up to 100x faster than Hadoop MapReduce in memory, or 10x faster on disk.
# * Easy Data Science - With APIs in Java, Scala, Python, and R, it's easy to build parallel apps.
# * General Processing - Spark's libraries enable SQL and DataFrames, machine learning, graph processing, and stream processing. A short usage sketch follows.
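# As a quick illustration of the DataFrame API (a minimal sketch, assuming a
# local `pyspark` installation is available; the example rows are made up):
from pyspark.sql import SparkSession

spark = SparkSession.builder.master("local[*]").appName("demo").getOrCreate()
df = spark.createDataFrame([(1, "sensor-a"), (2, "sensor-b")], ["id", "device"])
df.filter(df.id > 1).show()  # run a simple query on the local Spark engine
spark.stop()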
Example #4
        os.path.join(root, d) for d in sorted(os.listdir(root))
        if os.path.isdir(os.path.join(root, d))
    ]
    if len(data_folders) != num_classes:
        raise Exception('Expected %d folders, one per class. \
        Found %d instead.' % (num_classes, len(data_folders)))
    print(data_folders)
    return data_folders


train_folders = maybe_extract(train_filename)
test_folders = maybe_extract(test_filename)
""" Problem 1
"""
n = randint(0, 9)
Image(train_folders[n] + '/' + os.listdir(train_folders[n])[0])

image_size = 28
pixel_depth = 255.0  # [0, 255] range


def load_letter(folder, min_num_images):
    """Load the data for a single letter label."""
    image_files = os.listdir(folder)
    dataset = np.ndarray(shape=(len(image_files), image_size, image_size),
                         dtype=np.float32)
    print(folder)
    num_images = 0
    for image in image_files:
        image_file = os.path.join(folder, image)
        try:
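            # (Excerpt is cut off here; a plausible completion following the
            # usual notMNIST loading pattern - normalize to [-0.5, 0.5] and
            # skip unreadable files. `imageio` is an assumed import.)
            image_data = (imageio.imread(image_file).astype(float) -
                          pixel_depth / 2) / pixel_depth
            dataset[num_images, :, :] = image_data
            num_images += 1
        except (IOError, ValueError) as e:
            print('Could not read:', image_file, '-', e, '- skipping.')
    return dataset[0:num_images, :, :]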
Example #5
# * Item 3

# Numbered List
# 1. Item 1
# 2. Item 2
# 3. Item 3

# ### Links

# Link to [Cloudera](http://www.cloudera.com)

# ### Images

# Display a stored image file:
from IPython.display import Image
Image("resources/spark.png")

# **Note:** The image path is relative to `/home/cdsw/` regardless of script
# location.

# ### Code blocks

# To print a block of code in the output without running it, use a comment line
# with three backticks to begin the block, then the block of code with each
# line preceded with the comment character, then a comment line with three
# backticks to close the block. Optionally include the language name after the
# opening backticks:

# ``` python
# print("Hello, World!")
# ```
Example #6
#!/usr/bin/env python
# coding: utf-8

# In[26]:


import cv2
import numpy as np
from matplotlib import pyplot as plt
from IPython.display import display, Image

# create the face cascade classifier
face_cascade = cv2.CascadeClassifier(r"D:\Nishi\Masters\SEM III\Project\Cosmoshop\client\static\resources\haarcascade_frontalface_default.xml")
display(Image(filename=r'D:\Nishi\Masters\SEM III\Project\Cosmoshop\client\static\images\face4.jpg'))
# load the image into a matrix
img = cv2.imread(r"D:\Nishi\Masters\SEM III\Project\Cosmoshop\client\static\images\face4.jpg")

# convert into a grayscale image
gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray_img, scaleFactor=1.05, minNeighbors=10)
for x, y, w, h in faces:
    cropped_img = img[y:y+h, x:x+w]
    edges = cv2.Canny(cropped_img, 130, 1000)
    number_of_edges = np.count_nonzero(edges)
    # classify each detected face by its edge density
    if number_of_edges > 1000:
        print("Wrinkle Found")
    else:
        print("No Wrinkle Found")


# In[30]:
Example #7
from IPython.display import Image
from io import StringIO  # sklearn.externals.six was removed in newer scikit-learn
from sklearn.tree import export_graphviz
import pydot

features = list(dataStore.columns[1:])
features

# full tree is too complex for interpretation
dot_data = StringIO()
export_graphviz(duhTree,
                out_file=dot_data,
                feature_names=features,
                filled=True,
                rounded=True)

graph = pydot.graph_from_dot_data(dot_data.getvalue())
Image(graph[0].create_png())

# full tree is too complex for interpretation; limit the rendered depth
dot_data = StringIO()
export_graphviz(duhTree,
                out_file=dot_data,
                feature_names=features,
                filled=True,
                rounded=True,
                max_depth=4)

graph = pydot.graph_from_dot_data(dot_data.getvalue())
Image(graph[0].create_png())
Example #8
def png(self):
    return Image(self._repr_png_(), embed=True)
Example #9
import numpy as np
matrix_arr = np.array([[3, 4, 5], [6, 7, 8], [9, 10, 11]])
print("The original matrix:\n{}".format(matrix_arr))
print("slices the first two rows:\n{}".format(matrix_arr[:2])
      )  # similar to list slicing. returns first two rows of the array
print("Slices the first two rows and two columns:\n{}".format(
    matrix_arr[:2, :2]))
print("returns 6 and 7: \n{}".format(matrix_arr[1, :2]))
print("Returns first column:\n {}".format(matrix_arr[:, :1])
      )  #Note that a colon by itself means to take the entire axis

from IPython.display import Image  # load an image file from disk
j = Image(filename='/Users/tungvm/Expre.png')
j  # diagrammatic explanation of how matrix array slicing works

personals = np.array(
    ['Manu', 'Jeevan', 'Prakash', 'Manu', 'Prakash', 'Jeevan', 'Prakash'])
print(personals == 'Manu')

from numpy import random
random_no = random.rand(7, 4)
print(random_no)
print("-----------------------------------")
print(random_no[personals == "Manu"])
print("-----------------------------------")
print(random_no[personals == "Manu", 2:])
print("-----------------------------------")
print(random_no[personals != "Manu"])
print("-----------------------------------")
new_var = (personals == "Manu") | (personals == "Jeevan")
print(new_var)
Example #10
def draw_nx(G, legend_edges=None, label='', save_to='', options={}):
    """ Draws a NetworkX graph object. By default, assumes it is a DiGraph.
        
        - save_to: optional filepath where to save a .png image or .dot file        
        - to show clusters, set the cluster attribute in nodes
        - options: Dictionary of GraphViz options
    
        For required libraries, see 
        https://en.softpython.org/graph-formats/graph-formats-sol.html#Required-libraries
    
        legend_edges example:
        
            legend_edges = [
                {'label':'ciao',
                 'color':'red'},
                {'label':'mondo',
                 'color':'blue'}

            ]    
    """

    if G is None:
        raise ValueError('Provided Graph is None !')

    import networkx as nx

    # fix graphviz path for anaconda in windows ...
    try:
        import os
        if os.name == 'nt':
            from os.path import expanduser
            # in Windows the actual home path can differ from the user login !!!
            home = expanduser("~")
            graphviz_path = os.path.join(home, 'Anaconda3', 'Library', 'bin', 'graphviz')
            if os.path.exists(graphviz_path) and "PATH" in os.environ and (
                    graphviz_path not in os.environ["PATH"]):
                os.environ["PATH"] += ';' + graphviz_path
    except Exception as e:
        print(e)

    if 'node' not in G.graph:
        G.graph['node'] = {}
    if 'edge' not in G.graph:
        G.graph['edge'] = {}
    if 'graph' not in G.graph:
        G.graph['graph'] = {}

    def merge(d2, d1):
        d2.update({k: v for k, v in d1.items() if k not in d2})

    # add graphviz layout options (see https://stackoverflow.com/a/39662097)
    if 'node' in options:
        merge(G.graph['node'], options['node'])
    if 'edge' in options:
        merge(G.graph['edge'], options['edge'])
    if 'graph' in options:
        merge(G.graph['graph'], options['graph'])

    merge(G.graph['node'], {'color': 'blue', 'fontcolor': 'blue'})
    merge(G.graph['edge'], {
        'arrowsize': '0.6',
        'splines': 'curved',
        'fontcolor': 'brown'
    })

    merge(G.graph['graph'], {
        'scale': '3',
        'style': 'dotted, rounded',
    })

    # adding attributes to edges in multigraphs is more complicated but see
    # https://stackoverflow.com/a/26694158
    #G[0][0]['color']='red'

    pdot = nx.drawing.nx_pydot.to_pydot(G)

    if G.name:
        pdot.set_label(G.name)
        pdot.set_labelloc('t')
        pdot.set_labeljust('l')

    def make_legend():

        if legend_edges:

            pydot_mod = get_pydot_mod(pdot)

            glegend = pydot_mod.Cluster(graph_name='Legend',
                                        label='Legend',
                                        labeljust='c')

            i = 0
            for line in legend_edges:

                n1 = pydot_mod.Node(name='legend%s' % i,
                                    label=line['label'],
                                    shape='none',
                                    fontcolor=line['color'])
                n2 = pydot_mod.Node(name='legend%s' % (i + len(legend_edges)),
                                    label='',
                                    shape='none')
                glegend.add_node(n1)
                glegend.add_node(n2)
                glegend.add_edge(
                    pydot_mod.Edge(n1, n2, color=line['color'], penwidth=3))

                i += 1

            pdot.add_subgraph(glegend)

    def make_clusters():

        allowed_types = (int, float, str, tuple)

        clus = {}
        nodes = G.nodes(data=True)
        if len(nodes) > 0:
            for node_id, data in nodes:
                if 'cluster' in data:
                    c = data['cluster']

                    if type(c) not in allowed_types:
                        raise ValueError('Cluster type must be one of %s, found instead: %s'
                                         % (allowed_types, type(c)))
                    if c in clus:
                        clus[c].append(node_id)
                    else:
                        clus[c] = [node_id]

            for c in clus:
                if len(clus[c]) > 0:
                    pydot_mod = get_pydot_mod(pdot)
                    pydot_nodes = []
                    pydot_cluster = pydot_mod.Cluster(graph_name=str(c))
                    for node_id in clus[c]:
                        pydot_node = pydot_mod.Node(name=node_id)
                        pydot_cluster.add_node(pydot_node)
                    pdot.add_subgraph(pydot_cluster)

    make_clusters()
    make_legend()
    fix_save_to = save_to.strip().lower()

    if fix_save_to:
        try:
            # note for saving dot we don't require graph_viz installed
            if fix_save_to.endswith('.dot'):
                pdot.write_raw(save_to.strip())
                print("Dot saved to file: ", save_to)
            else:
                if not fix_save_to.endswith('.png'):
                    raise ValueError(
                        "Provided filename should end with .png  found instead save_to=%s"
                        % save_to)
                pdot.write_png(save_to)
                print("Image saved to file: ", save_to)

        except Exception as e:
            print("ERROR: Could not save file to ", save_to)
            print(e)

    # if we are in jupyter ...
    if not fix_save_to.endswith('.dot'):
        # if we save the dot file it's probably because
        # we don't have graphviz on our system
        import importlib
        ipython_spec = importlib.util.find_spec("IPython")
        if ipython_spec:
            from IPython.display import Image, display
            img = Image(pdot.create_png())
            display(img)
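# A minimal usage sketch (an illustration, not part of the original snippet;
# it assumes networkx and pydot are installed and the code runs inside Jupyter):
import networkx as nx

G = nx.DiGraph(name='demo')
G.add_edges_from([('a', 'b'), ('b', 'c')])
draw_nx(G)  # renders inline; pass save_to='demo.png' or 'demo.dot' to save instead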
Example #11
                         'pereou05','pereou06', 'intent02', 'intent03', 'intent01']]

#Create Regression Tree using intent01 as the target variable, min_samples_split=20, min_samples_leaf=20
col_names = list(reduc_data1.iloc[:,0:14].columns.values)
col_names

tre = tree.DecisionTreeRegressor(min_samples_split=20,min_samples_leaf=20).fit(reduc_data1.iloc[:,0:14],reduc_data1.intent01)

dot_data = StringIO()
tree.export_graphviz(tre, out_file=dot_data,
                     feature_names=col_names,
                     filled=True,
                     rounded=True,
                     special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
display(Image(graph.create_png()))

#Create plot for your tree (run all together)
plt.scatter(reduc_data1.intent03, reduc_data1.intent01, color='DarkBlue')
plt.plot([4.5,4.5], [0,7], color='Green')
plt.plot([6.5,6.5], [0,7], color='Green')
plt.plot([0,4.5],[2.818,2.818], color='Red')
plt.plot([4.5, 6.5],[6.252, 6.252], color='Red')
plt.xlabel('intent03')
plt.ylabel('intent01')

#2. Classification trees
# Read in data
titanic_data = pd.read_csv('titanic_data.txt', sep='\t')
titanic_data.columns
titanic_data.dtypes
Example #12
# each function.
get_ipython().run_line_magic('prun', 'random_walker_max_distance(400, 10000)')


# ### Jupyter: External image rendering

# In[31]:


from IPython.display import display, Image, HTML, Math


# In[32]:


Image(url='http://python.org/images/python-logo.gif')


# ### Jupyter: HTML rendering

# In[33]:


import scipy, numpy, matplotlib
modules = [numpy, matplotlib, scipy]

row = "<tr><td>%s</td><td>%s</td></tr>"
rows = "\n".join(
    [row % 
     (module.__name__, module.__version__) 
     for module in modules])
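# (The snippet is cut off here; a plausible completion that wraps the rows in
# a table and renders it, following the HTML-rendering pattern above:)
table = "<table>%s</table>" % rows
display(HTML(table))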
Example #13
#
# In this tutorial we will implement a simple Convolutional Neural Network in TensorFlow which has a classification accuracy of about 99%, or more if you make some of the suggested exercises.
# 
# Convolutional Networks work by moving small filters across the input image. This means the filters are re-used for recognizing patterns throughout the entire input image. This makes the Convolutional Networks much more powerful than Fully-Connected networks with the same number of variables. This in turn makes the Convolutional Networks faster to train.
# 
# You should be familiar with basic linear algebra, Python and the Jupyter Notebook editor. Beginners to TensorFlow may also want to study the first tutorial before proceeding to this one.

# ## Flowchart

# The following chart shows roughly how the data flows in the Convolutional Neural Network that is implemented below.

# In[1]:


from IPython.display import Image
Image('images/02_network_flowchart.png')


# The input image is processed in the first convolutional layer using the filter-weights. This results in 16 new images, one for each filter in the convolutional layer. The images are also down-sampled so the image resolution is decreased from 28x28 to 14x14.
# 
# These 16 smaller images are then processed in the second convolutional layer. We need filter-weights for each of these 16 channels, and we need filter-weights for each output channel of this layer. There are 36 output channels so there are a total of 16 x 36 = 576 filters in the second convolutional layer. The resulting images are down-sampled again to 7x7 pixels.
# 
# The output of the second convolutional layer is 36 images of 7x7 pixels each. These are then flattened to a single vector of length 7 x 7 x 36 = 1764, which is used as the input to a fully-connected layer with 128 neurons (or elements). This feeds into another fully-connected layer with 10 neurons, one for each of the classes, which is used to determine the class of the image, that is, which number is depicted in the image.
# 
# The convolutional filters are initially chosen at random, so the classification is done randomly. The error between the predicted and true class of the input image is measured as the so-called cross-entropy. The optimizer then automatically propagates this error back through the Convolutional Network using the chain-rule of differentiation and updates the filter-weights so as to improve the classification error. This is done iteratively thousands of times until the classification error is sufficiently low.
# 
# These particular filter-weights and intermediate images are the results of one optimization run and may look different if you re-run this Notebook.
# 
# Note that the computation in TensorFlow is actually done on a batch of images instead of a single image, which makes the computation more efficient. This means the flowchart actually has one more data-dimension when implemented in TensorFlow.
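# The architecture described above, written as a compact tf.keras sketch for
# reference (an illustration only - the tutorial itself builds the network in
# low-level TensorFlow; layer sizes follow the text: 16 and 36 filters, 2x2
# down-sampling, 7*7*36 = 1764 flattened inputs, then 128 and 10 neurons):
import tensorflow as tf

model = tf.keras.Sequential([
    tf.keras.layers.Conv2D(16, 5, padding='same', activation='relu',
                           input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPooling2D(2),   # 28x28 -> 14x14
    tf.keras.layers.Conv2D(36, 5, padding='same', activation='relu'),
    tf.keras.layers.MaxPooling2D(2),   # 14x14 -> 7x7
    tf.keras.layers.Flatten(),         # 7*7*36 = 1764
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(10, activation='softmax'),
])
model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')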

# ## Convolutional Layer
Example #14
if opts.modelname is None:
    model = Model(inputs=[input_tensor, labels, input_length, label_length], outputs=[loss_out])
    model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer='adadelta')
    #model.compile(loss={'ctc': lambda y_true, y_pred: y_pred}, optimizer='sgd')
else:
    model = load_model(opts.modelname, custom_objects={'<lambda>': lambda y_true, y_pred: y_pred})
    base_model = load_model("base_" + opts.modelname)

'''
print structure of model
'''

if opts.printmodel:
    plot_model(model, to_file="model.png", show_shapes=True)
    Image('model.png')

if not opts.testing:
    model.fit_generator(gen(opts.batch_size), steps_per_epoch=opts.steps, epochs=opts.epochs,
            callbacks=[EarlyStopping(patience=10), evaluator],
            validation_data=gen(), validation_steps=1280)
else:
    #start = time.time()
    print("testing......")
    print()
    characters2 = characters + ' '
    [X_test, y_test, _, _], _  = next(gen(1))
    #cv2.imwrite("./save_image/test.jpg" , X_test)
    #print("shape of image:"),
    #print(X_test.shape)
    #X_test[0] = cv2.imread('ScreenShot.png').transpose(1, 0, 2)
Example #15
NN.add(
    Conv2D(filters=64,
           kernel_size=(3, 3),
           activation='relu',
           input_shape=(28, 28, 1)))
#--------------Then adding a pooling layer------------------------------------#
NN.add(MaxPooling2D(pool_size=(2, 2)))
#--------------Then adding flattening the results-----------------------------#
NN.add(Flatten())
#-------Then adding a dense layer to reduce the number of features-------#
NN.add(Dense(units=128, activation='relu'))
#-------Then adding a dense layer to produce the final output-----------#
NN.add(Dense(units=10, activation='softmax'))
#-------------------Visualizing a Model's Structure---------------------------#
plot_model(NN, to_file='convnet.png', show_shapes=True, show_layer_names=True)
Image(filename='convnet.png')  # display convnet.png, which plot_model wrote above
# output: shows the structure diagram of the model
#--------------------------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------------------------#

#--------------------------------------------------------------------------------------------------#
# Creating the Neural Networks and adding Layers to the Networks.
# Configuring Keras to Write the TensorBoard Log Files
from tensorflow.keras.datasets import mnist
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, Dense, Flatten, MaxPooling2D
from tensorflow.keras.utils import plot_model
from IPython.display import Image
from tensorflow.keras.callbacks import TensorBoard
import time
NN = Sequential()
#--------------Then adding a convolution layer--------------------------------#
Example #16
#   figure:
#     caption: |
#       Hey everyone its **party** time!
#     name: fun-fish
# ---
# from IPython.display import Image
# Image("../images/fun-fish.png")
# ```
# ````
#
# produces the following code cell and figure:

# In[2]:

from IPython.display import Image
Image("../images/fun-fish.png")

# Now we can link to the image from anywhere in our documentation: [swim to the fish](fun-fish)
#
# :::{seealso}
# [](jupyter-cell-tags)
# :::
#
# (content:code-outputs:markdown)=
# ## Markdown
#
# Markdown output is parsed by MyST-Parser, currently with the parsing set to strictly [CommonMark](https://commonmark.org/).
#
# The parsed Markdown is then integrated into the wider context of the document. This means it is possible, for example, to include internal references:

# In[3]:
Example #17
def show_image(image_path):
    display(Image(image_path))

    image_rel = image_path.replace(root, '')
    caption = "Image "
    display(HTML("<div>%s</div>" % caption))
Example #18
x = BatchNormalization(axis=3)(x)
x = Conv2D(64, kernel_size=(1,1), strides=(1,1), padding='valid', activation='relu')(x)
x = Conv2D(192, kernel_size=(3,3), strides=(1,1), padding='same', activation='relu')(x)

x = Inception(x, filters=[64, 96, 128, 16, 32, 32])
x = Inception(x, filters=[128, 128, 192, 32, 96, 64]) 
x = MaxPool2D(pool_size=(3,3), strides=(2,2))(x)
x = Inception(x, filters=[192, 96, 208, 16, 48, 64])
x = Inception(x, filters=[160, 112, 224, 24, 64, 64])
output1 = Pridect(x)

x = Inception(x, filters=[128,128,256,24,64,64])
x = Inception(x, filters=[112,144,288,32,64,64])
output2 = Pridect(x)

x = MaxPool2D(pool_size=(3,3), strides=(2,2))(x)
x = Inception(x, filters=[256,160,320,32,128,128])
x = Inception(x, filters=[384,192,384,48,128,128])
x = AvgPool2D(pool_size=(7,7), strides=(1,1))(x)

x = Flatten()(x)
x = Dropout(0.5)(x)
output3 = Dense(classes, activation='softmax',name='pridects')(x)

model = Model(inputs=input, outputs=[output1, output2, output3])
model.summary()

#draw the network's structure diagram
plot_model(model=model, to_file='GoogleNet.png')
Image('GoogleNet.png')
Example #19
modi_arr = new_arr[4:9] 
modi_arr
modi_arr[1] = 123456
print(new_arr)  # you can see the changes are reflected in the main array
modi_arr[:]  # the sliced variable

# arrays can be treated like matrices
matrix_arr = np.array([[3, 4, 5], [6, 7, 8], [9, 5, 1]])
print(matrix_arr)
print(matrix_arr[:][2])  # [:] returns the whole array, so this is simply the third row
print(matrix_arr[1][2])  # second row, third column
print(matrix_arr[0, 2])  # comma indexing: first row, third column

from IPython.display import Image  # load an image file from disk
i = Image(filename='download.png')
i  # blueprint of a matrix

if 5 > 10:
    print("false")
else:
    print("hahahahaaaa")

import numpy as np
np.__version__
np.__all__

result = 0
for i in range(100):
Example #20
from keras.callbacks import ReduceLROnPlateau
from sklearn.model_selection import train_test_split
import pandas as pd

# # 1. Prepare data
# MNIST dataset has a good collection of handwritten digits. The train set has 60,000 rows of data and the test set has 10,000 rows of data. The binary data is available at http://yann.lecun.com/exdb/mnist/. <br><br>
# However, to demonstrate the real-life situation where the input is a real image, we have downloaded the png images from this link https://github.com/myleott/mnist_png/blob/master/mnist_png.tar.gz. It extracts the data from the binary files and converts them into 60,000 train images and 10,000 test images. The images are categorized according to their labels

# Now we are going to use imageio to read one image. The sample_image is a 28*28 numpy array. To show that the image is labelled correctly, we will use matplotlib to visualize the array

# In[12]:

sample_image = imageio.imread('images/train/0/1.png')
print("The shape of the sample image: ", sample_image.shape)
g = plot.imshow(sample_image)
Image("images/train/0/1.png")

# The real image and the graph show that this digit is a 0, matching its label. Now we need to create a method to read all images and their labels into numpy. <br> We normalize the data from [0..255] to [0..1]. This also helps the CNN converge faster.

# In[3]:

train_x = np.empty([60000, 28, 28])
train_y = np.empty([60000], dtype="int32")
test_x = np.empty([10000, 28, 28])
test_y = np.empty([10000], dtype="int32")


def store_image_to_train_test(index, is_train, image_array, label):
    if is_train:
        train_x[index] = image_array / 255
        train_y[index] = int(label)
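    else:
        # completion of the truncated excerpt: the test branch mirrors the train branch
        test_x[index] = image_array / 255
        test_y[index] = int(label)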
Example #21
def show_image(image_path):
    display(Image(image_path))

    image_rel = image_path.replace(root, '')
    caption = "Image " + ' - '.join(attributions[image_rel].split(' - ')[:-1])
    display(HTML("<div>%s</div>" % caption))
Example #22
predictionsDT = dtree.predict(X_test)
print("Accuracy of DT (trained and tested on original data): %s%%\n" % (100*accuracy_score(Y_test, predictionsDT)))
results.write("Accuracy of DT (trained and tested on original data): %s%%\n" % (100*accuracy_score(Y_test, predictionsDT)))

print("Classification similarity of NN and DT trained on target model dataset: %s%%\n" % (np.sum(predictionsDT==predictionsNN)*1.0/len(predictionsDT)*100))
results.write("Classification similarity of NN and DT trained on target model dataset: %s%%\n" % (np.sum(predictionsDT==predictionsNN)*1.0/len(predictionsDT)*100))


#Visualisation
dot_data = StringIO()
export_graphviz(dtree, out_file=dot_data, 
                feature_names=list(dataset.columns),
                filled=True, rounded=True,
                special_characters=True)
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())  
graph.write_png("visualisations/income/income_dtree_trained_original.png")
Image(filename="visualisations/income/income_dtree_trained_original.png")

###########################
##### SYNTHESIZE DATA #####
###########################

print "Intializing Training Data Synthesis\n"

def incomeRandomizeFunction(k, x, c):
    x_age = random.randint(0, 100)
    x_fnlwgt = random.randint(12285, 1484705)
    x_capital_gain = random.randint(0, 99999)
    x_capital_loss = random.randint(0, 4356)
    x_hours_per_week = random.randint(0, 99)
    x_one_hot = [random.randint(0, 1) for i in range(102)]
    x_temp = [x_age, x_fnlwgt, x_capital_gain, x_capital_loss, x_hours_per_week] + x_one_hot
Example #23
def make_gif(self, filename="render"):
    imageio.mimsave(
        filename + '.gif',
        [np.array(img) for i, img in enumerate(self.images) if i % 2 == 0],
        fps=29)
    return Image(open(filename + '.gif', 'rb').read())
Example #24
import re
from urllib.request import urlopen
from bs4 import BeautifulSoup
from IPython.display import display, Image
from IPython.core.display import HTML

html = urlopen('https://en.wikipedia.org/wiki/2019_Rugby_World_Cup')
bs = BeautifulSoup(html, 'html.parser')
tables = bs.find_all('a')
for tag in bs.find_all('img'):
  # print each image's source URL
  print(tag.attrs['src'])

for i, tag in enumerate(bs.find_all('img')):
  print('Image {}:\n'.format(i))
  display(Image(url=tag.attrs['src']))  # without display() the Image object is discarded inside the loop

# //upload.wikimedia.org/wikipedia/en/thumb/b/be/Flag_of_England.svg/23px-Flag_of_England.svg.png

import os
import csv
import re
from urllib.request import urlopen
from bs4 import BeautifulSoup
from IPython.display import display, Image
from IPython.core.display import HTML
import requests


session = requests.Session()
headers = {'User-Agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/78.0.3904.97 Safari/537.36'}
Example #25
!python /content/models/research/object_detection/model_main.py \
    --pipeline_config_path={pipeline_fname} \
    --model_dir='training' \
    --alsologtostderr \
    --num_train_steps=8000 \
    --num_eval_steps=50

#web camera capture
%cd /content/edge/test
from IPython.display import Image
try:
  filename = take_photo()
  print('Saved to {}'.format(filename))
  
  # Show the image which was just taken.
  display(Image(filename))
except Exception as err:
  # Errors will be thrown if the user does not have a webcam or if they do not
  # grant the page permission to access it.
  print(str(err))

#object detection
%cd /content/models/research/object_detection

import numpy as np
import os
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
Example #26
def display_images(filenames):
    for file in filenames:
        display(Image(file))
Example #27
# Adding an image
from IPython.display import Image
Image(filename='data10/7_group_by/group_by_cities.png')  # pass the path where the image lives

# Adding a video - embedding from YouTube
from IPython.display import YouTubeVideo
YouTubeVideo("KKGfjhs_26M")  # pass only the ID at the end of the YouTube URL
Example #28
def graph_tcp(latency):
    set_latency(latency)

    tcp_state_change_script = """
 /*
   fbt::tcp_do_segment:entry {
        trace((unsigned int)args[1]->th_seq);
        trace((unsigned int)args[1]->th_ack);
        trace(tcp_state_string[args[3]->t_state]);
    }
    */
    fbt::tcp_state_change:entry {
        printf("{\\"timestamp\\": %u, \\"local_port\\": %u, 
        \\"foreign_port\\": %u, \\"previous_tcp_state\\": \\"%s\\", 
        \\"tcp_state\\": \\"%s\\"}", 
        walltimestamp,
        ntohs(args[0]->t_inpcb->inp_inc.inc_ie.ie_lport),
        ntohs(args[0]->t_inpcb->inp_inc.inc_ie.ie_fport),
        tcp_state_string[args[0]->t_state],
        tcp_state_string[args[1]]);

        stack();
    }
    """

    # Callback invoked to process the aggregation
    values = []

    def simple_out(raw_value):
        values.append(raw_value)

    # Create a separate thread to run the DTrace instrumentation
    dtrace_thread = DTraceConsumerThread(tcp_state_change_script,
                                         out_func=simple_out,
                                         chew_func=lambda v: None,
                                         chewrec_func=lambda v: None,
                                         walk_func=None,
                                         sleep=1)
    cmd("sysctl net.inet.tcp.hostcache.purgenow=1")
    # Start the DTrace instrumentation
    dtrace_thread.start()

    # Display header to indicate that the benchmarking has started
    print("Running ipc benchmark")

    # Run the ipc-static benchmark
    benchmark_output = cmd("ipc/ipc-static -v -i tcp 2thread")

    cmd("sleep 1")
    # The benchmark has completed - stop the DTrace instrumentation
    dtrace_thread.stop()
    dtrace_thread.join()
    dtrace_thread.consumer.__del__()

    label = "TCP state machine - {} ms latency".format(latency)
    output_file = "TCP_state_machine_{}_ms.png".format(latency)
    tcp_state_machine = pgv.AGraph(
            label=label, strict=False, directed=True)
    for raw_value in values:
        try:
            value = json.loads(raw_value)
            # print(value)
            # JSON formatted string
            if 'previous_tcp_state' in value and 'tcp_state' in value:
                from_state = value['previous_tcp_state'][6:]
                to_state = value['tcp_state'][6:]
                label = "server" if value[
                                        "local_port"] == TARGET_PORT else \
                    "client"

                # print "State transition {} -> {}".format(
                #    value['previous_tcp_state'], value['tcp_state'])
            else:
                print "String malformatted missing previous_tcp_state of " \
                      "tcp_state fields"
        except ValueError as e:  # stack trace
            prec_f = "\n".join([i.replace('`', '+').split("+")[1] for i in
                                raw_value.split('\n')[1:2]][::-1])
            tcp_state_machine.add_edge(from_state, to_state,
                                       label=label + "\n({})".format(prec_f),
                                       color='green')

            # Raw string - manually post-process
            # print "Preceeding stack frame {}".format(raw_value.split('\n')[1])

    print("Completed")
    tcp_state_machine.draw(path=output_file, format='png', prog='dot')
    return Image(output_file)
Example #29
                                    cv=10,
                                    scoring='recall_micro')
cross_fold.Accuracy = cross_val_score(rforest1,
                                      tree_train,
                                      predict_train,
                                      cv=10)
cross_fold
type(predict_train)

import pickle
filename = 'finalized_model.sav'
pickle.dump(rforest1, open(filename, 'wb'))

from io import StringIO  # sklearn.externals.six was removed in newer scikit-learn
from IPython.display import Image
from sklearn.tree import export_graphviz
import pydotplus

dot_data = StringIO()
export_graphviz(clf,
                out_file=dot_data,
                filled=True,
                rounded=True,
                special_characters=True,
                feature_names=tree_train.columns,
                class_names=["1", "2", "3", "4", "5"])
graph = pydotplus.graph_from_dot_data(dot_data.getvalue())
Image(graph.create_png())

# os.environ["PATH"] += os.pathsep + 'C:/Program Files (x86)/Graphviz2.38/bin/'
Example #30
def render_povstring(string, outfile=None, height=None, width=None,
                     quality=None, antialiasing=None, remove_temp=True,
                     show_window=False, tempfile=None, includedirs=None,
                     output_alpha=False,
                     pov_binary=POVRAY_BINARY):

    """ Renders the provided scene description with POV-Ray.

    Parameters
    ------------

    string
      A string representing valid POVRay code. Typically, it will be the result
      of scene(*objects)

    outfile
      Name of the PNG file for the output.
      If outfile is None, a numpy array is returned (if numpy is installed).
      If outfile is 'ipython' and this function is called last in an IPython
      notebook cell, this will print the result in the notebook.

    height
      height in pixels

    width
      width in pixels

    output_alpha
      If true, the background will be transparent, rather than the default
      black background. Note that this option is ignored if rendering to a
      numpy array, due to limitations of the intermediate ppm format.

    """

    print(pov_binary)
    
    pov_file = tempfile or '__temp__.pov'
    with open(pov_file, 'w+') as f:
        f.write(string)

    return_np_array = (outfile is None)
    display_in_ipython = (outfile == 'ipython')

    format_type = "P" if return_np_array else "N"

    if return_np_array:
        outfile = '-'

    if display_in_ipython:
        outfile = '__temp_ipython__.png'

    cmd = [pov_binary, pov_file]
    if height is not None: cmd.append('+H%d'%height)
    if width is not None: cmd.append('+W%d'%width)
    if quality is not None: cmd.append('+Q%d'%quality)
    if antialiasing is not None: cmd.append('+A%f'%antialiasing)
    if output_alpha: cmd.append('Output_Alpha=on')
    if not show_window:
        cmd.append('-D')
    else:
        cmd.append('+D')
    if includedirs is not None:
        for includedir in includedirs:
            cmd.append('+L%s' % includedir)
    cmd.append("Output_File_Type=%s"%format_type)
    cmd.append("+O%s"%outfile)
    process = subprocess.Popen(cmd, stderr=subprocess.PIPE,
                                    stdin=subprocess.PIPE,
                                    stdout=subprocess.PIPE)

    out, err = process.communicate(string.encode('ascii'))

    if remove_temp:
        os.remove(pov_file)

    if process.returncode:
        print(type(err), err)
        raise IOError("POVRay rendering failed with the following error: "+err.decode('ascii'))

    if return_np_array:
        return ppm_to_numpy(buffer=out)

    if display_in_ipython:
        if not ipython_found:
            raise ImportError("The 'ipython' option only works in the IPython Notebook.")
        return Image(outfile)