def rotate_img(image, angle, color, filter=Image.NEAREST):

    if image.mode == "P" or filter == Image.NEAREST:
        matte = Image.new("1", image.size, 1) # mask
    else:
        matte = Image.new("L", image.size, 255) # true matte
    bg = Image.new(image.mode, image.size, color)
    bg.paste(
        image.rotate(angle, filter),
        matte.rotate(angle, filter)
    )
    return bg
def process_images(folder):

    classes = [os.path.join(folder, d) for d in sorted(os.listdir(folder))]  # get list of all sub-folders in folder
    img_cnt = 0

    for class_x in classes:

        if os.path.isdir(class_x):

            # get paths to all the images in this folder
            images = [os.path.join(class_x, i) for i in sorted(os.listdir(class_x)) if i != '.DS_Store']

            for image in images:

                img_cnt = img_cnt + 1

                if img_cnt % 1000 == 0:                 # show progress
                    print("Processed %d images" % img_cnt)

                im = Image.open(image)
                im = im.resize(dimensions)   # resize image according to dimensions set

                im = make_greyscale_white_bg(im, 127, 127, 127) # turn grey background (if any) to white, and
                                                                  # convert into greyscale image with 1 channel

                im = invert_colors(im)
                im.save(image)   # overwrite previous image file with new image

    print("Finished processing images, images found = ")
    print(img_cnt)
Example #3
 def to_image(self, data, index_axial=0, index_azimuthal=0, scale=None, absolute_scale=False):
     from PIL import Image
     from numpy import float32
     a = float32(data[index_axial, index_azimuthal, :, :].reshape((data.shape[2], data.shape[3])))
     if scale is None:
         a = 255.0*(a)/(a.max()+1e-12)
     else: 
         if absolute_scale: 
             a = scale*(a) 
         else: 
             a = scale*255.0*(a)/(a.max()+1e-12)
     im = Image.fromarray(a).convert("RGB")
     return im.rotate(90) 
def make_greyscale_white_bg(im, r, b, g):

    im = im.convert('RGBA')   # Convert to RGBA


    data = np.array(im)   # "data" is a height x width x 4 numpy array
    red, green, blue, alpha = data.T # Temporarily unpack the bands for readability

    # Replace grey with white... (leaves alpha values alone...)
    grey_areas = (red == r) & (blue == b) & (green == g)
    data[..., :-1][grey_areas.T] = (255, 255, 255) # Transpose back needed

    im2 = Image.fromarray(data)
    im2 = im2.convert('L')   # convert to greyscale image


    return im2
def test_rotations():

    img = Image.open("Train/172/bcc000002.bmp")

    #img = img.rotate(30)

    img = img.resize(dimensions)



    rot = make_greyscale_white_bg(img, 127, 127, 127)

    rot = invert_colors(rot)
    c_color = rot.getpixel((0, 0))
    rot = rotate_img(rot, 10, c_color)

    w, h = rot.size
    rot.show()
def augment_by_rotations(folder, prev_cnt):

    classes = [os.path.join(folder, d) for d in sorted(os.listdir(folder))]  # get list of all sub-folders in folder

    for path_to_folder in classes:

        if os.path.isdir(path_to_folder):
            images = [os.path.join(path_to_folder, i) for i in sorted(os.listdir(path_to_folder)) if i != '.DS_Store']
            filename = prev_cnt
            for image in images:

                im = Image.open(image)

                # make 4 copies of each image, with random rotations added in
                random_rotate(im, 4, filename, path_to_folder)
                filename = filename + 4

            print("Finished augmenting " + path_to_folder)
def invert_colors(im):

    im = im.convert('RGBA')   # Convert to RGBA
    data = np.array(im)   # "data" is a height x width x 4 numpy array
    red, green, blue, alpha = data.T # Temporarily unpack the bands for readability


    # Replace black with red temporarily... (leaves alpha values alone...)
    black_areas = (red == 0) & (blue == 0) & (green == 0)
    data[..., :-1][black_areas.T] = (255, 0, 0) # Transpose back needed

    # Replace white areas with black
    white_areas = (red == 255) & (blue == 255) & (green == 255)
    data[..., :-1][white_areas.T] = (0, 0, 0) # Transpose back needed

    # Replace red areas (originally black) with white
    red_areas = (red == 255) & (blue == 0) & (green == 0)
    data[..., :-1][red_areas.T] = (255, 255, 255) # Transpose back needed

    im2 = Image.fromarray(data)
    im2 = im2.convert('L')   # convert to greyscale image


    return im2
print(metrics.classification_report(y_test, prediction))

# In[ ]:

from sklearn import tree, metrics, model_selection, preprocessing
from IPython.display import Image, display
import graphviz
import pydotplus

dot_data = tree.export_graphviz(
    model,
    out_file=None,
    filled=True,
    rounded=True,
)
graph = pydotplus.graph_from_dot_data(dot_data)
display(Image(graph.create_png()))
graph.write_png('decTreeOutput.png')

# # CONCLUSION
# The accuracy of K-Nearest Neighbors is 84% for a K-value of 25, while the Decision Tree has an accuracy of 77%.
#

#
# Since KNN achieved the greatest accuracy, it is the most suitable algorithm for predicting the HAR data.

# The results could be improved by using more creativity in the feature engineering.

# In[ ]:

# In[ ]:
Example #9
'''
A small robot has to move on a 15 x 15 checkerboard of black and white cells, each 40 pixels per side.
To make the route rough, some cells of the board contain obstacles (these cells are colored red).

An example of a board with obstacles is given by the image 'I1.png'.

To load and save PNG images, use the load and save functions provided in the immagini.py module.

At the start the robot is placed on the top-left cell of the board, facing right (increasing x).
At each step it tries to reach one of the adjacent cells, horizontally or vertically.
The robot's movement rules are the following:
- at a generic step, it moves onto the cell in front of it if that cell is free of obstacles and has not already been visited;
- if instead the cell is occupied, or is a cell it has already visited, it rotates 90 degrees clockwise and waits for the next step;
- after rotating a full 360 degrees without managing to move, it stops.

Design the function percorso(fname, fname1) which, given as input:
- the path of a file (fname) containing the .png image of a board with obstacles
- the path of a .png file (fname1) to be created
reads the board image from fname, colors in green the board cells traversed by the robot before stopping,
colors in blue the cell where the robot stops, and saves the recolored image to the file fname1.
It also returns a string encoding, in sequence, the steps taken by the robot before stopping.
The encoding is the following:
    '0' for a step to the right (increasing x)
    '1' for a step downwards (increasing y)
    '2' for a step to the left (decreasing x)
    '3' for a step upwards (decreasing y)

It can be assumed that the top-left cell is free of obstacles.

For examples of boards with obstacles and the corresponding paths, see the file grade02.txt.
'''
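
# A possible sketch of percorso() (not the official solution). It assumes that
# immagini.load(fname) returns a row-major matrix img[y][x] of (r, g, b) tuples,
# that immagini.save(img, fname) writes such a matrix to a PNG file, and that
# obstacle cells are pure red (255, 0, 0).
from immagini import load, save

LATO = 40                                  # side of a cell, in pixels
ROSSO = (255, 0, 0)                        # obstacle cell
VERDE = (0, 255, 0)                        # traversed cell
BLU = (0, 0, 255)                          # cell where the robot stops
DELTA = {0: (1, 0), 1: (0, 1), 2: (-1, 0), 3: (0, -1)}   # step codes '0'..'3'


def _colora_cella(img, cx, cy, colore):
    '''Paint the whole cell (cx, cy) with the given color.'''
    for y in range(cy * LATO, (cy + 1) * LATO):
        for x in range(cx * LATO, (cx + 1) * LATO):
            img[y][x] = colore


def percorso(fname, fname1):
    img = load(fname)
    n = len(img) // LATO                   # number of cells per side (15 here)
    cx, cy, direzione = 0, 0, 0            # start at the top-left cell, facing right
    visitate = {(0, 0)}
    passi = ''
    rotazioni = 0
    while rotazioni < 4:                   # stop after a full 360-degree turn without moving
        dx, dy = DELTA[direzione]
        nx, ny = cx + dx, cy + dy
        libera = (0 <= nx < n and 0 <= ny < n
                  and (nx, ny) not in visitate
                  and img[ny * LATO + LATO // 2][nx * LATO + LATO // 2] != ROSSO)
        if libera:                         # move one cell forward
            passi += str(direzione)
            cx, cy = nx, ny
            visitate.add((cx, cy))
            rotazioni = 0
        else:                              # blocked: rotate 90 degrees clockwise and wait
            direzione = (direzione + 1) % 4
            rotazioni += 1
    for vx, vy in visitate - {(cx, cy)}:   # cells traversed before stopping
        _colora_cella(img, vx, vy, VERDE)
    _colora_cella(img, cx, cy, BLU)        # cell where the robot stopped
    save(img, fname1)
    return passi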
Example #10
    display: table-cell;
    text-align: center;
    vertical-align: middle;
}
</style>
""")

# # *Circuitos Elétricos I*

# ## Week 2 - Divider circuits
#
# ### Loading of a supply circuit
#
# When a voltage source supplies energy to a given circuit element, the source is said to be *loaded*, or the element connected to the source is said to be a *load* being fed by the source. In general, loading a circuit is the process of introducing elements that will draw current from the circuit. The larger the magnitude of the drawn current, the greater the loading effect.

Image("./figures/J3C0.png", width=300)

# From the voltage-divider relation, we have:
#
# $$ \begin{align} v_x &= \frac{R_x}{R_{eq}}v_s = \frac{R_x}{R_x+R_y}v_s\\
#                  v_y &= \frac{R_y}{R_{eq}}v_s = \frac{R_y}{R_x+R_y}v_s
#     \end{align}$$
#
# When a load $R_L$ is connected to the resistor $R_y$, the voltage $v_y$ is given by:
#
# $$\begin{align} v_y &= \frac{(R_y||R_L)}{R_{eq}}v_s = \frac{(R_y||R_L)}{R_x+(R_y||R_L)}v_s\\ \\
#                 v_y &= \frac{\frac{R_yR_L}{R_y+R_L}}{R_x+\frac{R_yR_L}{R_y+R_L}}v_s = \frac{R_yR_L}{R_x(R_y+R_L)+R_yR_L}v_s = \frac{R_y}{R_x(\frac{R_y}{R_L}+1)+R_y}v_s
# \end{align}$$
#
#
# Note that, if $R_L \gg R_y$, the voltage $v_y$ with the load connected approaches the voltage obtained with the terminals open-circuited.
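
# A quick numerical sketch of this loading effect (the resistor values below are
# illustrative and not taken from the text):

Rx, Ry, vs = 10e3, 10e3, 12           # ohms, ohms, volts
vy_open = Ry / (Rx + Ry) * vs         # unloaded divider output

for RL in [1e3, 10e3, 100e3, 1e6]:    # increasing load resistance
    Rp = Ry * RL / (Ry + RL)          # Ry || RL
    vy = Rp / (Rx + Rp) * vs
    print(f"RL = {RL:>9.0f} ohm -> vy = {vy:.3f} V  (open-circuit: {vy_open:.3f} V)")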

#%% adding second level to workflow
wFSL.connect([
    (copemerge, flameo, [('merged_file', 'cope_file')]),
    (varcopemerge, flameo, [('merged_file', 'var_cope_file')]),
    (level2model, flameo, [('design_mat', 'design_file'),
                           ('design_con', 't_con_file'), ('design_grp',
                                                          'cov_split_file')]),
])


#%%
wFSL.write_graph("workflow_graph.dot", graph2use='colored', format='png', simple_form=True)
from IPython.display import Image
Image(filename="/media/Data/work/l1FSL/workflow_graph.png")
wFSL.write_graph(graph2use='flat')

Image(filename = '/media/Data/work/l1run/graph_detailed.png')

wFSL.run('MultiProc', plugin_args={'n_procs': 3})         


#% Graph single subject
from nilearn.plotting import plot_stat_map
from nilearn.plotting import plot_glass_brain
anatimg = '/media/Data/FromHPC/output/fmriprep/sub-1072/anat/sub-1072_space-MNI152NLin2009cAsym_desc-preproc_T1w.nii.gz'
plot_stat_map(
    '/media/Data/work/l1FSL/_subject_id_1072/filmgls/mapflow/_filmgls1/stats/zstat1.nii.gz', title='LossRisk - fwhm=6',
    bg_img=anatimg, threshold=3, display_mode='x', cut_coords=(-5, 0, 5, 10, 15), dim=0);
        
#!/usr/bin/env python
# coding: utf-8

# # <font color ='Red'> PRODUCT FEEDBACK ANALYSIS </font>
# 

# #### Social media plays an important role in the marketing of a product. It can also be used to learn customers' opinions on improving the product's features, its quality, and many other things.
# #### Sentiment analysis can give us a rough idea of the future demand for the product, which can help us increase revenue and cut down on manufacturing costs.
# #### In this Jupyter Notebook, we will perform sentiment analysis on a car model, using the comments from its Facebook post as the data.
# #### We will use Python libraries along with the Facebook Graph API to achieve our goal; a sketch of the comment-collection step follows below.
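
# A minimal sketch of how those comments might be pulled with the Graph API
# (the post id and access token below are placeholders, and the real notebook
# may collect the data differently):

import requests

POST_ID = "<page-id>_<post-id>"   # placeholder, not a real post id
ACCESS_TOKEN = "<access-token>"   # placeholder, e.g. generated in the Graph API Explorer

url = "https://graph.facebook.com/v2.12/%s/comments" % POST_ID
resp = requests.get(url, params={"access_token": ACCESS_TOKEN, "limit": 100})
comments = [c["message"] for c in resp.json().get("data", [])]
print(len(comments), "comments collected")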

# In[10]:


from IPython.display import Image
Image("/Users/saurabhkarambalkar/Desktop/Picture.jpg", width=1000)


# # <font color ='Red'>Business Aspects</font>
# <li> Reduce the manufacturing units and the cost of the product
# <li> Improve the quality of the product by taking feedback from the customers
# 

# # <font color ='Red'>Assumptions</font>
# <li> The assumption for the following analysis is that all the reviews provided by the customers are authentic and unbiased

# # <font color = 'Red'> Limitations </font>
# - The API provides limited access to the data, so the models don't have much data to be trained on
# - If all the reviews given by the customers turn out to be facts, sentiment analysis cannot be carried out on the data

# ## Facebook
import matplotlib.pyplot as plt
import numpy as np
from scipy.integrate import odeint

get_ipython().run_line_magic('matplotlib', 'inline')
from brian2 import *
import brian2
from IPython.display import Image
from scipy.optimize import *

# # Fig 5.4: I_Na,p + I_K Model

# In[3]:

Image(filename="img/1.png")

# In[4]:

C = 1

g_L = 8
g_Na = 20
g_K = 10

E_L1 = -80
E_L2 = -78
E_Na = 60
E_K = -90

tau = 1
Example #14
# Define a class named "user"
class user:
    def __init__(self, name):  # constructor
        self.name = name
        print("User name has been set.")

    def showname(self):  # public member method
        print("Current user's name is:", self.name)


'''Normally the main function main() is called at the end of the script; when we run this script directly,
__name__ is equivalent to __main__. When this script is imported as a module, main() is not executed.
'''
if __name__ == '__main__':
    print("Welcome to July's blog!")
    # print('Welcome to July\'s blog!')
    # Both lines above are correct; note that with single quotes, the inner apostrophe must be escaped.
    main()

    # Demonstrate a few libraries
    img = np.zeros((100, 100))
    img[np.random.randint(0, 100, 500), np.random.randint(0, 100, 500)] = 255
    img2 = filters.gaussian_filter(img, 4, order=2)
    buf = io.BytesIO()
    matplotlib.image.imsave(buf, img2, cmap="gray")
    plt.plot([1, 2, 3, 4])
    plt.ylabel('some numbers')
    plt.show()
    Image(buf.getvalue())  #
# Imports assumed by this snippet (rescaled_asmd and array_severity are prepared upstream in the notebook):
from io import StringIO
from IPython.display import Image
from sklearn import tree, svm
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split, cross_val_score
from pydotplus import graph_from_dot_data

rescaled_asmd_train, rescaled_asmd_test, severity_train, severity_test = train_test_split(
    rescaled_asmd, array_severity, test_size=0.25, random_state=1)
#print(severity_train)

clf_gini = DecisionTreeClassifier()
clf_gini.fit(rescaled_asmd_train, severity_train)

#print(clf_gini.score(rescaled_asmd_test,severity_test))

out = StringIO()
tree.export_graphviz(clf_gini,
                     out_file=out,
                     feature_names=['age', 'shape', 'margin', 'density'])
graph = graph_from_dot_data(out.getvalue())
Image(graph.create_png())

kcv_score = cross_val_score(clf_gini, rescaled_asmd, array_severity, cv=10)
#print(kcv_score.mean())  #0.73

clf_rf = RandomForestClassifier()
clf_rf.fit(rescaled_asmd_train, severity_train)

rf_score = cross_val_score(clf_rf, rescaled_asmd, array_severity, cv=10)
#print(rf_score.mean()) #0.769

clf_svc = svm.SVC(kernel='linear',
                  C=1.0)  #kernel='rbf', kernel='sigmoid', kernel='poly'
svm_cv_scores = cross_val_score(clf_svc, rescaled_asmd, array_severity, cv=10)

#print(svm_cv_scores.mean()) # 80.3
def show_image(image_path):
    display(Image(image_path))

    image_rel = image_path.replace(root, '')
    caption = "Image " + ' - '.join(attributions[image_rel].split(' - ')[:-1])
    display(HTML("<div>%s</div>" % caption))
Example #17
def FeatureImportanceRanking(estimator,
                             feature_labels,
                             ax=None,
                             figsize=(10, 10),
                             rounding=3,
                             show=True,
                             save=False,
                             plot_dir='Output/Plots',
                             title='FIR',
                             save_params={},
                             plot_params={}):
    """
    INPUT:
     - estimator -> A sklearn tree-based fitted estimator object that owns the 
             attribute feature_importances_. (object)
     - feature_labels -> Labels of the model features that will be used in the 
             plot. (list of strings).
     - ax -> Axes of the current figure. (matplotlib axes object)
     - figsize -> Size of the figure object. (tuple of int: (width, height))
     - rounding -> Number of decimals for labels. (int)
     - show -> Choose whether or not to display the plot. (bool) 
     - save -> Choose whether or not to save the plot. (bool)
     - plot_dir -> Plot saving directory. (path string)
     - title -> Name of the plot file without file extension. (string)
     - save_params -> Parameters for the saving operation. (dict)
     - plot_params -> Parameters for pandas.DataFrame.plot. (dict)
             Look at the doc. for more info. 
    """

    # Default values for parameter dictionaries:
    # save_params
    SP = {'format': 'jpg'}
    # plot_params
    PP = {'color': 'darkcyan', 'legend': False, 'fontsize': 15, 'width': 0.9}

    # Update parameter dictionaries with user choices
    SP.update(save_params)
    PP.update(plot_params)

    # Output file directory
    file_dir = os.path.join(plot_dir, f"{title}.{SP['format']}")

    # If the plot file already exists and must not be overwritten, then display it.
    if show and os.path.exists(file_dir) and not save:
        if ax is None:
            display(Image(filename=file_dir, retina=True))
        else:
            ax.imshow(plt.imread(file_dir), aspect='equal')
            ax.get_xaxis().set_visible(False)
            ax.get_yaxis().set_visible(False)

    # If the file must be created or overwritten ...
    else:
        # Create a Series containing the feature importance values
        FI = pd.Series(estimator.feature_importances_, index=feature_labels)\
            .sort_values()

        # Create the plot in the given axes or in a new axes
        if ax is None:
            fig, ax = plt.subplots(figsize=figsize)

        FI.plot(kind='barh', ax=ax, **PP)
        ax.get_xaxis().set_visible(False)
        ax.set(facecolor='white')
        for i, imp in enumerate(FI):
            ax.text(imp + 0.001, i - 0.13, str(round(imp, rounding)))

        # Save the plot if needed
        if save:
            plt.savefig(file_dir, **SP)
        # Prevent display of the plot if needed
        if not show:
            plt.close()
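
# Example call: a minimal, self-contained demo (synthetic data; the fitted model
# and feature names below are illustrative and rely on the module-level imports
# the function already uses, e.g. pandas, matplotlib, os, IPython.display):
if __name__ == '__main__':
    from sklearn.datasets import make_regression
    from sklearn.ensemble import RandomForestRegressor

    X_demo, y_demo = make_regression(n_samples=200, n_features=5, random_state=0)
    rf_demo = RandomForestRegressor(n_estimators=50, random_state=0).fit(X_demo, y_demo)
    FeatureImportanceRanking(rf_demo,
                             feature_labels=['feat_%d' % i for i in range(5)],
                             show=True, save=False)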
Example #18
def PermutationImportanceRanking(estimator,
                                 X,
                                 y,
                                 feature_labels,
                                 ax=None,
                                 figsize=(10, 10),
                                 rounding=3,
                                 show=True,
                                 save=False,
                                 plot_dir='Output/Plots',
                                 title='PIR',
                                 save_params={},
                                 pi_params={},
                                 plot_params={}):
    """
    INPUT:
     - estimator -> A sklearn tree-based fitted estimator object. (object) 
             Look at sklearn.inspection.permutation_importance doc. for more info. 
     - X -> Feature matrix. (ndarray or DataFrame)
             Look at sklearn.inspection.permutation_importance doc. for more info.
     - y -> Target array. (array-like or None)
             Look at sklearn.inspection.permutation_importance doc. for more info.
     - feature_labels -> Labels of the model features that will be used in the 
             plot. (list of strings).
     - ax -> Axes of the current figure. (matplotlib axes object)
     - figsize -> Size of the figure object. (tuple of int: (width, height))
     - rounding -> Number of decimals for labels. (int)
     - show -> Choose whether or not to display the plot. (bool) 
     - save -> Choose whether or not to save the plot. (bool)
     - plot_dir -> Plot saving directory. (path string)
     - title -> Name of the plot file without file extension. (string)
     - save_params -> Parameters for the saving operation. (dict)
     - pi_params -> Parameters for sklearn.inspection.permutation_importance. 
             (dict) Look at the doc. for more info.
     - plot_params -> Parameters for pandas.DataFrame.plot. (dict)
             Look at the doc. for more info. 
    """

    # Default values for parameter dictionaries:
    # save_params
    SP = {'format': 'jpg'}
    # pi_params
    PIP = {
        'scoring': 'neg_mean_squared_error',
        'n_repeats': 10,
        'n_jobs': 1,
        'random_state': 1
    }
    # plot_params
    PP = {
        'color': 'goldenrod',
        'legend': False,
        'capsize': 3,
        'fontsize': 15,
        'width': 0.9
    }

    # Update parameter dictionaries with user choices
    SP.update(save_params)
    PIP.update(pi_params)
    PP.update(plot_params)

    # Output file directory
    file_dir = os.path.join(plot_dir, f"{title}.{SP['format']}")

    # If the plot file already exists and must not be overwritten, then display it.
    if show and os.path.exists(file_dir) and not save:
        if ax is None:
            display(Image(filename=file_dir, retina=True))
        else:
            ax.imshow(plt.imread(file_dir), aspect='equal')
            ax.get_xaxis().set_visible(False)
            ax.get_yaxis().set_visible(False)

    # If the file must be created or overwritten ...
    else:
        # Compute permutation importance values
        PI = permutation_importance(estimator=estimator, X=X, y=y, **PIP)
        # Create a dataframe with means and standard deviations only.
        del PI['importances']
        PI = pd.DataFrame(PI, index=feature_labels)\
            .sort_values(by='importances_mean')

        # Create the plot in the given axes or in a new axes
        if ax is None:
            fig, ax = plt.subplots(figsize=figsize)

        PI.plot(y='importances_mean',
                kind='barh',
                xerr=PI['importances_std'],
                ax=ax,
                **PP)
        ax.get_xaxis().set_visible(False)
        ax.set(facecolor='white')
        for i, (imp, se) in enumerate(
                zip(PI['importances_mean'], PI['importances_std'])):
            ax.text(imp + se + 0.005 * PI['importances_mean'].iloc[-1], i - 0.13,
                    str(round(imp, rounding)))
        # Save the plot if needed
        if save:
            plt.savefig(file_dir, **SP)
        # Prevent display of the plot if needed
        if not show:
            plt.close()
Example #19
def GeoPlot(GDF,
            col,
            ax=None,
            figsize=(50, 27),
            Antarctica=True,
            show=True,
            save=False,
            plot_dir='Output/Plots',
            title='GeoPlot',
            save_params={},
            plot_params={},
            adjust_colorbar_params={}):
    """
    INPUT:
     - GDF -> GeoDataFrame with geometry column and other data columns. 
             (GeoDataFrame)
     - col -> Name of the column containing data to plot. (string)
     - ax -> Axes of the current figure. (matplotlib axes object)
     - figsize -> Size of the figure object. (tuple of int: (width, height))
     - Antarctica -> Choose whether or not to plot the Antarctic continent. (bool)
     - show -> Choose whether or not to display the plot. (bool) 
     - save -> Choose whether or not to save the plot. (bool)
     - plot_dir -> Plot saving directory. (path string)
     - title -> Name of the plot file without file extension. (string)
     - save_params -> Parameters for the saving operation. (dict)
     - plot_params -> Parameters for geopandas.GeoDataFrame.plot. (dict)
             Look at the doc. for more info. 
             Three additional items are included:
             - boundary_color -> Countries' borders color. (string)
             - land_color -> Lands color. (string)
             - sea_color -> Sea color. (string)      
      - adjust_colorbar_params -> Parameters used to adjust the colorbar 
              settings. Items in the dictionary are:
              - center_on_zero -> Choose whether or not to center the colorbar 
                  on the value 0. (bool) Make sure to choose in plot_params a 
                  diverging colormap to obtain a better outcome.
              - remove_outliers -> Choose whether or not to remove outliers from
                  the colorbar range of values. Outliers will be colored with 
                  the same color of the closest value on the colorbar. 
              - vmin, vmax -> Minimum and maximum values considered in the 
                  colorbar's range of values. (int) Smaller or larger values are
                  represented with same color of the closest value on the 
                  colorbar.
              - labelsize -> Size of the colorbar labels. (int)
              - valueson -> Specific values that will be represented on the 
                  colorbar as straight vertical lines. (float or list of floats)
              - valueson_color -> Color of lines on the colorbar. (string)       
              - valueson_linewidth -> Width of lines on the colorbar. (int)                      
    """

    # Default values for parameter dictionaries:
    # plot_params
    PP = {
        'cmap': 'viridis',
        'marker': 'h',
        'markersize': 9,
        'boundary_color': 'white',
        'land_color': 'whitesmoke',
        'sea_color': '#adcfeb',
        'legend': True,
        'legend_kwds': {
            'orientation': 'horizontal'
        }
    }
    # adjust_colorbar_params
    ACP = {
        'center_on_zero': False,
        'remove_outliers': False,
        'vmin': None,
        'vmax': None,
        'labelsize': 40,
        'valueson': [],
        'valueson_color': 'yellow',
        'valueson_linewidth': 5
    }
    # save_params
    SP = {'format': 'jpg'}

    # Update parameter dictionaries with user choices
    PP.update(plot_params)
    ACP.update(adjust_colorbar_params)
    SP.update(save_params)

    # Output file directory
    file_dir = os.path.join(plot_dir, f"{title}.{SP['format']}")

    # If the plot file already exists and must not be overwritten, then display it.
    if show and os.path.exists(file_dir) and not save:
        if ax is None:
            display(Image(filename=file_dir, retina=True))
        else:
            ax.imshow(plt.imread(file_dir), aspect='equal')
            ax.get_xaxis().set_visible(False)
            ax.get_yaxis().set_visible(False)

    # If the file must be created or overwritten ...
    else:
        # Import the geodataframe containing the map of the world
        world = gpd.read_file(gpd.datasets.get_path('naturalearth_lowres'))
        # Remove Antarctica if needed
        if not Antarctica:
            world = world[(world.name != "Antarctica")]
            GDF = RemoveAntarctica(GDF)

        # Remove outliers from the colorbar if needed
        if ACP['remove_outliers']:
            min_w, max_w = Whiskers(GDF[col])
            if ACP['vmin'] is None:
                ACP['vmin'] = min_w
            if ACP['vmax'] is None:
                ACP['vmax'] = max_w
        else:
            if ACP['vmin'] is None:
                ACP['vmin'] = GDF[col].min()
            if ACP['vmax'] is None:
                ACP['vmax'] = GDF[col].max()
        # Center the colorbar on 0 if needed
        if ACP['center_on_zero']:
            PP['norm'] = TwoSlopeNorm(vmin=ACP['vmin'],
                                      vcenter=0,
                                      vmax=ACP['vmax'])
        else:
            PP['norm'] = Normalize(vmin=ACP['vmin'], vmax=ACP['vmax'])

        with sns.axes_style("white"):
            # Create the plot in the given axes or in a new axes
            if ax is None:
                fig, ax = plt.subplots(figsize=figsize)

            # Legend adjustments if colorbar must be displayed
            if PP['legend']:
                divider = make_axes_locatable(ax)
                cax = divider.append_axes("bottom", size="5%", pad=0.1)
                cax.tick_params(labelsize=ACP['labelsize'])
                PP['cax'] = cax

            # Plot of the world in the background
            world.plot(ax=ax,
                       facecolor=PP.pop('land_color'),
                       edgecolor="none",
                       zorder=1).set_facecolor(PP.pop('sea_color'))
            # Plot of the countries' borders on the foreground
            world.boundary.plot(ax=ax,
                                color=PP.pop('boundary_color'),
                                zorder=3)

            # Plot of the data points
            GDF.plot(column=col, ax=ax, **PP, zorder=2)

            # Hide axis
            ax.get_xaxis().set_visible(False)
            ax.get_yaxis().set_visible(False)

            # Draw lines on the colorbar on the specified values
            if PP['legend']:
                ACP['valueson'] = MakeList(ACP['valueson'])
                for v in ACP['valueson']:
                    cax.vlines(v,
                               -1,
                               1,
                               colors=ACP['valueson_color'],
                               linewidth=ACP['valueson_linewidth'])

            # fig.tight_layout()

            # Save the plot if needed
            if save:
                plt.savefig(file_dir, **SP)
            # Prevent display of the plot if needed
            if not show:
                plt.close()
Example #20
sales_pred = lm.predict(testing)
sales_pred

# In[10]:

SSD = sum((testing["Sales"] - sales_pred)**2)
SSD

# In[11]:

RSE = np.sqrt(SSD / (len(testing) - 2 - 1))
RSE

# In[12]:

sales_mean = np.mean(testing["Sales"])
error = RSE / sales_mean
error

# In[15]:

get_ipython().run_line_magic('matplotlib', 'inline')
data.plot(kind="scatter", x="TV", y="Sales")
#plt.plot(pd.DataFrame(data["TV"]), sales_pred, c="red", linewidth = 2)

# In[14]:

from IPython.display import Image
Image(filename="/content/python-ml-course/notebooks/resources/summary-lm.png")
from IPython.display import Image  
from io import StringIO  # sklearn.externals.six was removed in newer scikit-learn versions
from sklearn.tree import export_graphviz
import pydot 

features = list(df.columns[1:])
features


# In[18]:

dot_data = StringIO()  
export_graphviz(dtree, out_file=dot_data,feature_names=features,filled=True,rounded=True)

graph = pydot.graph_from_dot_data(dot_data.getvalue())  
Image(graph[0].create_png())  


# ## Random Forests

# In[41]:

from sklearn.ensemble import RandomForestClassifier
rfc = RandomForestClassifier(n_estimators=100)
rfc.fit(X_train, y_train)


# In[45]:

rfc_pred = rfc.predict(X_test)
Example #22
def predict_video(video_name, enc_model, clf_model):
   
    n_frames = get_video_frame_num(video_name) 
    pred_trace = {'picked_f': [], 'total_frame': 0, 'picked_rate': 0.0, 'proc_stage': []}
 
    encdata = getMetadata(video_name) 
    cvdata = getCVInfoFromLog(video_name)
    mv_file = os.path.join(MV_INPUT_FOLDER, video_name + '.pickle')
    with open(mv_file) as fh:
        mv_features = pickle.load(fh)
    w = encdata['metadata']['w'] 
    h = encdata['metadata']['h']

    selected_fids = [0]
    prev_fid = 0
    stages = [-1]
    for fid in xrange(1, n_frames):
        frame_name = str(fid) + '.jpg'
        prev_frame_name = str(prev_fid) + '.jpg'

 
        # generate features
        #### single frame features
        enc = encdata[frame_name]
        x = {}
        if enc['type'] == 'P': 
            x[ENC_TYPE] = 0
        else:
            x[ENC_TYPE] = 1

        cv = cvdata[frame_name]
        mv = mv_features[fid]
        #########################

        x[IMG_WIDTH] = w
        x[IMG_HEIGHT] = h
        x[ENC_SIZE] = enc['size']
        x[MV_SIZE] = mv[0] 
        x[MV_MEAN] = mv[1]
        x[MV_MAX] = mv[2]
        x[MV_MIN] = mv[3]
        x[DIST_FROM_PFID] = fid - prev_fid
 
        # predict if we should pick this frame
        fst_layer_f = [x[ENC_TYPE], x[IMG_WIDTH], x[IMG_HEIGHT], x[ENC_SIZE], x[MV_SIZE], x[MV_MEAN], x[MV_MAX], x[MV_MIN], x[DIST_FROM_PFID]]
        pred_y = enc_model.predict(fst_layer_f)
        if int(pred_y) == 0:
            stages += [0]
            continue
        
        ### two-frame
        cur_img = cv2.imread(os.path.join('/home/t-yuche/frames', video_name, frame_name))     
        prev_img = cv2.imread(os.path.join('/home/t-yuche/frames', video_name, prev_frame_name))  
        if  h * w > 320 * 240:
            cur_img = cv2.resize(cur_img, (320, 240)) 
            prev_img = cv2.resize(prev_img, (320, 240)) 
        ##   
        framediff = getFrameDiff(prev_img, cur_img)
        framediff_prec = framediff/ (h * w * 1.0)

        ##
        pilcur_img = cv2.cvtColor(cur_img, cv2.COLOR_BGR2RGB)
        pilprev_img = cv2.cvtColor(prev_img, cv2.COLOR_BGR2RGB)
        pil_cur = Image.fromarray(pilcur_img)
        pil_prev = Image.fromarray(pilprev_img)
        phash_v = phash(pil_prev, pil_cur) 
        
        ##
        hist_score = colorHistSim(prev_img, cur_img)

        ##
        sprev_img = cv2.resize(prev_img, (160,120))
        scur_img = cv2.resize(cur_img, (160,120))
        sift_score = getSIFTMatchingSim(sprev_img, scur_img)            
        ##
        surf_score = getSURFMatchingSim(sprev_img, scur_img)            
        ###


        x[SOBEL] = cv['sobel'][0]
        x[ILLU] = cv['illu'][0]
        x[FRAME_DIFF] = framediff_prec
        x[PHASH] = phash_v
        x[COLORHIST] = hist_score
        x[SIFTMATCH] = sift_score
        x[SURFMATCH] = surf_score
 
        #x = scale_feature(x, range_value) 
        #p_label, dummy, dummy = svm_predict([1], [x], svm_model, '-q')
        snd_layer_f = [x[ENC_TYPE], x[IMG_WIDTH], x[IMG_HEIGHT], x[ENC_SIZE], x[MV_SIZE], x[MV_MEAN], x[MV_MAX], x[MV_MIN], x[SOBEL], x[ILLU], x[FRAME_DIFF], x[PHASH], x[COLORHIST], x[SIFTMATCH], x[SURFMATCH], x[DIST_FROM_PFID]]
        pred_y = clf_model.predict(snd_layer_f) 
         
        #print p_label
        if int(pred_y) == 1:
            stages += [2]
            selected_fids += [fid]
            prev_fid = fid
        else:
            stages += [1]

    pred_trace['picked_f'] = selected_fids
    pred_trace['total_frame'] =  n_frames
    pred_trace['picked_rate'] = len(selected_fids)/(n_frames * 1.0)
    pred_trace['proc_stage'] = stages
    #print video_name, selected_fids, '\n' , n_frames, pred_trace['picked_rate']
    
    return pred_trace
Example #23
def display_images(filenames):
    """Helper to pretty-print images."""
    for filename in filenames:
        display(Image(filename))

def display(image) : 
    if type(image) is type([]) : 
        for images in image :
            matplotlib.pyplot.figure()
            imshow(images,cmap=matplotlib.pyplot.get_cmap('gray'))
    else : 
        imshow(image,cmap=matplotlib.pyplot.get_cmap('gray'))


# #original image

# In[170]:

image = np.array(Image.open('sample images/sample_image.jpg')) 

#imshow(image,cmap=matplotlib.pyplot.get_cmap('grey'))
impr = image_processing()
image = impr.rgb_to_greyscale(image)
display(image)


# #vertical edge detection

# In[171]:

def vertical_edge_detection(self,image) : 
    kernel = [1,[[-1,0,1],
                 [-1,0,1],
                 [-1,0,1]]]
Example #25
        if answer2 == 1:
            pt('Hmm, think it over one more time.', 2)
            continue
        elif answer2 == 2:
            print('That is it! That is the', syuzinkou, 'I know!')
            time.sleep(2)
            break
        else:
            pt('Enter only one of the listed numbers!', 2)
            continue
pt('Out of nowhere, you now have to choose the slave who will fight Nose for you', 2)
print('Once you can see the image, press Enter to continue ♬')

pt('First up is Mocchi!', 2)
display(Image(url1))
pt('Press Enter to continue', 2)
input()
pt('Next up is Kuma!', 2)

display(Image(url2))
pt('Press Enter to continue', 2)
input()
pt('Last up is Kodai!', 2)

display(Image(url3))
pt('Press Enter to continue', 2)
input()
pt('Here are the abilities of your slaves!', 2)
print('''					
				
# Finally, Random Forest has some other benefits:
# + It gives you a measure of "variable importance" which relates how useful your input features (e.g. spectral bands) were in the classification
# + The "out-of-bag" samples in each tree can be used to validate each tree. Grouping these predicted accuracies across all trees can [sometimes give you an unbiased estimate of the error rate](http://www.stat.berkeley.edu/~breiman/RandomForests/cc_home.htm#ooberr) similar to doing cross-validation (see the short sketch after this list).
# + Can be used for regressions, unsupervised clustering, or supervised classification
# + Available in many popular languages, including Python, R, and MATLAB
# + Free and open source, and fast
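
# A minimal sketch of both points above (synthetic data, purely illustrative):

from sklearn.datasets import make_classification
from sklearn.ensemble import RandomForestClassifier

X_demo, y_demo = make_classification(n_samples=500, n_features=8, random_state=0)
rf_demo = RandomForestClassifier(n_estimators=200, oob_score=True, random_state=0)
rf_demo.fit(X_demo, y_demo)
print(rf_demo.oob_score_)            # accuracy estimated from the out-of-bag samples
print(rf_demo.feature_importances_)  # the "variable importance" measure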

# ## scikit-learn
#
# In this chapter we will be using the Random Forest implementation provided by the [scikit-learn](http://scikit-learn.org) library. `Scikit-learn` is an amazing machine learning library that provides easy and consistent interfaces to many of the most popular machine learning algorithms. It is built on top of the pre-existing scientific Python libraries, including NumPy, SciPy, and matplotlib, which makes it very easy to incorporate into your workflow. The number of available methods for accomplishing any task contained within the library is (in my opinion) its real strength. No single algorithm is best for all tasks under all circumstances, and `scikit-learn` helps you understand this by abstracting the details of each algorithm to simple consistent interfaces. For example:

# In[1]:

from IPython.display import Image, display
img = Image(
    'http://scikit-learn.org/stable/_images/plot_classifier_comparison_001.png'
)
display(img)  # display() renders the image; print() would only show the object's repr

# [This figure](http://scikit-learn.org/stable/auto_examples/plot_classifier_comparison.html) shows the classification predictions and the decision surfaces produced for three classification problems using 9 different classifiers. What is even more impressive is that all of this took only about 110 lines of code, including comments!

# ## Preparing the dataset
# #### Opening the images
# Our first step is to recall our previous chapter's lessons by reading in the example image and the ROI image we created in [chapter 5](chapter_5_vector.ipynb):

# In[2]:

# Import Python 3's print function and division
from __future__ import print_function, division

# Import GDAL, NumPy, and matplotlib
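
# A minimal sketch of the read-in step described above (the file names below are
# placeholders for the chapter's example image and ROI raster):
from osgeo import gdal
import numpy as np

img_ds = gdal.Open('example_image.gtif', gdal.GA_ReadOnly)  # placeholder path
roi_ds = gdal.Open('training_roi.gtif', gdal.GA_ReadOnly)   # placeholder path

# Stack every band of the image into a (rows, cols, bands) NumPy array
img = np.zeros((img_ds.RasterYSize, img_ds.RasterXSize, img_ds.RasterCount),
               dtype=np.float32)
for b in range(img_ds.RasterCount):
    img[:, :, b] = img_ds.GetRasterBand(b + 1).ReadAsArray()

# The ROI raster holds the rasterized training labels
roi = roi_ds.GetRasterBand(1).ReadAsArray().astype(np.uint8)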
def random_forest_visualization(HEADERS, feature_headers, class_names):
    train_x, test_x, train_y, test_y = split_dataset(dataset, 0.7, [
        'nb_follower', 'nb_following', 'verified', 'reputation', 'age',
        'nb_tweets', 'posted_at', 'length', 'proportion_spamwords',
        'orthographe', 'nb_hashtag', 'nb_urls', 'nb_emoji', 'type'
    ], HEADERS[-2])
    model = RandomForestClassifier(class_weight={
        0: 1,
        1: 5
    },
                                   max_depth=10,
                                   min_samples_leaf=5,
                                   min_samples_split=20)
    model.fit(train_x.drop('type', axis=1), train_y)
    estimator = model.estimators_[9]
    return (export_graphviz(estimator,
                            out_file='tree.dot',
                            feature_names=feature_headers,
                            class_names=class_names,
                            rounded=True,
                            proportion=False,
                            precision=2,
                            filled=True))


if __name__ == "__main__":
    random_forest_visualization(HEADERS, feature_headers, class_names)
    call(['dot', '-Tpng', 'tree.dot', '-o', 'tree.png', '-Gdpi=600'])
    Image(filename='tree.png')
my_ax = g.ax
zz = np.array(
    [clf.predict([[xx, yy]])[0] for xx, yy in zip(np.ravel(X_), np.ravel(Y_))])
Z = zz.reshape(X_.shape)

my_ax.contourf(X_, Y_, Z, 2, alpha=.1, colors=('blue', 'green', 'red'))
my_ax.contour(X_, Y_, Z, 2, alpha=1, colors=('blue', 'green', 'red'))

my_ax.set_xlabel('Sepal length')
my_ax.set_ylabel('Sepal width')
my_ax.set_title('DecisionTree boundaries')

plt.show()
"""* Max_depth 차례대로 3, 7, 10. 나머지 1과 동일"""

from IPython.display import Image
Image('1.JPG')
"""*	Random_state 차례대로 3, 5, 10. 나머지 1과 동일"""

from IPython.display import Image
Image('2.JPG')
"""*	Criterion 차례대로 gini, entropy. 나머지 1과 동일"""

from IPython.display import Image
Image('3.JPG')
"""*	Splitter 차례대로 best, random. 나머지 1과 동일."""

from IPython.display import Image
Image('4.JPG')
"""분석결과를 보면 각각의 parameter마다 다 다른 결과를 도출함을 볼 수 있다. 하지만 random_state는 숫자의 변화가 있어도 결과의 차이가 거의 없었으며 max_depth는 숫자가 커질수록 더 분류가 정교해지는 것을 볼 수 있다. 하지만 train set에 너무 fit하게 학습되면 overfitting이 일어날 수도 있으니 조심해야 할 것 같다. 또한 criterion 같은 경우 entropy보다 gini의 성능이 더 좋았으며 splitter는 random보다 best가 훨씬 분류가 잘 된 것을 볼 수 있다."""
Example #29
File: m8r.py Project: clintz1/src
 def png(self):
     return Image(self._repr_png_(), embed=True)
Example #30
File: mask.py Project: rogalag/eden
def showarray(a, fmt='jpeg'):
    a = np.uint8(np.clip(a, 0, 1)*255)
    f = BytesIO()
    PIL.Image.fromarray(a).save(f, fmt)
    display(Image(data=f.getvalue()))
Example #31
plt.show()

#Plotting the accuracy of validation and training 
accur_train = history.history['accuracy']
accur_val = history.history['val_accuracy']
plt.plot(epochstoplot, accur_train, 'g', label='Training accuracy')
plt.plot(epochstoplot, accur_val, 'b', label='validation accuracy')
plt.title('Training and Validation accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
plt.show()

import numpy as np  

from IPython.display import Image, display
TGREEN =  '\033[1;37;42m'
TRED =    '\033[1;37;41m'
for i in range (1,17):
  img_directory = str(i) + '.jpg'
  img_pred= image.load_img('/content/test-70x70/'+img_directory,target_size = (70,70))
  img_pred = image.img_to_array(img_pred)
  img_pred = np.expand_dims(img_pred, axis = 0)

  prediction = model.predict(img_pred)
  display(Image(img_directory,width= 150, height=150))
  print("\n")
  if(int(prediction[0][0]) == 0):
    print(TGREEN + "The person is wearing a mask. \n")
  else:
    print(TRED + "The person is not wearing a mask.\n")
  data_folders = [
    os.path.join(root, d) for d in sorted(os.listdir(root))
    if os.path.isdir(os.path.join(root, d))]
  if len(data_folders) != num_classes:
    raise Exception(
      'Expected %d folders, one per class. Found %d instead.' % (
        num_classes, len(data_folders)))
  print(data_folders)
  return data_folders
  
train_folders = maybe_extract(train_filename)
test_folders = maybe_extract(test_filename)

#%% Problem 1

from PIL import Image
image=Image.open('MDEtMDEtMDAudHRm.png')
image.show()


#%%
image_size = 28  # Pixel width and height.
pixel_depth = 255.0  # Number of levels per pixel.

def load_letter(folder, min_num_images):
  """Load the data for a single letter label."""
  image_files = os.listdir(folder)
  dataset = np.ndarray(shape=(len(image_files), image_size, image_size),
                         dtype=np.float32)
  image_index = 0
  print(folder)
  for image in os.listdir(folder):
Example #33
train_folders = ['notMNIST_large/A',
 'notMNIST_large/B',
 'notMNIST_large/C',
 'notMNIST_large/D',
 'notMNIST_large/E',
 'notMNIST_large/F',
 'notMNIST_large/G',
 'notMNIST_large/H',
 'notMNIST_large/I',
 'notMNIST_large/J']

test_folders = ['notMNIST_small/A',
 'notMNIST_small/B',
 'notMNIST_small/C',
 'notMNIST_small/D',
 'notMNIST_small/E',
 'notMNIST_small/F',
 'notMNIST_small/G',
 'notMNIST_small/H',
 'notMNIST_small/I',
 'notMNIST_small/J']

from IPython.display import display, Image

display(Image(filename="notMNIST_small/A/Q0NXaWxkV29yZHMtQm9sZEl0YWxpYy50dGY=.png"))

image_size = 28  # Pixel width and height.
pixel_depth = 255.0  # Number of levels per pixel.

def load_letter(folder, min_num_images):
  """Load the data for a single letter label."""
  image_files = os.listdir(folder)
  dataset = np.ndarray(shape=(len(image_files), image_size, image_size),
                         dtype=np.float32)
  print(folder)
  num_images = 0
  for image in image_files:
    image_file = os.path.join(folder, image)
    try:
      image_data = (ndimage.imread(image_file).astype(float) -