Example #1
    def get_colors(self, qty):

        qty = np.power(qty / qty.max(), 1.0 / CONTRAST)

        if COLORMAP == 0:
            rgba = cm.gray(qty, alpha=ALPHA)
        elif COLORMAP == 1:
            rgba = cm.afmhot(qty, alpha=ALPHA)
        elif COLORMAP == 2:
            rgba = cm.hot(qty, alpha=ALPHA)
        elif COLORMAP == 3:
            rgba = cm.gist_heat(qty, alpha=ALPHA)
        elif COLORMAP == 4:
            rgba = cm.copper(qty, alpha=ALPHA)
        elif COLORMAP == 5:
            rgba = cm.gnuplot2(qty, alpha=ALPHA)
        elif COLORMAP == 6:
            rgba = cm.gnuplot(qty, alpha=ALPHA)
        elif COLORMAP == 7:
            rgba = cm.gist_stern(qty, alpha=ALPHA)
        elif COLORMAP == 8:
            rgba = cm.gist_earth(qty, alpha=ALPHA)
        elif COLORMAP == 9:
            rgba = cm.nipy_spectral(qty, alpha=ALPHA)  # 'spectral' was renamed to 'nipy_spectral' in newer matplotlib

        return rgba
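At its core this snippet just feeds a normalized array into a matplotlib colormap callable; a minimal sketch of that call, assuming any 1-D float array:

import numpy as np
from matplotlib import cm

qty = np.linspace(0.0, 1.0, 5)      # values already normalized to [0, 1]
rgba = cm.afmhot(qty, alpha=1.0)    # shape (5, 4): one RGBA row per value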
Example #3
def comparison_v2(adata, name_keys, group=None, color_thresholds=None, n_genes=70):
    name_list_cut = {}
    for i, j in enumerate(name_keys):
        name_list_cut[i] = adata.uns[j][0:n_genes]
    name_list = {}
    for i, j in enumerate(name_keys):
        name_list[i] = adata.uns[j]

    length = n_genes
    width = len(name_list)

    rank_table = pd.DataFrame(name_list_cut)
    row_names = np.arange(n_genes) + 1
    colors = np.zeros((length, width))
    for key in name_list:
        for i in range(n_genes):
            top100=False
            top50=False
            top20=False
            for key2 in name_list:
                if key is key2:
                    pass
                else:
                    if name_list[key][i] in name_list[key2]:
                        index=name_list[key2].index(name_list[key][i])
                        if index < 100:
                            top100 = True
                            if index < 50:
                                top50 = True
                                if index < 20:
                                    top20 = True
                        else:
                            pass
                    else:
                        top100=False
                        top50=False
                        top20=False
            if top100:
                colors[i, key] = 0.55
                if top50:
                    colors[i, key] = 0.75
                    if top20:
                        colors[i, key] = 0.9
            else:
                colors[i, :] = 0.35

    plt.figure(figsize=(4,4 ), dpi=120)
    ax = plt.subplot(111, frame_on=False)
    ax.xaxis.set_visible(False)
    ax.yaxis.set_visible(False)
    ax.table(cellText=rank_table.values, rowLabels=row_names, colLabels=name_keys,
             cellColours=cm.afmhot(colors), loc="center", fontsize=22)
    plt.show()
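The cell shading above boils down to mapping a 2-D array of levels through cm.afmhot, which yields one RGBA tuple per table cell; a minimal sketch of that step in isolation:

import numpy as np
from matplotlib import cm

levels = np.array([[0.35, 0.90],
                   [0.75, 0.55]])   # one scalar level per table cell
cell_colours = cm.afmhot(levels)    # shape (2, 2, 4): RGBA per cell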
Example #4
    def init_vis(self, p):
        pygame.init()
        (display_width, display_height) = (1280, 720)
        self.screen = pygame.display.set_mode((display_width, display_height))
        pygame.display.set_caption("Arcade Learning Environment Agent Display")
        self.game_surface = None

        self.clock = pygame.time.Clock()
        self.delay = p['fps']
        self.framenum = 0
        self.i_range = [-0.001, 0.001]
        # precompute palette
        self.intensity_pal = cm.afmhot(np.arange(256))
        self.intensity_pal = (self.intensity_pal * 255).astype(int)
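The precomputed palette can then colour 8-bit intensity frames by plain NumPy indexing; a small sketch (the frame array is a hypothetical stand-in):

import numpy as np
from matplotlib import cm

pal = (cm.afmhot(np.arange(256)) * 255).astype(np.uint8)         # 256 RGBA rows
frame = np.random.randint(0, 256, size=(84, 84), dtype=np.uint8)
rgba_frame = pal[frame]                                          # (84, 84, 4) uint8 image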
Example #6
def map_val_colors(img, v_min=0.0, v_max=1.0, cmap='hot'):
    fun = {
        'hot':
        lambda t: cm.afmhot(colors.Normalize(vmin=v_min, vmax=v_max)(t),
                            bytes=True),
        'jet':
        lambda t: cm.jet(colors.Normalize(vmin=v_min, vmax=v_max)(t),
                         bytes=True),
        'Greys':
        lambda t: cm.Greys(colors.Normalize(vmin=v_min, vmax=v_max)(t),
                           bytes=True),
        'Blues':
        lambda t: cm.Blues(colors.Normalize(vmin=v_min, vmax=v_max)(t),
                           bytes=True),
    }
    return fun[cmap](img)
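A quick usage sketch, assuming matplotlib's cm and colors modules are imported as the snippet implies and the input is a float image:

import numpy as np

img = np.random.rand(64, 64)                    # stand-in intensity image
rgba = map_val_colors(img, v_min=0.0, v_max=1.0, cmap='hot')
print(rgba.dtype, rgba.shape)                   # uint8, (64, 64, 4)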
Example #7
def add_node_heatmap(treeplot, nodelist, vis=True):
    """
    Plot circles on nodes with colors indicating how frequently each node
    appears in nodelist. For use with plotting potential regime
    shift locations

    Args:
        nodelist (list): list of node objects. Repeats allowed (and expected)
    """
    nodeset = list(set(nodelist))
    n = len(nodelist)

    # Don't plot any values that appear so infrequently as to be less than
    # what the minimum color would be
    cutoff = round(n*(1.0/255))

    nodeset = [x for x in nodeset if nodelist.count(x) >= cutoff]

    cols = [afmhot(nodelist.count(x) / n) for x in nodeset]

    add_circles(treeplot, nodes=nodeset, colors=cols, size=6, vis=vis)
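Only the colour computation touches matplotlib; in isolation it maps per-node counts onto afmhot (a hypothetical nodelist of hashable labels):

from matplotlib.cm import afmhot

nodelist = ["a", "a", "a", "b", "c", "c"]
n = len(nodelist)
cols = {x: afmhot(nodelist.count(x) / n) for x in set(nodelist)}
# cols["a"] is the RGBA tuple for a node that appears in half the samples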
Example #9
    colorterm = np.array( regterm_list )
    colorrange = ( np.max( colorterm ) - np.min( colorterm ) )
    colorlevel = ( colorterm - np.min( colorterm ) ) / colorrange

    print('')
    print('MINIMUM:', np.min( colorterm ))
    print('MAXIMUM:', np.max( colorterm ))
    print('')


    colorlevel_sorted = colorlevel[np.argsort( colorlevel )][::-1]
    points_kn_array = np.array( points_kn_list )
    points_kn_array_sorted  = points_kn_array[np.argsort( colorlevel )][::-1]
    for ii in range(count):
        points_kn = points_kn_array_sorted[ii]
        plt.plot( points_kn.T[0], points_kn.T[1], color=cm.afmhot( colorlevel_sorted[ii] ) )

    #--------------------------------------------------------------------------
    # data
    plt.plot( U_in.T[0], U_in.T[1], 'k' )
#    plt.plot( U_in.T[0], U_in.T[1], marker='.', c="black", label='data' )

    #--------------------------------------------------------------------------
    # answer
    # projection of 'answer' onto PC plane
    albd_answer_kj  = np.loadtxt( ALBDFILE ).T
    dalbd_answer_kj = albd_answer_kj - M_j
    coeff_kn = np.dot( dalbd_answer_kj, V_nj.T )
    answer_x, answer_y = coeff_kn[:,0:2].T

    plt.scatter( answer_x[0], answer_y[0], marker='o', c='blue' )
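The essential trick in this fragment is handing a single normalized scalar to cm.afmhot to colour each curve; a self-contained sketch:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm

x = np.linspace(0.0, 2.0 * np.pi, 200)
for level in np.linspace(0.0, 1.0, 10):
    plt.plot(x, np.sin(x + level), color=cm.afmhot(level))
plt.show()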
Example #10
    def show_color(self, num=-1, displ='sput'):
        Z_ = self.Z_history[num]
        X_ = self.X
        Y_ = self.Y
        l_z = np.pad(Z_, ((0, 1), (0, 1)), mode='wrap')
        l_z += self.slope_corr_diff1

        l_slopes_x = np.diff(l_z, 1, axis=1)[:-1] / self.dx
        l_slopes_y = np.diff(l_z, 1, axis=0)[:, :-1] / self.dx

        l_angles_x = np.arctan(l_slopes_x)
        l_angles_y = np.arctan(l_slopes_y)

        angles_x = (np.roll(l_angles_x, 1, axis=1) + l_angles_x) * 0.5
        angles_y = (np.roll(l_angles_y, 1, axis=0) + l_angles_y) * 0.5
        slopes_x = np.tan(angles_x)
        slopes_y = np.tan(angles_y)

        normal_magnitude = np.sqrt(
            np.power(slopes_x, 2) + np.power(slopes_y, 2) + 1.0)
        thetas = np.arccos(1.0 / normal_magnitude)
        if displ == 'moment':
            omegas = np.arctan2(slopes_y, slopes_x)
            omegas = np.abs(omegas)
            omegas[omegas >= np.pi *
                   0.5] = np.pi - omegas[omegas >= np.pi * 0.5]

            x_back_mask = slopes_x > 0.0
            x_for_mask = np.logical_not(x_back_mask)

            y_back_mask = slopes_y > 0.0
            y_for_mask = np.logical_not(y_back_mask)

            # ero_00 = (1.0-np.cos(4.0*thetas))*self.moment/(normal_magnitude*np.power(self.dx, 3))
            # ANGLE NORMALIZATION INSIDE DEFINITION BELOW
            ero_00 = (self.mamp * self.flux_const *
                      (1.0 / self.dx) * 0.5) * (1.0 - np.cos(4.0 * thetas))
            sin_omega = np.sin(omegas)
            cos_omega = np.cos(omegas)

            acc_00 = (1 - sin_omega) * (1 - cos_omega) * ero_00
            acc_01 = cos_omega * (1 - sin_omega) * ero_00
            acc_10 = (1 - cos_omega) * sin_omega * ero_00
            acc_11 = sin_omega * cos_omega * ero_00

            # lets roll
            acc_01 = np.roll(x_for_mask*acc_01, (1, 0), axis=(1, 0)) \
                + np.roll(x_back_mask*acc_01, (-1, 0), axis=(1,0))

            acc_10 = np.roll(y_for_mask*acc_10, (0, 1), axis=(1,0)) \
                + np.roll(y_back_mask*acc_10, (0, -1), axis=(1,0))
            """
            (-1, -1) | (-1, 0) | (-1, 1)
            ----------------------------
            (0, -1)  |         | (0, 1)
            ----------------------------
            (1, -1)  | (1, 0)  | (1, 1)
            """

            acc_11 = np.roll(np.logical_and(x_for_mask, y_for_mask)*acc_11, (1, 1), axis=(1,0)) \
                + np.roll(np.logical_and(x_for_mask, y_back_mask)*acc_11, (1, -1), axis=(1,0)) \
                + np.roll(np.logical_and(x_back_mask, y_back_mask)*acc_11, (-1, -1), axis=(1,0)) \
                + np.roll(np.logical_and(x_back_mask, y_for_mask)*acc_11, (-1, 1), axis=(1,0))

            results = -ero_00 + acc_00 + acc_01 + acc_10 + acc_11

        elif displ == 'sput':
            results = (self.yamp * self.flux_const) * yamamura(
                thetas, self.ytheta, self.f)

        res_max = np.max(results)
        res_min = np.min(results)

        print(res_max, res_min)
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.set_aspect('equal')

        if displ == 'moment':
            abs_max = max(abs(res_min), abs(res_max))
            normalized = (results / (2 * abs_max)) + 0.5
            # 0 -> 255
            digitized = np.array(255 * normalized, dtype=int)
            print(np.min(normalized), np.max(normalized))
            print(np.min(digitized), np.max(digitized))
            my_col = cm.seismic(digitized)

        elif displ == 'sput':
            my_col = cm.afmhot((results - res_min) / (res_max - res_min))

        max_range = np.array(
            [X_.max() - X_.min(),
             Y_.max() - Y_.min(),
             Z_.max() - Z_.min()]).max() / 2.0

        mid_x = (X_.max() + X_.min()) * 0.5
        mid_y = (Y_.max() + Y_.min()) * 0.5
        mid_z = (Z_.max() + Z_.min()) * 0.5

        ax.set_xlim(mid_x - max_range, mid_x + max_range)
        ax.set_ylim(mid_y - max_range, mid_y + max_range)
        ax.set_zlim(mid_z - max_range, mid_z + max_range)

        surf = ax.plot_surface(X_, Y_, Z_, facecolors=my_col)
        # rstride=1, cstride=1, linewidth=0, antialiased=False
        plt.show()
        f, (a3, a4) = plt.subplots(1, 2)
        #a1.imshow(l_slopes_x)
        #a2.imshow(np.degrees(angles_x))
        a3.imshow(np.degrees(thetas))
        a4.imshow(results)
        plt.show()
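For the 'sput' branch, the facecolour mapping reduces to normalizing a scalar field into [0, 1] before applying the colormap; a stripped-down sketch with a stand-in field:

import numpy as np
from matplotlib import cm

results = np.random.rand(32, 32) * 5.0              # stand-in scalar field
norm = (results - results.min()) / np.ptp(results)
face_colours = cm.afmhot(norm)                       # (32, 32, 4), usable as plot_surface facecolors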
Example #11
def colour(number):
    global superset
    return cm.afmhot(number ** 2 / (3 * len(superset)))
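The quadratic scaling pushes small indices toward the dark end of afmhot; a brief usage sketch, assuming superset has been populated and cm is imported as the snippet implies:

superset = list(range(10))             # hypothetical global read by colour()
dark, bright = colour(1), colour(5)    # RGBA tuples near the dark and bright ends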
Example #12
def hough(img, scale=1.0, th=(16 / 256)):
    '''Generates a Hough transform visualization.

    The visualization is obtained by first running an edge detector followed
    by the Standard Hough Transform.  Since the goal is to make a nice looking
    image, not accurately find lines, any image with strong linear segments
    will be "good enough" for this purpose.

    One important thing to keep in mind is that the size of the Hough parameter
    space isn't the same as the original image.  To make this look reasonable,
    the transform is stretched so that it has the same dimensions as the input
    image.  While this invalidates the transform, the purpose, again, is for
    visualization.

    Parameters
    ----------
    img : numpy.ndarray
        input RGB image
    scale : float
        smoothing amount for the Canny edge detector's Gaussian pre-filter
    th : float
        scaling factor used to boost the exposure of the visualization

    Returns
    -------
    hough : numpy.ndarray
        visualization of the Hough parameter space
    edges : numpy.ndarray
        edges used as input for the SHT
    '''
    click.secho('WARNING: ', fg='yellow', nl=False)
    click.echo(
        'The Hough implementation is not optimized; this will be very slow.'
    )  # noqa: E501

    grey = skimage.color.rgb2gray(img)
    grey = skimage.filters.gaussian(grey, scale)

    # Compute the gradients.
    gx = skimage.filters.sobel_h(grey)
    gy = skimage.filters.sobel_v(grey)

    mag = np.sqrt(gx**2 + gy**2)
    ang = np.arctan2(gy, gx)

    # For each pixel, figure out where it goes into a Hough accumulator,
    # weighting it by the magnitude.   This is a slightly modified version of
    # Algorithm 4.2 in "Computer Vision: Algorithms and Applications" by Richard
    # Szeliski.
    click.echo(' -- Pre-computing Hough indices.')
    y, x = np.meshgrid(np.arange(mag.shape[1]), np.arange(mag.shape[0]))

    # Compute the line scalar parameters, given the normals.
    d = np.cos(ang) * x + np.sin(ang) * y
    d_max = np.sqrt(d.shape[0]**2 + d.shape[1]**2)

    # Scale everything to be between 0 and 1.
    d = (d + d_max) / (2 * d_max)
    ang = (ang + np.pi) / (2 * np.pi)
    mag = mag / np.max(mag)

    i = d * (mag.shape[0] - 1)
    j = ang * (mag.shape[1] - 1)

    # Precompute the indices so there's less work going on in the for-loop.
    ii = np.zeros((i.shape[0], i.shape[1], 2))
    jj = np.zeros((j.shape[0], j.shape[1], 2))

    ii[:, :, 0] = np.floor(i)
    ii[:, :, 1] = np.ceil(i)

    jj[:, :, 0] = np.floor(j)
    jj[:, :, 1] = np.ceil(j)

    ii = ii.astype(int)
    jj = jj.astype(int)

    # Now, do the actual Hough transform.
    click.echo(' -- Running Hough transform.')
    hough = np.zeros_like(d)
    for y in range(hough.shape[0]):
        for x in range(hough.shape[1]):
            # The accumulator pixel doesn't fall exactly into one bin, so some
            # smoothing is done by adding to the four possible bins.
            i1 = ii[y, x, 0]
            i2 = ii[y, x, 1]

            j1 = jj[y, x, 0]
            j2 = jj[y, x, 1]

            hough[i1, j1] += mag[y, x]
            hough[i1, j2] += mag[y, x]
            hough[i2, j1] += mag[y, x]
            hough[i2, j2] += mag[y, x]

    # Adjust the exposure so that the structure of the parameter space becomes
    # obvious.
    hough = hough / hough.max()
    hough = hough / th
    hough[hough > 1] = 1

    out = cm.afmhot(hough)
    out = skimage.color.rgba2rgb(out)

    return out, _edge_map(gx, gy)
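A usage sketch, assuming scikit-image is available and that _edge_map and the click logging calls used above resolve elsewhere in the module:

import matplotlib.pyplot as plt
import skimage.data

img = skimage.data.astronaut()          # RGB test image
vis, edges = hough(img, scale=2.0)
plt.imshow(vis)
plt.show()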
Example #13
def useClassifier(FILE_PATH,levels,CLASSIFIER_TYPE,trainSample,superPixMethod,brushMasks,features,triGrown):
        path = FILE_PATH
        #levels = 5
        #trainingPath = os.path.join(path,)
        training = np.load(os.path.join(path,'trainingData_'+str(levels)+'_'+\
        'brush'+str(brushMasks)+'_'+str(superPixMethod)+'_'+str(features)+'_'+'grown'+str(triGrown)+'.npz'))
        shuffled = training['shuffled']
        trainRatio = training['R']
        #trainingImages = training['header'](images)
        newpath = os.path.join(path,'predictedMasks')
        k=0
        header = training['header'][()]['images']
        searchingFolders = True
        #masksInfo = (levels,CLASSIFIER_TYPE,trainSample,superPixMethod)
        #pickle.dump( masksInfo, open(os.path.join(newpath,"maskInfo.pickle"), "wb" ) )
        while searchingFolders == True:
            
            #print((json.load(open(os.path.join(newpath,"maskInfo.json"), 'rb'))==
            #(levels,CLASSIFIER_TYPE,trainSample,superPixMethod)))
            newpath=os.path.join(path,'predictedMasks')+str(k)
            if os.path.exists(newpath):
                File = open(os.path.join(newpath,"maskInfo.json"), 'r')
                loadInfo = json.load(File)
                    
                
            #a=pickle.load(open(os.path.join(newpath,"maskInfo.pickle")))
            #print( (loadInfo))
            #print( [levels,CLASSIFIER_TYPE,trainSample,superPixMethod])
            #print((loadInfo==[levels,CLASSIFIER_TYPE,trainSample,superPixMethod]))
            
            #print((loadInfo))
            print((levels,CLASSIFIER_TYPE,trainSample,superPixMethod))
            #print loadInfo
            if not os.path.exists(newpath):
                
                searchingFolders = False
                os.makedirs(newpath)
                masksInfo = {'levels':levels,'CLASSIFIER_TYPE':CLASSIFIER_TYPE,
                'trainSample':trainSample,'superPixMethod':superPixMethod
                ,'brushMasks':brushMasks,'features':features,'triGrown':triGrown}
                json.dump( masksInfo, open(os.path.join(newpath,"maskInfo.json"), "w" ) )
                print('first')
                json.dump(header,open(os.path.join(newpath,"trainInfo.json"), "w" ))
            elif (loadInfo=={'levels':levels,'CLASSIFIER_TYPE':CLASSIFIER_TYPE,
                'trainSample':trainSample,'superPixMethod':superPixMethod
                ,'brushMasks':brushMasks,'features':features,'triGrown':triGrown}):
                searchingFolders = False
                print('here')
            #l=lp
            '''
            elif (loadInfo!=(levels,CLASSIFIER_TYPE,trainSample,superPixMethod)):
                #newpath=os.path.join(path,'predictedMasks')+str(k)
                if not os.path.exists(newpath):
                    searchingFolders = False
                    os.makedirs(newpath)
                    masksInfo = (levels,CLASSIFIER_TYPE,trainSample,superPixMethod)
                    json.dump( masksInfo, open(os.path.join(newpath,"maskInfo.json"), "w" ) )
                elif (loadInfo==(levels,CLASSIFIER_TYPE,trainSample,superPixMethod)):
                    searchingFolders = False
            elif (loadInfo==(levels,CLASSIFIER_TYPE,trainSample,superPixMethod)):
                searchingFolders = False
            '''
            k +=1
        print('Sampling rate = '+str(training['S'])+', trainingRatio = '+str(trainRatio))
        #print(training.item())
        header = training['header'][()]['images']
        #print(type(a))
        #print((a[()])['images'])
        #print(header)
        print('Images used as training: '+ str(header))
       
        if CLASSIFIER_TYPE == 'LinearSVC':
            try:
                pickleFile = open(os.path.join(path,'LinearSVC'+'_'+str(levels)+'_'+\
        'brush'+str(brushMasks)+'_'+str(superPixMethod)+'_'+str(features)+'_'+'grown'+str(triGrown)+'.pickle'), 'rb')
            except IOError:
                print('Classifier not trained '+'\n'+'##'+'\n'+'##'+'\n'+'##'+'\n'+'##')
                print('##>>>>>>>>'+'\n'+'##>>>>>>>>'+'\n'+'##>>>>>>>>'+'\n'+'##>>>>>>>>')
            classifier = pickle.load(pickleFile)
            
        elif CLASSIFIER_TYPE == 'Tree':
            try:
                pickleFile = open(os.path.join(path,'Tree'+'_'+str(levels)+'_'+\
        'brush'+str(brushMasks)+'_'+str(superPixMethod)+'_'+str(features)+'_'+'grown'+str(triGrown)+'.pickle'), 'rb')
            except IOError:
                print('Classifier not trained '+'\n'+'##'+'\n'+'##'+'\n'+'##'+'\n'+'##')
                print('##>>>>>>>>'+'\n'+'##>>>>>>>>'+'\n'+'##>>>>>>>>'+'\n'+'##>>>>>>>>')
            classifier = pickle.load(pickleFile)
            #print(os.path.join(path,'Tree'+'_'+str(levels)+'_'+str(trainSample)+'.pickle'))
            
        else:
            print('Classifier requested has not been recognised')
	
        totalError = 0
        totTestingError = 0
        totTrainingError = 0
        imageSetSize = shuffled.shape[0]
        numberPredicted = 0
        imageIndex = 0
        missingTest = 0
        missingTrain = 0
	
        for filepath in shuffled: #glob.glob(os.path.join(DIR_PATH, '*.jpg')):
            '''
            if imageIndex == int(shuffled.shape[0]/trainRatio+1): 
                averageErrorTraining = totalError/numberPredicted
                print('Average error for training set of '+str(int(shuffled.shape[0]/trainRatio+1))+' images is '+ str(averageErrorTraining))
                totalError = 0
                realTrainSetSize = numberPredicted
            '''
            
            
            #print(imageIndex)
            #print('out of')
            #print(shuffled.shape[0])
            
            
            fileNameStringWithExtension = os.path.basename(filepath)
            fileNameString = os.path.splitext(fileNameStringWithExtension)[0]
            maskPath = os.path.join(path, 'masks/'+fileNameString+'_mask')
            brushMaskPath = os.path.join(path, 'brushMasks/'+fileNameString+'_mask'+'.jpg')
            trainMaskPath = os.path.join(path, 'trainMasks/'+fileNameString+'_mask'+'.jpg')
            procTrain = False
            if not os.path.exists(os.path.join(newpath,fileNameString+'_mask.jpg')):
                print('Image '+str(imageIndex+1)+' out of '+str(shuffled.shape[0]))
                sobelise.process_image(filepath,levels,features)
                totalSob = testing_sobel.concatSob(filepath,levels,features)
                maskMissing = False
                try:
                    maskRaw = Image.open(maskPath+'.jpg')
                    maskMissing = False
                    print('harpy')
                    if os.path.exists(brushMaskPath) and brushMasks==True:
                        procTrain = True
                    if os.path.exists(trainMaskPath) and brushMasks==False:
                        procTrain = True
                    imageIndex += 1
                except IOError:
                    print('Image '+fileNameString+' has no corresponding mask, therefore error cannot be calculated')
                    if os.path.exists(brushMaskPath) and brushMasks==True:#imageIndex % trainRatio == 0:
                        missingTrain +=1
                        procTrain = True
                        print('exists 0')
                    elif os.path.exists(trainMaskPath) and brushMasks==False:#imageIndex % trainRatio == 0:
                        missingTrain +=1
                        procTrain = True
                        print('exists 0')
                    else:
                        missingTest +=1
                    imageIndex += 1
                    maskMissing = True
                    #continue
                
                im = Image.open(filepath)
                im = np.asarray(im)
                
                '''
                #im = ndimage.gaussian_filter(im, 3)
                
                sx0 = ndimage.sobel(im[...,...,0], axis=0, mode='constant')
                sy0 = ndimage.sobel(im[...,...,0], axis=1, mode='constant')
                #sob0 = np.hypot(sx0, sy0)
                sx1 = ndimage.sobel(im[...,...,1], axis=0, mode='constant')
                sy1 = ndimage.sobel(im[...,...,1], axis=1, mode='constant')
                #sob1 = np.hypot(sx1, sy1)
                sx2 = ndimage.sobel(im[...,...,2], axis=0, mode='constant')
                sy2 = ndimage.sobel(im[...,...,2], axis=1, mode='constant')
                #sob2 = np.hypot(sx2, sy2)

                sobx = np.dstack([sx0,sx1,sx2])
                soby = np.dstack([sy0,sy1,sy2])

                sobx_blurred0 = ndimage.gaussian_filter(sobx, 8)
                soby_blurred0 = ndimage.gaussian_filter(soby, 8)
                #sob_blurred1 = ndimage.gaussian_filter(sob1_3D, 8)
                #sob_blurred2 = ndimage.gaussian_filter(sob2_3D, 8)
                #sob_blurred = sob_blurred0+sob_blurred1+sob_blurred2
                #sob_blurred2 = ndimage.gaussian_filter(sob_blurred, 8)
                #sob_blurred3 = ndimage.gaussian_filter(sob_blurred2, 8)
                imWithSobBlurred0 = np.dstack([im,sobx,soby,sobx_blurred0,soby_blurred0])
                '''
                #im = rescale(im,0.25)
                im = rescale(im,0.25)#125)
                if features=='RGB'or features=='entropy'or features=='dwt':
                    imArray = im*255 # normalising
                elif features=='sobel' or features=='sobelHandv' or features=='combinedEntSob'or features=='combinedDwtSob':
                    imArray = np.asarray(totalSob)
                    imArray = np.dstack([imArray,im*255])
                elif features =='sobelSansRGB':
                    imArray = np.asarray(totalSob)
                if features =='entropy'or features =='dwt' or features =='combinedDwtSob' or features =='combinedEntSob':
                    dwtFeature = dwtSlide(filepath,4,features)
                '''
                abc = dwtFeature[:,0].reshape(im.shape[0],im.shape[1])
                abc = abc/np.max(abc)
                b = dwtFeature[:,1].reshape(im.shape[0],im.shape[1])
                b = b/np.max(b)
                abc2 = dwtFeature[:,2].reshape(im.shape[0],im.shape[1])
                abc2 = abc2/np.max(abc2)
                b2 = dwtFeature[:,3].reshape(im.shape[0],im.shape[1])
                b2 = b2/np.max(b2)
                abc3 = dwtFeature[:,4].reshape(im.shape[0],im.shape[1])
                abc3 = abc3/np.max(abc3)
                #b3 = dwtFeature[:,7].reshape(im.shape[0],im.shape[1])
                #b3 = b3/np.max(b)
                abc = np.hstack([abc,b,abc2,b2,abc3])
                '''
                #abc = dwtFeature[:,0].reshape(im.shape[0],im.shape[1])
                #abc = abc/np.max(abc)
                #b = dwtFeature[:,1].reshape(im.shape[0],im.shape[1])
                #b = b/np.max(b)
                #abc = np.hstack([abc,b])
                flatIm = im.reshape(im.shape[0]*im.shape[1],-1)
                #flatIm = np.zeros((flatIm.shape[0],flatIm.shape[1])) #for dwt testing
                ##flatImArray = np.hstack([flatIm,dwtFeature])
                flatImArray = imArray.reshape(imArray.shape[0]*imArray.shape[1],imArray.shape[2])
                if features=='combinedEntSob'or features=='combinedDwtSob' or features=='entropy' or features=='dwt':
                    flatImArray = np.hstack([flatImArray,dwtFeature])
                featureMap = flatImArray
                #print('here b4')
               
                a=water_test.watershedFunc2(filepath,superPixMethod)
                #print('between')
                b,totClassified,totMask2,segmentOutlines,totMask=water_test.superPix(im,a,featureMap,classifier,100)
                if superPixMethod == 'None':
                    b=totClassified
                #print('here after')
                print(np.unique((segmentOutlines*255).astype(np.uint8)))
                
                #new end
                #imArray = im
            
                
                #maskArray = np.asarray(maskRaw)
                #maskArray = resize(maskArray,[totalSob.shape[0],totalSob.shape[1]])
                #maskArray *= 255
                #flatMaskArray = maskArray.reshape(maskArray.shape[0]*maskArray.shape[1],1)
                ###flatImArray = imArray.reshape(imArray.shape[0]*imArray.shape[1],imArray.shape[2])
                #predictedMask = classifier.predict(flatImArray)#for superpix
                numberPredicted += 1
                pixelCount = flatImArray.shape[0]
                outputSampleCount = int(1*pixelCount)
                #indices = np.random.choice(pixelCount,replace=False,size=outputSampleCount)
                X = flatImArray#flatImArray[indices,...]
                #y = flatMaskArray#flatMaskArray[indices,...]
                #yPrime = predictedMask.astype(np.int)#for superpix
                
                yPrime = b #new line for superpix
                yPrime = np.asarray(yPrime)
                '''
                totMask2 = np.asarray(totMask2)
                totMask2 = np.reshape(totMask2,(totalSob.shape[0],totalSob.shape[1]))
                totMask2 = rescale(totMask2,4,preserve_range=True)
                totMask2 *= 255
                totMask2 = totMask2.astype(np.uint8)
                '''
                print(yPrime.shape)
                yPrime = np.reshape(yPrime, (-1, 1)) # -1 means make it whatever it needs to be
                #print(yPrime.shape)
                #print(np.max(yPrime))
                print(yPrime.shape)
                print(im.shape)
                yPrimeForMaskSave = np.reshape(yPrime,(im.shape[0],im.shape[1]))
                yPrimeForMaskSaveCopy = np.reshape(yPrime,(im.shape[0],im.shape[1]))
                #print(np.max(yPrimeForMaskSave))
                yPrimeForMaskSave = rescale(yPrimeForMaskSave,8,preserve_range=True,order=0)#order was 1
                #print(np.max(yPrimeForMaskSave))
                #print(np.max(yPrimeForMaskSave))
                yPrimeForMaskSave *= 255
                ##yPrimeForMaskSaveCopy = yPrimeForMaskSaveCopy.astype(np.uint8)
                ##yPrimeForMaskSaveCopy *= 255
                yPrimeForMaskSave = yPrimeForMaskSave.astype(np.uint8)
                ##yPrimeForMaskSaveCopy = yPrimeForMaskSaveCopy.astype(np.uint8)
                #print(os.path.join(newpath,fileNameString+'_mask'))
                if not os.path.exists(os.path.join(path,'preMasks'+str(k-1))):
                    os.makedirs(os.path.join(path,'preMasks'+str(k-1))) 
                basicPath = os.path.join(path,'preMasks'+str(k-1))
                print(np.max(totMask2))
                print(np.min(totMask2))
                #Image.fromarray((abc*255).astype(np.uint8)).save(os.path.join(basicPath,fileNameString+'abc_mask.jpg'))
                Image.fromarray(np.uint8(cm.afmhot(totMask2)*255)).save(os.path.join(basicPath,fileNameString+'_ratio_mask.jpg'))
                Image.fromarray(yPrimeForMaskSave).save(os.path.join(newpath,fileNameString+'_mask.jpg'))
                Image.fromarray((totClassified*255).astype(np.uint8)).save(os.path.join(basicPath,fileNameString+'_basic_mask.jpg'))
                segImage = Image.fromarray((((segmentOutlines*255).astype(np.uint8))))
                segImage=segImage.convert('RGB')
                yPrimeForMaskSaveImage = Image.fromarray((((yPrimeForMaskSaveCopy*255).astype(np.uint8))))
                yPrimeForMaskSaveImage=yPrimeForMaskSaveImage.convert('RGB')
                
                yPrimeForMaskSaveImage2 = np.array(yPrimeForMaskSaveImage)
                yPrimeForMaskSaveImage2[:,:,1:3]=0
                print(np.max(yPrimeForMaskSaveImage2))
                print(np.max(yPrimeForMaskSaveCopy))
                #l=lp
                yPrimeForMaskSaveImage2 = Image.fromarray((((yPrimeForMaskSaveImage2).astype(np.uint8))))
                
                print(np.max(im))
                origImage = Image.fromarray((im*255).astype(np.uint8))
                
                #np.array(segImage)[...,1:3]=0
                #segImage = Image.fromarray((segmentOutlines).astype(np.uint8))
                
                print(np.array(segImage).shape)
                print(np.array(origImage).shape)
                blend = Image.blend(segImage,origImage,0.7)
                blend.save(os.path.join(basicPath,fileNameString+'_segment_mask.jpg'))
                #print(yPrimeForMaskSaveCopy.shape)
                print(im.shape)
                print(segmentOutlines.shape)
                print(yPrimeForMaskSaveCopy.shape)
                blend2 = Image.blend((yPrimeForMaskSaveImage2),origImage,0.7)
                blend2.save(os.path.join(basicPath,fileNameString+'_final_mask.jpg'))
                #Image.fromarray((((segmentOutlines*255).astype(np.uint8)))).save(os.path.join(basicPath,fileNameString+'_segment_mask.jpg'))
                #yPrime = (yPrime>64).astype(np.int)
                if maskMissing == False: 
                    maskArray = np.asarray(maskRaw)
                    maskArray = resize(maskArray,[im.shape[0],im.shape[1]])
                    maskArray *= 255
                    flatMaskArray = maskArray.reshape(maskArray.shape[0]*maskArray.shape[1],1)
                    y = flatMaskArray
                    
                    y = (y>64).astype(int)
                    absError = np.float((np.absolute(y-yPrime)).sum())/(y.shape[0]*y.shape[1])
                    print('Error from image '+fileNameString+ ' is '+str(absError))
                
                    if procTrain==True:#os.path.exists(brushMaskPath):
                        #print('Training Image')
                        print('exists 1')
                        totTrainingError = totTrainingError+absError
                    else:
                        totTestingError = totTestingError+absError
                    #totalError = totalError+absError
            else:
                print('Image '+str(imageIndex+1)+' out of '+str(shuffled.shape[0])+' already processed')
                numberPredicted+=1
                try:
                    maskRaw = Image.open(maskPath+'.jpg')
                    maskMissing = False
                    imageIndex += 1
                    if os.path.exists(brushMaskPath) and brushMasks==True:
                        procTrain = True
                    if os.path.exists(trainMaskPath) and brushMasks==False:
                        procTrain = True
                except IOError:
                    print('Image '+fileNameString+' has no corresponding mask, therefore error cannot be calculated')
                    if os.path.exists(brushMaskPath) and brushMasks==True:
                        procTrain = True
                    if os.path.exists(trainMaskPath) and brushMasks==False:
                        procTrain = True
                    if procTrain==True:#os.path.exists(brushMaskPath):#imageIndex % trainRatio == 0:
                        missingTrain +=1
                        print('exists 2')
                    else:
                        missingTest +=1
                    imageIndex += 1
                    maskMissing = True
                    continue # was commented
                imgLoad = np.asarray(Image.open(os.path.join(newpath,fileNameString+'_mask.jpg')))
                yPrime = resize(imgLoad,[imgLoad.shape[0]//4,imgLoad.shape[1]//4])
                #yPrime = resize(imgLoad,[imgLoad.shape[0]*imgLoad.shape[1]])
                if (np.max(yPrime) <= 1):
                    yPrime *= 255
                flatYPrime = yPrime.reshape(yPrime.shape[0]*yPrime.shape[1])
                flatYPrime = (flatYPrime>64).astype(int)
                flatYPrime = np.reshape(flatYPrime, (-1, 1))
                if maskMissing == False: 
                    maskArray = np.asarray(maskRaw)
                    maskArray = resize(maskArray,[imgLoad.shape[0]//4,imgLoad.shape[1]//4])
                    maskArray *= 255
                    flatMaskArray = maskArray.reshape(maskArray.shape[0]*maskArray.shape[1],1)
                    y = flatMaskArray
                    #print(y.shape)
                    #print(flatYPrime.shape)
                    y = (y>64).astype(int)
                    absError = np.float((np.absolute(y-flatYPrime)).sum())/(y.shape[0]*y.shape[1])
                    print('Error from image '+fileNameString+ ' is '+str(absError))
                
                    if procTrain==True:#os.path.exists(brushMaskPath):
                        print('Training Image')
                        
                        totTrainingError = totTrainingError+absError
                        
                    else:
                        
                        totTestingError = totTestingError+absError
                    #totalError = totalError+absError
                 
            #imageIndex += 1
        '''    
        if imageIndex == int(shuffled.shape[0]/trainRatio): 
            averageErrorTraining = totalError/numberPredicted
            print('Average error for training set of '+str(int(shuffled.shape[0]/trainRatio))+' images is '+ str(averageErrorTraining))
            totalError = 0
            realTrainSetSize = numberPredicted - 1
            averageErrorTest = totalError/(numberPredicted-realTrainSetSize)
            print('Average error for testing set of '+str(imageSetSize-shuffled.shape[0]/trainRatio)+' images is '+ str(averageErrorTest))
        '''
        if len(header)-missingTrain>0:
            print('Number Predicted = ' + str(numberPredicted) +' out of '+str(shuffled.shape[0]))
            averageErrorTraining = totTrainingError/(len(header)-missingTrain)
            print('tot training error')
            print(totTrainingError)
            print('Average error for training set (predicted only) of '+str(int((shuffled.shape[0]/trainRatio+1)-missingTrain))+' images is '+ str(averageErrorTraining))
            averageErrorTest = totTestingError/(shuffled.shape[0]-len(header)-missingTest)
            print('Average error for testing set (predicted only) of '+str((shuffled.shape[0]-len(header)-missingTest))+' images is '+ str(averageErrorTest))
            performance = {'size':(shuffled.shape[0]-len(header)-missingTest),'error':str(averageErrorTest)}
            json.dump( performance, open(os.path.join(path,"performance_"+str(levels)+'_'+\
            'brush'+str(brushMasks)+'_'+str(superPixMethod)+'_'+str(features)+'_'+'grown'+str(triGrown)+".json"), "w" ) )
        else:
            print('Could not calculate error as there were no true masks')
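The cm.afmhot call inside this pipeline simply turns a 2-D array into an 8-bit RGBA heat image before saving it with PIL; a minimal sketch of that step (stand-in data, PNG instead of JPEG so the alpha channel is kept):

import numpy as np
from PIL import Image
from matplotlib import cm

mask = np.random.rand(64, 64)              # stand-in for totMask2
rgba = np.uint8(cm.afmhot(mask) * 255)     # 8-bit RGBA heat image
Image.fromarray(rgba).save("ratio_mask.png")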
Example #14
if plot_f:
    Icolors = Icolors / Imax
    import matplotlib.pyplot as plt
    from matplotlib import cm, colors
    from mpl_toolkits.mplot3d import Axes3D
    x = sin(theta_p) * cos(phi_p)
    y = sin(theta_p) * sin(phi_p)
    z = cos(theta_p)
    fig = plt.figure(figsize=plt.figaspect(1.))
    ax = fig.add_subplot(1, 1, 1, projection='3d')
    ax.plot_surface(x,
                    y,
                    z,
                    rstride=1,
                    cstride=1,
                    facecolors=cm.afmhot(Icolors))
    plt.show()

# Randomly sample positions on sphere
theta = zeros(Nsamples)
phi = zeros(Nsamples)
I = zeros(Nsamples)
for n in range(Nsamples):
    theta[n] = arccos(1. - 2. * random())
    phi[n] = 2. * pi * random()
    I[n] = get_I(theta[n], phi[n], amp)


# Evaluate eddington tensor exactly
def get_n(i, theta, phi):
    if i == 0: