Example #1
    def gradient(self, x, y):
        h = 1e-5  # central differences lose precision in float64 with much smaller steps
 
        gradw = [np.zeros(w.shape) for w in self.weights]
        gradb = [np.zeros(b.shape) for b in self.biases]

        for gw, w in zip(gradw, self.weights):
            for i, v in np.ndenumerate(w.flat):
                w.flat[i] = v + h
                fxh1 = self.cost.func(self.feedforward(x), y)
                w.flat[i] = v - h
                fxh2 = self.cost.func(self.feedforward(x), y)
                gw.flat[i] = (fxh1 - fxh2)/(2*h)
                w.flat[i] = v

        for gb, b in zip(gradb, self.biases):
            for i, v in np.ndenumerate(b.flat):
                b.flat[i] = v + h
                fxh1 = self.cost.func(self.feedforward(x), y)
                b.flat[i] = v - h
                fxh2 = self.cost.func(self.feedforward(x), y)
                gb.flat[i] = (fxh1 - fxh2)/(2*h)
                b.flat[i] = v

        return gradw, gradb
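The same central-difference recipe works for any scalar-valued function of an array; a minimal standalone sketch (the helper name numerical_grad is invented here):

import numpy as np

def numerical_grad(f, x, h=1e-5):
    # (f(x+h) - f(x-h)) / (2h), one component at a time
    grad = np.zeros_like(x, dtype=float)
    for i, v in np.ndenumerate(x):
        x[i] = v + h
        fxh1 = f(x)
        x[i] = v - h
        fxh2 = f(x)
        grad[i] = (fxh1 - fxh2) / (2 * h)
        x[i] = v  # restore the original entry
    return grad

x = np.array([1.0, 2.0, 3.0])
print(numerical_grad(lambda z: np.sum(z ** 2), x))  # approx. [2. 4. 6.]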
Example #2
def assert_equal_from_matlab(a, b, options=None):
    # Compares a and b for equality. They are all going to be numpy
    # types. hdf5storage and scipy behave differently when importing
    # arrays as to whether they are 2D or not, so we will make them all
    # at least 2D regardless. For strings, the two packages produce
    # transposed results of each other, so one just needs to be
    # transposed. For object arrays, each element must be iterated over
    # to be compared. For structured ndarrays, their fields need to be
    # compared and then they can be compared element and field
    # wise. Otherwise, they can be directly compared. Note, the type is
    # often converted by scipy (or en route to the file before scipy
    # gets it), so comparisons are done by value, which is not perfect.
    a = np.atleast_2d(a)
    b = np.atleast_2d(b)
    if a.dtype.char == 'U':
        a = a.T
    if b.dtype.name == 'object':
        a = a.flatten()
        b = b.flatten()
        for index, x in np.ndenumerate(a):
            assert_equal_from_matlab(a[index], b[index], options)
    elif b.dtype.names is not None or a.dtype.names is not None:
        assert a.dtype.names is not None
        assert b.dtype.names is not None
        assert set(a.dtype.names) == set(b.dtype.names)
        a = a.flatten()
        b = b.flatten()
        for k in b.dtype.names:
            for index, x in np.ndenumerate(a):
                assert_equal_from_matlab(a[k][index], b[k][index],
                                         options)
    else:
        with warnings.catch_warnings():
            warnings.simplefilter('ignore', RuntimeWarning)
            npt.assert_equal(a, b)
Example #3
def limpa_imagem(img_cinza):
    # binarize the grayscale image
    img_bin_cinza = np.where(img_cinza < np.mean(img_cinza), 0, 255)

    # apply LBP to the grayscale image; LBP is used to avoid losing
    # information in regions close to dark areas (likely cells)
    lbp_img = local_binary_pattern(img_cinza, 24, 3, method='uniform')

    # blur the LBP result and binarize it
    blur_img = gaussian(lbp_img, sigma=6)
    img_bin_blur = np.where(blur_img < np.mean(blur_img), 0, 255)

    # merge the two regions defined by the grayscale binarization and
    # the blurred-LBP binarization
    mascara = np.copy(img_bin_cinza)
    for (a, b), valor in np.ndenumerate(img_bin_blur):
        if valor == 0:
            mascara[a][b] = 0

    # apply the mask to the original grayscale image to better delimit
    # the regions that carry no information (fully white regions)
    img_limpa = np.copy(img_cinza)
    for (a, b), valor in np.ndenumerate(mascara):
        if valor == 255:
            img_limpa[a][b] = 255

    return img_limpa
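The two element-wise loops above can equivalently be written with boolean indexing, which is shorter and much faster:

mascara = np.copy(img_bin_cinza)
mascara[img_bin_blur == 0] = 0
img_limpa = np.copy(img_cinza)
img_limpa[mascara == 255] = 255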
Example #4
    def OnPaint(self, event):
        dc = wx.PaintDC(self.frame.drawPanel)

        # calculate some basic variables for drawing
        boardPixelWidth, boardPixelHeight = dc.GetSize()
        pieceWidth = boardPixelWidth // self.tetris.BOARD_WIDTH
        pieceHeight = boardPixelHeight // self.tetris.BOARD_HEIGHT
        offsetX = (boardPixelWidth - pieceWidth * self.tetris.BOARD_WIDTH) // 2
        offsetY = boardPixelHeight - pieceHeight * self.tetris.BOARD_HEIGHT

        # draw board
        for (y, x), value in numpy.ndenumerate(self.tetris.board):
            if value != 0:
                self.DrawRect(dc, x, y, pieceWidth, pieceHeight, pieces.Pieces.colors[value], offsetX, offsetY)

        # draw current tile
        if self.tetris.cPiece is not None:
            for (y, x), value in numpy.ndenumerate(self.tetris.cPiece.shape):
                if value != 0:
                    self.DrawRect(
                        dc,
                        x + self.tetris.cPiece.x,
                        y + self.tetris.cPiece.y,
                        pieceWidth,
                        pieceHeight,
                        self.tetris.cPiece.color,
                        offsetX,
                        offsetY,
                    )
Example #5
    def axis_radio_event(self):
        matrix = numpy.array([[button.isChecked() for button in row] for row in self.button_matrix])

        if matrix.sum() < 3:
            empty_col = matrix.sum(0).tolist().index(0)
            empty_row = matrix.sum(1).tolist().index(0)
            matrix[empty_row, empty_col] = True

            for (row, col), value in numpy.ndenumerate(matrix):
                self.button_matrix[row][col].setChecked(value)

        elif matrix.sum() > 3:

            double_col = matrix.sum(0).tolist().index(2)
            double_row = matrix.sum(1).tolist().index(2)
            matrix[double_row, :] = False
            matrix[:, double_col] = False
            matrix[double_row, double_col] = True

            empty_col = matrix.sum(0).tolist().index(0)
            empty_row = matrix.sum(1).tolist().index(0)
            matrix[empty_row, empty_col] = True

            for (row, col), value in numpy.ndenumerate(matrix):
                self.button_matrix[row][col].setChecked(value)
Example #6
def plotHammingGraph(X):
    print('X:',X)
    indices = [i for i,x in np.ndenumerate(X)] # TODO arrange in circle
    coords = normalizeCoords([i for i,x in np.ndenumerate(X)])
    print('coords:',coords)
    arity = np.max(indices)+1

    from mpl_toolkits.mplot3d import Axes3D

    fig = plt.figure()
    ax = fig.add_subplot(111,projection='3d')
    s = '\n'.join(str(x) for x in np.ndenumerate(X))
    from inspect import getsource
    print('getsource:',getsource(plt.text))
    ax.set_title(Automaton(X))
    fig.text(0.01,0.99,s,horizontalalignment='left',verticalalignment='top',transform=ax.transAxes,fontsize=20)
    # add edges
    from matplotlib import patheffects
    for i,idx in enumerate(indices):
        cur_coord = coords[i]
        ax.text(cur_coord[0], cur_coord[1], cur_coord[2]-0.04,  '%s' % (str(idx)+'\n'+str(X[idx])), horizontalalignment='center', size=20, zorder=2, color='k', fontdict={'fontweight':'bold'}, path_effects=[patheffects.PathPatchEffect(edgecolor='white', facecolor='black', linewidth=1.2)])
        for j in getHammingNeighbors(idx,arity):
            ax.plot(*zip(*[cur_coord, coords[ltoi(j,arity)]]),c='black') # zorder doesn't work, sorry

    #add nodes
    ax.scatter(*zip(*coords),c=X.flatten(),s=500,cmap='gist_ncar')
Example #7
def optical_flow_HS(im1, im2, s, n):
    N,M = im1.shape
    u = np.zeros(im1.shape)
    v = np.zeros(im1.shape)    
    tu = np.zeros(im1.shape)
    tv = np.zeros(im1.shape)
    fx,fy,ft = conv(im1,im2)
   
    for i in range(n):
        for (y, x), _ in np.ndenumerate(im1):  # value unused; '_' avoids shadowing n
            if 2 <= x < (M-1) and 2 <= y < (N-1):
                Ex = fx[y][x]
                Ey = fy[y][x]
                Et = ft[y][x]
                AU = (u[y][x-1] + u[y][x+1] + u[y-1][x] + u[y+1][x])/4
                AV = (v[y][x-1] + v[y][x+1] + v[y-1][x] + v[y+1][x])/4
                
                A = (Ex*AU + Ey*AV +Et)
                B = (1 + s*(Ex*Ex + Ey*Ey))
                tu[y][x] = AU - (Ex*s*A/B)
                tv[y][x] = AV - (Ey*s*A/B)
        
        for (y, x), _ in np.ndenumerate(im1):
            if 2 <= x < (M-1) and 2 <= y < (N-1):
                u[y][x] = tu[y][x] 
                v[y][x] = tv[y][x]
                
    return  u,v
Example #8
def getPathData(data, param):
    path_data = []
    p = param

    tree = Grid_adaptiveM(data, p.Eps, p)
    tree.buildIndex()
    tree.adjustConsistency()

    leaf_boxes = []
    for (_, _), l1_child in np.ndenumerate(tree.root.children):
        if not l1_child.n_isLeaf and l1_child.children is not None:
            for (_, _), l2_child in np.ndenumerate(l1_child.children):  # l2_child is a second-level cell
                leaf_boxes.append((l2_child.n_box, l2_child.a_count))
        leaf_boxes.append((l1_child.n_box, l1_child.a_count))

    for data in leaf_boxes:
        # [[x_min,y_min],[x_max,y_max]]
        path = []
        box = data[0]
        # (x_min, y_min) --> (x_min, y_max) --> (x_max, y_max) --> (x_max, y_min) --> (x_min, y_min)
        path.append((mpath.Path.MOVETO, (box[0][0], box[0][1])))
        path.append((mpath.Path.LINETO, (box[0][0], box[1][1])))
        path.append((mpath.Path.LINETO, (box[1][0], box[1][1])))
        path.append((mpath.Path.LINETO, (box[1][0], box[0][1])))
        path.append((mpath.Path.CLOSEPOLY, (box[0][0], box[0][1])))

        path_data.append((path, data[1]))

    return path_data
Example #9
def check_gradient(x_ad, x_np, *vars):
    grad1 = x_ad.gradient(vars)

    grad2 = []
    for v in vars:
        curr = np.zeros(shape=x_np.shape)
        for i,xi in np.ndenumerate(x_np):
            curr[i] = xi.d(v)
        grad2.append(curr)

    grad1, grad2 = [np.array(g) for g in (grad1, grad2)]
    assert np.max(np.abs(np.log(grad1 / grad2))) < 1e-12

    if get_order() == 1:
        return

    hess1 = x_ad.hessian(vars)
    hess2 = []
    for v in vars:
        currRow1,currRow2 = [],[]
        #hess1.append(currRow1)
        hess2.append(currRow2)

        for u in vars:
            # make d2 entry for hess2
            curr = np.zeros(shape=x_np.shape)
            for i,xi in np.ndenumerate(x_np):
                if u is v:
                    curr[i] = xi.d2(v)
                else:
                    curr[i] = xi.d2c(u,v)
            currRow2.append(curr)

    hess1, hess2 = [np.array(h) for h in (hess1, hess2)]
    assert np.max(np.abs(np.log(hess1 / hess2))) < 1e-12
Example #10
def plot_density(count_trips,count,title):
    grid = np.zeros((config.bins,config.bins))
    for (i,j),z in np.ndenumerate(grid):
        try:
            grid[j,i] = float(count[(i,j)]) / float(count_trips[(i,j)])
        except (KeyError, ZeroDivisionError):
            grid[j,i] = 0
        #print "----"
        #print grid[i,j], i, j
        #print count[(i,j)]
        #print count_trips[(i,j)]
    grid = np.flipud(grid) #to counter matshow vertical flip
    fig, ax = plt.subplots(figsize=(10, 10))
    ax.matshow(grid, cmap='Spectral')
    ax.xaxis.set_ticks_position('bottom')
    ax.set_xlabel('Longitude')
    ax.set_ylabel('Latitude')
    xticks = np.linspace(config.minlong,config.maxlong,num=round(config.bins/2))
    yticks = np.linspace(config.minlat,config.maxlat,num=round(config.bins/2))
    yticks = yticks[::-1]
    xticks = np.around(xticks,decimals=1)
    yticks = np.around(yticks,decimals=1)
    xspace = np.linspace(0, config.bins-1, config.bins // 2)
    yspace = np.linspace(0, config.bins-1, config.bins // 2)
    plt.xticks(xspace,xticks)
    plt.yticks(yspace,yticks)
    for (i,j),z in np.ndenumerate(grid):
        ax.text(j, i, '{:0.2f}'.format(z), ha='center', va='center')
    plt.title(title)
    plt.show()
Example #11
def combine(a, b, w):
    matches = {}

    # split dictionaries into keys and values
    al = [x for x in a.items()]
    ak, av = zip(*al)
    bl = [x for x in b.items()]
    bk, bv = zip(*bl)

    # scale the values in the range 0-1
    a_scaled = preprocessing.minmax_scale(av, feature_range=(0,1))
    b_scaled = preprocessing.minmax_scale(bv, feature_range=(0,1))

    # build numpy structured arrays combining scaled values and original keys
    names = ['keys', 'values']
    formats = ['S225', 'f8']
    dtype = dict(names=names, formats=formats)
    anp = np.array(list(zip(ak,a_scaled)), dtype=dtype)
    bnp = np.array(list(zip(bk,b_scaled)), dtype=dtype)

    # iterate over numpy structures creating a weighted average between values with the same key
    for i, t1 in np.ndenumerate(anp):
        for j, t2 in np.ndenumerate(bnp):
            if anp['keys'][i] == bnp['keys'][j]:
                stack = np.vstack((anp['values'][i], bnp['values'][j]))
                matches[anp['keys'][i].decode("utf-8")] = np.average(stack, axis=0, weights=w)[0]   # python dictionary

    return matches
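A usage sketch for combine; note the values are min-max scaled to [0, 1] before the weighted average, so the outputs are relative scores:

a = {'x': 1.0, 'y': 3.0}
b = {'x': 2.0, 'y': 0.0}
print(combine(a, b, w=[0.75, 0.25]))  # {'x': 0.25, 'y': 0.75}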
Example #12
def callback2(data):
    global face_det
    global filename
    global datei
    #rospy.loginfo(rospy.get_caller_id()+"CDIA data:" + str(len(data.head_detections)))
    if len(data.head_detections)==1 and len(data.head_detections[0].face_detections) == 1:
        face_det+=1
        print(face_det)
        bridge = CvBridge()
        image = bridge.imgmsg_to_cv2(data.head_detections[0].color_image,"rgb8")
        depth = bridge.imgmsg_to_cv2(data.head_detections[0].depth_image,"32FC3")
        #cv2.imshow("image",image)
        cv2.imshow("depth",depth)
        cv2.waitKey()
        print(depth.shape)
        set_name = datei[1]
        depth_path="/home/stefan/rgbd_db_heads/"+set_name+"/"+filename+"_d.xml"
        img_path="/home/stefan/rgbd_db_heads/"+set_name+"/"+filename+"_c.bmp"
        #path+"/"+str(dir)+"/"+os.path.splitext(file)[0]+".xml"
        depth_slice1 = depth[:,:,0].astype(numpy.float32)
        depth_slice2 = depth[:,:,1].astype(numpy.float32)
        depth_slice3 = depth[:,:,2].astype(numpy.float32)

        for(r,c),value in numpy.ndenumerate(depth_slice1):
            if depth_slice1[r,c]==0:
                depth_slice1[r,c]=numpy.nan
        for(r,c),value in numpy.ndenumerate(depth_slice2):
            if depth_slice2[r,c]==0:
                depth_slice2[r,c]=numpy.nan
        print(filename)
Example #13
    def traverse_cells(self, visitor):
	""" Call a visitor function on each cell in the Partition. The visitor
	should look like this::

	  def visitor(path):
	      pass

        The path passed in is a list of PathNodes describing the nesting of
        the cell within partitions.  From the path, you can get all the
        containing partitions of the element it points to, the element,
        and both n-dimensional and flat indices of the element within each
        partition.  See PathNode for more details.
        """
        if self.children.size:
            for index, child in np.ndenumerate(self.children):
                child.traverse_cells(visitor)
        else:
            for index, elt in np.ndenumerate(self.box):
                # Build a list of PathNodes containing ancestor partitions
                path = [Partition.PathNode(p) for p in self.ancestors]
                path[-1].index = index
                # assign index of elt within each partition to each PathNode
                i = -2
                while i >= -len(path):
                    child = path[i+1]
                    path[i].index = child.partition.self_to_parent(child.index)
                    i -= 1
                # Now visit the element with its path.
                visitor(path)
Example #14
    def test_convenient_facetgrid(self):
        a = easy_array((10, 15, 4))
        d = DataArray(a, dims=["y", "x", "z"])
        g = self.plotfunc(d, x="x", y="y", col="z", col_wrap=2)

        self.assertArrayEqual(g.axes.shape, [2, 2])
        for (y, x), ax in np.ndenumerate(g.axes):
            self.assertTrue(ax.has_data())
            if x == 0:
                self.assertEqual("y", ax.get_ylabel())
            else:
                self.assertEqual("", ax.get_ylabel())
            if y == 1:
                self.assertEqual("x", ax.get_xlabel())
            else:
                self.assertEqual("", ax.get_xlabel())

        # Inferring labels
        g = self.plotfunc(d, col="z", col_wrap=2)
        self.assertArrayEqual(g.axes.shape, [2, 2])
        for (y, x), ax in np.ndenumerate(g.axes):
            self.assertTrue(ax.has_data())
            if x == 0:
                self.assertEqual("y", ax.get_ylabel())
            else:
                self.assertEqual("", ax.get_ylabel())
            if y == 1:
                self.assertEqual("x", ax.get_xlabel())
            else:
                self.assertEqual("", ax.get_xlabel())
Example #15
def trend_significance(residuals, sigma=0.05):
    nt = len(residuals)
    count = 0
    x = len(residuals[0, :, 0])
    y = len(residuals[0, 0, :])
    rcorrs = np.empty(shape=[x, y])
    for (i,j), value in np.ndenumerate(rcorrs):
        count += 1
        r_corr,_ = sp.stats.pearsonr(residuals[: -1, i, j], residuals[1:, i, j])
        if r_corr < 0:
            r_corr = 0
        rcorrs[i][j] = r_corr
    
    cs = np.empty(shape=[x, y])    
    for (i,j), rcor in np.ndenumerate(rcorrs):
        neff = float(nt * (1-rcor) / (1 + rcor))
        #neff = nt
        a = residuals[:,i,j]
        b = a * a
        d = sum(b)
        se = np.sqrt( d / ( neff - 2 ) )
        sb = se / np.sqrt( sum( ( np.arange(nt) - np.mean( np.arange(nt) ) )**2 ) )

        tcrit = sp.stats.t.isf(sigma/2.0, nt - 2 )

        c = tcrit * sb

        cs[i][j] = c
    return cs
Example #16
def arrays_to_xls(array_list,sheet_name_list,xls_packet_fout):
    '''Writes arrays to Excel spreadsheet.'''
    
    ibook = xlsxwriter.Workbook(xls_packet_fout,{'nan_inf_to_errors': True}) 
    
    # Write the arrays to a bundle of Excel sheets to facilitate array inspection
    for iarray,isheet_name in zip(array_list,sheet_name_list):
                                  
        if iarray.ndim > 2:    # 3D array
            for ilay in range(np.shape(iarray)[0]):
                jarray = iarray[ilay,:,:]
                # build a fresh per-layer name so suffixes don't accumulate across layers
                jsheet_name = isheet_name + '_' + str(ilay+1)
                isheet = ibook.add_worksheet(jsheet_name)
                print('Writing sheet: ', jsheet_name)
                for (irow,icol),ival in np.ndenumerate(jarray):
                    isheet.write(irow,icol,ival)
                
        else:   # For one layer        
            isheet = ibook.add_worksheet(isheet_name)
            print('Writing sheet: ', isheet_name)
            for (irow,icol),ival in np.ndenumerate(iarray):
                isheet.write(irow,icol,ival)                                
    
    ibook.close()
    
    return
Example #17
def label_components(m):
    linked = [{0}]
    label = 0
    b = np.zeros(m.shape,dtype=int)
    for (i,j),v in np.ndenumerate(m):
        if v:
            N = b[i-1,j]
            W = b[i,j-1]
            if N > 0:
                b[i,j] = N
                if W > 0 and N != W:
                    linked[N].add(W)
                    linked[W].add(N)
            elif W > 0:
                b[i,j] = W
            else:
                label += 1
                b[i,j] = label
                linked += [{label}]
        else:
            b[i,j] = 0

    for (i,j),v in np.ndenumerate(b):
        if v > 0 and len(linked[v])> 1:
            b[i,j] = min(linked[v])

    labels = list({min(linked[v]) for v in range(label+1)})
    for i in range(1,len(labels)):
        b[b==labels[i]] = i

    return b,len(labels)-1
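A quick check of label_components on a small boolean mask (two-pass labeling with 4-connectivity via the north and west neighbors):

m = np.array([[1, 1, 0, 0],
              [0, 1, 0, 1],
              [0, 0, 0, 1],
              [1, 0, 0, 1]], dtype=bool)
b, n = label_components(m)
print(n)  # 3 connected components
print(b)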
Example #18
def detectCircles(img, r, useGradient):
    grayimg = rgb2gray(img)
    edges = cv2.Canny(img,100,200)
    ax[0].imshow(edges, cmap=plt.cm.gray)
    ax[0].set_title('after canny image operation')
    if useGradient == 0:
        accumulator1 = np.zeros(edges.shape)
        for (i,j),value in np.ndenumerate(edges):
            if value:
                for t_idx in np.arange(0,2*math.pi,math.pi/100):
                    a = int(i - (r * math.cos(t_idx)))
                    b = int(j + (r * math.sin(t_idx)))
                    if a>0 and b>0 and a < accumulator1.shape[0] and b < accumulator1.shape[1]:
                        accumulator1[a, b] += 1
        
        print(accumulator1)
        ax[1].imshow(accumulator1, cmap=plt.cm.gray)
        ax[1].set_title('Accumulator array without using gradient')
    else:
        dx = ndimage.sobel(grayimg, axis=0, mode='constant')
        dy = ndimage.sobel(grayimg, axis=1, mode='constant')
        accumulator = np.zeros(edges.shape)
        for (i,j),value in np.ndenumerate(edges):
            if value:
                gradient = math.atan(-dx[i,j]/(dy[i,j]+0.00001))
                for theta in np.arange(gradient-math.pi/4,gradient+math.pi/4,math.pi/100):
                    a = int(i - (r * math.cos(theta)))
                    b = int(j + (r * math.sin(theta)))
                    if a < accumulator.shape[0] and b < accumulator.shape[1]:
                        accumulator[a, b] += 1
        ax[1].imshow(accumulator, cmap=plt.cm.gray)
        ax[1].set_title('Accumulator array with gradient')
        print(accumulator)
    return 
Example #19
def make_G_hat(M, alpha=1, beta=1):
    '''G_hat is a Markov chain of length 2:
    G_cp is a matrix to go from countries to products, and
    G_pc is a matrix to go from products to countries.'''
    
    k_c  = M.sum(axis=1) # k_c: row sums (one entry per country)
    k_p = M.sum(axis=0) # k_p: column sums (one entry per product)
    
    G_cp = np.zeros(shape=M.shape)
    #Gcp_beta
    for [c, p], val in np.ndenumerate(M):
        numerateur = (M[c,p]) * (k_c[c] ** ((-1) * beta))
        denominateur = Gcp_denominateur(M, p, k_c, beta)
        G_cp[c,p] = numerateur / float(denominateur)
    
    
    G_pc = np.zeros(shape=M.T.shape)
    #Gpc_alpha
    for [p, c], val in np.ndenumerate(M.T):
        numerateur = (M.T[p,c]) * (k_p[p] ** ((-1) * alpha))
        denominateur = Gpc_denominateur(M, c, k_p, alpha)
        G_pc[p,c] = numerateur / float(denominateur)
    
    
    return {'G_cp': G_cp, "G_pc" : G_pc}
Example #20
    def RedrawMap(self):

        dc = wx.MemoryDC()
        dc.SelectObject(self.Buffer)

        for (rownum, colnum), value in np.ndenumerate(self.currentmap.tiles):
            if self.currentmap.terrain[rownum, colnum] > 0:
                self.putImage(
                    dc, "%s.png" % terrain.type[self.currentmap.terrain[rownum, colnum]].picture, rownum, colnum
                )

            if self.overlays[rownum, colnum] & BLUE_RING:
                self.putImage(dc, "selected_border_blue.png", rownum, colnum)

            if self.overlays[rownum, colnum] & RED_RING:
                self.putImage(dc, "selected_border_red.png", rownum, colnum)

        for (rownum, colnum), value in np.ndenumerate(self.currentmap.tiles):
            unit_id, unit_color, unit_health = self.currentmap.board[:, rownum, colnum]
            if unit_id > 0:
                self.putImage(
                    dc, "%s_%s.png" % (colors.type[unit_color].name, units.type[unit_id].picture), rownum, colnum
                )
                self.putImage(dc, "counter_%s.png" % unit_health, rownum, colnum)

            if self.red_counters[rownum, colnum] > -1:
                self.putImage(dc, "counter_%d_red.png" % self.red_counters[rownum, colnum], rownum, colnum)

            if self.overlays[rownum, colnum] & SHADED:
                self.putImage(dc, "selected_overlay.png", rownum, colnum)

        gc = wx.GraphicsContext.Create(dc)
        gc.SetAntialiasMode(True)
        for x in self.arrows:
            coords1, coords2, width, color = x
            self.drawArrow(gc, coords1, coords2, width, color)

        for coords, text in self.circles:

            x, y = coords

            gc.SetBrush(wx.Brush("#E60000", wx.SOLID))
            # gc.SetPen(wx.TRANSPARENT_PEN)
            gc.SetPen(wx.Pen(wx.BLACK, 1, wx.SOLID))
            gc.DrawRoundedRectangle(x, y, 12, 12, 3)
            font = wx.Font(6, wx.DEFAULT, wx.NORMAL, wx.BOLD)
            gc.SetFont(font, wx.WHITE)
            txtWidth, txtHeight, txtDescent, txtExternalLeading = gc.GetFullTextExtent(text)
            gc.DrawText(text, int(x + 6 - (txtWidth / 2)), int(y + 6 - (txtHeight / 2)))
            # gc.DrawText(text, x, y)

        b = dc.GetAsBitmap()
        del dc

        b.SetMaskColour("#FF00FF")
        self.setInnerBitmap(b)

        # self.Refresh(eraseBackground=False)
        self.Update()
Example #21
    def denseExtract(self, histF, positions, N):
 #       I = img.astype(float)/255.0
#      I = (I-I.mean())/I.std()

        # size and centre of image
        (nx, ny, kk) = histF.shape
        
        features = np.zeros((N,self.mNCount),dtype=np.float32)
        scale = range(0, self.mNBins-1)
        for p in range(N):
            cx = positions[p,0]+1
            cy = positions[p,1]+1
            if cx<self.mNBins*self.mNSize: continue
            if cy<self.mNBins*self.mNSize: continue
            if cx> nx - self.mNBins*self.mNSize: continue
            if cy> ny - self.mNBins*self.mNSize: continue
            
            f_index = 0
            for s in scale:
                allVals = np.zeros((self.mNMaxFreq+1,self.mNMaxFreq+1),dtype=np.complex64)
                
                for freq in range(0,self.mNMaxFreq+1):
                    template = self.ciKernel[s*(self.mNMaxFreq+1)+freq]
                    (tnx, tny) = template.shape
                    tnx2 = int(round(0.5*tnx))
                    for k in range(0,self.mNMaxFreq+1):
                        allVals[freq,k] = np.sum(np.sum(np.multiply(histF[cx-tnx2:cx-tnx2+tnx,cy-tnx2:cy-tnx2+tnx,k],template)))
                        #if p==2193 and freq==0 and s==0:
                        #        print k
                        #        for kk in histF[cx-tnx2:cx-tnx2+tnx,cy-tnx2:cy-tnx2+tnx,k]:
                        #            for jj in kk:
                        #                print jj.real
                
                
                for (x,y), val in np.ndenumerate(allVals):
                    if x==y:
                        features[p,f_index]=val.real
                        f_index+=1
                        features[p,f_index]=val.imag
                        f_index+=1

                    else:
                        for (x1,y1), val1 in np.ndenumerate(allVals):
                            if x1<x: continue
                            if y1<y: continue
                            if (x-y)==(x1-y1):
                                features[p,f_index]=(val*val1.conjugate()).real
                                f_index+=1
                                features[p,f_index]=(val*val1.conjugate()).imag
                                f_index+=1

        
        return features

#        print "diff to original array:"
#        print features[0], fHOG[0]
#        print np.max(np.abs(features-fHOG))

        return fHOG.tolist()
Example #22
def inverse_relation(R, size_rhs=None, with_indices=False):
    """Computes the inverse relation of a relation.

    If `r` is a relation, then the inverse relation `ri` is defined by

        x ri y  <=>  y r x

    Parameters
    ----------
    R
        2D |NumPy array| of integers representing a relation r on the
        natural numbers via ::

            x r y <=> (x < R.size[0] and y in R[x]).

        Rows of `R` which are too short are padded with -1.
    size_rhs
        Can be provided for speedup. Has to be greater than `R.max()`.
    with_indices
        If `True`, also return the matrix `RINVI`.

    Returns
    -------
    RINV
        2D |NumPy array| representation of the inverse relation.
    RINVI
        |NumPy array| such that for `RINV[i, j] != -1`::

            R[RINV[i, j], RINVI[i, j]] = i.

        Only returned if `with_indices` is `True`.
    """

    assert R.ndim == 2
    logger.warning('Call to unoptimized function inverse_relation')

    num_columns_RINV = np.bincount(R.ravel()[R.ravel() >= 0]).max()  # ignore the -1 padding (bincount rejects negatives)
    size_rhs = size_rhs or (R.max() + 1)
    RINV = np.empty((size_rhs, num_columns_RINV), dtype=R.dtype)
    RINV.fill(-1)
    if with_indices:
        RINVI = np.empty_like(RINV)
        RINVI.fill(-1)

    RINV_COL_COUNTS = np.zeros(size_rhs, dtype=np.int32)

    if not with_indices:
        for index, x in np.ndenumerate(R):
            if x >= 0:
                RINV[x, RINV_COL_COUNTS[x]] = index[0]
                RINV_COL_COUNTS[x] += 1
        return RINV
    else:
        for index, x in np.ndenumerate(R):
            if x >= 0:
                RINV[x, RINV_COL_COUNTS[x]] = index[0]
                RINVI[x, RINV_COL_COUNTS[x]] = index[1]
                RINV_COL_COUNTS[x] += 1
        return RINV, RINVI
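A small usage sketch (rows padded with -1, as described in the docstring):

R = np.array([[0, 1],
              [1, -1],
              [2, 0]])
print(inverse_relation(R))
# [[ 0  2]
#  [ 0  1]
#  [ 2 -1]]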
Example #23
def get_group_array():
    group = np.ones((9, 9), dtype=int)
    mask = np.ones((9, 9, 9), dtype=int)
    for (row, column), val in np.ndenumerate(group):
        group[row, column] = column // 3 + 3 * (row // 3)
    for (row, column), val in np.ndenumerate(group):
        mask[val, row, column] = 0
    return group, mask
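A quick check that each cell is mapped to its 3x3 Sudoku block:

group, mask = get_group_array()
print(group[0, :3], group[4, 4], group[8, 8])  # [0 0 0] 4 8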
Example #24
def normalize_values(a):
    total = 0  # don't shadow the builtin sum()
    for xy, val in n.ndenumerate(a):
        total += abs(val)
    if any(val > 1 for xy, val in n.ndenumerate(a)):
        for xy, val in n.ndenumerate(a):
            a[xy] = float(val/total)*10  # otherwise it is too small
    return a
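A usage sketch (the snippet above imports numpy as n):

a = n.array([[8.0, -8.0], [2.0, 2.0]])  # absolute values sum to 20
print(normalize_values(a))              # each entry becomes val/20 * 10, e.g. 8 -> 4.0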
Example #25
    def extract(self, img):
        I = img.astype(float)/255.0
#      I = (I-I.mean())/I.std()

        # size and centre of image
        (nx, ny) = I.shape
        cx = int(round(0.5*nx))
        cy = int(round(0.5*ny))

        # compute gradient with a central difference method and store in complex form
        (dy, dx) = np.gradient(I)
        dz = dx + 1j*dy

        # compute magnitude/phase of complex numbers
        phi = np.angle(dz)
        r = np.abs(dz)
        r = r/(r.std()+0.001)
 #       r = r/(r.mean()+0.001)


        # create an empty array for storing the dfft of the orientation vector
        histF = np.zeros([nx, ny, self.mNMaxFreq+1])+0j

        # take the dfft of the orientation vector up to order MaxFreq
        # positive values of k only since negative values give conjugate
        for k in range(0,self.mNMaxFreq+1):
            histF[:,:,k] = np.multiply(np.exp( -1j * (k) * phi) , r+0j)
        

        # compute regional descriptors by convolutions (these descriptors are not rotation invariant)
        fHOG = np.zeros([self.mNCount])
        scale = range(0, self.mNBins-1)
        f_index = 0
        for s in scale:
            allVals = np.zeros((self.mNMaxFreq+1,self.mNMaxFreq+1),dtype=np.complex64)
            for freq in range(0,self.mNMaxFreq+1):
                template = self.ciKernel[s*(self.mNMaxFreq+1)+freq]
                (tnx, tny) = template.shape
                tnx2 = int(round(0.5*tnx))
                for k in range(0,self.mNMaxFreq+1):
                    allVals[freq,k] = np.sum(np.sum(np.multiply(histF[cx-tnx2:cx-tnx2+tnx,cy-tnx2:cy-tnx2+tnx,k],template)))
            for (x,y), val in np.ndenumerate(allVals):
                if x==y:
                    fHOG[f_index]=val.real
                    f_index+=1
                    fHOG[f_index]=val.imag
                    f_index+=1
                else:
                    for (x1,y1), val1 in np.ndenumerate(allVals):
                        if x1<x: continue
                        if y1<y: continue
                        if (x-y)==(x1-y1):
                            fHOG[f_index]=(val*val1.conjugate()).real
                            f_index+=1
                            fHOG[f_index]=(val*val1.conjugate()).imag
                            f_index+=1

        return fHOG.tolist()
Example #26
	def shmoo_plotting(self):
		''' pixel register shmoo plot '''
		shmoopdf = PdfPages('shmoo.pdf')
		shmoonp = np.array(self.shmoo_errors)
		data = shmoonp.reshape(len(self.voltages),-1,order='F')
		fig, ax = plt.subplots()
		plt.title('Pixel registers errors')
		ax.set_axis_off()
		tb = Table(ax, bbox=[0,0,1,1])
		ncols = len(self.bitfiles)
		nrows = len(self.voltages)
		width, height = 1.0 / ncols, 1.0 / nrows
		# Add cells
		for (i,j), val in np.ndenumerate(data):
			color = ''
			if val == 0: color = 'green'
			if 0 < val < 10: color = 'yellow'
			if val > 10: color = 'red'
			tb.add_cell(i, j, width, height, text=str(val),
				loc='center', facecolor=color)
		# Row Labels...
		for i in range(len(self.voltages)):
			tb.add_cell(i, -1, width, height, text=str(self.voltages[i])+'V', loc='right',
						edgecolor='none', facecolor='none')
		# Column Labels...
		for j in range(len(self.bitfiles)):
			freq_label = self.bitfiles[j][-9:-7].replace('_', '')
			tb.add_cell(nrows+1, j, width, height/2, text=freq_label+' MHz', loc='center',
							   edgecolor='none', facecolor='none')
		ax.add_table(tb)
		shmoopdf.savefig()

		''' global register shmoo plot '''
		shmoo_glob_np = np.array(self.shmoo_global_errors)
		data_g = shmoo_glob_np.reshape(len(self.voltages),-1,order='F')
		fig_g, ax_g = plt.subplots()
		ax_g.set_axis_off()
		tb_g = Table(ax_g, bbox=[0,0,1,1])
		plt.title('Global registers errors')
		# Add cells
		for (i,j), val_g in np.ndenumerate(data_g):
			color = ''
			if val_g == 0: color = 'green'
			if val_g > 0: color = 'red'
			tb_g.add_cell(i, j, width, height, text=str(val_g),
				loc='center', facecolor=color)
		# Row Labels...
		for i in range(len(self.voltages)):
			tb_g.add_cell(i, -1, width, height, text=str(self.voltages[i])+'V', loc='right',
						edgecolor='none', facecolor='none')
		# Column Labels...
		for j in range(len(self.bitfiles)):
			freq_label = self.bitfiles[j][-9:-7].replace('_', '')
			tb_g.add_cell(nrows+1, j, width, height/2, text=freq_label+' MHz', loc='center',
							   edgecolor='none', facecolor='none')
		ax_g.add_table(tb_g)
		shmoopdf.savefig()
		shmoopdf.close()
Example #27
    def calcDiscussionLength(self, measure=('word', 'character')[0]):
        counts = np.zeros((len(self.data),), dtype=int)
        if measure == 'word':
            for i, disc in np.ndenumerate(self.data['Discussion']):
                counts[i[0]] = len(re.split('[ ./-]+', disc))
        else:
            for i, disc in np.ndenumerate(self.data['Discussion']):
                counts[i[0]] = len(disc)
        return counts
Example #28
def is_similar_matrix(m1, m2, small=1E-5):
  """
  Evaluates similar matrixes through matrix components.
  """
  iter1 = np.ndenumerate(m1)
  iter2 = np.ndenumerate(m2)
  for (i1, val1), (i2, val2) in zip(iter1, iter2):
    if not is_similar_mag(val1, val2, small):
      return False
  return True
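Assuming is_similar_mag compares the two values within the given tolerance, this is roughly np.allclose for plain numeric arrays:

a = np.eye(2)
b = np.eye(2) + 1e-7
print(is_similar_matrix(a, b))       # True with the default tolerance
print(np.allclose(a, b, atol=1e-5))  # the vectorized equivalent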
Example #29
    def connectSpots(self):
        width, height = self.lattice.shape
        for (row, col), val in ndenumerate(self.lattice):
            self.lattice[row][col].vertex = self.graphMain.add_vertex()

        for (row, col), val in ndenumerate(self.lattice):
            neighbors = self.find_neighbors(row,col)
            for neighbor in neighbors:
                if neighbor.vertex is not None:
                    self.graphMain.add_edge(self.lattice[row][col].vertex, neighbor.vertex)
Example #30
def hist3d_color(x_data, y_data, z_data, bins=12):

    import numpy as np
    import matplotlib.pyplot as pyplot
    from mpl_toolkits.mplot3d import Axes3D

    linspejs = np.linspace(-180.0, 180.0, 9)

    ax1 = np.histogram2d(x_data, y_data, bins=linspejs)
    ax2 = np.histogram2d(x_data, z_data, bins=linspejs)
    ax3 = np.histogram2d(z_data, y_data, bins=linspejs)
    xs, ys, zs = ax1[1], ax1[2], ax3[1]
    dx, dy, dz = xs[1] - xs[0], ys[1] - ys[0], zs[1] - zs[0]

    def rdn():
        return (1 - (-1)) * np.random.random() + -1

    smart = np.zeros((8, 8, 8), dtype=int)


    for (i1, j1), v1 in np.ndenumerate(ax1[0]):
        if v1 == 0:
            continue
        for k2, v2 in enumerate(ax2[0][i1]):
            v3 = ax3[0][k2][j1]
            if v1 == 0 or v2 == 0 or v3 == 0:
                continue
            num = min(v1, v2, v3)
            smart[i1, j1, k2] += num
            v1 -= num
            v2 -= num
            v3 -= num
    points = []
    for (i, j, k), v in np.ndenumerate(smart):

        if v >= 5.0:

            points.append((xs[i], ys[j], zs[k], v))
    points = np.array(points)
    fig = pyplot.figure()
    sub = fig.add_subplot(111, projection="3d")

    Blues = pyplot.get_cmap("coolwarm")

    sub.scatter(points[:, 0], points[:, 1], points[:, 2], color=Blues(points[:, 3] / 10.0), marker="o", s=25)

    sub.axes.set_xticks(xs)
    sub.axes.set_yticks(ys)
    sub.axes.set_zticks(zs)
    pyplot.ion()
    pyplot.grid()
    pyplot.show()
    return points, sub
Example #31
    def setB(self, B):
        """ Calculate atomic data at a given B-field (Tesla). """
        self.B = B
        self.M = np.zeros(self.num_states)
        self.F = np.zeros(self.num_states)
        self.E = np.zeros(self.num_states)
        self.MI = np.zeros(self.num_states)
        self.MJ = np.zeros(self.num_states)
        self.MIax = np.zeros(self.num_states)
        self.MJax = np.zeros(self.num_states)
        self.V = np.zeros((self.num_states, self.num_states))

        I = self.I
        I_dim = np.rint(2.0 * I + 1).astype(int)

        for level, data in self.levels.items():

            J = level.J
            J_dim = np.rint(2.0 * J + 1).astype(int)

            Jp = np.kron(operators.Jp(J), np.identity(I_dim))
            Jm = np.kron(operators.Jm(J), np.identity(I_dim))
            Jz = np.kron(operators.Jz(J), np.identity(I_dim))

            Ip = np.kron(np.identity(J_dim), operators.Jp(I))
            Im = np.kron(np.identity(J_dim), operators.Jm(I))
            Iz = np.kron(np.identity(J_dim), operators.Jz(I))

            H = data.g_J * _uB * B * Jz
            if self.I != 0:
                gI = data.g_I
                IdotJ = (Iz @ Jz + (1 / 2) * (Ip @ Jm + Im @ Jp))

                H += -gI * _uN * B * Iz
                H += data.Ahfs * IdotJ

                if J > 1 / 2:
                    IdotJ2 = np.linalg.matrix_power(IdotJ, 2)
                    ident = np.identity(I_dim * J_dim)
                    H += data.Bhfs / (2 * I * J * (2 * I - 1) *
                                      (2 * J - 1)) * (3 * IdotJ2 +
                                                      (3 / 2) * IdotJ -
                                                      ident * I * (I + 1) * J *
                                                      (J + 1))

            H /= hbar  # work in angular frequency units
            lev = data.slice()
            E, V = np.linalg.eig(H)
            inds = np.argsort(E)
            V = V[:, inds]
            E = E[inds]

            # check that the eigensolver found the angular momentum eigenstates
            M = np.diag(V.conj().T @ (Iz + Jz) @ V)
            if max(abs(M - np.rint(2 * M) / 2)) > 1e-5:
                raise ValueError('Error finding angular momentum'
                                 ' eigenstates at {}T. Is the field too small'
                                 ' to lift the state degeneracy?'.format(B))

            self.E[lev] = E
            self.V[lev, lev] = V
            self.M[lev] = np.rint(2 * M) / 2
            self.MIax[lev] = np.kron(np.ones(J_dim), np.arange(-I, I + 1))
            self.MJax[lev] = np.kron(np.arange(-J, J + 1), np.ones(I_dim))
            self.MI[lev] = np.rint(2 * np.diag(V.conj().T @ (Iz) @ V)) / 2
            self.MJ[lev] = np.rint(2 * np.diag(V.conj().T @ (Jz) @ V)) / 2

            F_list = np.arange(I - J, I + J + 1)
            if data.Ahfs < 0:
                F_list = F_list[::-1]

            for M in set(self.M[lev]):
                for Fidx, idx in np.ndenumerate(np.where(M == self.M[lev])):
                    self.F[lev][idx] = F_list[M <= F_list][Fidx[1]]

        if self.M1 is not None:
            self.calc_M1()
        if self.ePole is not None:
            self.calc_Epole()
Example #32
    def __hash__(self) -> int:
        vals = tuple(v for _, v in np.ndenumerate(self._matrix))
        return hash((MatrixGate, vals))
Example #33
def checkTSData(time,
                loadP,
                cumPOutMaxPa,
                timeStepCutOff=5,
                maxTimeStepAcceptedFactor=1.1):
    """
    checkTSData does a cursory check of the time step sizes and flags artifacts that are clearly due to outages (data
    or physical) so that they are not part of any future ramp rate considerations. Note that the 'normal' time step
    size is 'guessed' using the median of the differences between time steps, based on the assumption that most time
    steps are of the median size, while the mean could be skewed by a few long outages.
    :param time: [Series] time vector, time is assumed to be in unix epochs
    :param loadP: [Series] load vector, load is assumed to be in kW
    :param cumPOutMaxPa: total sum of generating nameplate capacity, loads above this value should be tossed.
    :param timeStepCutOff: [float] maximum time step size where ramp rate assessment still is recommended
    :param maxTimeStepAcceptedFactor: [float] factor to calculate maximum time step size not flagged as out of bounds
        for subsequent assessments as a function of the median time step size.
    :return status: [integer] status flag, TRUE if ramp-rate assessment ok (avg. deltaT <= timeStepCutOff), FALSE else.
    :return ignoreIdx: [Series] list of indices with black listed ramp rates due to outages
    :return msg: [List of strings] status messages for the log file
    """
    status = False
    ignoreIdx = np.empty(0, dtype=int)
    msg = []

    # *** Check delta-T and set status accordingly ***
    # Get differences in time stamps and run simple stats
    dt = np.diff(time)
    medianDt = np.nanmedian(dt)
    meanDt = np.nanmean(dt)

    # Record for log or messaging
    msg.append('Mean difference between time stamps: ' + str(meanDt) + ' s')
    msg.append('Median difference between time stamps : ' + str(medianDt) +
               ' s')

    # Check step size and set status accordingly
    if medianDt <= timeStepCutOff and medianDt > 0:
        status = True
        msg.append(
            'Time steps sufficiently small, ramp rate assessment recommended.')
    elif medianDt > timeStepCutOff:
        status = False
        msg.append(
            'Time steps are too large, ramp rate assessment not recommended.')
    else:
        status = False
        raise TimeStampVectorError(
            medianDt,
            'Time stamp issue, causality not preserved: median dt < 0.')

    # *** Blacklist large differences between time steps ***
    for idx, dtVal in np.ndenumerate(dt):
        if dtVal > maxTimeStepAcceptedFactor * medianDt:
            ignoreIdx = np.append(ignoreIdx, idx)
            #print('Index added to igIdx: ' + str(idx))

    # *** Search for 'drops to zero' and 'rises from zero' - blacklist in ignoreIdx as outages. Also add all 0 kW
    # entries to the ignoreIdx ***

    # Previous value in time series, random value initially
    prevVal = 100

    #Step through the loadP vector and search for rises from and drops to 0 kW
    for i, val in np.ndenumerate(loadP):
        if val == 0 and prevVal != 0:
            ignoreIdx = np.append(ignoreIdx, i)
            msg.append('Drop from ' + str(prevVal) +
                       ' kW to 0 kW detected. Index: ' + str(i))
        elif prevVal == 0 and val != 0:
            msg.append('Rise from 0 kW to ' + str(val) +
                       ' kW detected. Index: ' + str(i))
            ignoreIdx = np.append(ignoreIdx, i)
        elif val == 0:
            ignoreIdx = np.append(ignoreIdx, i)
            msg.append('0 kW value found at index: ' + str(i))
        prevVal = val

    # *** Search for excessively high loads ***
    ignoreIdx = np.append(ignoreIdx, loadP[loadP > cumPOutMaxPa].index.values)

    # The ignoreIdx needs some cleaning up: remove duplicates, and sort ascending
    ignoreIdx = np.unique(ignoreIdx)
    ignoreIdx.sort(0)

    return status, ignoreIdx, msg
Example #34
def plot2(plot_data,
          cmap='Spectral_r',
          amap='RdBu_r',
          ylabels=None,
          titles=('DJF', 'MAM', 'JJA', 'SON', 'ANNUAL'),
          suptitle=None,
          cbar_label='',
          abar_label='',
          vmin=None,
          vmax=None,
          amin=None,
          amax=None,
          cbar_extend='neither',
          abar_extend='neither',
          stack_cbars=False,
          map_obj=default_map,
          anom_hatching=None):
    """"""
    ncols = 5
    nrows = 3

    # Make sure plot_data is the right size
    assert (map_obj.xi.shape == map_obj.yi.shape)
    assert (map_obj.xi.shape == plot_data.shape[2:])
    assert (plot_data.shape[:2] == (nrows, ncols))

    # set data ranges
    if vmin is None:
        vmin = plot_data[:2].min()
    if vmax is None:
        vmax = plot_data[:2].max()
    if amin is None:
        amin = plot_data[2].min()
    if amax is None:
        amax = plot_data[2].max()

    # Set colorbar norms and ticks
    assert (type(cmap) == str)
    cn = 10
    cmap = cmap_discretize(cmap, n_colors=cn)
    cnorm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
    cticks = np.linspace(vmin, vmax, num=cn + 1)
    assert (type(amap) == str)
    an = 10
    amap = cmap_discretize(amap, n_colors=an)
    anorm = mpl.colors.Normalize(vmin=amin, vmax=amax)
    aticks = np.linspace(amin, amax, num=an + 1)

    if abar_label and not cbar_label:
        cbar_label = abar_label

    # Copy colormap and data ranges to iterables
    cmaps = (cmap, cmap, amap)
    vmins = (vmin, vmin, amin)
    vmaxs = (vmax, vmax, amax)

    # Make the plot
    fig, axes = plt.subplots(nrows=nrows, ncols=ncols, figsize=(11, 5.5))
    for (i, j), ax in np.ndenumerate(axes):
        plt.sca(ax)
        sub_plot_pcolor(plot_data[i, j],
                        cmap=cmaps[i],
                        cbar=None,
                        vmin=vmins[i],
                        vmax=vmaxs[i],
                        map_obj=map_obj,
                        ax=ax)
        if i == 0 and titles is not None:
            ax.set_title(titles[j])
        if j == 0 and ylabels is not None:
            ax.set_ylabel(ylabels[i])
        if i == nrows - 1 and anom_hatching is not None:
            map_obj.m.contourf(map_obj.xi,
                               map_obj.yi,
                               100 * (anom_hatching[j]), [0, 5],
                               cmap=plt.get_cmap('gray'),
                               hatches=['....', None],
                               alpha=0,
                               ax=ax)

    # Add figure title
    if suptitle is not None:
        fig.suptitle(suptitle, fontsize=16, fontweight='roman', y=1.02)
    plt.tight_layout()

    # Color bars
    if stack_cbars:
        ax1 = fig.add_axes([0.995, 0.37, 0.015, 0.54])
        cb1 = mpl.colorbar.ColorbarBase(ax1,
                                        cmap=cmap,
                                        norm=cnorm,
                                        orientation='vertical',
                                        extend=cbar_extend,
                                        ticks=cticks)
        ax2 = fig.add_axes([0.995, 0.08, 0.015, 0.23])
        cb2 = mpl.colorbar.ColorbarBase(ax2,
                                        cmap=amap,
                                        norm=anorm,
                                        orientation='vertical',
                                        extend=abar_extend,
                                        ticks=aticks,
                                        extendfrac=0.12)
        if cbar_label:
            cb1.set_label(cbar_label, rotation=90)
            cb2.set_label(abar_label, rotation=90)

    else:
        ax1 = fig.add_axes([0.995, 0.08, 0.015, 0.83])
        cb1 = mpl.colorbar.ColorbarBase(ax1,
                                        cmap=cmap,
                                        norm=cnorm,
                                        orientation='vertical',
                                        extend=cbar_extend,
                                        ticks=cticks)
        ax2 = fig.add_axes([1.05, 0.08, 0.015, 0.83])
        cb2 = mpl.colorbar.ColorbarBase(ax2,
                                        cmap=amap,
                                        norm=anorm,
                                        orientation='vertical',
                                        extend=abar_extend,
                                        ticks=aticks)
        if cbar_label:
            cb1.set_label(cbar_label, y=0.005, labelpad=-10, rotation=0)
        if abar_label:
            cb2.set_label(abar_label, y=0.005, labelpad=-10, rotation=0)

    return fig, axes
Example #35
# (this snippet assumes: import numpy as np; from PIL import Image;
#  from shapely.geometry import Point, Polygon; and that coordinates,
#  factor, imageToCrop and imageToSave are defined earlier)
resizedCoordinates = np.multiply(coordinates, factor)
resizedCoordinates = np.around(resizedCoordinates)
resizedCoordinates[:, [0, 1]] = resizedCoordinates[:, [1, 0]]  # swap (x, y) -> (row, col)

print(resizedCoordinates)

polygon = Polygon(resizedCoordinates)

print(polygon)

im = Image.open(imageToCrop).convert('RGBA')
pixels = np.array(im)
im_copy = np.array(im)

for index, pixel in np.ndenumerate(pixels):
    # Unpack the index.
    row, col, channel = index
    # We only need to look at spatial pixel data for one of the four channels.
    if channel != 0:
        continue
    point = Point(row, col)
    if not polygon.contains(point):
        im_copy[(row, col, 0)] = 255
        im_copy[(row, col, 1)] = 255
        im_copy[(row, col, 2)] = 255
        im_copy[(row, col, 3)] = 0

cut_image = Image.fromarray(im_copy)
cut_image.save(imageToSave)
Example #36
def ndenumerate_nonan(arr):
    '''Like np.ndenumerate, but skips entries whose value is gen_isnan.'''
    return (idx_v for idx_v in np.ndenumerate(arr) if not gen_isnan(idx_v[1]))
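A usage sketch, assuming gen_isnan behaves like np.isnan for floats:

arr = np.array([[1.0, np.nan], [np.nan, 4.0]])
for idx, v in ndenumerate_nonan(arr):
    print(idx, v)  # (0, 0) 1.0 then (1, 1) 4.0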
Example #37
def gmm_concatenation(user_image, parse_array, pose_raw, transform):
    fine_width = 192
    fine_height = 256
    radius = 3

    user_image = user_image.resize((fine_width, fine_height), Image.BILINEAR)
    parse_array = cv2.resize(parse_array, (fine_width, fine_height))
    leg_left_label = 16
    leg_right_label = 17
    for i, v in np.ndenumerate(parse_array):
        if parse_array[i[0], i[1]] == leg_left_label or \
           parse_array[i[0], i[1]] == leg_right_label:
            parse_array[i[0], i[1]] = 9
    # user_image = cv2.resize(user_image, (fine_width, fine_height), interpolation=cv2.INTER_NEAREST)
    # parse_array = cv2.resize(parse_array, (fine_width, fine_height), interpolation=cv2.INTER_NEAREST)
    user_image = transform(user_image)

    # upper and lower body handled separately
    body_segment = (parse_array > 0).astype(np.float32)  # head + body
    skin_segment = (parse_array == 1).astype(np.float32) + \
                   (parse_array == 2).astype(np.float32) + \
                   (parse_array == 4).astype(np.float32) + \
                   (parse_array == 13).astype(np.float32) + \
                   (parse_array == 5).astype(np.float32) + \
                   (parse_array == 14).astype(np.float32) + \
                   (parse_array == 15).astype(np.float32)
    prod_segment = (parse_array == 9).astype(np.float32) + \
                   (parse_array == 16).astype(np.float32) + \
                   (parse_array == 17).astype(np.float32)
    # maybe resizing isn't needed?
    temp_segment = Image.fromarray((body_segment * 255).astype(np.uint8))
    """
    temp_segment = temp_segment.resize((fine_width//16, fine_height//16), Image.BILINEAR)
    temp_segment = temp_segment.resize((fine_width, fine_height), Image.BILINEAR)
    """
    body_segment = transform(temp_segment)  # [-1,1]
    skin_segment = torch.from_numpy(skin_segment)  # [0,1]
    prod_segment = torch.from_numpy(prod_segment)

    # im_c = user_image * prod_segment + (1 - prod_segment)
    im_h = user_image * skin_segment - (1 - skin_segment)

    pose_keypoints = -np.ones((18, 3), dtype=float)

    for i in range(18):
        try:
            if pose_raw['subset'][0, i] != -1:
                pose_keypoints[i, :] = pose_raw['candidate'][int(pose_raw['subset'][0, i]), :3]
            else:
                pose_keypoints[i, :] = 0

        except Exception:
            pass

    # pose_keypoint : (18, 2)
    pose_data = pose_keypoints.reshape((-1, 3))

    point_num = pose_data.shape[0]
    pose_map = torch.zeros(point_num, fine_height, fine_width)
    r = radius
    for i in range(point_num):
        one_map = Image.new('L', (fine_width, fine_height))
        one_map = transform(one_map)
        pose_map[i] = one_map[0]

    agnostic = torch.cat([body_segment, im_h, pose_map], 0)
    # expand_dims & gpu ([1, 22, 256, 192])
    agnostic = agnostic.unsqueeze(0).cuda()

    return agnostic
Example #38
# we create 40 separable points
import numpy as np
import pylab as pl
from sklearn.linear_model import SGDClassifier

np.random.seed(0)
X = np.r_[np.random.randn(20, 2) - [2,2], np.random.randn(20, 2) + [2, 2]]
Y = [0]*20 + [1]*20

# fit the model
clf = SGDClassifier(loss="hinge", alpha=0.01, max_iter=50, fit_intercept=True)
clf.fit(X, Y)

# plot the line, the points, and the nearest vectors to the plane
xx = np.linspace(-5, 5, 10)
yy = np.linspace(-5, 5, 10)
X1, X2 = np.meshgrid(xx, yy)
Z = np.empty(X1.shape)
for (i,j), val in np.ndenumerate(X1):
    x1 = val
    x2 = X2[i,j]
    p = clf.decision_function([[x1, x2]])
    Z[i,j] = p[0]
levels = [-1.0, 0.0, 1.0]
linestyles = ['dashed','solid', 'dashed']
colors = 'k'
pl.set_cmap(pl.cm.Paired)
pl.contour(X1, X2, Z, levels, colors=colors, linestyles=linestyles)
pl.scatter(X[:,0], X[:,1], c=Y)

pl.axis('tight')
pl.show()

Example #39
import numpy as np
import matplotlib.pyplot as plt


def squiggle_xy(a, b, c, d, i=np.arange(0.0, 2 * np.pi, 0.05)):
    return np.sin(i * a) * np.cos(i * b), np.sin(i * c) * np.cos(i * d)


fig11 = plt.figure(figsize=(8, 8), constrained_layout=False)
outer_grid = fig11.add_gridspec(4, 4, wspace=0, hspace=0)

for a in range(4):
    for b in range(4):
        # gridspec inside gridspec
        inner_grid = outer_grid[a, b].subgridspec(3, 3, wspace=0, hspace=0)
        axs = inner_grid.subplots()  # Create all subplots for the inner grid.
        for (c, d), ax in np.ndenumerate(axs):
            ax.plot(*squiggle_xy(a + 1, b + 1, c + 1, d + 1))
            ax.set(xticks=[], yticks=[])

# show only the outside spines
for ax in fig11.get_axes():
    ax.spines['top'].set_visible(ax.is_first_row())
    ax.spines['bottom'].set_visible(ax.is_last_row())
    ax.spines['left'].set_visible(ax.is_first_col())
    ax.spines['right'].set_visible(ax.is_last_col())

plt.show()

Example #40
def array_ndenumerate_sum(arr):
    s = 0
    for (i, j), v in np.ndenumerate(arr):
        s = s + (i + 1) * (j + 1) * v
    return s
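A quick check (assuming numpy is imported as np): for [[1, 2], [3, 4]] the weighted sum is 1*1*1 + 1*2*2 + 2*1*3 + 2*2*4 = 27.

print(array_ndenumerate_sum(np.arange(1, 5).reshape(2, 2)))  # 27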
Example #41
def array_ndenumerate_premature_free(size):
    x = np.arange(size)
    res = np.zeros_like(x, dtype=np.intp)
    for i, v in np.ndenumerate(x):
        res[i] = v
    return res
Example #42
"""
Created on Wed Jul  3 23:57:22 2019

@author: Ramiro
"""

import numpy as np

img = np.arange(1, 10)
img = np.reshape(img, (3,3))

print(img)

margin = 4
newImg = np.zeros((img.shape[0]+2*margin, img.shape[1]+2*margin))

for nImgCoord, _ in np.ndenumerate(newImg):
    # Copy img into the center of newImg
    if (nImgCoord[0] < newImg.shape[0]-2*margin and nImgCoord[1] < newImg.shape[1]-2*margin):
        newImg[(nImgCoord[0]+margin, nImgCoord[1]+margin)] = img[nImgCoord]

for a in range(0, margin):
    newImg[a,:] = newImg[margin,:]
    newImg[:,a] = newImg[:,margin]
    newImg[newImg.shape[0]-(margin-a), :] = newImg[newImg.shape[0]-margin-1, :]
    newImg[:, newImg.shape[1]-(margin-a)] = newImg[:, newImg.shape[1]-margin-1]

print(newImg)

#Get original img
Example #43
def _NumpyAdd(ref, indices, updates):
    # Since numpy advanced assignment does not support repeated indices,
    # we run a simple loop to perform scatter_add.
    for i, indx in np.ndenumerate(indices):
        ref[indx] += updates[i]
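A quick check that repeated indices accumulate (which plain fancy-index assignment would not do):

ref = np.zeros(4)
_NumpyAdd(ref, np.array([0, 1, 1, 3]), np.array([1.0, 2.0, 3.0, 4.0]))
print(ref)  # [1. 5. 0. 4.]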
Example #44
def _NumpyAddScalar(ref, indices, update):
    for _, indx in np.ndenumerate(indices):
        ref[indx] += update
Example #45
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np

df = pd.read_csv('cars.csv')
df = df[df['Класс автомобиля'] != '4']
df = df[df['Класс автомобиля'] != 'J']
df = df[(df['Привод'] == 'задний') | (df['Привод'] == 'передний') |
        (df['Привод'] == 'полный')]
df = df.drop('Модель', axis=1)
df = df.drop('Цвет', axis=1)
df = df.drop('Страна марки', axis=1)

df_dm = pd.get_dummies(
    df, drop_first=True)  # replace the categorical text columns with numeric dummies

corr = df_dm.corr()
fig, ax = plt.subplots()
ax.matshow(corr, cmap='seismic')

for (i, j), z in np.ndenumerate(corr):
    ax.text(j, i, '{:0.2f}'.format(z), ha='center', va='center')

plt.xticks(range(len(corr.columns)), corr.columns, rotation=90)
plt.yticks(range(len(corr.columns)), corr.columns)
plt.show()
Example #46
0
def calcKmeans(files, numClusters=-1, description=""):

    trainList = Utils.readFile(files[0])
    testList = Utils.readFile(files[1])

    trainingInstancesList = []
    clusterClasses = []

    X_testing = []
    Y_testing = []
    '''
    for line in trainList:
        if line[0] == '@':
            if line.lower().startswith("@attribute class"):
                monClasses = line.split(" ")[2].split("{")[1].split("}")[0].split(",")
        else:
            #instancesList.append(float(line.split(",")[:-1]))
            trainingInstancesList.append([float(i) for i in line.split(",")[:-1]])
            #y.append(line.split(",")[-1])
    '''
    for line in trainList:
        if line[0] != '@':
            trainingInstancesList.append(
                [float(i) for i in line.split(",")[:-1]])
            currY = line.split(",")[-1]
            if currY not in clusterClasses:
                clusterClasses.append(currY)

    if numClusters == -1:
        numClusters = len(clusterClasses)
    '''
    for line in testList:
        if line[0] != '@':
             currY = line.split(",")[-1]
             if monClasses.__contains__(currY):  # add all testing monitored instances
                 X_testing.append([float(i) for i in line.split(",")[:-1]])
                 Y_testing.append(line.split(",")[-1])
             else: # nonMonitored instance
                 if not unmonClasses.__contains__(currY): # add one instance only from unmonitored classes
                     unmonClasses.append(currY)
                     X_testing.append([float(i) for i in line.split(",")[:-1]])
                     Y_testing.append(currY)
    '''
    for line in testList:
        if line[0] != '@':
            X_testing.append([float(i) for i in line.split(",")[:-1]])
            Y_testing.append(line.split(",")[-1])

    X = np.array(trainingInstancesList)

    #X = (X - np.mean(X, 0)) / np.std(X, 0)  # scale data before PCA

    km = KMeans(n_clusters=numClusters,
                init='k-means++',
                max_iter=100,
                n_init=1,
                verbose=0)

    km.fit(X)  # building the clusters from the monitored instances only

    # get radius of each cluster
    radius = [0] * len(
        km.cluster_centers_)  # initialize the radius list to zeros
    for clusIndx in range(len(km.cluster_centers_)):
        # indexes of points in a specific cluster
        pointsIndex = [
            x[0] for x, value in np.ndenumerate(km.labels_)
            if value == clusIndx
        ]
        maxDist = -1
        for i in pointsIndex:
            # Euclidean distance
            currDist = np.linalg.norm(X[i] - km.cluster_centers_[clusIndx])
            if currDist > maxDist:
                radius[clusIndx] = currDist
                maxDist = currDist

    tp = 0
    fp = 0
    tn = 0
    fn = 0
    inside = False

    for i in range(len(X_testing)):
        inside = False
        for clusIndx in range(len(km.cluster_centers_)):
            dist = np.linalg.norm(X_testing[i] - km.cluster_centers_[clusIndx])
            if dist <= radius[clusIndx]:  # optionally shrink radius, e.g. /1.5 or /2.0
                inside = True
        '''
        if inside:
           if clusterClasses.__contains__(Y_testing[i]):
               tp += 1
           else:
               fp += 1
        else:
           if clusterClasses.__contains__(Y_testing[i]):
               fn += 1
           else:

               tn += 1
        '''
        if inside:
            if Y_testing[i] in clusterClasses:
                tn += 1
            else:
                fn += 1
        else:
            if Y_testing[i] in clusterClasses:
                fp += 1
            else:
                tp += 1

    print "\n"
    print "radii: "
    print radius
    print "NumClusters: " + str(numClusters)
    print "dataset: " + str(files)

    print "tp = " + str(tp)
    print "tn = " + str(tn)
    print "fp = " + str(fp)
    print "fn = " + str(fn)

    tpr = str("%.2f" % (float(tp) / float(tp + fn)))
    fpr = str("%.2f" % (float(fp) / float(fp + tn)))
    Acc = str("%.2f" % (float(tp + tn) / float(tp + tn + fp + fn)))
    F2 = str("%.2f" % (float(5 * tp) / float((5 * tp) + (4 * fn) +
                                             (fp))))  # beta = 2
    print "tpr = " + tpr
    print "fpr = " + fpr
    print "Acc = " + Acc
    print "F2  = " + F2

    output = []
    output.append(tpr)
    output.append(fpr)
    output.append(Acc)
    output.append(F2)
    output.append(str(tp))
    output.append(str(tn))
    output.append(str(fp))
    output.append(str(fn))
    output.append(description)
    output.append(numClusters)
    output.append(config.RUN_ID)

    summary = '\t, '.join(map(str, output))

    outputFilename = Utils.getOutputFileName(files[0])

    with open(outputFilename, 'a') as f:
        f.write("\n" + summary)

    print('')
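For reference, the per-cluster radius scan above can be written without the inner ndenumerate loop; a hedged NumPy sketch, assuming `X` and a fitted `km` as in the function:

import numpy as np

# distance of every training point to its own cluster centre
dists = np.linalg.norm(X - km.cluster_centers_[km.labels_], axis=1)
radius = [dists[km.labels_ == k].max() if (km.labels_ == k).any() else 0
          for k in range(len(km.cluster_centers_))]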
Example #47
0
def convolve_to_grid(kernel_func, support,
                     image_size,
                     uv, vis,
                     oversampling=None,
                     raise_bounds=True):
    """
    Grid visibilities, calculating the exact kernel distribution for each.

    If ``oversampling=None`` then exact gridding is used, i.e. the kernel is
    recalculated for each visibility, with precise sub-pixel offset according to
    that visibility's UV co-ordinates. If an integer value is supplied, then
    instead of recalculating the kernel for each sub-pixel location, we
    pre-generate an oversampled kernel ahead of time - so e.g. for an
    oversampling of 5, the kernel is pre-generated at 0.2 pixel-width offsets.
    We then pick the pre-generated kernel corresponding to the sub-pixel offset
    nearest to that of the visibility.

    Kernel pre-generation results in improved performance, particularly with
    large numbers of visibilities and complex kernel functions, at the cost of
    introducing minor aliasing effects due to the 'step-like' nature of the
    oversampled kernel. This in turn can be minimised (at the cost of longer
    start-up times and larger memory usage) by pre-generating kernels with a
    larger oversampling ratio, to give finer interpolation.


    Args:
        kernel_func (callable): Callable object
            (e.g. :class:`.conv_funcs.Pillbox`)
            that returns a convolution
            coefficient for a given distance in pixel-widths.
        support (int): Defines the 'radius' of the bounding box within
            which convolution takes place. `Box width in pixels = 2*support+1`.
            (The central pixel is the one nearest to the UV co-ordinates.)
            (This is sometimes known as the 'half-support')
        image_size (int): Width of the image in pixels. NB we assume
            the pixel `[image_size//2,image_size//2]` corresponds to the origin
            in UV-space.
        uv (numpy.ndarray): UV-coordinates of visibilities.
            2d array of `float_`, shape: `(n_vis, 2)`.
            assumed ordering is u-then-v, i.e. `u, v = uv[idx]`
        vis (numpy.ndarray): Complex visibilities.
            1d array, shape: `(n_vis,)`.
        oversampling (int): (Or None). Controls kernel-generation, see function
            description for details.
        raise_bounds (bool): Raise an exception if any of the UV
            samples lie outside (or too close to the edge) of the grid.

    Returns:
        tuple: (vis_grid, sampling_grid)
            Tuple of ndarrays representing the gridded visibilities and the
            sampling weights.
            These are 2d arrays of same dtype as **vis**,
            shape ``(image_size, image_size)``.
            Note numpy style index-order, i.e. access like ``vis_grid[v,u]``.

    """
    assert len(uv) == len(vis)
    # Calculate nearest integer pixel co-ords ('rounded positions')
    uv_rounded = np.around(uv)
    # Calculate sub-pixel vector from rounded-to-precise positions
    # ('fractional coords'):
    uv_frac = uv - uv_rounded
    uv_rounded_int = uv_rounded.astype(int)
    # Now get the corresponding grid-pixel indices by adding the origin offset
    kernel_centre_on_grid = uv_rounded_int + (image_size // 2, image_size // 2)

    # Check if any of our kernel placements will overlap / lie outside the
    # grid edge.
    good_vis_idx = _bounds_check_kernel_centre_locations(
        uv, kernel_centre_on_grid,
        support=support, image_size=image_size,
        raise_if_bad=raise_bounds)
    # Keep only the in-bounds visibilities, so that indices from the
    # enumeration below line up with all the per-visibility arrays:
    vis = vis[good_vis_idx]
    uv_frac = uv_frac[good_vis_idx]
    kernel_centre_on_grid = kernel_centre_on_grid[good_vis_idx]

    vis_grid = np.zeros((image_size, image_size), dtype=vis.dtype)
    # At the same time as we grid the visibilities, we track the grid-sampling
    # weights:
    sampling_grid = np.zeros_like(vis_grid)
    # Use either `1.0` or `1.0 +0j` depending on input dtype:
    typed_one = np.array(1, dtype=vis.dtype)

    if oversampling is not None:
        kernel_cache = populate_kernel_cache(
            kernel_func, support, oversampling)
        oversampled_offset = calculate_oversampled_kernel_indices(
            uv_frac, oversampling)

    for idx, vis_value in np.ndenumerate(vis):
        gc_x, gc_y = kernel_centre_on_grid[idx]
        # Generate a convolution kernel with the precise offset required:
        xrange = slice(gc_x - support, gc_x + support + 1)
        yrange = slice(gc_y - support, gc_y + support + 1)
        if oversampling is None:
            kernel = Kernel(kernel_func=kernel_func, support=support,
                            offset=uv_frac[idx])
            normed_kernel_array = kernel.array
        else:
            normed_kernel_array = kernel_cache[oversampled_offset[idx]].array

        vis_grid[yrange, xrange] += vis_value * normed_kernel_array
        sampling_grid[yrange, xrange] += typed_one * normed_kernel_array
    return vis_grid, sampling_grid
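A hedged usage sketch; the `Pillbox` kernel class and its `half_base_width` argument are assumptions about the surrounding module (see the docstring's reference to `.conv_funcs.Pillbox`):

import numpy as np
# from .conv_funcs import Pillbox  # assumed import path

uv = np.array([[1.0, 0.0]])
vis = np.array([1.0 + 0j])
vis_grid, sampling_grid = convolve_to_grid(
    Pillbox(half_base_width=1.1), support=2, image_size=8,
    uv=uv, vis=vis, oversampling=None)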
Example #48
0
def calcKmeansCvxHullDelaunay_Mixed_KNN(files,
                                        numClusters=-1,
                                        description="",
                                        threshold=3):

    trainList = Utils.readFile(files[0])
    testList = Utils.readFile(files[1])

    trainingInstancesList = []
    clusterClasses = []

    X_testing = []
    Y_testing = []

    for line in trainList:
        if line[0] != '@':
            trainingInstancesList.append(
                [float(i) for i in line.split(",")[:-1]])
            currY = line.split(",")[-1]
            if currY not in clusterClasses:
                clusterClasses.append(currY)

    if numClusters == -1:
        numClusters = len(clusterClasses)

    for line in testList:
        if line[0] != '@':
            X_testing.append([float(i) for i in line.split(",")[:-1]])
            Y_testing.append(line.split(",")[-1])

    X = np.array(trainingInstancesList)

    # preprocessing, normalizing
    #X = (X - np.mean(X, 0)) / np.std(X, 0) # scale data before PCA
    #X_testing = (X_testing - np.mean(X_testing, 0)) / np.std(X_testing, 0) # scale data before PCA

    km = KMeans(n_clusters=numClusters,
                init='k-means++',
                max_iter=100,
                n_init=1,
                verbose=0)

    km.fit(X)  # building the clusters from the monitored instances only

    # get radius of each cluster
    radius = [0] * len(
        km.cluster_centers_)  # initialize the radius list to zeros

    for clusIndx in range(len(km.cluster_centers_)):
        # indexes of points in a specific cluster
        pointsIndex = [
            x[0] for x, value in np.ndenumerate(km.labels_)
            if value == clusIndx
        ]
        maxDist = -1
        for i in pointsIndex:
            # Euclidean distance
            currDist = np.linalg.norm(X[i] - km.cluster_centers_[clusIndx])
            if currDist > maxDist:
                radius[clusIndx] = currDist
                maxDist = currDist

    hull = []

    # indexes of clusters with fewer than 12 points (the hull step below needs 12)
    fewPointsClusters = []

    #http://docs.scipy.org/doc/scipy-0.16.0/reference/generated/scipy.spatial.Delaunay.html
    for clusIndx in range(len(km.cluster_centers_)):
        # indexes of points in a specific cluster
        pointsIndex = [
            x[0] for x, value in np.ndenumerate(km.labels_)
            if value == clusIndx
        ]
        clusterPoints = list(
            X[pointsIndex]
        )  # Access multiple elements of list (here X) knowing their index
        if len(clusterPoints) >= 12:
            try:
                hull.append(Delaunay(clusterPoints))  # ,qhull_options="C-0"
            except Exception:
                print("Convex Hull ERROR")
                description += " Convex Hull ERROR"
                print(" Cluster # " + str(clusIndx) + " -- Convex Hull ERROR."
                      " Kmeans cluster is to be checked for the participating points.")
                fewPointsClusters.append(clusIndx)
        else:
            print(" Cluster # " + str(clusIndx) + " doesn't have enough points"
                  " to build a hull. Kmeans cluster is to be checked for the participating points.")
            fewPointsClusters.append(clusIndx)

    tp = 0
    fp = 0
    tn = 0
    fn = 0
    inside = False

    #looping over mixed (hulls + not-enough-point clusters)
    for i in range(len(X_testing)):

        inside = False

        # looping over hulls
        for hullIndx in range(len(hull)):
            if not inside and Utils.in_hull(
                    X_testing[i],
                    hull[hullIndx]):  # returns true if point is inside hull
                inside = True

            # KNN
            if not inside and Utils.is_knn_to_hull_border_points(
                    X, X_testing[i], hull[hullIndx], threshold):
                inside = True

        # looping over not-enough-point clusters
        if not inside:
            for clusIndx in range(len(km.cluster_centers_)):
                if clusIndx in fewPointsClusters:
                    dist = np.linalg.norm(X_testing[i] -
                                          km.cluster_centers_[clusIndx])
                    if dist <= radius[clusIndx]:  # optionally shrink radius, e.g. /1.5 or /2.0
                        inside = True

        if inside:
            if Y_testing[i] in clusterClasses:
                tn += 1
            else:
                fn += 1
        else:
            if Y_testing[i] in clusterClasses:
                fp += 1
            else:
                tp += 1

    print "\n"
    print "radii: "
    print radius
    print "NumClusters: " + str(numClusters)
    print "dataset: " + str(files)

    print "tp = " + str(tp)
    print "tn = " + str(tn)
    print "fp = " + str(fp)
    print "fn = " + str(fn)

    tpr = str("%.2f" % (float(tp) / float(tp + fn)))
    fpr = str("%.2f" % (float(fp) / float(fp + tn)))
    Acc = str("%.2f" % (float(tp + tn) / float(tp + tn + fp + fn)))
    F2 = str("%.2f" % (float(5 * tp) / float((5 * tp) + (4 * fn) +
                                             (fp))))  # beta = 2
    print "tpr = " + tpr
    print "fpr = " + fpr
    print "Acc = " + Acc
    print "F2  = " + F2

    output = []
    output.append(tpr)
    output.append(fpr)
    output.append(Acc)
    output.append(F2)
    output.append(str(tp))
    output.append(str(tn))
    output.append(str(fp))
    output.append(str(fn))
    output.append(description)
    output.append(numClusters)
    output.append(config.RUN_ID)

    summary = '\t, '.join(map(str, output))

    outputFilename = Utils.getOutputFileName(files[0])

    with open(outputFilename, 'a') as f:
        f.write("\n" + summary)

    print('')
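`Utils.in_hull` is not shown in this example; a common way to implement it with the `Delaunay` objects built above (an assumption, not necessarily the original helper) is:

from scipy.spatial import Delaunay
import numpy as np

def in_hull(point, hull):
    # Delaunay.find_simplex returns -1 for points outside the triangulation
    return hull.find_simplex(np.asarray(point)) >= 0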


###########################################################################
Example #49
0
    months_sum = 0
    months_delta = 0
    for row in reader:
        counter += 1
        months_sum += float(row['Adj Close'])
        months_delta += 100 * (
            (float(row['Close']) - float(row['Open'])) / float(row['Open']))
        if counter % 12 == 0:
            years = np.append(years, months_sum / 12)
            years2 = np.append(years2, months_delta / 12)
            months_sum = months_delta = counter = 0

    pres_sum = 0
    current_pres = 0
    y = 0
    for i, x in np.ndenumerate(years2):
        if ((1928 + i[0] in dem)
                and (1928 + i[0] < presidents[p[current_pres]])):
            pres_sum += x
            y += 1
        elif ((1928 + i[0] in dem)
              and (1928 + i[0] == presidents[p[current_pres]])):
            pres_sum += x
            y += 1
            dem_years2 = np.append(dem_years2, pres_sum / y)
            pres_sum = y = 0
            current_pres += 1
        elif ((1928 + i[0] in rep)
              and (1928 + i[0] < presidents[p[current_pres]])):
            pres_sum += x
            y += 1
Example #50
0
def calcKmeansCvxHullDelaunay(files, numClusters=-1, description=""):

    trainList = Utils.readFile(files[0])
    testList = Utils.readFile(files[1])

    trainingInstancesList = []
    clusterClasses = []

    X_testing = []
    Y_testing = []

    for line in trainList:
        if line[0] != '@':
            trainingInstancesList.append(
                [float(i) for i in line.split(",")[:-1]])
            currY = line.split(",")[-1]
            if currY not in clusterClasses:
                clusterClasses.append(currY)

    if numClusters == -1:
        numClusters = len(clusterClasses)

    for line in testList:
        if line[0] != '@':
            X_testing.append([float(i) for i in line.split(",")[:-1]])
            Y_testing.append(line.split(",")[-1])

    X = np.array(trainingInstancesList)

    # preprocessing, normalizing
    #X = (X - np.mean(X, 0)) / np.std(X, 0) # scale data before CPA
    #X_testing = (X_testing - np.mean(X_testing, 0)) / np.std(X_testing, 0) # scale data before CPA

    km = KMeans(n_clusters=numClusters,
                init='k-means++',
                max_iter=100,
                n_init=1,
                verbose=0)

    km.fit(X)  # building the clusters from the monitored instances only

    # get radius of each cluster
    radius = [0] * len(
        km.cluster_centers_)  # initialize the radius list to zeros

    hull = []

    for clusIndx in range(len(km.cluster_centers_)):
        # indexes of points in a specific cluster
        pointsIndex = [
            x[0] for x, value in np.ndenumerate(km.labels_)
            if value == clusIndx
        ]
        clusterPoints = list(
            X[pointsIndex]
        )  # Access multiple elements of list (here X) knowing their index
        # hull needs at least 12 points
        if len(clusterPoints) > 12:
            hull.append(Delaunay(clusterPoints))

    print('#hulls: ' + str(len(hull)))

    tp = 0
    fp = 0
    tn = 0
    fn = 0
    inside = False

    for i in range(len(X_testing)):
        inside = False
        for hullIndx in range(len(hull)):
            if Utils.in_hull(
                    X_testing[i],
                    hull[hullIndx]):  # returns true if point is inside hull
                inside = True

        if inside:
            if Y_testing[i] in clusterClasses:
                tn += 1
            else:
                fn += 1
        else:
            if Y_testing[i] in clusterClasses:
                fp += 1
            else:
                tp += 1

    print "\n"
    print "radii: "
    print radius
    print "NumClusters: " + str(numClusters)
    print "dataset: " + str(files)

    print "tp = " + str(tp)
    print "tn = " + str(tn)
    print "fp = " + str(fp)
    print "fn = " + str(fn)

    tpr = str("%.2f" % (float(tp) / float(tp + fn)))
    fpr = str("%.2f" % (float(fp) / float(fp + tn)))
    Acc = str("%.2f" % (float(tp + tn) / float(tp + tn + fp + fn)))
    F2 = str("%.2f" % (float(5 * tp) / float((5 * tp) + (4 * fn) +
                                             (fp))))  # beta = 2
    print "tpr = " + tpr
    print "fpr = " + fpr
    print "Acc = " + Acc
    print "F2  = " + F2

    output = []
    output.append(tpr)
    output.append(fpr)
    output.append(Acc)
    output.append(F2)
    output.append(str(tp))
    output.append(str(tn))
    output.append(str(fp))
    output.append(str(fn))
    output.append(description)
    output.append(numClusters)

    summary = '\t, '.join(map(str, output))

    outputFilename = Utils.getOutputFileName(files[0])

    with open(outputFilename, 'a') as f:
        f.write("\n" + summary)

    print('')
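Note that scipy's `Delaunay` itself only needs ndim + 1 affinely independent points; the threshold of 12 above is this code's own heuristic. A minimal sketch:

import numpy as np
from scipy.spatial import Delaunay

pts = np.random.rand(12, 2)                 # 12 points in 2-D
tri = Delaunay(pts)
print(tri.find_simplex(pts.mean(axis=0)))   # >= 0: the centroid lies inside the hull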
Example #51
0
    def create_report(self, target_dir: str = None):
        """
        Creates a MaxQuantReport.pdf, which can be used as :ref:`quality control <plotters>`.

        | For overview of plots see :ref:`analysis options <plotters>`
        | For exemplary plot see :ref:`gallery <mqreport>`

        Parameters
        ----------
        target_dir
            directory where report will be written

        """
        def bar_from_counts(ax, counts, compare_counts=None, title=None, relative=False, yscale=None,
                            ylabel=None, bar_kwargs=None):
            if ylabel is not None:
                ax.set_ylabel(ylabel)
            elif relative:
                ax.set_ylabel("Relative counts")
                counts = counts / counts.sum()
            else:
                ax.set_ylabel("Counts")
            if title is not None:
                ax.set_title(title)
            if bar_kwargs is None:
                bar_kwargs = {}
            bar_container = ax.bar([x for x in range(len(counts))], counts.values, **bar_kwargs)

            if compare_counts is not None:
                if relative:
                    compare_counts = compare_counts / compare_counts.sum()
                for bar, height in zip(bar_container, compare_counts):
                    bar_x = bar.get_x()
                    bar_w = bar.get_width()
                    ax.plot((bar_x, bar_x, bar_x + bar_w, bar_x + bar_w),
                            (0, height, height, 0), color="black")

            ax.set_xticks([i for i in range(len(counts))])
            ax.set_xticklabels(counts.index)
            if yscale is not None:
                if isinstance(yscale, str):
                    ax.set_yscale(yscale)
                elif isinstance(yscale, dict):
                    ax.set_yscale(**yscale)
            return bar_container

        def hist2d_with_hist(xdata, ydata, title=None, xlabel=None, ylabel=None, max_x=145):
            fig = plt.figure(figsize=(14, 7))
            if title is not None:
                fig.suptitle(title)
            spec = fig.add_gridspec(ncols=2, nrows=2, width_ratios=[2, 1], height_ratios=[1, 2])

            ax2dhist = fig.add_subplot(spec[1, 0])
            ax1dhistvert = fig.add_subplot(spec[0, 0])
            ax1dhisthor = fig.add_subplot(spec[1, 1])

            h, xedges, yedges, image = ax2dhist.hist2d(xdata, ydata,
                                                       bins=100, range=((0, max_x), (0, 2)))  # TODO find bins
            ax2dhist.set_xlabel(xlabel)
            ax2dhist.set_ylabel(ylabel)

            ax1dhistvert.hist(xdata, bins=xedges)
            ax1dhistvert.set_ylabel("Peptide Counts")

            ax1dhisthor.hist(ydata, bins=yedges, orientation="horizontal")
            ax1dhisthor.set_xlabel("Peptide Counts")

            ax1dhistvert.set_xlim(*ax2dhist.get_xlim())
            ax1dhisthor.set_ylim(*ax2dhist.get_ylim())

            fig.tight_layout(rect=[0, 0.03, 1, 0.95])

            return fig, (ax2dhist, ax1dhistvert, ax1dhisthor)

        def get_plot_data_from_hist(data, density=False, n_bins=16):
            d_min, d_max = np.nanmin(data.values), np.nanmax(data.values)
            bins = np.linspace(d_min, d_max, n_bins)

            if data.shape[0] * data.shape[1] > 1e7:
                y = np.zeros(bins.shape[0] - 1)
                for i in range(0, data.shape[0] // 5000 + 1):
                    for j in range(0, data.shape[1] // 50 + 1):
                        y_del, x = np.histogram(data.iloc[
                                                i * 5000: (i + 1) * 5000, j * 50: (j + 1) * 50].values.flatten(),
                                                bins=bins)
                        y += y_del
                if density:
                    db = np.array(np.diff(bins), float)
                    y = y / db / y.sum()

            else:
                y, x = np.histogram(data.values.flatten(), bins=bins, density=density)
            y = np.concatenate(([0], np.repeat(y, 2), [0]))
            x = np.repeat(x, 2)
            return x, y, bins

        import matplotlib.cm as cm
        cmap = cm.get_cmap("jet")

        prefix = "Intensity "
        group_iter = None
        plot_colors = {}

        self.logger.info("Reading files")

        try:
            self.logger.debug("Reading parameters")
            parameters = self.required_reader_data['parameters']
        except KeyError:
            self.logger.warning("Did not find parameters")
            parameters = None
        try:
            self.logger.debug("Reading summary")
            summary = self.required_reader_data['summary']
        except KeyError:
            self.logger.warning("Did not find summary")
            summary = None
        try:
            self.logger.debug("Reading peptides")
            peptides = self.required_reader_data["peptides"]
            peptides_prefix_columns = [x for x in peptides.columns if x.startswith(prefix)]
            peptides_intensities = peptides[peptides_prefix_columns].replace({0: np.nan})
            peptides_intensities.columns = pd.MultiIndex.from_arrays(
                [["Grouped Intensity"] * len(peptides_intensities.columns), peptides_intensities.columns],
                names=("agg", "sample")
            )

            last_aa = pd.concat([peptides["Last amino acid"].rename(col)[peptides[col].notna()]
                                 for col in peptides.columns if col.startswith("Experiment")], axis=1)
            last_aa_counts = last_aa.apply(pd.Series.value_counts)
            last_aa_counts = last_aa_counts.fillna(0).rename(lambda x: x.replace("Experiment ", ""), axis=1)

            before_aa = pd.concat([peptides["Amino acid before"].rename(col)[peptides[col].notna()]
                                   for col in peptides.columns if col.startswith("Experiment")], axis=1)
            before_aa_counts = before_aa.apply(pd.Series.value_counts)
            before_aa_counts = before_aa_counts.fillna(0).rename(lambda x: x.replace("Experiment ", ""), axis=1)
        except KeyError:
            self.logger.warning("Did not find peptides")
            peptides = None
        try:
            self.logger.debug("Reading proteinGroups")
            prot_groups = self.required_reader_data["proteinGroups"]
            contaminants = self.required_reader_data["contaminants"]
            prot_groups_prefix_columns = [x for x in prot_groups.columns if x.startswith(prefix)]
            prot_groups_colors = [x.replace(prefix, "") for x in prot_groups_prefix_columns]
            plot_colors.update({col: cmap(i/len(prot_groups_colors)) for i, col in enumerate(prot_groups_colors)})
            prot_groups_intensities = prot_groups[prot_groups_prefix_columns].replace({0: np.nan})
            prot_groups_intensities.columns = pd.MultiIndex.from_arrays(
                [["Grouped Intensity"] * len(prot_groups_intensities.columns), prot_groups_intensities.columns],
                names=("agg", "sample")
            )
            has_lfq = str(any([x.startswith("LFQ") for x in prot_groups.columns]))
            has_ibaq = str(any([x.startswith("iBAQ") for x in prot_groups.columns]))
        except KeyError:
            self.logger.warning("Did not find proteinGroups")
            prot_groups = None
            contaminants = None
            has_lfq = "File is missing"
            has_ibaq = "File is missing"
        try:
            self.logger.debug("Reading evidence")
            evidence = self.required_reader_data["evidence"]
            mz = evidence.pivot(index=None, columns="Experiment", values="m/z")
            plot_colors.update({col: cmap(i/len(mz.columns)) for i, col in enumerate(mz.columns)})
            charge = evidence.pivot(index=None, columns="Experiment", values="Charge")
            charge = charge.apply(pd.Series.value_counts)
            charge.index = charge.index.astype(int)
            missed_cleavages = evidence.pivot(index=None, columns="Experiment", values="Missed cleavages")
            missed_cleavages = missed_cleavages.apply(pd.Series.value_counts)
            missed_cleavages.index = missed_cleavages.index.astype(int)
            retention_length = evidence.pivot(index=None, columns="Experiment", values="Retention length")
            retention_time = evidence.pivot(index=None, columns="Experiment", values="Retention time")
        except KeyError:
            self.logger.warning("Did not find evidence")
            evidence = None
        try:
            self.logger.debug("Reading msScans")
            ms_scans = self.required_reader_data["msScans"]
            ms_scan_groups = ms_scans.groupby("Raw file")
            group_iter = ms_scan_groups.groups
        except KeyError:
            self.logger.warning("Did not find msScans")
            ms_scans = None
        try:
            self.logger.debug("Reading msmsScans")
            msms_scans = self.required_reader_data["msmsScans"]
            msms_scan_groups = msms_scans.groupby("Raw file")
            group_iter = msms_scan_groups.groups
        except KeyError:
            self.logger.warning("Did not find msmsScans")
            msms_scans = None

        self.logger.info("Creating plots")
        target_dir = target_dir if target_dir is not None else self.start_dir
        with PdfPages(os.path.join(target_dir, "MaxQuantReport.pdf")) as pdf:
            self.logger.debug("Creating start page")
            fig = plt.figure(figsize=(14, 7))
            text_conf = dict(transform=fig.transFigure, size=24, ha="center")
            fig.text(0.5, 0.92, "MaxQuant report", **text_conf)
            text_conf.update({"size": 20})
            fig.text(0.5, 0.85, "parameter.txt info", **text_conf)
            text_conf.pop("size")
            if parameters is not None:
                fig.text(0.5, 0.8, f"Version: {parameters['Version']}, "
                         f"run at: {parameters['Date of writing']}", **text_conf)
                fig.text(0.5, 0.75, f"Fasta File: {os.path.split(parameters['Fasta file'])[1]}, "
                         f"Match between runs: {parameters['Match between runs']}", **text_conf)
                fig.text(0.5, 0.7, "Min. to Max. peptide length for unspecific search: "
                         f"{parameters['Min. peptide length for unspecific search']} to {parameters['Max. peptide length for unspecific search']}", **text_conf)
            else:
                fig.text(0.5, 0.8, "Missing", **text_conf)

            text_conf.update({"size": 20})
            fig.text(0.5, 0.65, "summary.txt info", **text_conf)
            text_conf.pop("size")
            if summary is not None:
                fig.text(0.5, 0.6, f"Used Enzyme: {summary.loc[1, 'Enzyme']}", **text_conf)
                fig.text(0.5, 0.55, f"Variable modifications: {summary.loc[1, 'Variable modifications']}", **text_conf)
                fig.text(0.5, 0.5, f"Mass Standard Deviation: mean {summary.loc[:, 'Mass Standard Deviation [ppm]'].mean():.5f} ppm, max {summary.loc[:, 'Mass Standard Deviation [ppm]'].max():.5f} ppm", **text_conf)
            else:
                fig.text(0.5, 0.6, "Missing", **text_conf)

            if prot_groups is not None:
                fig.text(0.5, 0.45, f"Identified proteins (without contaminants): {prot_groups.shape[0]}", **text_conf)
            if peptides is not None:
                fig.text(0.5, 0.4, f"Identified peptides (without contaminants): {peptides.shape[0]}", **text_conf)
            fig.text(0.5, 0.35, f"Has LFQ intensities: {has_lfq}", **text_conf)
            fig.text(0.5, 0.3, f"Has iBAQ: {has_ibaq}", **text_conf)

            pdf.savefig()
            plt.close(fig)
            # ######

            # figure
            if peptides is not None:
                self.logger.debug("Creating peptide overview")
                fig, axarr = plt.subplots(3, 1, figsize=(14, 7))
                bar_from_counts(axarr[0], peptides["Missed cleavages"].value_counts(), title="Missed Cleavages", relative=True)
                bar_from_counts(axarr[1], peptides["Amino acid before"].value_counts(), title="Amino acid before", yscale="log")
                bar_from_counts(axarr[2], peptides["Last amino acid"].value_counts(), title="Last amino acid", yscale="log")

                fig.tight_layout(rect=[0, 0.03, 1, 0.95])

                pdf.savefig()
                plt.close(fig)
            # ######

            # figure stuff
            self.logger.debug("Creating technical overview")
            fig, axarr = plt.subplots(2, 1, figsize=(14, 7))
            if peptides is not None:
                bar_from_counts(axarr[0], peptides["Charges"].str.split(";").explode().value_counts().sort_index(),
                                title="Peptide Charges")

            if evidence is not None:
                axarr[1].hist(evidence["m/z"])
                axarr[1].set_xlabel("m/z")
                axarr[1].set_ylabel("Counts")
                axarr[1].set_title("Peptide m/z")

            fig.tight_layout(rect=[0, 0.3, 1, 0.95])

            pdf.savefig()
            plt.close(fig)
            # ###########

            # hist with peptide m/z from evidence["m.s"]
            self.logger.debug("Creating identified proteins and peptides per sample")
            fig, axarr = plt.subplots(2, 1, figsize=(14, 7), sharex=True)
            # hist with identified proteins and hist with identified peptides, shared axis
            if prot_groups is not None:
                identified_proteins = (prot_groups_intensities["Grouped Intensity"] > 0).sum()
                identified_proteins = identified_proteins.rename(
                    lambda x: x.replace("Intensity ", "").replace("_", " "), axis=0)
                bar_from_counts(axarr[0], identified_proteins, title="Identified proteins")
            # proteins from proteinGroups, peptides from peptides file per sample
            if peptides is not None:
                identified_peptides = (peptides_intensities["Grouped Intensity"] > 0).sum()
                identified_peptides = identified_peptides.rename(
                    lambda x: x.replace("Intensity ", "").replace("_", " "), axis=0)
                bar_from_counts(axarr[1], identified_peptides, title="Identified peptides")
                axarr[1].xaxis.set_tick_params(rotation=90)

            fig.tight_layout(rect=[0, 0.03, 1, 0.95])

            pdf.savefig()
            plt.close(fig)
            # #####################

            # Page with stuff
            if summary is not None:
                self.logger.debug("Creating scan overview")
                fig, axarr = plt.subplots(3, 1, sharex=True, figsize=(14, 7))

                axarr[0].set_title("MS scans")
                axarr[0].bar(range(summary.shape[0]), summary["MS"])
                axarr[0].set_ylabel("Count")

                axarr[1].set_title("MS/MS scans")
                axarr[1].bar(range(summary.shape[0]), summary["MS/MS"])
                axarr[1].set_ylabel("Count")

                axarr[2].set_title("MS/MS identified [%]")
                axarr[2].bar(range(summary.shape[0]), summary["MS/MS Identified [%]"])
                axarr[2].set_ylabel("Percent")
                axarr[2].set_xticks(range(summary.shape[0]))
                labels = [sample.replace("_", " ") for sample in summary["Experiment"]]
                axarr[2].set_xticklabels(labels, rotation=90)

                fig.tight_layout(rect=[0, 0.03, 1, 0.95])

                pdf.savefig()
                plt.close(fig)
            # ##################

            # page with stuff
            if contaminants is not None:
                self.logger.debug("Creating overview of share of contamination intensity from total")
                df_contaminants_int = contaminants[prot_groups_prefix_columns]
                df_no_contaminants_int = prot_groups[prot_groups_prefix_columns]
                sum_int = pd.DataFrame({
                    "contaminants": df_contaminants_int.sum(axis=0),
                    "no_contaminants": df_no_contaminants_int.sum(axis=0)})
                sum_int["total"] = sum_int["contaminants"] + sum_int["no_contaminants"]
                sum_int["percent_cont"] = (sum_int["contaminants"] / sum_int["total"])*100
                sum_int["percent_no_cont"] = (sum_int["no_contaminants"] / sum_int["total"])*100
                fig, ax = plt.subplots(1, 1, sharex=True, figsize=(14, 7))

                labels = [label.replace("Intensity ", "").replace("_", " ") for label in sum_int.index.values]

                ax.bar(x=labels, height=sum_int["percent_cont"], width=0.8)
                ax.set_title("Intensity of contaminants from total intensity", size=14, pad=20)
                ax.set_ylabel("Percent", size=13)
                ax.set_xticks(range(len(labels)))
                ax.set_xticklabels(labels, rotation=90)
                ax.axhline(5, linestyle="-", linewidth=2, color="red", alpha=0.6)

                fig.tight_layout()

                pdf.savefig(figure=fig)
                plt.close(fig)
            # ############

            # page with stuff
            if prot_groups is not None:
                self.logger.debug("Creating overall intensity histograms")
                fig, axarr = plt.subplots(2, 1, sharex=True, figsize=(7, 7))

                # stacked histogram of log2 intensities
                colors = prot_groups_intensities["Grouped Intensity"].rename(lambda x: x.replace("Intensity ", ""), axis=1).columns
                colors = [plot_colors[c] for c in colors]
                matplotlib_plots.save_intensity_histogram_results(prot_groups_intensities, n_bins=11, histtype="barstacked",
                                                                  plot=(fig, axarr[0]), color=colors, show_mean=False,
                                                                  legend=False)
                # overlayed histogram of log2 intensities
                matplotlib_plots.save_intensity_histogram_results(prot_groups_intensities, n_bins=11, histtype="step",
                                                                  plot=(fig, axarr[1]), color=colors, show_mean=False,
                                                                  legend=False)
                fig.legend(bbox_to_anchor=(1.02, 0.5), loc="center left")

                fig.tight_layout()

                pdf.savefig()
                plt.close(fig)
            # ############

            # page with stuff
            # two histograms with heatmap
            # retention time vs retention length
            # from evidence["Retention time"], evidence["Retention length"]
            if evidence is not None:
                self.logger.debug("Creating overall retention time vs retention length")

                fig, ax = hist2d_with_hist(title="Overall Retention time vs Retention length",
                                           xdata=evidence["Retention time"], ydata=evidence["Retention length"],
                                           xlabel="Retention time [min]", ylabel="Retention length [min]",
                                           max_x=evidence["Retention time"].max())

                pdf.savefig(figure=fig)
                plt.close(fig)
            # ##############

            # individual comparison
            if evidence is not None:
                self.logger.debug("Creating individual experiment comparison")
                charge_flat = charge.sum(axis=1)
                missed_cleavages_flat = missed_cleavages.sum(axis=1)
                before_aa_counts_flat = before_aa_counts.sum(axis=1)
                last_aa_counts_flat = last_aa_counts.sum(axis=1)

                mz_x, mz_y, mz_bins = get_plot_data_from_hist(mz, n_bins=15, density=True)

                for experiment in mz.columns:
                    plot_color = plot_colors[experiment]
                    fig, axarr = plt.subplots(3, 2, figsize=(14, 7))
                    fig.suptitle(experiment.replace("_", " "))

                    axarr[0, 0].hist(mz[experiment], density=True, color=plot_color, bins=mz_bins)
                    axarr[0, 0].plot(mz_x, mz_y, color="black")
                    # axarr[0, 0].hist(mz.drop(experiment, axis=1).values.flatten(), histtype="step", density=True, color="black", bins=bins, linewidth=2)
                    # axarr[0, 0].hist(mz_flat, histtype="step", density=True, color="black", bins=bins, linewidth=2)
                    axarr[0, 0].set_xlabel("m/z")
                    axarr[0, 0].set_ylabel("Density")
                    axarr[0, 0].set_title("Peptide m/z")

                    bar_from_counts(axarr[0, 1], charge[experiment],
                                    compare_counts=charge_flat,
                                    relative=True,
                                    title="Peptide charges", bar_kwargs={"color": plot_color})
                    axarr[0, 1].set_xlabel("Peptide charge")

                    bar_from_counts(axarr[1, 0], missed_cleavages[experiment],
                                    compare_counts=missed_cleavages_flat,
                                    relative=True,
                                    title="Number of missed cleavages", bar_kwargs={"color": plot_color})
                    axarr[1, 0].set_xlabel("Missed cleavages")

                    # TODO this might be missing
                    bar_from_counts(axarr[1, 1], before_aa_counts[experiment],
                                    compare_counts=before_aa_counts_flat,
                                    relative=True,
                                    bar_kwargs={"color": plot_color})
                    axarr[1, 1].set_title("Amino acid before")

                    bar_from_counts(axarr[2, 0], last_aa_counts[experiment],
                                    compare_counts=last_aa_counts_flat,
                                    relative=True,
                                    bar_kwargs={"color": plot_color})
                    axarr[2, 0].set_title("Last amino acid")

                    axarr[2, 1].remove()
                    fig.tight_layout()

                    pdf.savefig()
                    plt.close(fig)
            # ###############

            # Intensity histograms of individual samples compared to remaining
            if prot_groups is not None:
                self.logger.debug("Creating individual intensity histograms")
                log2_intensities = np.log2(prot_groups_intensities["Grouped Intensity"])
                log2_intensities = log2_intensities.rename(lambda x: x.replace("Intensity ", ""), axis=1)

                b, h, bins = get_plot_data_from_hist(log2_intensities, density=True, n_bins=16)

                n_figures = int(np.ceil(len(log2_intensities.columns) / 9))
                for n_figure in range(n_figures):
                    fig, axarr = plt.subplots(3, 3, figsize=(15, 15))
                    for i, (pos, ax) in enumerate(np.ndenumerate(axarr)):
                        idx = n_figure * 9 + i
                        try:
                            experiment = log2_intensities.columns[idx]
                        except IndexError:
                            break
                        ax.hist(log2_intensities.loc[:, experiment], bins=bins, density=True,
                                color=plot_colors[experiment])
                        ax.plot(b, h, color="black")
                        ax.set_title(experiment.replace("_", " "))
                        ax.set_xlabel("Intensity")
                        ax.set_ylabel("Density")

                    if n_figure == (n_figures - 1):
                        n_empty = n_figures * 9 - len(log2_intensities.columns)
                        for i in range(1, n_empty + 1):
                            axarr.flat[-i].remove()

                    fig.tight_layout(rect=[0, 0.03, 1, 0.95])

                    pdf.savefig(fig)
                    plt.close(fig)
            # ################

            # Retention time of individuals samples vs remaining
            if evidence is not None:
                self.logger.debug("Creating individual retention time histograms")
                b, h, bins = get_plot_data_from_hist(retention_time, density=True, n_bins=25)

                n_figures = int(np.ceil(len(retention_time.columns) / 9))
                for n_figure in range(n_figures):
                    fig, axarr = plt.subplots(3, 3, figsize=(15, 15))
                    for i, (pos, ax) in enumerate(np.ndenumerate(axarr)):
                        idx = n_figure * 9 + i
                        try:
                            experiment = retention_time.columns[idx]
                        except IndexError:
                            break
                        ax.hist(retention_time.loc[:, experiment], bins=bins, density=True,
                                color=plot_colors[experiment])
                        ax.plot(b, h, color="black")
                        ax.set_title(experiment.replace("_", " "))
                        ax.set_xlabel("Retention time")
                        ax.set_ylabel("Density")

                    if n_figure == (n_figures - 1):
                        n_empty = n_figures * 9 - len(retention_time.columns)
                        for i in range(1, n_empty + 1):
                            axarr.flat[-i].remove()

                    fig.tight_layout(rect=[0, 0.03, 1, 0.95])

                    pdf.savefig(fig)
                    plt.close(fig)

            # retention time vs retention length individual
            if evidence is not None:
                self.logger.debug("Creating individual retention time vs retention length")
                for experiment in retention_length.columns:
                    fig, ax = hist2d_with_hist(title=experiment.replace("_", " "), xdata=retention_time[experiment],
                                               ydata=retention_length[experiment], xlabel="Retention time [min]",
                                               ylabel="Retention length [min]", max_x=retention_time.max().max())

                    pdf.savefig(figure=fig)
                    plt.close(fig)
            # ##########

            # total ion current vs retention length
            import matplotlib.ticker as ticker

            @ticker.FuncFormatter
            def scientific_formatter(x, pos):
                if x != 0:
                    return f"{x:.1E}"
                else:
                    return "0"

            if group_iter is not None:
                self.logger.debug("Creating MS scan and MSMS scan overview")
                for n_plot in range(int(np.ceil(len(group_iter) / 4))):
                    fig = plt.figure(figsize=(14, 7))
                    outer = fig.add_gridspec(2, 2, wspace=0.2, hspace=0.4)

                    for i in range(4):
                        inner = outer[i].subgridspec(2, 1, wspace=0.1, hspace=0.0)

                        group_counter = 4 * n_plot + i
                        try:
                            if msms_scans is not None:
                                group_name = list(msms_scan_groups.groups.keys())[group_counter]
                            elif ms_scans is not None:
                                group_name = list(ms_scan_groups.groups.keys())[group_counter]
                            else:
                                raise ValueError("Logic error")
                        except IndexError:
                            break

                        # msms plot
                        ax_msms: plt.Axes = plt.subplot(inner[1])
                        ax_msms.text(0.1, 0.9, 'MSMS', horizontalalignment='center',
                                     verticalalignment='center', transform=ax_msms.transAxes)
                        if msms_scans is not None:
                            df = msms_scan_groups.get_group(group_name)
                            ax_msms.plot(df["Retention time"], df["Total ion current"], color="black", linewidth=0.2)
                        ax_msms.yaxis.set_major_formatter(scientific_formatter)
                        ax_msms.set_xlabel("Retention time")
                        ax_msms.set_ylabel("Total ion current")
                        fig.add_subplot(ax_msms)

                        # ms plot
                        # get the axis with shared x axis
                        ax_ms: plt.Axes = plt.subplot(inner[0], sharex=ax_msms)
                        # add the text
                        ax_ms.text(0.1, 0.9, 'MS', horizontalalignment='center',
                                   verticalalignment='center', transform=ax_ms.transAxes)
                        # disable the axis ticks
                        ax_ms.tick_params(axis="x", which="both", bottom=False, labelbottom=False)
                        ax_ms.set_title(group_name)
                        if ms_scans is not None:
                            df = ms_scan_groups.get_group(group_name)
                            ax_ms.plot(df["Retention time"], df["Total ion current"], color="black", linewidth=0.2)
                        ax_ms.yaxis.set_major_formatter(scientific_formatter)
                        fig.add_subplot(ax_ms)

                    pdf.savefig()
                    plt.close(fig)

            self.logger.info("Done creating report")
Example #52
0
    def run(self,
            t_max,
            downsample=1,
            record_from_syns=False,
            record_from_iclamps=False,
            record_from_vclamps=False,
            record_from_channels=False,
            record_v_deriv=False,
            record_concentrations=[],
            pprint=False):
        """
        Run the NEURON simulation. Records at all locations stored
        under the name 'rec locs' on `self` (see `MorphTree.storeLocs()`)

        Parameters
        ----------
        t_max: float
            Duration of the simulation
        downsample: int (> 0)
            Records the state of the model every `downsample` time-steps
        record_from_syns: bool (default ``False``)
            Record currents of synaptic point processes (in `self.syns`).
            Accessible as `np.ndarray` in the output dict under key 'i_syn'
        record_from_iclamps: bool (default ``False``)
            Record currents of iclamps (in `self.iclamps`)
            Accessible as `np.ndarray` in the output dict under key 'i_clamp'
        record_from_vclamps: bool (default ``False``)
            Record currents of vclamps (in `self.vclamps`)
            Accessible as `np.ndarray` in the output dict under key 'i_vclamp'
        record_from_channels: bool (default ``False``)
            Record channel state variables from `neat` defined channels in `self`,
            at locations stored under 'rec locs'
            Accessible as `np.ndarray` in the output dict under key 'chan'
        record_v_deriv: bool (default ``False``)
            Record voltage derivative at locations stored under 'rec locs'
            Accessible as `np.ndarray` in the output dict under key 'dv_dt'
        record_concentrations: list of str (default ``[]``)
            Record ion concentrations at locations stored under 'rec locs'
            Accessible as `np.ndarray` in the output dict with the ion's
            name as key

        Returns
        -------
        dict
            Dictionary with the results of the simulation. Contains time and
            voltage as `np.ndarray` at locations stored under the name
            'rec locs', respectively with keys 't' and 'v_m'. Also contains
            traces of other recorded variables if the option to record them was
            set to ``True``
        """
        assert isinstance(downsample, int) and downsample > 0
        # simulation time recorder
        res = {'t': h.Vector()}
        res['t'].record(h._ref_t)
        # voltage recorders
        res['v_m'] = []
        for loc in self.getLocs('rec locs'):
            res['v_m'].append(h.Vector())
            res['v_m'][-1].record(self.sections[loc['node']](loc['x'])._ref_v)
        # synapse current recorders
        if record_from_syns:
            res['i_syn'] = []
            for syn in self.syns:
                res['i_syn'].append(h.Vector())
                res['i_syn'][-1].record(syn._ref_i)
        # current clamp current recorders
        if record_from_iclamps:
            res['i_clamp'] = []
            for iclamp in self.iclamps:
                res['i_clamp'].append(h.Vector())
                res['i_clamp'][-1].record(iclamp._ref_i)
        # voltage clamp current recorders
        if record_from_vclamps:
            res['i_vclamp'] = []
            for vclamp in self.vclamps:
                res['i_vclamp'].append(h.Vector())
                res['i_vclamp'][-1].record(vclamp._ref_i)
        # channel state variable recordings
        if record_from_channels:
            res['chan'] = {}
            channel_names = self.getChannelsInTree()
            for channel_name in channel_names:
                res['chan'][channel_name] = {
                    var: []
                    for var in
                    self.channel_storage[channel_name].varnames.flatten()
                }
                for loc in self.getLocs('rec locs'):
                    varnames = self.channel_storage[channel_name].varnames.flatten()
                    for ind, varname in enumerate(varnames):
                        # ensure the x-coordinate refers to the interior of the neuron section (not an endpoint)
                        xx = loc['x']
                        if xx < 1e-3: xx += 1e-3
                        elif xx > 1. - 1e-3: xx -= 1e-3
                        # create the recorder
                        try:
                            rec = h.Vector()
                            mech = getattr(self.sections[loc['node']](xx),
                                           mechname[channel_name])
                            rec.record(getattr(mech, '_ref_var' + str(ind)))
                            res['chan'][channel_name][varname].append(rec)
                        except AttributeError:
                            # the channel does not exist here
                            res['chan'][channel_name][varname].append([])
        # ion concentration recorders
        if len(record_concentrations) > 0:
            for c_ion in record_concentrations:
                res[c_ion] = []
                for loc in self.getLocs('rec locs'):
                    res[c_ion].append(h.Vector())
                    res[c_ion][-1].record(
                        getattr(self.sections[loc['node']](loc['x']),
                                '_ref_' + c_ion + 'i'))
        # record voltage derivative
        if record_v_deriv:
            res['dv_dt'] = []
            for _ in self.getLocs('rec locs'):
                res['dv_dt'].append(h.Vector())
                # the derivative itself is computed after the simulation

        # initialize
        # neuron.celsius=37.
        h.finitialize(self.v_init)
        h.dt = self.dt

        # simulate
        if pprint:
            print('>>> Simulating the NEURON model for ' + str(t_max) +
                  ' ms. <<<')
        start = posix.times()[0]
        neuron.run(t_max + self.t_calibrate)
        stop = posix.times()[0]
        if pprint:
            print('>>> Elapsed time: ' + str(stop - start) + ' seconds. <<<')
        runtime = stop - start

        # compute derivative
        if 'dv_dt' in res:
            for ii, loc in enumerate(self.getLocs('rec locs')):
                res['dv_dt'][ii].deriv(res['v_m'][ii], h.dt, 2)
                res['dv_dt'][ii] = np.array(
                    res['dv_dt'][ii])[self.indstart:][::downsample]
            res['dv_dt'] = np.array(res['dv_dt'])
        # cast recordings into numpy arrays
        res['t'] = np.array(
            res['t'])[self.indstart:][::downsample] - self.t_calibrate
        for key in set(res.keys()) - {'t', 'chan', 'dv_dt'}:
            if len(res[key]) > 0:
                res[key] = np.array([np.array(reslist)[self.indstart:][::downsample] \
                                     for reslist in res[key]])
                if key in ('i_syn', 'i_clamp', 'i_vclamp'):
                    res[key] *= -1.
        # cast channel recordings into numpy arrays
        if 'chan' in res:
            for channel_name in channel_names:
                channel = self.channel_storage[channel_name]
                for ind0, varname in enumerate(channel.varnames.flatten()):
                    for ind1 in range(len(self.getLocs('rec locs'))):
                        res['chan'][channel_name][varname][ind1] = \
                                np.array(res['chan'][channel_name][varname][ind1])[self.indstart:][::downsample]
                        if len(res['chan'][channel_name][varname][ind1]) == 0:
                            res['chan'][channel_name][varname][
                                ind1] = np.zeros_like(res['t'])
                    res['chan'][channel_name][varname] = \
                            np.array(res['chan'][channel_name][varname])
                # compute P_open
                sv = np.zeros(
                    (channel.varnames.shape[0], channel.varnames.shape[1],
                     len(self.getLocs('rec locs')), len(res['t'])))
                for (ii, jj), varname in np.ndenumerate(channel.varnames):
                    sv[ii, jj, :, :] = res['chan'][channel_name][varname]
                res['chan'][channel_name]['p_open'] = channel.computePOpen(
                    res['v_m'], statevars=sv)

        return res
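
A minimal usage sketch for the method above (assumed to be called `run` on a
NEAT simulation tree; the object name `sim_tree`, the locations and the
parameter values are illustrative only):

    # hypothetical setup: store two recording locations, then simulate 100 ms
    sim_tree.storeLocs([(1, 0.5), (4, 0.9)], name='rec locs')
    res = sim_tree.run(100., record_from_syns=True, record_v_deriv=True)
    # res['t'] and res['v_m'] are np.ndarrays; res['v_m'][ii] is the voltage
    # trace at the ii-th stored location
    print(res['t'].shape, res['v_m'].shape)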
Example #53
0
 def generator():
     last_x = 0
     for i, x in np.ndenumerate(times):
         x = int(x)
         yield sig[last_x:x, ...]
         last_x = x
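A short sketch of how such a closure might be used; `sig` and `times` are
assumed to be defined in the enclosing scope (made-up values below), and the
generator splits the signal at the given sample indices:

    import numpy as np

    sig = np.arange(12).reshape(12, 1)   # dummy signal: 12 samples, 1 channel
    times = np.array([4., 9., 12.])      # split points, cast to int inside
    chunks = list(generator())           # -> [sig[0:4], sig[4:9], sig[9:12]]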
Example #54
0
    ew_true = table["EW_TRUE"]
    z = table["z_redshift_space_true"]

    ew_obs_rest = ew_obs / (1.0 + z)
    ew_true_rest = ew_true / (1.0 + z)

    # Loop over all of the columns,
    # computing the indices in each
    # set of bins
    indices_cat = []
    for col, bins in iteritems(bin_dict):
        param = table[col]
        indices_cat.append(digitize(param, bins=bins))

    # Loop over all possible indices of output cube
    for indices, junk in ndenumerate(zeros(shape[:-1])):

        # select sources in this n-dimensional bin
        indices_in = ones(len(ew_obs_rest), dtype=bool)
        for n, ind in enumerate(indices):
            indices_in = indices_in & (indices_cat[n] == (ind + 1))

        tew_obs_rest = ew_obs_rest[indices_in]
        hist_obs = histogram(tew_obs_rest, bins=ew_bins)[0]

        hist_cube[indices][:] += hist_obs

        # also count sources falling above or below the EW range
        nabove[indices] += len(tew_obs_rest[(tew_obs_rest > ew_max) |
                                            (tew_obs_rest < ew_min)])
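For reference, a tiny sketch of the indexing idiom used above:
`ndenumerate(zeros(shape[:-1]))` simply walks every index tuple of the output
cube's leading dimensions (the `shape` value below is made up):

    from numpy import ndenumerate, zeros

    shape = (2, 3, 10)                  # hypothetical cube shape
    for indices, junk in ndenumerate(zeros(shape[:-1])):
        pass                            # indices runs over (0, 0) ... (1, 2)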
Example #55
0
    def build_tree(
        self,
        X,
        y,
        thresholds,
        entropy,
        distribution_cls,
        distribution,
        depth,
        max_depth,
        leaf,
    ):
        self.depth = depth
        best_splits = []
        best_distributions_cls = []
        best_distributions = []
        best_entropies = []

        if leaf is False and depth < max_depth:
            for (_, att), threshold in np.ndenumerate(thresholds):
                (
                    info_gain,
                    splits,
                    distributions_cls,
                    distributions,
                    entropies,
                ) = self.information_gain(X, y, att, threshold, entropy)

                if info_gain > self.best_gain:
                    self.best_split = att
                    self.best_threshold = threshold
                    self.best_gain = info_gain
                    self.best_margin = -1
                    best_splits = splits
                    best_distributions_cls = distributions_cls
                    best_distributions = distributions
                    best_entropies = entropies
                elif info_gain == self.best_gain and info_gain > 0.000001:
                    margin = self.margin_gain(X, att, threshold)
                    if self.best_margin == -1:
                        self.best_margin = self.margin_gain(
                            X, self.best_split, self.best_threshold)

                    if margin > self.best_margin or (
                            margin == self.best_margin
                            and self.random_state.choice([True, False])):
                        self.best_split = att
                        self.best_threshold = threshold
                        self.best_margin = margin
                        best_splits = splits
                        best_distributions_cls = distributions_cls
                        best_distributions = distributions
                        best_entropies = entropies

        if self.best_split > -1:
            self.children = [None, None, None]

            if sum(best_splits[0]) > 0:
                self.children[0] = TreeNode(random_state=self.random_state)
                self.children[0].build_tree(
                    X[best_splits[0]],
                    y[best_splits[0]],
                    thresholds,
                    best_entropies[0],
                    best_distributions_cls[0],
                    best_distributions[0],
                    depth + 1,
                    max_depth,
                    len(best_distributions[0]) == 1,
                )
            else:
                self.children[0] = TreeNode(random_state=self.random_state)
                self.children[0].build_tree(
                    X,
                    y,
                    thresholds,
                    entropy,
                    distribution_cls,
                    distribution,
                    depth + 1,
                    max_depth,
                    True,
                )

            if sum(best_splits[1]) > 0:
                self.children[1] = TreeNode(random_state=self.random_state)
                self.children[1].build_tree(
                    X[best_splits[1]],
                    y[best_splits[1]],
                    thresholds,
                    best_entropies[1],
                    best_distributions_cls[1],
                    best_distributions[1],
                    depth + 1,
                    max_depth,
                    len(best_distributions[1]) == 1,
                )
            else:
                self.children[1] = TreeNode(random_state=self.random_state)
                self.children[1].build_tree(
                    X,
                    y,
                    thresholds,
                    entropy,
                    distribution_cls,
                    distribution,
                    depth + 1,
                    max_depth,
                    True,
                )

            if sum(best_splits[2]) > 0:
                self.children[2] = TreeNode(random_state=self.random_state)
                self.children[2].build_tree(
                    X[best_splits[2]],
                    y[best_splits[2]],
                    thresholds,
                    best_entropies[2],
                    best_distributions_cls[2],
                    best_distributions[2],
                    depth + 1,
                    max_depth,
                    len(best_distributions[2]) == 1,
                )
            else:
                self.children[2] = TreeNode(random_state=self.random_state)
                self.children[2].build_tree(
                    X,
                    y,
                    thresholds,
                    entropy,
                    distribution_cls,
                    distribution,
                    depth + 1,
                    max_depth,
                    True,
                )
        else:
            self.leaf_distribution_cls = list(distribution_cls)
            self.leaf_distribution = list(distribution / distribution.sum())

        return self
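The `np.ndenumerate(thresholds)` loop above visits every candidate threshold
together with its attribute (column) index; a minimal sketch with a made-up
threshold grid:

    import numpy as np

    thresholds = np.array([[0.1, 0.5],   # rows: candidate thresholds
                           [0.2, 0.6]])  # columns: attributes
    for (_, att), threshold in np.ndenumerate(thresholds):
        print(att, threshold)            # attribute index, candidate value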
Example #56
0
def _NumpySub(ref, indices, updates):
    for i, indx in np.ndenumerate(indices):
        ref[indx] -= updates[i]
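A quick sanity check of the reference scatter-subtract above, with made-up
data; note that duplicate indices accumulate:

    import numpy as np

    ref = np.array([10., 10., 10.])
    _NumpySub(ref, np.array([0, 2, 0]), np.array([1., 2., 3.]))
    # ref is now [6., 10., 8.]  (index 0 was hit twice: -1 and -3)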
Example #57
0
def _NumpyMax(ref, indices, updates):
    for i, indx in np.ndenumerate(indices):
        ref[indx] = np.maximum(ref[indx], updates[i])
Example #58
0
    def visualise(self):
        """
		Visualises the result of analysing the GridworldV2.

		"""
        # Obtain the current working directory.
        curr_dir = os.path.dirname(os.path.abspath(__file__))

        # Creates the required diagram and assigns a title to it, which includes
        # the number of iterations performed as part of the specified method.
        fig, (ax1, ax2) = plt.subplots(1,
                                       2,
                                       figsize=(12, 6),
                                       constrained_layout=True)
        graph_title = ' '.join(
            [substr.title() for substr in self.solve_method.split('_')])
        fig.suptitle('GridworldV2 %s Results: %i Iterations' %
                     (graph_title, self.current_iter),
                     fontsize=30)

        # Disable the display of every axes of every subplot.
        ax1.set_axis_off()
        ax2.set_axis_off()

        # Creates a table, representing a grid, for every subplot.
        tb1 = Table(ax1, bbox=[0, 0, 1, 1])
        tb2 = Table(ax2, bbox=[0, 0, 1, 1])

        # Computes the height and width of each cell of the grid during
        # visualisation.
        height, width = 1.0 / self.size[0], 1.0 / self.size[1]

        # Loops across all possible states of the GridworldV2.
        for (j, i), val in np.ndenumerate(self.v):
            # Creates a cell in each table for this state: the left table
            # shows the approximate final value estimate, while the right
            # table is left empty (terminal states are shaded gray).
            if [j, i] in self.terminal_coords_list:
                facecolor = 'gray'
            else:
                facecolor = 'none'
            tb1.add_cell(j,
                         i,
                         width,
                         height,
                         text=val,
                         loc='center',
                         facecolor='none')
            tb2.add_cell(j,
                         i,
                         width,
                         height,
                         loc='center',
                         facecolor=facecolor)

            # Loops across all possible actions of the current state.
            for (a, ), pol in np.ndenumerate(self.pi[j, i]):
                # Adds the action to the right table if the action is
                # part of the final policy. The action is shown in the form
                # of an arrow.
                if pol:
                    # Finds the corresponding coordinate change of the current
                    # action.
                    current_action = self.actions_list[a]

                    # Adds the arrow to the right table.
                    ax2.arrow(width * (i + 0.5),
                              1 - height * (j + 0.5),
                              0.25 * current_action[1] * width,
                              -0.25 * current_action[0] * height,
                              head_width=0.1 * width,
                              head_length=0.1 * height,
                              color='black')

        # Adds the row indexes for each of the tables.
        for j in range(self.size[0]):
            tb1.add_cell(j,
                         -1,
                         width / 2,
                         height,
                         text=j,
                         loc='right',
                         edgecolor='none',
                         facecolor='none')
            tb2.add_cell(j,
                         -1,
                         width / 2,
                         height,
                         text=j,
                         loc='right',
                         edgecolor='none',
                         facecolor='none')

        # Adds the column indexes for each of the tables.
        for i in range(self.size[1]):
            tb1.add_cell(-1,
                         i,
                         width,
                         height / 4,
                         text=i,
                         loc='center',
                         edgecolor='none',
                         facecolor='none')
            tb2.add_cell(-1,
                         i,
                         width,
                         height / 4,
                         text=i,
                         loc='center',
                         edgecolor='none',
                         facecolor='none')

        # Adds the completed tables to their respective subplots.
        ax1.add_table(tb1)
        ax2.add_table(tb2)

        # Sets the titles for both subplots.
        title_height = 1.05
        title_size = 15
        ax1.set_title('Optimum Value', y=title_height, size=title_size)
        ax2.set_title('Optimum Path', y=title_height, size=title_size)

        # Saves the plotted diagram with the name of the selected method.
        plt.savefig(
            os.path.join(
                curr_dir, 'gridworldv2_%s_%s_results.png' %
                (self.name, self.solve_method)))
        plt.close()
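The `np.ndenumerate(self.v)` loop above is the usual way to walk a 2-D value
grid together with its (row, column) coordinates; a minimal standalone sketch
(the grid values are made up):

    import numpy as np

    v = np.zeros((2, 2))                # stand-in for the state-value grid
    for (j, i), val in np.ndenumerate(v):
        print(j, i, val)                # row j, column i, value estimate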
Example #59
0
def _NumpyDiv(ref, indices, updates):
    for i, indx in np.ndenumerate(indices):
        ref[indx] /= updates[i]
Example #60
0
def _NumpyMaxScalar(ref, indices, update):
    for _, indx in np.ndenumerate(indices):
        ref[indx] = np.maximum(ref[indx], update)