def featureInfoGain(X,y,feat):
    """ 
    Calculate maximum information gain w.r.t. the feature which is specified in column feat of the 2-dimensional array X.
    """
    EntWithoutSplit=getEntropy(y)
    feature=X[:,feat]
    length=len(feature)
    valueList=sorted(set(feature))  # sort the unique values so the midpoints below are valid thresholds
    splits=np.diff(valueList)/2.0+valueList[:-1]
    maxGain=0
    bestSplit=0
    bestPart1=[]
    bestPart2=[]
    for split in splits:
        Part1idx=np.argwhere(feature<=split)
        Part2idx=np.argwhere(feature>split)
        E1=getEntropy(y[Part1idx[:,0]])
        l1=len(Part1idx)
        E2=getEntropy(y[Part2idx[:,0]])
        l2=len(Part2idx)
        Gain=EntWithoutSplit-(l1*1.0/length*E1+l2*1.0/length*E2)
        if Gain > maxGain:
            maxGain=Gain
            bestSplit=split
            bestPart1=Part1idx
            bestPart2=Part2idx
    return maxGain#,bestSplit,bestPart1,bestPart2  
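A minimal usage sketch: getEntropy is not shown in this example, so the helper below is an illustrative stand-in (Shannon entropy of a label vector), not the original project's implementation, and the data is made up.

import numpy as np

def getEntropy(y):
    # illustrative stand-in, not the original project's helper
    _, counts = np.unique(y, return_counts=True)
    p = counts / counts.sum()
    return -(p * np.log2(p)).sum()

X = np.array([[2.0], [2.5], [3.0], [3.5]])
y = np.array([0, 0, 1, 1])
print(featureInfoGain(X, y, 0))  # 1.0 -- the split at 2.75 separates the classes perfectly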
Example #2
    def save_segmented_image(self, filepath_image, modality='t1c', show=False):
        '''
        Creates an image of the original brain with the segmentation overlay and saves it in ./predictions
        INPUT   (1) str 'filepath_image': filepath to the test image for segmentation, including file extension
                (2) str 'modality': imaging modality to use as background. defaults to t1c. options: (flair, t1, t1c, t2)
                (3) bool 'show': if True, shows the output image. defaults to False.
        OUTPUT  (1) if show is True, shows the image of the segmentation results
                (2) in either case, saves the overlay image to ./predictions
        '''
        modes = {'flair': 0, 't1': 1, 't1c': 2, 't2': 3}

        segmentation = self.predict_image(filepath_image, show=False)
        print('segmentation = ' + str(segmentation))
        img_mask = np.pad(segmentation, (16, 16), mode='edge')
        ones = np.argwhere(img_mask == 1)
        twos = np.argwhere(img_mask == 2)
        threes = np.argwhere(img_mask == 3)
        fours = np.argwhere(img_mask == 4)

        test_im = io.imread(filepath_image)
        test_back = test_im.reshape(5, 216, 160)[modes[modality]]
        # overlay = mark_boundaries(test_back, img_mask)
        gray_img = img_as_float(test_back)

        # adjust gamma of image
        image = adjust_gamma(color.gray2rgb(gray_img), 0.65)
        sliced_image = image.copy()
        red_multiplier = [1, 0.2, 0.2]
        yellow_multiplier = [1, 1, 0.25]
        green_multiplier = [0.35, 0.75, 0.25]
        blue_multiplier = [0, 0.25, 0.9]

        print(len(ones))
        print(len(twos))
        print(len(threes))
        print(len(fours))

        # change colors of segmented classes (argwhere returns (row, col) index pairs)
        sliced_image[ones[:, 0], ones[:, 1]] = red_multiplier
        sliced_image[twos[:, 0], twos[:, 1]] = green_multiplier
        sliced_image[threes[:, 0], threes[:, 1]] = blue_multiplier
        sliced_image[fours[:, 0], fours[:, 1]] = yellow_multiplier
        # if show is True, display the prediction
        if show:
            print('Showing...')
            io.imshow(sliced_image)
            plt.show()
        # save the prediction
        print('Saving...')
        mkdir_p('./predictions/')
        io.imsave('./predictions/' + os.path.basename(filepath_image) + '.png', sliced_image)
        print('prediction saved.')
Example #3
def sar(el2no):
    """
    extract the spatial difference matrix over the neighbors of each element
    in a 2D FEM using a triangular mesh.

    Parameters
    ----------
    el2no : NDArray
        triangle structures

    Returns
    -------
    NDArray
        SAR matrix
    """
    ne = el2no.shape[0]
    L = np.eye(ne)
    for i in range(ne):
        ei = el2no[i, :]
        #
        i0 = np.argwhere(el2no == ei[0])[:, 0]
        i1 = np.argwhere(el2no == ei[1])[:, 0]
        i2 = np.argwhere(el2no == ei[2])[:, 0]
        idx = np.unique(np.hstack([i0, i1, i2]))
        # build row-i
        for j in idx:
            L[i, j] = -1
        nn = idx.size - 1
        L[i, i] = nn
    return L
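A small sketch of calling sar on a two-triangle mesh (assuming numpy is imported as np); each row of the result acts as a Laplacian-like stencil over the elements that share a node:

import numpy as np

el2no = np.array([[0, 1, 2],
                  [1, 2, 3]])  # two triangles sharing the edge (1, 2)
print(sar(el2no))
# [[ 1. -1.]
#  [-1.  1.]]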
Example #4
def spike_find(input_array, t, max_spike_width):
    """
    Find the spikes in the input_array.
    Inputs:
        input_array              : a numpy array (1-dimensional) holding 
                                   floats.
        t                        : threshold for spike detection
        max_spike_width          : crossings further apart than this will 
                                   disqualify the spike
    Returns:
        spikes                   : a numpy array (1-dimensional) holding
                                   integers (spike index values)
    """
    crossings = fast_thresh_detect(input_array, threshold=t)
    spikes = []
    if len(crossings) > 1:
        if t > 0.0:
            # find first positive crossing then pair up crossings
            first_p = numpy.argwhere(input_array[crossings] < t)[0]
            for p, n in zip(crossings[first_p::2], crossings[first_p + 1 :: 2]):
                if abs(p - n) <= max_spike_width:
                    peak_index = numpy.argsort(input_array[p : n + 1])[-1] + p
                    spikes.append(peak_index)
        else:
            # find first negative crossing then pair up crossings
            first_n = numpy.argwhere(input_array[crossings] > t)[0]
            for n, p in zip(crossings[first_n::2], crossings[first_n + 1 :: 2]):
                if abs(p - n) <= max_spike_width:
                    peak_index = numpy.argsort(input_array[n : p + 1])[0] + n
                    spikes.append(peak_index)
    return numpy.array(spikes)
Example #5
def crop_data(bg, overlay):
    '''
    Crop the data to get rid of the large amounts of black space surrounding
    the background image.
    '''
    #---------------------------------------------------------------
    # First find all the slices that contain data you want
    slices_list_x = list(np.argwhere(np.sum(bg, (1,2)) != 0)[:,0])
    slices_list_y = list(np.argwhere(np.sum(bg, (0,2)) != 0)[:,0])
    slices_list_z = list(np.argwhere(np.sum(bg, (0,1)) != 0)[:,0])

    slices_list = [slices_list_x, slices_list_y, slices_list_z]
    
    #---------------------------------------------------------------
    # Make a copy of the data
    bg_cropped = np.copy(bg)
    overlay_cropped = np.copy(overlay)
    
    #---------------------------------------------------------------
    # Remove all slices that have no data in the background image
    bg_cropped = bg_cropped[ slices_list_x, :, : ]
    overlay_cropped = overlay_cropped[ slices_list_x, :, : ]
    
    bg_cropped = bg_cropped[ :, slices_list_y, : ]
    overlay_cropped = overlay_cropped[ :, slices_list_y, : ]
    
    bg_cropped = bg_cropped[ :, :, slices_list_z ]
    overlay_cropped = overlay_cropped[ :, :, slices_list_z ]
        
    return bg_cropped, overlay_cropped, slices_list
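For example, a hedged sketch with a toy volume (the returned slice lists are the indices kept along each axis):

import numpy as np

bg = np.zeros((10, 10, 10))
overlay = np.zeros_like(bg)
bg[3:7, 2:8, 4:9] = 1.0  # data surrounded by black space
bg_c, overlay_c, slices = crop_data(bg, overlay)
print(bg_c.shape)  # (4, 6, 5)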
Example #6
		def voigtking(v,a):
			oneonsqrtpi=0.56418958354775630
			h0 = np.array([ 1.0e0, 0.9975031223974601240368798e0, 0.9900498337491680535739060e0, 0.9777512371933363639286036e0, 0.9607894391523232094392107e0, 0.9394130628134757861197108e0, 0.9139311852712281867473535e0, 0.8847059049434835594929548e0, 0.8521437889662113384563470e0, 0.8166864825981108401538061e0, 0.7788007830714048682451703e0, 0.7389684882589442416058206e0, 0.6976763260710310572091293e0, 0.6554062543268405127576690e0, 0.6126263941844160689885800e0, 0.5697828247309230097666297e0, 0.5272924240430485572436946e0, 0.4855368951540794399916001e0, 0.4448580662229411344814454e0, 0.4055545050633205516443034e0, 0.3678794411714423215955238e0, 0.3320399453446606420249195e0, 0.2981972794298873779316010e0, 0.2664682978135241116965901e0, 0.2369277586821217567233665e0, 0.2096113871510978225241101e0, 0.1845195239929892676298138e0, 0.1616211924653392539324509e0, 0.1408584209210449961479715e0, 0.1221506695399900084151679e0, 0.1053992245618643367832177e0, 0.9049144166369591062935159e-1, 0.7730474044329974599046566e-1, 0.6571027322750286139200605e-1, 0.5557621261148306865356766e-1, 0.4677062238395898365276137e-1, 0.3916389509898707373977109e-1, 0.3263075599289603180381419e-1, 0.2705184686635041108596167e-1, 0.2231491477696640649487920e-1, 0.1831563888873418029371802e-1, 0.1495813470057748930092482e-1, 0.1215517832991493721502629e-1, 0.9828194835379685936011149e-2, 0.7907054051593440493635646e-2, 0.6329715427485746576865117e-2, 0.5041760259690979102410257e-2, 0.3995845830084632413030896e-2, 0.3151111598444440557819106e-2, 0.2472563035874193226953048e-2, 0.1930454136227709242213512e-2, 0.1499685289329846120368399e-2, 0.1159229173904591150012118e-2, 0.8915937199952195568639939e-3, 0.6823280527563766163014506e-3, 0.5195746821548384817648154e-3, 0.3936690406550782109805393e-3, 0.2967857677932108344855019e-3, 0.2226298569188890101840659e-3, 0.1661698666072774484528398e-3, 0.1234098040866795494976367e-3, 0.9119595636226606575873788e-4, 0.6705482430281108867614262e-4, 0.4905835745620769579106241e-4, 0.3571284964163521691234528e-4, 0.2586810022265412127035909e-4, 0.1864374233151683041526522e-4, 0.1336996212084380475632834e-4, 0.9540162873079234841590110e-5, 0.6773449997703748098370991e-5, 0.4785117392129009089609771e-5, 0.3363595724825637829225185e-5, 0.2352575200009772922652510e-5, 0.1637237807196195233271403e-5, 0.1133727138747965652009438e-5, 0.7811489408304490795473004e-6, 0.5355347802793106157479094e-6, 0.3653171341207511214363159e-6, 0.2479596018045029629499234e-6, 0.1674635703137489046698250e-6, 0.1125351747192591145137752e-6, 0.7524623257644829651017174e-7, 0.5006218020767042215644986e-7, 0.3314082270898834287088712e-7, 0.2182957795125479209083827e-7, 0.1430724191856768833467676e-7, 0.9330287574504991120387842e-8, 0.6054282282484886644264747e-8, 0.3908938434264861859681131e-8, 0.2511212833271291589987176e-8, 0.1605228055185611608653934e-8, 0.1020982947159334870301705e-8, 0.6461431773106108989429857e-9, 0.4068811450655793356678124e-9, 0.2549381880391968872012880e-9, 0.1589391009451636652873474e-9, 0.9859505575991508240729766e-10, 0.6085665105518337082108266e-10, 0.3737571327944262032923964e-10, 0.2284017657993705413027994e-10, 0.1388794386496402059466176e-10, 0.8402431396484308187150245e-11, 0.5058252742843793235026422e-11, 0.3029874246723653849216172e-11, 0.1805831437513215621913785e-11, 0.1070923238250807645586450e-11, 0.6319285885175366663984108e-12, 0.3710275783094727281418983e-12, 0.2167568882618961942307398e-12, 0.1259993054847742150188394e-12, 0.7287724095819692419343177e-13, 
0.4194152536192217185131208e-13, 0.2401734781620959445230543e-13, 0.1368467228126496785536523e-13, 0.7758402075696070467242451e-14, 0.4376618502870849893821267e-14, 0.2456595368792144453705261e-14, 0.1372009419645128473380053e-14, 0.7624459905389739760616425e-15, 0.4215893238174252040735029e-15, 0.2319522830243569388312264e-15, 0.1269802641377875575018264e-15, 0.6916753975541448863883054e-16, 0.3748840457745443581785685e-16, 0.2021715848695342027119482e-16, 0.1084855264042937802512215e-16, 0.5792312885394857923477507e-17, 0.3077235638152508657901574e-17, 0.1626664621453244338034305e-17, 0.8555862896902856300749061e-18, 0.4477732441718301199042103e-18, 0.2331744656246116743545942e-18, 0.1208182019899973571654094e-18, 0.6228913128535643653088166e-19, 0.3195366717748344275120932e-19, 0.1631013922670185678641901e-19, 0.8283677007682876110228791e-20, 0.4186173006145967657832773e-20, 0.2104939978339734445589080e-20, 0.1053151347744013743766989e-20, 0.5242885663363463937171805e-21, 0.2597039249246848208769072e-21, 0.1280015319051641983953037e-21, 0.6277407889747195099574399e-22, 0.3063190864577440373821128e-22, 0.1487292181651270619154227e-22, 0.7185335635902193010046941e-23, 0.3454031957013868448981675e-23, 0.1652091782314268593068387e-23, 0.7862678502984538622254116e-24, 0.3723363121750510429289070e-24, 0.1754400713566556605465117e-24, 0.8225280651606668501925640e-25, 0.3837082905344536379879530e-25, 0.1781066634757091357021587e-25, 0.8225980595143903024275237e-26, 0.3780277844776084635218009e-26, 0.1728575244037268289032505e-26, 0.7864685935766448441713277e-27, 0.3560434556451067378310069e-27, 0.1603810890548637852976087e-27, 0.7188393394953158727447087e-28, 0.3205819323394999444158648e-28, 0.1422573701362478490703169e-28, 0.6281148147605989215436687e-29, 0.2759509067522042024589005e-29, 0.1206293927781149203841840e-29, 0.5246902396795390138796640e-30, 0.2270812922026396509517690e-30, 0.9778860615814667663870901e-31, 0.4190093194494397377123780e-31, 0.1786436718517518413888050e-31, 0.7578445267618382646037748e-32, 0.3198903416725805416294188e-32, 0.1343540197758737662452134e-32, 0.5614728092387934579799402e-33, 0.2334722783487267408869808e-33, 0.9659851300583384710233199e-34, 0.3976803097901655265751816e-34, 0.1629019426220514693169818e-34, 0.6639677199580734400702255e-35, 0.2692751000456178970430831e-35, 0.1086610640745980532852592e-35, 0.4362950029268711046345153e-36, 0.1743070896645292498913954e-36, 0.6929124938815710000577778e-37, 0.2740755284722598699701951e-37, 0.1078675105373929991550997e-37, 0.4224152406206200437573993e-38, 0.1645951484063258284098658e-38, 0.6381503448060790393554118e-39, 0.2461826907787885454919214e-39, 0.9449754976491185028813549e-40, 0.3609209642415355020302235e-40, 0.1371614910949353618952282e-40, 0.5186576811908572940413120e-41, 0.1951452380295377748121319e-41, 0.7305730197111493885868359e-42, 0.2721434140093713884466599e-42, 0.1008696596314342558322441e-42, 0.3720075976020835962959696e-43, 0.1365122395620087240477630e-43 ], dtype=np.float64)
			h1 = np.array([ -1.128379167095512573896159e0, -1.122746665023313894112994e0, -1.105961434222613497822717e0, -1.078356949458362356972974e0, -1.040477963566390226869037e0, -0.9930644092865188274925694e0, -0.9370297574325730524254160e0, -0.8734346738611667009559691e0, -0.8034569860177944012914767e0, -0.7283590897795191457635390e0, -0.6494539941944691013512214e0, -0.5680712138345335512208471e0, -0.4855236771153186839197872e0, -0.4030767281964792012404736e0, -0.3219201665209207840831093e0, -0.2431441002236951675148354e0, -0.1677191974661332963609891e0, -0.9648171389061105293546881e-1, -0.3012346558870770535102483e-1, 0.3081328457047809980986685e-1, 0.8593624458727488433391777e-1, 0.1349991935349749351748713e0, 0.1778942744880748462232135e0, 0.2146410885736963723412265e0, 0.2453732617833523433216744e0, 0.2703231847626659615037426e0, 0.2898056218155761132507312e0, 0.3042008523837261147222841e0, 0.3139379509747736418513567e0, 0.3194787353320834397089635e0, 0.3213028233267945998845488e0, 0.3198941423604233541674753e0, 0.3157291364070343763776039e0, 0.3092668200208504802085382e0, 0.3009407397271468294117335e0, 0.2911528243392948676821857e0, 0.2802690390913659378360681e0, 0.2686167052981096351368975e0, 0.2564833079412283848897372e0, 0.2441165877658165024921633e0, 0.2317257011687522312257119e0, 0.2194832289213470945135105e0, 0.2075278218310246553881156e0, 0.1959672858880207128215797e0, 0.1848819293094190730287360e0, 0.1743280173110208640535652e0, 0.1643412057011470302647273e0, 0.1549398500207542791790132e0, 0.1461281117364874603340094e0, 0.1378988059908943461128856e0, 0.1302359559637753421977543e0, 0.1231170365911391556632533e0, 0.1165149050377156668055896e0, 0.1103994269264874144398788e0, 0.1047388160423518894772002e0, 0.9950071130235648759030670e-1, 0.9465301854781620910441970e-1, 0.9016454652735125189272609e-1, 0.8600546667768981700419079e-1, 0.8214762533231104047151097e-1, 0.7856473513008974607178765e-1, 0.7523246995193424459351750e-1, 0.7212848493340500348466924e-1, 0.6923238018945846374255513e-1, 0.6652562400245432725286132e-1, 0.6399144848312167544450556e-1, 0.6161472819590847810012464e-1, 0.5938184999317344054777048e-1, 0.5728058034957269600588669e-1, 0.5529993483145627029203620e-1, 0.5343005296426139233134751e-1, 0.5166208065197234887486323e-1, 0.4998806142885727821214551e-1, 0.4840083715410895783485349e-1, 0.4689395826338997495993764e-1, 0.4546160333748704598916335e-1, 0.4409850750954268216573793e-1, 0.4279989908392569899980027e-1, 0.4156144366035708515282858e-1, 0.4037919502845779134315796e-1, 0.3924955210570969222557380e-1, 0.3816922122416471946490538e-1, 0.3713518311895684989765586e-1, 0.3614466402785612590311943e-1, 0.3519511037069617482332004e-1, 0.3428416653694949866994660e-1, 0.3340965536664229903158673e-1, 0.3256956096272257612903376e-1, 0.3176201352112533673779090e-1, 0.3098527590780517228496903e-1, 0.3023773174995156695256252e-1, 0.2951787484170619418302355e-1, 0.2882429969333463230632146e-1, 0.2815569307740452259166926e-1, 0.2751082644654734935368337e-1, 0.2688854911528297388431485e-1, 0.2628778211358937241904422e-1, 0.2570751263279204975253415e-1, 0.2514678899527364475073049e-1, 0.2460471608876676259183765e-1, 0.2408045121385331090696902e-1, 0.2357320029997478838776359e-1, 0.2308221445094914570064896e-1, 0.2260678678585010840991674e-1, 0.2214624954526743636682309e-1, 0.2169997143654264861646818e-1, 0.2126735519465680897241377e-1, 0.2084783533811200664569883e-1, 0.2044087610146017752978434e-1, 0.2004596952814515567227767e-1, 0.1966263370908071277476715e-1, 
0.1929041115392591487587378e-1, 0.1892886728337045173071115e-1, 0.1857758903193275942486415e-1, 0.1823618355182474294515453e-1, 0.1790427700936730343669473e-1, 0.1758151346626646308038721e-1, 0.1726755383879409857500321e-1, 0.1696207492857163038741910e-1, 0.1666476851923932358834102e-1, 0.1637534053381661837450139e-1, 0.1609351024802744708797459e-1, 0.1581900955528515170398058e-1, 0.1555158227940989996039230e-1, 0.1529098353149220739767610e-1, 0.1503697910762349625920090e-1, 0.1478934492449222808347731e-1, 0.1454786649009525295887101e-1, 0.1431233840704145462214254e-1, 0.1408256390613103046576229e-1, 0.1385835440808103075999097e-1, 0.1363952911143803959964144e-1, 0.1342591460487383719630737e-1, 0.1321734450220107129175951e-1, 0.1301365909857474699723209e-1, 0.1281470504646293252049926e-1, 0.1262033505007755515762735e-1, 0.1243040757705449418533892e-1, 0.1224478658626222948827240e-1, 0.1206334127070085131071308e-1, 0.1188594581452897199141430e-1, 0.1171247916332562864755594e-1, 0.1154282480675818732553606e-1, 0.1137687057288605896976939e-1, 0.1121450843338417065773542e-1, 0.1105563431902001242285305e-1, 0.1090014794476407143162512e-1, 0.1074795264395590657657700e-1, 0.1059895521098731117021612e-1, 0.1045306575200023435008377e-1, 0.1031019754313063242129945e-1, 0.1017026689586042607609242e-1, 0.1003319302906845397201302e-1, 0.9898897947397924639729408e-2, 0.9767306325582547468180475e-2, 0.9638345398396424782187982e-2, 0.9511944855914052317394595e-2, 0.9388036743786533882143785e-2, 0.9266555368258485665416943e-2, 0.9147437205667194364984339e-2, 0.9030620816181499749829423e-2, 0.8916046761552686783940876e-2, 0.8803657526663477808232965e-2, 0.8693397444674087410976982e-2, 0.8585212625576311168220303e-2, 0.8479050887977828363904268e-2, 0.8374861693949366877024963e-2, 0.8272596086777159693185345e-2, 0.8172206631472266686907249e-2, 0.8073647357896888215194357e-2, 0.7976873706375800846399120e-2, 0.7881842475668539112571351e-2, 0.7788511773184966394916599e-2, 0.7696840967333456047851643e-2, 0.7606790641897071224649652e-2, 0.7518322552338916854888971e-2, 0.7431399583943265980411531e-2, 0.7345985711704159367477213e-2, 0.7262045961877964368036759e-2, 0.7179546375120877141317720e-2, 0.7098453971136580788416864e-2, 0.7018736714763248519923831e-2, 0.6940363483432822204243367e-2, 0.6863304035939017881037086e-2, 0.6787528982453825324020280e-2, 0.6713009755735391745310971e-2, 0.6639718583473122562606414e-2, 0.6567628461718606252976457e-2, 0.6496713129353586350126915e-2, 0.6426947043548671526978323e-2, 0.6358305356168803683625031e-2, 0.6290763891083702643557758e-2, 0.6224299122343582476647260e-2, 0.6158888153182396103862750e-2, 0.6094508695812718682782931e-2, 0.6031139051978132847456608e-2, 0.5968758094230636272231571e-2, 0.5907345247902159938278185e-2, 0.5846880473740769223255677e-2, 0.5787344251183524483318654e-2, 0.5728717562239307805652498e-2, 0.5670981875956182433959706e-2 ], dtype=np.float64)
			h2 = np.array([ 1.0e0, 0.9925156067854728234166954e0, 0.9702488370741846925024279e0, 0.9337524315196362275518164e0, 0.8839262840201373526840738e0, 0.8219864299617913128547470e0, 0.7494235719224071131328299e0, 0.6679529582323300874171809e0, 0.5794577764970237101503160e0, 0.4859284571458759498915146e0, 0.3894003915357024341225852e0, 0.2918925528622829754342991e0, 0.1953493712998886960185562e0, 0.1015879694206602794774387e0, 0.1225252788368832137977160e-1, -0.7122285309136537622082871e-1, -0.1476418787320535960282345e0, -0.2160639183435653507962620e0, -0.2758120010582235033784961e0, -0.3264713765759730440736642e0, -0.3678794411714423215955238e0, -0.4001081341403160736400280e0, -0.4234401367904400766628734e0, -0.4383403499032471637408907e0, -0.4454241863223889026399290e0, -0.4454241976960828728637340e0, -0.4391564671033144569589568e0, -0.4274880540708223266513326e0, -0.4113065890894513887520768e0, -0.3914928958756679769706131e0, -0.3688972859665251787412620e0, -0.3443199355303629399446828e0, -0.3184955306263949534807185e0, -0.2920821644962502188874669e0, -0.2656542962828890681640534e0, -0.2396994397177897912204020e0, -0.2146181451424491640939456e0, -0.1907267687784773058932939e0, -0.1682624875086995569546816e0, -0.1473900121018631148986771e0, -0.1282094722211392620560261e0, -0.1107649874577763082733483e0, -0.9505349453993480902150559e-1, -0.8103346641770551054241192e-1, -0.6863322916783106348475741e-1, -0.5775865327580743751389419e-1, -0.4830006328783957980109026e-1, -0.4013827136320013258889535e-1, -0.3314969401563551466825700e-1, -0.2721055620979549646261829e-1, -0.2220022256661865628545539e-1, -0.1800372189840480267502263e-1, -0.1451354925728548119815172e-1, -0.1163084007733763911929080e-1, -0.9266014956431594449373699e-2, -0.7338992385437093554928018e-2, -0.5779061516816548137194317e-2, -0.4524499030007499171731476e-2, -0.3522004336456824141111923e-2, -0.2726016661692386541868837e-2, -0.2097966669473552341459824e-2, -0.1605504811757694087682580e-2, -0.1221738898797218035679319e-2, -0.9245047462622340271825711e-3, -0.6956863110190540254524861e-3, -0.5205955169809141905659767e-3, -0.3874169656489197360292113e-3, -0.2867188376814953929994613e-3, -0.2110284027525126746959732e-3, -0.1544685271976339753833504e-3, -0.1124502587150317136058296e-3, -0.8141583451940456365639560e-4, -0.5862617398424354123250055e-4, -0.4198696356554642675724513e-4, -0.2990772192017133390000897e-4, -0.2118866502002593128272052e-4, -0.1493070967418717996705171e-4, -0.1046450930688891587354327e-4, -0.7294971485088477169986746e-5, -0.5058237141326785665552064e-5, -0.3488590416297032549927031e-5, -0.2393206427093938070506012e-5, -0.1633028318374209170743394e-5, -0.1108394815502115127316820e-5, -0.7483179321690142728739359e-6, -0.5025418723896900527555212e-6, -0.3357037469306895805115546e-6, -0.2230700306981556484079346e-6, -0.1474451577404705893471723e-6, -0.9694537142843821183145493e-7, -0.6340650817983165854183039e-7, -0.4125281597997292543454039e-7, -0.2669863608647444234432417e-7, -0.1718869397329539903528673e-7, -0.1100823095953252158935162e-7, -0.7013187829205346730804204e-8, -0.4444665113656971914920979e-8, -0.2802144497835918309456751e-8, -0.1757406038399392007880848e-8, -0.1096442676719878283524089e-8, -0.6805092493832370091384262e-9, -0.4201635819811978308984480e-9, -0.2580720549398903308510481e-9, -0.1576898051707325645824557e-9, -0.9585353270320148521118371e-10, -0.5796372027032496381736661e-10, -0.3486981951439767325186431e-10, -0.2086844614201629359434107e-10, -0.1242450483517188985330601e-10, 
-0.7358989436838238028175315e-11, -0.4336195837012716989509190e-11, -0.2541866144559293225048769e-11, -0.1482350707216456169596291e-11, -0.8600132295160969048704279e-12, -0.4963825648030345884941720e-12, -0.2850272799994640993351100e-12, -0.1628231410435433343915847e-12, -0.9253517530796568988711767e-13, -0.5231904387078439423734991e-13, -0.2942904274907536637035087e-13, -0.1646861209472934265701707e-13, -0.9168609972068950589419375e-14, -0.5078280768842531755862938e-14, -0.2798321959684086361623925e-14, -0.1534077985990025530178263e-14, -0.8366946223931157801875458e-15, -0.4540014839572489640421670e-15, -0.2450864324006565520585709e-15, -0.1316297011679965318337360e-15, -0.7033347094398993022030766e-16, -0.3738906588834781501200156e-16, -0.1977436055729519304364136e-16, -0.1040486355537857239908506e-16, -0.5446873085247993592442947e-17, -0.2836846572016980047452363e-17, -0.1469951297806504842876013e-17, -0.7577907726628295065637298e-18, -0.3886652327556223671914838e-18, -0.1983274447591697794634031e-18, -0.1006865346010664339728430e-18, -0.5085599093462560019056651e-19, -0.2555616473221360979839205e-19, -0.1277711291477349028381922e-19, -0.6355561617974547678564100e-20, -0.3145284379748115775839534e-20, -0.1548642984144385532194339e-20, -0.7586277364385535380007560e-21, -0.3697368508385495481212434e-21, -0.1792850002167444277197814e-21, -0.8649339487208141711410640e-22, -0.4151549880751819128657313e-22, -0.1982560526365887292005855e-22, -0.9419591402219956768405243e-23, -0.4452742857507067242031201e-23, -0.2094178149147388017585982e-23, -0.9799199383965174477667876e-24, -0.4562039303075778937781093e-24, -0.2113096807073358619927786e-24, -0.9738054125666016460529380e-25, -0.4464962955517461045769742e-25, -0.2036839830996770073279630e-25, -0.9244633325579509781433326e-26, -0.4174617922924968276183391e-26, -0.1875592296561359766067593e-26, -0.8384076547424474404764890e-27, -0.3728786627489159285725893e-27, -0.1649968834419055881014869e-27, -0.7264074023243377877657008e-28, -0.3181863066343386789136187e-28, -0.1386691329625598948075213e-28, -0.6012783734099460236172624e-29, -0.2593995437123362612886143e-29, -0.1113425178718492778355866e-29, -0.4755009983792073461050496e-30, -0.2020415749389589696795519e-30, -0.8541405110545145479519840e-31, -0.3592671419230207088768861e-31, -0.1503507555679300913224246e-31, -0.6260283436716785719346509e-32, -0.2593480377514370417261009e-32, -0.1068988029132498238513063e-32, -0.4383933266292682172809914e-33, -0.1788778436796033153181937e-33, -0.7261912176216306101089190e-34, -0.2933239704874698217172402e-34, -0.1178817380216022663848294e-34, -0.4713550938665925243747415e-35, -0.1875222736937308593811831e-35, -0.7422680608185535408905020e-36, -0.2923292133270549875473422e-36, -0.1145479868926911875642964e-36, -0.4465877102072613609496200e-37, -0.1732329082290364039482100e-37, -0.6685880402092324407358875e-38, -0.2567388790315000103954881e-38, -0.9809113395522088573556313e-39, -0.3728835208268407801110216e-39, -0.1410334685901388337197457e-39, -0.5307340860010760817486761e-40, -0.1987182729569070557023125e-40, -0.7402951192281463566289795e-41, -0.2743964271316156357722060e-41 ], dtype=np.float64)
			h3 = np.array([ -0.7522527780636750492641059e0, -0.7447490315497708463240858e0, -0.7224619689626252165385118e0, -0.6860552061846493969863268e0, -0.6366054955061156295204758e0, -0.5755603365344096850483262e0, -0.5046815829547811446478382e0, -0.4259777864640005624125117e0, -0.3416285184773921405216660e0, -0.2539042236274465364534081e0, -0.1650852727968867264939651e0, -0.7738379667939842709258988e-1, 0.7128394424195324853014844e-2, 0.8658293927736663174097951e-1, 0.1593668102410841966827594e0, 0.2241613263920280449352809e0, 0.2799673824845877680517527e0, 0.3261167006652041288605015e0, 0.3622695948610319801705815e0, 0.3884003473857446343896496e0, 0.4047718038942624860766923e0, 0.4119011753186058824533937e0, 0.4105192820995319949018743e0, 0.4015255845130582620257648e0, 0.3859413195031716183649201e0, 0.3648629230000597762360636e0, 0.3394176769351978836202936e0, 0.3107232057693364099667621e0, 0.2798520840662402744643034e0, 0.2478024303401173430156194e0, 0.2154749773684402246897790e0, 0.1836567467116494732079552e0, 0.1530111326375332319918793e0, 0.1240739307148443832620940e0, 0.9725463688468146271051371e-1, 0.7284219701173870412977577e-1, 0.5101430368585303674221369e-1, 0.3184931174142700893159512e-1, 0.1533986919450959655382290e-1, 0.1407426811309193306581366e-2, -0.1008311291608286074413380e-1, -0.1930922840812282398312132e-1, -0.2647758532035030682089135e-1, -0.3181217775839225922486926e-1, -0.3554404023046894464526427e-1, -0.3790265208183702749516685e-1, -0.3910905737306063850349279e-1, -0.3937064210715186736633504e-1, -0.3887744829978686271653342e-1, -0.3779986028416367095012508e-1, -0.3628747152011772566083547e-1, -0.3446892799961155950489723e-1, -0.3245254463375208029954651e-1, -0.3032750110251363953076864e-1, -0.2816544089164076874994184e-1, -0.2602231914851994604543481e-1, -0.2394036936359898584929537e-1, -0.2195008388641825247433045e-1, -0.2007212746338689903391700e-1, -0.1831912527214265469865516e-1, -0.1669728661861120572688442e-1, -0.1520784216814043766189564e-1, -0.1384828617477219420839203e-1, -0.1261342573197174928239427e-1, -0.1149624682246302216128454e-1, -0.1048861222035593117850278e-1, -0.9581809474549548274726564e-2, -0.8766968673914992518412266e-2, -0.8035369845963356580758239e-2, -0.7378659024311709843220737e-2, -0.6788990545369120409265684e-2, -0.6259111260511144290061333e-2, -0.5782400284632080908741386e-2, -0.5352875804464578036313191e-2, -0.4965178455311671875710459e-2, -0.4614538919616485527188256e-2, -0.4296735750484013517713710e-2, -0.4008047998562558651176877e-2, -0.3745206023826233664801882e-2, -0.3505342894046476979204381e-2, -0.3285947990022833548498951e-2, -0.3084823830238963251792028e-2, -0.2900046668982056656687612e-2, -0.2729931086807768375811907e-2, -0.2572998556853316466871207e-2, -0.2427949813646523181953355e-2, -0.2293640754330915732383722e-2, -0.2169061550185197106672818e-2, -0.2053318626361484792588433e-2, -0.1945619169898585865047079e-2, -0.1845257842557062274985012e-2, -0.1751605400071271234291063e-2, -0.1664098948801722796139379e-2, -0.1582233601544145935191528e-2, -0.1505555324435757496173641e-2, -0.1433654795260900326865144e-2, -0.1366162119305863305428748e-2, -0.1302742271937752484738341e-2, -0.1243091157235637593921277e-2, -0.1186932189400779713774489e-2, -0.1134013318531012910469058e-2, -0.1084104434925714219487149e-2, -0.1036995096671516116549004e-2, -0.9924925341187004927105684e-3, -0.9504198922493585737226438e-3, -0.9106146780909950790852145e-3, -0.8729273854455090856168734e-3, -0.8372202734577421999252958e-3, 
-0.8033662790881490171689315e-3, -0.7712480465049772662387464e-3, -0.7407570588761368843162862e-3, -0.7117928601052224681383490e-3, -0.6842623557902678206459998e-3, -0.6580791841453911032352388e-3, -0.6331631488616646148452257e-3, -0.6094397069328185150662371e-3, -0.5868395053652243037188589e-3, -0.5652979614557357816469240e-3, -0.5447548819764808379193485e-3, -0.5251541171699206704315699e-3, -0.5064432459446979814905582e-3, -0.4885732890847829717111949e-3, -0.4714984476509869340551945e-3, -0.4551758640732029088500233e-3, -0.4395654037105695480727411e-3, -0.4246294549008608587718018e-3, -0.4103327457346023732872108e-3, -0.3966421759777806984761265e-3, -0.3835266627330082944382909e-3, -0.3709569985755748701109446e-3, -0.3589057210304776810509891e-3, -0.3473469923714317173865229e-3, -0.3362564888248703524021643e-3, -0.3256112983526542094353014e-3, -0.3153898262679901745636708e-3, -0.3055717080111181576022737e-3, -0.2961377284756872027881530e-3, -0.2870697473343167903391904e-3, -0.2783506298634098374691914e-3, -0.2699641828135376810549337e-3, -0.2618950949132550960609061e-3, -0.2541288816315519571599965e-3, -0.2466518338577700959600751e-3, -0.2394509701881161861915701e-3, -0.2325139925352426415436720e-3, -0.2258292448020649292499711e-3, -0.2193856743833149971866920e-3, -0.2131727962785441468085678e-3, -0.2071806596186039850962257e-3, -0.2013998164242456949121338e-3, -0.1958212924305587453675523e-3, -0.1904365598246742268269672e-3, -0.1852375117566223449189077e-3, -0.1802164384945805472907308e-3, -0.1753660051060874920343825e-3, -0.1706792305562262391906103e-3, -0.1661494681223850440721571e-3, -0.1617703870330642541013594e-3, -0.1575359552453832883093564e-3, -0.1534404232825155716084045e-3, -0.1494783090582982775062280e-3, -0.1456443836217787948749789e-3, -0.1419336577595168918308957e-3, -0.1383413693981019940302776e-3, -0.1348629717536061671270311e-3, -0.1314941221786090162018978e-3, -0.1282306716610312631043834e-3, -0.1250686549323268134999138e-3, -0.1220042811456336331711188e-3, -0.1190339250872943430156140e-3, -0.1161541188877486230913414e-3, -0.1133615442001899065679247e-3, -0.1106530248175853517419676e-3, -0.1080255197006960803598953e-3, -0.1054761163916181391183883e-3, -0.1030020247891063146774180e-3, -0.1006005712635544029343839e-3, -0.9826919309099737327798045e-4, -0.9600543318688272806740460e-4, -0.9380693512163903486436983e-4, -0.9167143840125715403094134e-4, -0.8959677399720145006388879e-4, -0.8758086011099098595144745e-4, -0.8562169815974051700802759e-4, -0.8371736896983366064768422e-4, -0.8186602916672109476829247e-4, -0.8006590774959976520266573e-4, -0.7831530284043921555152064e-4, -0.7661257859748228262605498e-4, -0.7495616228396319961002592e-4, -0.7334454148335998246272097e-4, -0.7177626145303295708079228e-4, -0.7024992260860025649833230e-4, -0.6876417813186671874001603e-4, -0.6731773169555726054493046e-4, -0.6590933529851172806481185e-4, -0.6453778720537748581672358e-4, -0.6320192998519050404738797e-4, -0.6190064864356719221383246e-4, -0.6063286884353932444622322e-4, -0.5939755521035460581086281e-4, -0.5819370971583712468698264e-4 ], dtype=np.float64)

			# the Voigt function is symmetric in v, so fold negative v onto |v|
			if len(np.argwhere(v<0.0)) != 0: v[np.argwhere(v<0.0)] *= -1.0
			# if a is exactly zero go to 3 for exact expression
			if (a == 0.0):
				return np.exp(-(v*v))
			# Scale up v for ease with lookup tables
			v0 = v*10.0
			n=np.array(v0,dtype=np.int_)
			voigt_prof = np.zeros(np.size(v))
			nl=np.argwhere(n<100)
			nh=np.argwhere(n>=100)
			if len(nh) != 0:
				r=1.0/v[nh]**2
				voigt_prof[nh] = a*r*oneonsqrtpi*(1.0 + r*(1.5 + r*(3.75 + r*(13.125 + 59.0625*r))) - a*a*r*(1.0 + r*(5.0 +26.25*r)))
			if len(nl) != 0:
				v0[nl] = 2.0*v[nl]*10.0
				p=np.int_(v0[nl])
				p1=p+1
				p2=p+2
				x=0.5*np.int_(v0[nl])
				y=x+0.5
				z=x+1.0
				v1 = v0[nl] * 0.5
				voigt_prof[nl] = 2.0*((v1-y)*(v1-z)*(h0[p]+a*(h1[p]+a*(h2[p]+a*h3[p]))) - (v1-x)*(v1-z)*2.0*(h0[p1] + a*(h1[p1]+a*(h2[p1]+a*h3[p1]))) + (v1-x)*(v1-y)*(h0[p2] + a*(h1[p2]+a*(h2[p2]+a*h3[p2]))))
			del nl, nh
			return voigt_prof
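Since voigtking is defined as a nested helper, it can only be called from the enclosing scope; a sketch of such a call (v in Doppler units, a the damping parameter):

import numpy as np

v = np.linspace(0.0, 5.0, 11)
profile = voigtking(v, 0.01)  # ~exp(-v**2) core plus damping wings
print(profile[0])             # close to 1.0 at line center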
Example #7
def rmNanAndOutliers():
    """Plot without NAN and outliers from selected pen.
    """
    if len(ds.EpmDatasetAnalysisPens.SelectedPens) < 1:
        sr.msgBox('EPM Python Plugin - Demo Tools', 'Please select a single pen before applying this function!', 'Warning')
        return 0
    sd = 6
    epmData = ds.EpmDatasetAnalysisPens.SelectedPens[0].values
    y = epmData['Value']
    t = epmData['Timestamp']
    nanPos = np.argwhere(np.isnan(y))
    y = np.delete(y,nanPos)
    t = np.delete(t,nanPos)
    s3 = np.floor(sd * np.sqrt(y.std()))
    smin = y.mean() - s3
    smax = y.mean() + s3
    outPos = np.argwhere(y<smin)
    y = np.delete(y,outPos)
    t = np.delete(t,outPos)
    outPos = np.argwhere(y>smax)
    y = np.delete(y,outPos)
    t = np.delete(t,outPos)
    res = vec2epm(t,y)
    penName = ds.EpmDatasetAnalysisPens.SelectedPens[0].name + '_NoOutliers'
    sr.plot(penName, res)
    return res
Example #8
def tg2csv_nodiff(npyfile,outname):
    data=np.load(npyfile,allow_pickle=True)
    data=data[()]  # unwrap the 0-d object array that np.save wraps a dict in

    mykeys=['00065','00060','00055','00053','00046']#,'00129']
    cons=['M2  ','N2  ','S2  ','K1  ','O1  ','M4  ']
    consout=['M2 O','M2 M','M2 D','N2 O','N2 M','N2 D','S2 O','S2 M','S2 D','K1 O','K1 M','K1 D','O1 O','O1 M','O1 D','M4 O','M4 M','M4 D']
    
    tidecon=np.empty((5,18))
    for i in range(5):
        for j in range(6):
            idx=np.argwhere(data[mykeys[i]]['df'].index==cons[j])
            tidecon[i,3*j]=data[mykeys[i]]['wlevc'][idx,0]
            tidecon[i,(3*j)+1]=data[mykeys[i]]['outc'][idx,0]
            tidecon[i,(3*j)+2]=data[mykeys[i]]['df'].lookup([cons[j]],['Amp diff'])

    df=pd.DataFrame(tidecon,columns=consout,index=mykeys)
    df.to_csv(outname+'_amp.csv')


    tidecon=np.empty((5,18))
    for i in range(5):
        for j in range(6):
            idx=np.argwhere(data[mykeys[i]]['df'].index==cons[j])
            tidecon[i,3*j]=data[mykeys[i]]['wlevc'][idx,1]
            tidecon[i,(3*j)+1]=data[mykeys[i]]['outc'][idx,1]
            tidecon[i,(3*j)+2]=data[mykeys[i]]['df'].lookup([cons[j]],['Phase diff'])

    df=pd.DataFrame(tidecon,columns=consout,index=mykeys)
    df.to_csv(outname+'_phase.csv')
Example #9
def to_js(fname,low=-3,high=3,template_file='template.js'):
    name,suffix=fname.split('.')
    im=Image.open(fname).convert('1')
    mat=np.array(get_mat(im))
    d1=np.argwhere(np.diff(mat.all(axis=1)))
    d1min=np.min(d1)
    d1max=np.max(d1)
    d2=np.argwhere(np.diff(mat.all(axis=0)))
    d2min=np.min(d2)
    d2max=np.max(d2)
    d1_scale=Scale([d1min,d1max],[low,high])
    d2_scale=Scale([d2min,d2max],[high,low])
    l=[]
    for i in range(mat.shape[0]):
        for j in range(mat.shape[1]):
            if mat[(i,j)]==0:
                l.append([d1_scale(i),d2_scale(j)])
    with open(template_file) as f:
        s=f.read()
        template=jinja2.Template(s)
    ss=template.render(name=name+'Graph',point_list=l)
    with open(name+'.js','w') as f:
        f.write(ss)
    

        
        
#xScale=scale([0,206],[-5,5])
#yScale=scale([0,195],[-5,5])
Example #10
def histClim( imData, cutoff = 0.01, bins_ = 512 ):
    '''Compute a display range in the style of a confidence interval, from a
    histogram (i.e. ignore the 'cutoff' proportion of lowest/highest-value pixels)'''
    
    if( cutoff <= 0.0 ):
        return imData.min(), imData.max()
    # compute image histogram
    hh, bins_ = imHist(imData, bins_)
    hh = hh.astype( 'float' )

    # number of pixels
    Npx = np.sum(hh)
    hh_csum = np.cumsum( hh )
    
    # Find indices where hh_csum is < and > Npx*cutoff
    try:
        i_forward = np.argwhere( hh_csum < Npx*(1.0 - cutoff) )[-1][0]
        i_backward = np.argwhere( hh_csum > Npx*cutoff )[0][0]
    except IndexError:
        print( "histClim failed, returning confidence interval instead" )
        from scipy.special import erfinv
        
        sigma = np.sqrt(2) * erfinv( 1.0 - cutoff )
        return ciClim( imData, sigma )
    
    clim =  np.array( [bins_[i_backward], bins_[i_forward]] )
    if clim[0] > clim[1]:
        clim = np.array( [clim[1], clim[0]] )
    return clim
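A usage sketch; imHist is not shown in this example, so the stand-in below assumes it behaves like np.histogram (counts plus bin edges):

import numpy as np

def imHist(imData, bins_):
    # illustrative stand-in for the project's imHist
    return np.histogram(imData.ravel(), bins=bins_)

im = np.random.default_rng(0).normal(size=(256, 256))
lo, hi = histClim(im, cutoff=0.01)
print(lo, hi)  # roughly the 1st and 99th percentiles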
Example #11
def dfsi(bitmap):
    #startpixel = [bitmapnonzero.item(0, 0), bitmapnonzero.item(0, 1)]

    startrow = 0
    while True:
        scanline = bitmap[startrow, :]
        if np.any(scanline):
            startcol = np.argwhere(scanline)[0, 0]
            break
        startrow += 1

    startpixel = [startrow, startcol]
    #print startpixel
    
    stack = [startpixel]
    objpix = [startpixel]

    bound = False
    while stack:
        row, col = stack.pop()
        if row == 0 or col == 0: bound = True
        edges = np.argwhere(bitmap[row-1:row+2, col-1:col+2]) - [1, 1]

        for edge in edges:
            nextpixel = [row+edge[0], col+edge[1]]
            if nextpixel not in objpix:
                stack += [nextpixel]
                objpix += [nextpixel]

    for pix in objpix:
        bitmap[pix[0], pix[1]] = False

    if bound: return [], bitmap
    else: return objpix, bitmap
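A sketch of popping one connected component out of a boolean bitmap (components touching row 0 or column 0 are discarded as boundary objects):

import numpy as np

bitmap = np.zeros((6, 6), dtype=bool)
bitmap[2:4, 2:4] = True  # one interior blob
objpix, bitmap = dfsi(bitmap)
print(len(objpix))  # 4 -- the blob's pixels, now erased from bitmap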
Example #12
def gain_ratio(examples, attribute):
    "Return the gain ratio for one attribute."
    ent = h(examples)

    remainder = 0
    values = examples[:, attribute]
    for v in value_attrs[attribute]:
        exs_idx = np.argwhere(values == v).flatten()
        count_v = len(exs_idx)
        exs = examples[exs_idx]
        if count_v != 0:
            remainder += float(count_v)/len(examples) * h(exs)
    ig = ent - remainder

    iv = 0
    for v in value_attrs[attribute]:
        exs_idx = np.argwhere(values == v).flatten()
        count_v = len(exs_idx)
        exs = examples[exs_idx]
        if count_v != 0:
            iv -= float(count_v)/len(examples) * np.log2(float(count_v)/len(examples))
    if iv > 1e-6:
        return ig/iv
    else:
        return ig/1e-6
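h and value_attrs come from the surrounding module; a hedged sketch with illustrative stand-ins (h as the entropy of the class labels in the last column, value_attrs mapping each attribute index to its possible values):

import numpy as np

def h(examples):
    # illustrative stand-in: Shannon entropy of the last column
    _, counts = np.unique(examples[:, -1], return_counts=True)
    p = counts / counts.sum()
    return -(p * np.log2(p)).sum()

value_attrs = {0: [0, 1]}
examples = np.array([[0, 0], [0, 0], [1, 1], [1, 0]])
print(gain_ratio(examples, 0))  # ~0.311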
Example #13
def findValidFFTWDim( inputDims ):
    """
    Finds a valid dimension for which FFTW can optimize its calculations. The 
    return is a shape which is forced to be square, as this gives uniform pixel
    size in x-y in Fourier space.
    
    If you want a minimum padding size, call as findValidFFTWDim( np.array(image.shape) + 128 )
    or similar.
    """
    dim = np.max( np.round( inputDims ) )
    maxPow2 = int( np.ceil( math.log( dim, 2 ) ) )
    maxPow3 = int( np.ceil( math.log( dim, 3 ) ) )
    maxPow5 = int( np.ceil( math.log( dim, 5 ) ) )
    maxPow7 = int( np.ceil( math.log( dim, 7 ) ) )
    
    dimList = np.zeros( [(maxPow2+1)*(maxPow3+1)*(maxPow5+1)*(maxPow7+1)] )
    count = 0
    for I in np.arange(0,maxPow7+1):
        for J in np.arange(0,maxPow5+1):
            for K in np.arange(0,maxPow3+1):
                for L in np.arange(0,maxPow2+1):
                    dimList[count] = 2**L * 3**K * 5**J * 7**I
                    count += 1
    dimList = np.sort( np.unique( dimList ) )
    dimList = dimList[ np.argwhere(dimList < 2*dim)].squeeze()
    dimList = dimList.astype('int64')
    # Throw out odd image shapes, this just causes more problems with many 
    # functions
    dimList = dimList[ np.mod(dimList,2)==0 ]
    
    # Find first dim that equals or exceeds dim
    nextValidDim =  dimList[np.argwhere( dimList >= dim)[0,0]]
    return np.array( [nextValidDim, nextValidDim] )    
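A usage sketch (assuming numpy as np and math are imported): the function returns the next even 2**a * 3**b * 5**c * 7**d size at or above the requested dimension.

print(findValidFFTWDim([509]))        # [512 512]
print(findValidFFTWDim([1025, 900]))  # [1050 1050], since 1050 = 2 * 3 * 5**2 * 7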
Example #14
    def addStatus(self, test, err=None, error=False, failure=False):
        """Negotiate global status immediately after a test has been run.
        Called on all processor with the local test status (i.e. error, failure
        or success). 'err' is a tuple of values as returned by sys.exc_info().
        """
        if error and failure:
            raise RuntimeError('Parallel unittest can\'t handle simultaneous' \
                               + ' errors and failures within a single test.')

        self.comm.all_gather(np.array([error]), self.last_errors)
        if self.last_errors.any():
            all_texts = []
            for rank in np.argwhere(self.last_errors).ravel():
                if rank == self.comm.rank:
                    assert self.last_errors[self.comm.rank]
                    text = self._exc_info_to_string(err, test)
                else:
                    text = None
                text = broadcast_string(text, root=rank, comm=self.comm)
                all_texts.append((rank,text))
            self.errors.append((test, all_texts))

        self.comm.all_gather(np.array([failure]), self.last_failed)
        if self.last_failed.any():
            all_texts = []
            for rank in np.argwhere(self.last_failed).ravel():
                if rank == self.comm.rank:
                    assert self.last_failed[self.comm.rank]
                    text = self._exc_info_to_string(err, test)
                else:
                    text = None
                text = broadcast_string(text, root=rank, comm=self.comm)
                all_texts.append((rank,text))
            self.failures.append((test, all_texts))
Example #15
def testing(test_model, img_num_per_cam_person, img_data, test_ind, test_size):
    rank = numpy.zeros((test_size,))
    for j in test_ind:
        target_inds = get_abs_ind_one(img_num_per_cam_person, j, 0)
        cand_inds = get_abs_ind_per_cam_person(
            img_num_per_cam_person,
            test_ind,
            numpy.asarray([1], dtype='int32')
        )

        [inds_a, inds_b] = numpy.meshgrid(target_inds, cand_inds)
        scores = test_model(img_data[inds_a.flatten(), :, :, :], img_data[inds_b.flatten(), :, :, :])[:, 0]

        test_num = img_num_per_cam_person[test_ind, 1] * img_num_per_cam_person[j, 0]
        test_cum_num = numpy.cumsum(test_num)
        assert(scores.size == test_cum_num[-1])
        score_per_person = numpy.array_split(scores, test_cum_num[:-1])
        score_sum_per_person = []
        for row in score_per_person:
            score_sum_per_person.append(numpy.sum(row))
        # score_sum_per_person = numpy.zeros((test_size,), dtype='float32')
        # cnt = 0
        # for p in test_ind:
        #     cand_inds = get_abs_ind_one(img_num_per_cam_person, p, 1)
        #     [inds_a, inds_b] = numpy.meshgrid(target_inds, cand_inds)
        #     scores = test_model(img_data[inds_a.flatten(), :, :, :], img_data[inds_b.flatten(), :, :, :])[:, 0]
        #     score_sum_per_person[cnt] = (numpy.sum(scores))
        #     cnt += 1
        score_sum_per_person = numpy.asarray(score_sum_per_person, dtype='float32')
        k = numpy.argwhere(test_ind == j)
        r = (test_size - 1) - numpy.argwhere(numpy.argsort(score_sum_per_person) == k[0])[0]
        rank[r] += 1

    rank = numpy.cumsum(rank)
    return rank
Example #16
def initialize_seed(CAC, from_gt=True):
    image = CAC.image_obj.hsi_image
    if from_gt:
        print('Seed from ground truth...')
        inside_mask_seed = CAC.ground_truth_obj
        outside_mask_seed = CAC.ground_truth_obj
    else:
        center = CAC.mask_obj.center
        radius_point = CAC.mask_obj.radius_point
        radius = np.linalg.norm(np.array(radius_point) - np.array(center))

        inside_seed_omega = [center[0] + radius * 0.2, center[1]]
        outside_seed_omega = [center[0] + radius * 1.8, center[1]]

        inside_mask_seed = MaskClass()
        outside_mask_seed = MaskClass()

        inside_mask_seed.from_points_and_image(center, inside_seed_omega, image)
        outside_mask_seed.from_points_and_image(center, outside_seed_omega, image)

    inside_seed = inside_mask_seed.mask
    outside_seed = 255. - outside_mask_seed.mask
    # inside_mask_seed.plot_image()
    # CAC.mask_obj.plot_image()
    # utils.printNpArray(outside_seed)
    inside_coordinates = np.argwhere(inside_seed == 255.)
    outside_coordinates = np.argwhere(outside_seed == 255.)

    omega_in_mean = mean_color_in_region(inside_coordinates, image)
    omega_out_mean = mean_color_in_region(outside_coordinates, image)
    return omega_in_mean, omega_out_mean
Example #17
    def corners(self, bandNames=None):
        "Return the corners of the tilted rectangle of valid image data as (x, y) pixel coordinates."
        alpha = self.mask(bandNames)
        alphaT = numpy.transpose(alpha)
        ysize, xsize = alpha.shape

        output = []
        for i in range(ysize):
            if numpy.count_nonzero(alpha[i]) > 0:
                break
        output.append((numpy.argwhere(alpha[i]).mean(), i))

        for i in range(xsize):
            if numpy.count_nonzero(alphaT[i]) > 0:
                break
        output.append((i, numpy.argwhere(alphaT[i]).mean()))

        for i in range(ysize - 1, 0, -1):
            if numpy.count_nonzero(alpha[i]) > 0:
                break
        output.append((numpy.argwhere(alpha[i]).mean(), i))

        for i in range(xsize - 1, 0, -1):
            if numpy.count_nonzero(alphaT[i]) > 0:
                break
        output.append((i, numpy.argwhere(alphaT[i]).mean()))

        return output
Example #18
def fwhm(array):
    """
    Computes the full width half maximum of a 1-d array
    Returns the indices of the array elements left and right closest to the
    maximum that cross below half the maximum value
    """

    assert isinstance(array, np.ndarray)

    # Find the maximum in the interior of the array
    fwhm = 0.5 * array[1:-1].max()
    max_idx = array[1:-1].argmax() + 1
    # Divide the intervall in halfs at the peak and find the index of the
    # value in the left half of the intervall before it increases over max/2

    # The FWHM is between the indices closest to the maximum, whose values
    # are below 0.5 times the max
    try:
        left_idx = np.argwhere(array[1: max_idx] < fwhm)[-1] + 1
    except IndexError:
        # This can occur if there is no value in the array smaller than
        # 0.5*max.
        # In this case, return the left limit of the array as the FWHM.
        left_idx = 0
    try:
        right_idx = np.argwhere(array[max_idx:-1] < fwhm)[0] + max_idx
    except IndexError:
        # Subtract one because the indices of an array with size n run
        # from 0 to n-1
        right_idx = array.size - 1

    return np.array([int(left_idx), int(right_idx)])
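For example, on a sampled Gaussian (the returned indices bracket the half-maximum crossings, so the width is only as fine as the grid):

import numpy as np

x = np.linspace(-5.0, 5.0, 101)
y = np.exp(-x**2 / 2.0)  # Gaussian with sigma = 1
left, right = fwhm(y)
print(x[right] - x[left])  # ~2.4 on this grid; the true FWHM is 2*sqrt(2*ln 2) ~ 2.355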
Example #19
def bqp_cluster(bqp, init):
	'''
	Binary quadratic programming via iterative clustering.
	:return: the final assignment vector with entries in {1, -1}
	'''
	n = bqp.size()
	maxiter = 10
	q = 0.5 * bqp.q - bqp.col_sum(range(n))
	diag = bqp.diag()
	lamb = 320
	lamb_diag = lamb - diag

	# initialize cluster
	res = init
	cluster = np.argwhere(res >= 0)[:, 0]

	# find final cluster
	for i in range(maxiter):
		dist_to_cluster = bqp.col_sum(cluster) + 0.5 * q
		dist_to_cluster[cluster] += lamb_diag[cluster]
		# print i, dist_to_cluster
		med = np.median(dist_to_cluster)
		new_res = np.where(dist_to_cluster > med, 1, -1)
		# print np.sum(res==1), np.sum(new_res==1),np.sum(res != new_res)
		if np.sum(res != new_res) < 0.001*n:
			break
		cluster = np.argwhere(new_res >= 0)[:, 0]
		res = new_res
	res = new_res
	return res
Example #20
    def psf(self,emin,emax,cthmin,cthmax):
        """Return energy- and livetime-weighted PSF density vector as
        a function of angular offset for a bin in energy and
        inclination angle."""
        
        logemin = np.log10(emin)
        logemax = np.log10(emax)

        ilo = np.argwhere(self._energy > emin)[0,0]
        ihi = np.argwhere(self._energy < emax)[-1,0]+1
        
        jlo = np.argwhere(self._ctheta_axis.center > cthmin)[0,0]
        jhi = np.argwhere(self._ctheta_axis.center < cthmax)[-1,0] +1
        
        weights = (self._energy[ilo:ihi,np.newaxis]*
                   self._exp[ilo:ihi,jlo:jhi]*
                   self._wfn(self._energy[ilo:ihi,np.newaxis]))
        
        wsum = np.sum(weights)
        psf = np.apply_over_axes(np.sum,
                                 self._psf[:,ilo:ihi,jlo:jhi]*
                                 weights[np.newaxis,...],
                                 [1,2])
        psf = np.squeeze(psf)                   
        psf *= (1./wsum)
        return self._dtheta, psf
Example #21
 def test_binary_classification_should_output_row_vector_of_0_1_only(self):
     model = Model([np.random.rand(4, 6), np.random.rand(1, 5)])
     prediction = model.predict_binary_classification(np.random.rand(10, 5))
     self.assertEqual(prediction.shape, (10, 1))
     zeros = len(np.argwhere(prediction == 0))
     ones = len(np.argwhere(prediction == 1))
     self.assertEqual(zeros + ones, 10)
Example #22
	def crop(self, img, edges, orig):
		'''
		Crops an image so that it is a rectangle
		@img: the image to crop
		@return: the cropped image
		'''
		#find the extents in the y direction
		maxX = orig[0,...].max()
		maxY = orig[1,...].max()
		x1 = 0 - edges[0,0]
		x2 = maxX - edges[1,0]
		y1 = 0 - edges[0,1]
		y2 = -(maxY - edges[1,1])
		
		#slice the image in y direction
		img = img[y1+1:img.shape[0]-y2-1]
		
		#find the extents in the x direction
		cropTop = numpy.argwhere(img[0,:,3]!=0)
		cropBot = numpy.argwhere(img[-1,:,3]!=0)
		minT = cropTop.min()
		maxT = cropTop.max()
		minB = cropBot.min()
		maxB = cropBot.max()
		
		#grab the correct extents
		xMin = max(minT,minB)
		xMax = min(maxT,maxB)
		
		#slice the image in x direction
		img = img[:,xMin:xMax]
		
		return img
Example #23
    def Simulate(self, T):
        self.T = T
        x = np.zeros((self.N, self.T), dtype=int)
        v = np.zeros((self.N, self.T), dtype=int)
        Q = np.zeros((self.N, self.T), dtype=int)
        
        x[:,0] = self.x0
        v[:,0] = self.v0
        
        for t in range(1,self.T):
            X  = np.argwhere(x[:,t-1]==1)[:,0]
            dX = (np.roll(X, -1)-X)%self.N
            V  = v[X, t-1] 
            
            #Rule 1: Accelerate if possible
            V += np.logical_and(V<np.ones(len(V))*self.vmax,V<(dX+1))
            #Rule 2: Slow down if needed
            ind = np.argwhere(dX<=V)[:,0]
            V[ind] = dX[ind]-1
            #Rule 3: Slow down with probability p (hard-coded here as 0.5)
            V -= np.logical_and(np.random.rand(len(V))<0.5, 0<V)
            #Rule 4: Take next time step
            x[(X+V)%self.N, t] = 1
            v[(X+V)%self.N, t] = V
            
            #Obtain the flow at each position
            for i, xi in enumerate(X):
                Q[xi:(xi+V[i])%self.N,t] +=1

        self.X = x
        self.V = v
        self.Q = Q
Example #24
def np_combine_rare(Xtrain, Xtest, col_list = list(), rare_line=1):
    if Xtrain.shape[1] != Xtest.shape[1]:
        print('Xtrain and Xtest shapes do not match.')
        return

    if not col_list :
        col_list = range(Xtrain.shape[1])
        check_int = True
    else:
        check_int = False

    n_train = Xtrain.shape[0]
    for col in col_list:
        col_data_train = Xtrain[:, col]
        col_data_test = Xtest[:, col]
        col_data = np.hstack((col_data_train, col_data_test))
        # print col_data[0]
        if issubclass(col_data.dtype.type, np.integer) or (not check_int):
            le = preprocessing.LabelEncoder()
            le.fit(col_data)
            col_data = le.transform(col_data)
            max_label = np.amax(col_data)
            counts = np.bincount(col_data)
            rare_cats = np.argwhere(counts <= rare_line)
            rare_cats = rare_cats.reshape(rare_cats.shape[0])
            rare_positions = [np.argwhere(col_data == rare_cat)[0,0] for rare_cat in rare_cats]
            # print len(rare_positions)
            col_data[rare_positions] = max_label+1
            Xtrain[:, col] = col_data[:n_train]
            Xtest[:, col] = col_data[n_train:]
        else:
            print('col:{0:d} not integer'.format(col))
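A sketch of merging rare categories in place, assuming sklearn's preprocessing module is imported as in the original; with the default rare_line=1, every category seen only once across train and test is folded into a single new label:

import numpy as np
from sklearn import preprocessing

Xtrain = np.array([[1], [1], [2], [7]])
Xtest = np.array([[2], [9]])
np_combine_rare(Xtrain, Xtest)
print(Xtrain.ravel(), Xtest.ravel())  # [0 0 1 4] [1 4] -- singletons 7 and 9 share label 4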
Example #25
 def integralsChanged(self):
     self.integralLimits = []
     for row in range(self.integralTable.rowCount()-1):
         if self.integralTable.item(row, 0) \
                 and self.integralTable.item(row, 1):
             try:
                 limit_1 = float(self.integralTable.item(row, 0).text())
                 limit_2 = float(self.integralTable.item(row, 1).text())
                 self.integralLimits.append(
                     [row, min([limit_1, limit_2]), max([limit_1, limit_2])])
             except ValueError:
                 pass
     if not self.integralLimits or not self.qvals:
         return
     self.integralPlotWindow.clearCurves()
     for limit in self.integralLimits:
         yint = []
         xint = []
         for i, qval in enumerate(self.qvals):
             xint.append(qval)
             if self.integralMethodIntegral.isChecked():
                 yint.append(self.yvals[i][
                     np.argwhere(self.xvals[i]>=limit[1])[0][0]: \
                     np.argwhere(self.xvals[i]<=limit[2])[-1][0]].sum())
             else:
                 yint.append(self.yvals[i][
                     np.argwhere(self.xvals[i]>=limit[1])[0][0]: \
                     np.argwhere(self.xvals[i]<=limit[2])[-1][0]].mean())
         # build and sort the curve once all q values have been collected
         int2plot = np.vstack([xint, yint]).T
         int2plot = int2plot[int2plot[:,0].argsort()]
         self.integralPlotWindow.addCurve(int2plot[:,0], int2plot[:,1], 
             legend='Region %d' % (limit[0]+1), ylabel=' ',
             symbol='o')
     return
Example #26
 def Initialize(self, rho=0.1, method='Flow'):
     self.Initialization = method
     
     #Create 3 different initializations by setting initial position and velocity:
         #Jam:    All cars are behind each other with velocity 0 (they stand for example before a traffic light)
         #Flow:   The cars are equally spaced on the available road with the maximum velocity
         #Random: The cars are randomly positioned and have a random velocity between 0-vmax
     
     #Initial condition positions:
     self.x0 = np.zeros((self.N), dtype=int)
     Xbase = np.zeros(int(1/rho))
     Xbase[0] = 1
     self.x0 = np.tile(Xbase, self.N // len(Xbase))
     if method=='Random':
         np.random.shuffle(self.x0)   
     elif method=='Jam':
         self.x0 = np.sort(self.x0)
         self.x0 = np.roll(self.x0, int(self.N/2))
     
     #Initial condition velocities
     self.v0 = np.zeros((self.N), dtype=int)
     if method=='Flow':
         self.v0[np.argwhere(self.x0)] = np.ones(np.sum(self.x0))*self.vmax
     elif method=='Random':
         self.v0[np.argwhere(self.x0)] = np.random.randint(0, self.vmax, (np.sum(self.x0), 1))
Example #27
def multivariate_initialize_seed(CAC, from_gt=True):
    image = CAC.image_obj.image
    if from_gt:
        print('Seed from ground truth...')
        inside_mask_seed = CAC.ground_truth_obj
        outside_mask_seed = CAC.ground_truth_obj
    else:
        center = CAC.mask_obj.center
        radius_point = CAC.mask_obj.radius_point
        radius = np.linalg.norm(np.array(radius_point) - np.array(center))
        print('CENTER:', center)
        print('RADIUS POINT:', radius_point)
        print('RADIUS:', radius)

        inside_seed_omega = [center[0] + radius * 0.2, center[1]]
        outside_seed_omega = [center[0] + radius * 1.8, center[1]]

        inside_mask_seed = MaskClass()
        outside_mask_seed = MaskClass()

        inside_mask_seed.from_points_and_image(center, inside_seed_omega, image)
        outside_mask_seed.from_points_and_image(center, outside_seed_omega, image)

    inside_seed = inside_mask_seed.mask
    outside_seed = 255. - outside_mask_seed.mask
    # inside_mask_seed.plot_image()
    # CAC.mask_obj.plot_image()
    # utils.printNpArray(outside_seed)
    inside_coordinates = np.argwhere(inside_seed == 255.)
    outside_coordinates = np.argwhere(outside_seed == 255.)

    inside_gmm = get_values_in_region(inside_coordinates, image)
    outside_gmm = get_values_in_region(outside_coordinates, image)
    return inside_gmm, outside_gmm
def _mark_cells_by_markergene(adata, genes_dict, label, condition):
    """
    Attention: this function assumes that all genes are present in the adata object!
    Mark cells or spots by a marker gene.
    If a spot contains more than one marker gene, it is marked as a double, triple, ... positive cell or spot.

    :param adata: [annData]
    :param genes_dict: [dict]
    :param label: [string] label or observable name
    :param condition: [numpy.operation]
        for AND operation on a matrix use np.all for OR operation use np.sum or np.any
    :return:
    """
    # Get CD4 and CD8 cells which contain CD4+ or CD8A+/CD8B+

    obs_counts = "_".join([label, 'counts'])
    obs_label = "_".join([label, 'others'])

    # mark cells which don't contain those genes as others
    default_label = np.array(['Others'] * adata.shape[0], dtype='<U32')

    # initialize mask
    mask_matrix_genes = np.zeros(shape=(adata.shape[0],
                                        len(genes_dict.keys())))
    index_genes = []

    # get counts
    if "counts" in adata.layers.keys():
        default_counts = np.copy(adata.layers['counts']).sum(axis=1)
    else:
        default_counts = np.copy(adata.X).sum(axis=1)

    for ind, cell in enumerate(genes_dict.keys()):
        # First check if genes are in data set
        available_genes = list(set(adata.var.index) & set(genes_dict[cell]))

        if len(available_genes) > 0:
            # Get index of genes and other genes; the position of the index = position of genes in available_genes list
            varindex_genes = np.where(adata.var.index[
                np.newaxis, :] == np.array(available_genes)[:, np.newaxis])[1]
            index_genes.extend(varindex_genes)
            # get counts
            if "counts" in adata.layers.keys():
                counts_genes = np.copy(adata.layers["counts"])[:,
                                                               varindex_genes]
            else:
                counts_genes = np.copy(adata.X)[:, varindex_genes]

            # create mask
            if counts_genes.shape[1] > 1:
                m_genes = counts_genes > 0
                # Identify where counts > 0:
                #   for AND operation on a matrix use np.all (bool array) for
                #   OR operation use np.sum (integer array) or np.any (bool array)
                m_array_genes = condition[ind](m_genes, axis=1)
                # get counts
                default_counts[m_array_genes] = np.sum(counts_genes,
                                                       axis=1)[m_array_genes]
            else:
                m_array_genes = counts_genes[:, 0] > 0
                counts_genes = counts_genes[:, 0]
                default_counts[m_array_genes] = counts_genes[m_array_genes]

            # Add label of cell type
            default_label[m_array_genes] = cell
            mask_matrix_genes[:, ind] = m_array_genes

    # apply double positive label to those and get counts
    dp_genes = np.asarray(adata.var_names[np.unique(index_genes)])
    # get position of double positive cells
    if len(dp_genes) == 2:
        indices = np.argwhere(np.all(mask_matrix_genes == 1, axis=1))[:, 0]
        default_label[indices] = " & ".join(dp_genes)
    else:
        # Get all possible combinations for length 2 to number of double positive (dp) genes
        comb_dp_genes = []
        for n in range(2, len(dp_genes) + 1):
            comb_dp_genes.extend([i for i in combinations(dp_genes, n)])
        indices = []
        # keys as array
        dictkey_array = np.asarray(list(genes_dict.keys()))
        for combi_genes in comb_dp_genes:
            # find out the position of the genes in the combination in the dictionary
            index_combgenes = np.where(dictkey_array[
                np.newaxis, :] == np.array(list(combi_genes))[:,
                                                              np.newaxis])[1]
            # read out indices and save joint label
            temp_indices = np.argwhere(
                np.all(mask_matrix_genes[:, index_combgenes] == 1, axis=1))[:,
                                                                            0]
            default_label[temp_indices] = " & ".join(list(combi_genes))
            indices.extend(temp_indices)

    # get counts
    if "counts" in adata.layers.keys():
        counts_genes = np.copy(adata.layers["counts"])[:, index_genes]
    else:
        counts_genes = np.copy(adata.X)[:, index_genes]
    default_counts[indices] = np.sum(counts_genes[indices], axis=1)

    # 4. Add observables to adata
    adata.obs[obs_label] = default_label
    adata.obs[obs_counts] = default_counts

    return adata, obs_label
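The `condition` parameter (np.all for AND, np.any/np.sum for OR) is easiest to see on a toy count matrix. A minimal sketch with made-up counts:

import numpy as np

counts = np.array([[3, 0],    # toy counts: 4 cells x 2 marker genes
                   [2, 5],
                   [0, 0],
                   [0, 7]])
m = counts > 0
print(np.all(m, axis=1))  # [False  True False False] -> positive for ALL markers
print(np.any(m, axis=1))  # [ True  True False  True] -> positive for AT LEAST ONE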
Example #29
    def PerformLiftingMD(self, DZ, bPlot=True, export=[]):
        fig = plt.figure()
        x = self.Mol.Atom[:, 1]
        y = self.Mol.Atom[:, 2]
        z = self.Mol.Atom[:, 3]

        L = np.max(x) - np.min(x)

        DX = np.zeros(len(DZ))
        N = np.zeros(len(DZ))

        if bPlot:
            axLift3D = fig.add_subplot(2, 3, 1, projection='3d')
            axLift2D = fig.add_subplot(2, 3, 2)
            axcbar = fig.add_axes([0.35, 0.535, 0.01, 0.37])
            norm = mpl.colors.Normalize(0, np.max(DZ))
            cmap = mpl.cm.get_cmap('viridis')
            cbar = mpl.colorbar.ColorbarBase(axcbar,
                                             cmap=cmap,
                                             norm=norm,
                                             orientation='vertical')
            cbar.set_label(r'$\Delta z$')
            self.cmap = cmap

        xEnd = None

        for i, dz in enumerate(DZ):
            if bPlot:
                ax1 = fig.add_subplot(4, len(DZ), i + 1 + 2 * len(DZ))
                ax2 = fig.add_subplot(4, len(DZ), i + 1 + 3 * len(DZ))

            r = self.GetConfigurationOptimum(
                dz,
                ax=[ax1, ax2] if bPlot else None,
                xEnd=xEnd,
                export=['Energy'] if 'Energy' in export else [])
            xEnd = np.max(r[1])

            if 'xyz' in export:
                xx, x0 = np.meshgrid(x, r[0])
                xyz = np.zeros((self.Mol.N, 3))
                xyz[:, 0] = r[1][np.argmin(abs(xx - x0), axis=0)]
                xyz[:, 1] = self.Mol.Atom[:, 2]
                xyz[:, 2] = r[2][np.argmin(abs(xx - x0), axis=0)]
                ExportXYZ(self.Mol, i, xyz, dz)
                if 'MD' in export:
                    atom_xyz = np.zeros((4, self.Mol.N))
                    atom_xyz[0] = self.Mol.Atom[:, 0]
                    atom_xyz[1:4] = xyz.T
                    ShowConfiguration(atom_xyz,
                                      STM=1,
                                      szFileNameSave=self.Mol.szOutputFolder +
                                      'MD/' + self.Mol.Name + ' - dz=' +
                                      str(dz) + '.png')

            if bPlot:
                ax1.plot(r[1], r[2], c=cmap(dz / np.max(DZ)), lw=2)
                ax1.set_xlim([-10, 1.1 * L])
                ax1.set_ylim([0, 1.1 * L])

                xx, x0 = np.meshgrid(x, r[0])
                X = r[1][np.argmin(abs(xx - x0), axis=0)]
                Y = y
                Z = r[2][np.argmin(abs(xx - x0), axis=0)]
                axLift2D.plot(r[1], r[2], c=cmap(dz / np.max(DZ)))
                axLift2D.scatter(X, Z, c=cmap(dz / np.max(DZ)), s=10, lw=0)
                axLift3D.scatter(X,
                                 Y,
                                 Z,
                                 c=cmap(dz / np.max(DZ)),
                                 lw=0,
                                 s=10,
                                 alpha=0.7)

                ax2.set_xlabel(r'$\Delta z = %0.1f$' % dz)

                if i == 0:
                    ax1.set_ylabel('Position')
                    ax2.set_ylabel('Energy [eV]')
                    ax2.legend([
                        'Bending energy', 'Bonding energy', 'Friction',
                        'Total energy'
                    ])

                z = np.argwhere(r[2] > 1.5)
                N[i] = np.sum(np.exp(-Z / 1.5))
                DX[i] = 0 if len(z) == 0 else r[0][np.max(z)]
                axLift2D.plot(r[1, 0 if len(z) == 0 else np.max(z)],
                              0.1,
                              '*',
                              c=cmap(dz / np.max(DZ)),
                              lw=0)

        if bPlot:
            axLift3D.set_xlim([0, 1.0 * L])
            axLift3D.set_ylim([0, 1.0 * L])
            axLift3D.set_zlim([0, 1.0 * L])

            axDX = fig.add_subplot(2, 3, 3)
            axDX.plot(DZ, DX, c='k')
            axDX.set_xlim(0, np.max(DZ))
            axDX.set_xlabel(r'$\Delta z$')
            axDX.set_ylim(0, L * 1.1)
            axDX.set_ylabel(r'$\Delta x$')
            axDX.scatter(DZ, DX, c=cmap(DZ / np.max(DZ)), lw=0, s=50)
            axN = axDX.twinx()
            axN.scatter(DZ, N, c=cmap(DZ / np.max(DZ)), lw=0, s=50, marker='*')
            axN.set_ylabel('# connected atoms')

        if 'dx-n' in export:
            header = ['dz', 'dx', 'N']
            values = np.zeros((len(DZ), 3))
            values[:, 0] = DZ
            values[:, 1] = DX
            values[:, 2] = N
            ExportData(
                self.Mol.szOutputFolder + '/' + self.Mol.Name +
                ' - dz-dz-N.txt', values, header)

Object_num = 150
all_cls_attributes_info[all_cls_attributes_info < 0.3] = 0
similarityMatrix_cls_part = np.zeros((Class_num, Class_num, Object_num))
for i_obj in range(Object_num):
    print(i_obj)
    similarityMatrix_cls_part[:, :, i_obj] = compute_ClsSimilarity_underObject(
        all_cls_attributes_info[:, i_obj])

# np.save('./ade/similarityMatrix_cls_part.npy', similarityMatrix_cls_part)
# similarityMatrix_cls_part = np.load('./ade/similarityMatrix_cls_part.npy')

# threshold and retain only the most similar parts between each class pair
threshold = np.sort(similarityMatrix_cls_part.flatten())[int(
    0.99 * Class_num * Class_num * Object_num)]
similarityMatrix_cls_part_copy = np.copy(similarityMatrix_cls_part)
similarityMatrix_cls_part_copy[similarityMatrix_cls_part >= threshold] = 1
similarityMatrix_cls_part_copy[similarityMatrix_cls_part < threshold] = 0
similarityMatrix_cls_part_copy = similarityMatrix_cls_part_copy.astype(int)

com_extracted_attributes = np.zeros((Class_num, Class_num), dtype=object)
for i in range(Class_num):
    for j in range(i + 1, Class_num):
        object_idx = np.argwhere(
            similarityMatrix_cls_part_copy[i, j, :] == 1).flatten()
        object_idx = object_idx + 1
        object_idx = object_idx.tolist()
        com_extracted_attributes[i, j] = object_idx
        com_extracted_attributes[j, i] = object_idx
np.save('./ade/com_extracted_attributes_001.npy', com_extracted_attributes)
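The sorted-flatten threshold above keeps roughly the top 1% of similarity values; np.quantile computes an equivalent cut-off directly (a sketch on random data; the value can differ slightly from the sorted-index version due to interpolation):

import numpy as np

sim = np.random.rand(10, 10, 150)              # stand-in for similarityMatrix_cls_part
threshold = np.quantile(sim, 0.99)
mask = (sim >= threshold).astype(int)
print(mask.sum(), "of", sim.size, "entries kept")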
# (Change the folders as appropriate for where the data is)

# b) Clean:
clean.cleanup_kilosorted_data(kilosort_folder,
                              number_of_channels_in_binary_file=const.
                              NUMBER_OF_AP_CHANNELS_IN_BINARY_FILE,
                              binary_data_filename=binary_data_filename,
                              prb_file=const.prb_file,
                              type_of_binary=const.BINARY_FILE_ENCODING,
                              order_of_binary='F',
                              sampling_frequency=20000,
                              num_of_shanks_for_vis=1)

# c) Remove some types
template_marking = np.load(join(kilosort_folder, 'template_marking.npy'))
print('Noise: {}'.format(len(np.argwhere(template_marking == 0))))
print('Single: {}'.format(len(np.argwhere(template_marking == 1))))
print('Contaminated: {}'.format(len(np.argwhere(template_marking == 2))))
print('Putative: {}'.format(len(np.argwhere(template_marking == 3))))
print('Multi: {}'.format(len(np.argwhere(template_marking == 4))))
print('Non Multi: {}'.format(
    len(np.argwhere(template_marking == 1)) +
    len(np.argwhere(template_marking == 2)) +
    len(np.argwhere(template_marking == 3))))

# </editor-fold>
# ----------------------------------------------------------------------------------------------------------------------

# ----------------------------------------------------------------------------------------------------------------------
# <editor-fold desc = "STEP 2: CREATE TEMPLATE INFO OF ALL THE CLEANED TEMPLATES">
    def cal_steer_angle(self, img, vis_img=None, depth_img=None):
        """
        Calculate steering angle for car
        :param img: bgr image
        :return: steering angle (-60 to 60)
        """
        # Init steering angle to 0
        steer_angle = 0

        # Get birdview image
        img_bv = self.bird_view(img)

        # Run semantic segmentation on RGB image
        seg_masks = self.segmentation.get_masks(img)

        if vis_img is not None:
            vis_img = self.segmentation.get_visualization_img(
                vis_img, seg_masks)

        # Get road mask
        road_mask = seg_masks[TrafficObject.ROAD.name]

        # Filter to get only largest white area in road mask
        if cv2.getVersionMajor() in [2, 4]:
            # OpenCV 2, OpenCV 4 case
            contours, hierarchy = cv2.findContours(road_mask, cv2.RETR_TREE,
                                                   cv2.CHAIN_APPROX_SIMPLE)
        else:
            # OpenCV 3 case
            _, contours, hierarchy = cv2.findContours(road_mask, cv2.RETR_TREE,
                                                      cv2.CHAIN_APPROX_SIMPLE)

        # Choose largest contour
        best = -1
        maxsize = -1
        count = 0
        for cnt in contours:
            if cv2.contourArea(cnt) > maxsize:
                maxsize = cv2.contourArea(cnt)
                best = count
            count = count + 1

        road_mask[:, :] = 0
        if best != -1:
            cv2.drawContours(road_mask, [contours[best]], 0, 255, -1)

        # cv2.imshow("Debug", road_mask)
        # cv2.waitKey(1)

        car_mask = seg_masks[TrafficObject.CAR.name]
        perdestrian_mask = seg_masks[TrafficObject.PERDESTRIAN.name]
        road_mask = road_mask & cv2.bitwise_not(car_mask)

        # Clear car mask if not use it
        if not config.USE_CAR_MASK_SEMANTIC_SEG:
            car_mask[:, :] = 0

        # Use depth image
        if depth_img is not None:
            obstacle_mask = depth_img & road_mask
            car_mask = obstacle_mask | car_mask

            if self.debug_stream:
                self.debug_stream.update_image('obstacle_mask', obstacle_mask)

        # Convert to bird view
        road_mask_bv = self.bird_view(road_mask)

        # ====== Turning =======

        if self.is_turning:
            if self.turning_time_begin + config.TURNING_TIME < time.time():
                self.is_turning = False
            else:
                return self.current_turning_direction * config.TURNING_ANGLE, vis_img
        else:
            interested_area = road_mask_bv[80:180, :]
            lane_area = np.count_nonzero(interested_area)

            if config.SHOW_AREA:
                print("Lane area: {}".format(lane_area))

            if lane_area > config.ROAD_AREA_TO_TURN:
                print("Turning")
                self.is_turning = True
                self.turning_time_begin = time.time()

                self.current_turning_direction = self.current_traffic_sign
                print(self.current_turning_direction)

                # Reset traffic sign
                self.current_traffic_sign = config.SIGN_NO_SIGN

                return self.current_turning_direction * config.TURNING_ANGLE, vis_img

        # ====== If not turning, calculate steering angle using middle point =======

        # TODO: The current method for computing the middle point and steering angle
        # is very simple; research better approaches in the future.
        interested_row = road_mask_bv[int(road_mask_bv.shape[0] / 3 *
                                          2), :].reshape((-1, ))
        white_pixels = np.argwhere(interested_row > 0)

        if white_pixels.size != 0:
            middle_pos = np.mean(white_pixels)
        else:
            middle_pos = 160

        if middle_pos != middle_pos:  # is NaN
            middle_pos = 0

        # ====== Obstacle avoidance =======
        danger_zone, danger_zone_y = self.obstacle_detector.find_danger_zone(
            car_mask, perdestrian_mask)

        # print(danger_zone, danger_zone_y)

        # Avoid obstacles
        if danger_zone != (0, 0):

            # 2 objects
            if danger_zone[0] == -1:
                self.object_avoidance_direction = 0
                # middle_pos = danger_zone[1]

            # single object
            else:
                center_danger_zone = int((danger_zone[0] + danger_zone[1]) / 2)

                count_road_pixels_left = np.count_nonzero(
                    road_mask[danger_zone_y, :center_danger_zone])
                count_road_pixels_right = np.count_nonzero(
                    road_mask[danger_zone_y, center_danger_zone:])

                # obstacle is on the right
                if count_road_pixels_left > count_road_pixels_right:
                    self.object_avoidance_direction = -1
                    self.last_object_time = time.time()
                    # middle_pos = danger_zone[0]
                    print("OBSTACLE: RIGHT")
                # left
                elif count_road_pixels_left < count_road_pixels_right:
                    self.object_avoidance_direction = 1
                    self.last_object_time = time.time()
                    # middle_pos = danger_zone[1]
                    print("OBSTACLE: LEFT")

        # Object avoidance
        if self.last_object_time > time.time(
        ) - config.OBSTACLE_AVOIDANCE_TIME:
            middle_pos += config.OBSTACLE_AVOIDANCE_OFFSET * self.object_avoidance_direction
            print("Obstacle avoidance direction: " +
                  str(self.object_avoidance_direction))
        elif self.last_object_time < time.time(
        ) - config.OBSTACLE_AVOIDANCE_TIME - 1:
            self.object_avoidance_direction = 0
            # print("Obstacle was over")

        if self.debug_stream:
            half_car_width = config.CAR_WIDTH // 2
            cv2.line(img_bv, (int(middle_pos), self.h // 2),
                     (self.w // 2, self.h), (255, 0, 0), 2)
            cv2.line(img_bv, (int(middle_pos) + half_car_width, self.h // 2),
                     (self.w // 2 + half_car_width, self.h), (255, 0, 255), 3)
            cv2.line(img_bv, (int(middle_pos) - half_car_width, self.h // 2),
                     (self.w // 2 - half_car_width, self.h), (255, 0, 255), 3)
            self.debug_stream.update_image('car_controlling', img_bv)

        # Add offset to middle pos
        middle_pos += config.MIDDLE_POS_OFFSET

        # Distance between MiddlePos and CarPos
        distance_x = middle_pos - self.w / 2
        distance_y = self.h - self.h / 3 * 2

        # Angle to middle position
        steer_angle = math.atan(
            float(distance_x) / distance_y) * 180 / math.pi * 1.2

        # QIK MATH
        # steer_angle = ((middle_pos - 160) / 160) * 60

        return steer_angle, vis_img
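The steering geometry at the end (arctangent of the lateral offset over the look-ahead distance, scaled by the same 1.2 gain) can be checked standalone. A sketch assuming a 320x240 bird-view frame (these dimensions are illustrative):

import math

w, h = 320, 240
middle_pos = 200                        # mean column of the white road pixels
distance_x = middle_pos - w / 2         # lateral offset from the image centre
distance_y = h - h / 3 * 2              # look-ahead distance in pixels
steer_angle = math.atan(distance_x / distance_y) * 180 / math.pi * 1.2
print(round(steer_angle, 1))            # ~31.9 degrees to the right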
Example #33
def logistic_regression_glove(positive_tweets, negative_tweets):
    """ Return a logistic Regression model fitted on GloVe embeddings

    Keyword arguments:
    positive_tweets -- the file (.txt) that contains the positive tweets
    negative_tweets -- the file (.txt) that contains the negative tweets
    """
    emb = np.load('embeddings.npy')

    with open("vocab.pkl", "rb") as f:
        vocab = pickle.load(f)

    # embed positive tweets
    num_lines_pos = sum(1 for line in open(positive_tweets))

    train_pos = np.zeros((num_lines_pos, emb.shape[1]))
    with open(positive_tweets) as f:
        for line_index, line in enumerate(f):
            words = line.split()
            index = [vocab[word] for word in words if word in vocab.keys()]
            line_fet = np.mean(np.array([emb[i] for i in index]), axis=0)
            train_pos[line_index] = line_fet

    index_to_remove_pos = np.unique(
        [x for x, y in np.argwhere(np.isnan(train_pos))])

    train_pos_2 = np.delete(train_pos, index_to_remove_pos, axis=0)

    # embed negative tweets
    num_lines_neg = sum(1 for line in open(negative_tweets))

    train_neg = np.zeros((num_lines_neg, emb.shape[1]))
    with open(negative_tweets) as f:
        for line_index, line in enumerate(f):
            words = line.split()
            index = [vocab[word] for word in words if word in vocab.keys()]
            line_fet = np.mean(np.array([emb[i] for i in index]), axis=0)
            train_neg[line_index] = line_fet

    index_to_remove_neg = np.unique(
        [x for x, y in np.argwhere(np.isnan(train_neg))])

    train_neg_2 = np.delete(train_neg, index_to_remove_neg, axis=0)

    # Combine positive and negative tweets to have the whole training set
    X = np.vstack((train_pos_2, train_neg_2))
    y_pos = np.ones(train_pos_2.shape[0])
    y_neg = np.repeat(-1, train_neg_2.shape[0])
    Y = np.hstack((y_pos, y_neg))

    #Train a Logistic Regression classifier
    logiCV = LogisticRegressionCV(Cs=5,
                                  fit_intercept=True,
                                  cv=4,
                                  dual=False,
                                  penalty='l2',
                                  scoring=None,
                                  solver='sag',
                                  tol=0.0001,
                                  max_iter=10000,
                                  class_weight=None,
                                  n_jobs=-1,
                                  verbose=0,
                                  refit=True,
                                  intercept_scaling=1.0,
                                  multi_class='ovr',
                                  random_state=None,
                                  l1_ratios=None)

    logiCV.fit(X, Y)

    return logiCV
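Both embedding functions drop tweets whose mean embedding is NaN (no word found in the vocabulary). The cleanup pattern in isolation, as a minimal sketch with toy data:

import numpy as np

train = np.array([[0.1, 0.2],
                  [np.nan, np.nan],     # tweet with no known words
                  [0.3, 0.4]])
bad_rows = np.unique(np.argwhere(np.isnan(train))[:, 0])
clean = np.delete(train, bad_rows, axis=0)
print(clean.shape)  # (2, 2)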
Example #34
def logistic_regression_word2vec(positive_tweets, negative_tweets):
    """ Return a logistic Regression model fitted on Word2vec embeddings

    Keyword arguments:
    positive_tweets -- the file (.txt) that contains the positive tweets
    negative_tweets -- the file (.txt) that contains the negative tweets
    """
    f = open(positive_tweets)
    tweets_pos = [line.split() for line in f.readlines()]
    f.close()

    f = open(negative_tweets)
    tweets_neg = [line.split() for line in f.readlines()]
    f.close()

    # Parameters for Word2vec
    size = 300
    min_count = 5
    epoch = 10

    # train Word2vec on the tweets
    model = word2vec.Word2Vec(sentences=tweets_pos + tweets_neg,
                              corpus_file=None,
                              size=size,
                              alpha=0.025,
                              window=5,
                              min_count=min_count,
                              max_vocab_size=None,
                              sample=0.001,
                              seed=1,
                              workers=1,
                              min_alpha=0.0001,
                              sg=0,
                              hs=0,
                              negative=5,
                              ns_exponent=0.75,
                              cbow_mean=1,
                              iter=epoch,
                              null_word=0,
                              trim_rule=None,
                              sorted_vocab=1,
                              batch_words=10000,
                              compute_loss=False,
                              callbacks=(),
                              max_final_vocab=None)

    # embed positive tweets
    train_pos = np.zeros((len(tweets_pos), size))
    for index, tokens in enumerate(tweets_pos):
        vect = [model.wv[token] for token in tokens if token in model.wv]
        train_pos[index] = np.mean(vect, axis=0)

    index_to_remove_pos = np.unique(
        [x for x, y in np.argwhere(np.isnan(train_pos))])

    train_pos_2 = np.delete(train_pos, index_to_remove_pos, axis=0)

    # embed negative tweets
    train_neg = np.zeros((len(tweets_neg), size))
    for index, tokens in enumerate(tweets_neg):
        vect = [model.wv[token] for token in tokens if token in model.wv]
        train_neg[index] = np.mean(vect, axis=0)

    index_to_remove_neg = np.unique(
        [x for x, y in np.argwhere(np.isnan(train_neg))])

    train_neg_2 = np.delete(train_neg, index_to_remove_neg, axis=0)

    # Combine positive and negative tweets to have the whole training set
    X = np.vstack((train_pos_2, train_neg_2))
    y_pos = np.ones(train_pos_2.shape[0])
    y_neg = np.repeat(-1, train_neg_2.shape[0])
    Y = np.hstack((y_pos, y_neg))

    #Train a Logistic Regression classifier
    logiCV = LogisticRegressionCV(Cs=5,
                                  fit_intercept=True,
                                  cv=4,
                                  dual=False,
                                  penalty='l2',
                                  scoring=None,
                                  solver='sag',
                                  tol=0.0001,
                                  max_iter=10000,
                                  class_weight=None,
                                  n_jobs=-1,
                                  verbose=0,
                                  refit=True,
                                  intercept_scaling=1.0,
                                  multi_class='ovr',
                                  random_state=None,
                                  l1_ratios=None)

    logiCV.fit(X, Y)

    return logiCV
def movingAverage(data, window, newColumnNames):
    """
    Using moving average method to profile stock data

    Parameters
    ----------
    data : pandas.core.frame.DataFrame
        Input Pandas dataframe.
    window : int
        Size of the sliding window used to compute the moving average.
    newColumnNames : str
        New column names for the resulting moving-average dataframe.

    Returns
    -------
    pandas.core.frame.DataFrame
        A Pandas dataframe containing the moving average of the data.

    Example
    -------
    >>> from stock_analyzer import stock_analyzer
    >>> import pandas_datareader.data as web
    >>> df = web.DataReader('^GSPC', data_source='yahoo', start='2012-01-01',
    ... end='2020-12-17')
    >>> stock_analyzer.movingAverage(df,100,['movingAverage'+ name for name
    ... in df.columns])
                movingAverageHigh  ...  movingAverageAdj Close
    Date                           ...
    2012-01-03        1284.619995  ...             1277.060059
    2012-01-04        1284.561095  ...             1277.062458
    2012-01-05        1284.545396  ...             1277.102458
    2012-01-06        1284.517595  ...             1277.109958
    2012-01-09        1284.491295  ...             1277.146357
    ...                       ...  ...                     ...
    2020-12-11        3459.242998  ...             3438.738198
    2020-12-14        3463.419199  ...             3442.856499
    2020-12-15        3468.099500  ...             3447.646401
    2020-12-16        3472.797900  ...             3452.264001
    2020-12-17        3477.611902  ...             3457.304402

    [2256 rows x 6 columns]

    """
    try:
        data = pd.DataFrame(data)
    except ValueError:
        raise ValueError(
            "Your input data cannot be converted to a pandas dataframe."
        )

    avgs = []

    for name in data.columns:
        try:
            values = data[name].values.astype("float")
        except TypeError:
            raise TypeError(
                "Type of Column %s isn't a string \
        or a number "
                % name
            )
        except ValueError:
            raise ValueError(
                "Column %s can't be converted to floating point" % name
            )

        _nan_locations = np.argwhere(np.isnan(values))
        if _nan_locations.shape[0] > 0:
            raise ValueError(
                (
                    "Column {} has Nan at " + "{} " * _nan_locations.shape[0]
                ).format(name, *_nan_locations)
            )

        values = np.insert(values, 0, [values[0] for i in range(window - 1)])
        avg = [
            np.average(values[(i - window + 1): (i + 1)])
            for i in range(window - 1, len(values))
        ]
        avgs.append(avg)

    df_avgs = pd.DataFrame(
        np.array(avgs).T, index=data.index, columns=newColumnNames
    )
    return df_avgs
Example #36
    "--- resolve score and obtain z norm mean and std value, and setting threshold ---"
)

scores_mat = np.loadtxt(scores_file, dtype=str)
train_utt_id = scores_mat[:, 0]
test_utt_id_scoring = scores_mat[:, 1]
score = scores_mat[:, 2].astype(np.float64)
train_spk_id = np.array([utt_id.split("-")[0] for utt_id in train_utt_id])
test_spk_id_scoring = np.array(
    [utt_id.split("-")[0] for utt_id in test_utt_id_scoring])

z_norm_index = []
for i, utt_id in enumerate(test_utt_id_scoring):
    if utt_id in z_norm_utt_id:
        z_norm_index.append(i)
target_index = np.argwhere(train_spk_id == test_spk_id_scoring).flatten()
untarget_index = np.setdiff1d(
    np.argwhere(train_spk_id != test_spk_id_scoring).flatten(),
    np.array(z_norm_index))

z_norm_means = np.zeros(len(enroll_utt_id), dtype=np.float64)
z_norm_stds = np.zeros(len(enroll_utt_id), dtype=np.float64)
score_target = []
score_untarget = []

for i, spk_id in enumerate(enroll_spk_id):

    index = np.argwhere(train_spk_id[z_norm_index] == spk_id).flatten()
    mean = np.mean((score[z_norm_index])[index])
    std = np.std((score[z_norm_index])[index])
    z_norm_means[i] = mean
    z_norm_stds[i] = std
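The snippet is truncated here, but the standard use of these per-enrollment statistics is z-norm scoring: a raw trial score is normalized against the enrollment speaker's cohort distribution. A sketch with toy numbers (assumed usage, not shown in the original):

import numpy as np

cohort_scores = np.array([0.1, -0.2, 0.3, 0.0])   # toy z-norm cohort scores
mean, std = cohort_scores.mean(), cohort_scores.std()
raw_score = 0.5
print((raw_score - mean) / std)                    # z-normalized trial score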
Example #37
    def extract_part_from_file(self, file_name, part, sess):

        parts = ['_soprano_', '_alto_', '_bass_', '_tenor_']

        cqt = self.read_input_file(file_name)

        song_name = file_name.split('_')[0]

        voc_num = 9 - part
        voc_part = parts[part]
        voc_track = file_name[-voc_num]

        voc_feat_file = h5py.File(
            config.voc_feats_dir + song_name + voc_part + voc_track +
            '.wav.hdf5', 'r')

        voc_feats = voc_feat_file["voc_feats"][()]

        voc_feats[np.argwhere(np.isnan(voc_feats))] = 0.0

        atb = voc_feat_file['atb'][()]

        atb = atb[:, 1:]

        atb[:, 0:4] = 0

        atb = np.clip(atb, 0.0, 1.0)

        max_len = min(len(voc_feats), len(cqt))

        voc_feats = voc_feats[:max_len]

        cqt = cqt[:max_len]

        atb = atb[:max_len]

        # voc_feats = (voc_feats - min_feat) / (max_feat - min_feat)
        #
        # voc_feats = np.clip(voc_feats[:, :, :-2], 0.0, 0.1)

        # sig_process.feats_to_audio(voc_feats, 'booboo.wav')

        in_batches_cqt, nchunks_in = utils.generate_overlapadd(cqt)

        in_batches_atb, nchunks_in = utils.generate_overlapadd(atb)

        # import pdb;pdb.set_trace()
        out_batches_feats = []
        for in_batch_cqt, in_batch_atb in zip(in_batches_cqt, in_batches_atb):
            feed_dict = {
                self.input_placeholder: in_batch_cqt,
                self.f0_placeholder: in_batch_atb,
                self.is_train: False
            }
            out_feat = sess.run(self.output_logits, feed_dict=feed_dict)
            out_batches_feats.append(out_feat)

        out_batches_feats = np.array(out_batches_feats)

        out_feats = utils.overlapadd(
            out_batches_feats.reshape(out_batches_feats.shape[0],
                                      config.batch_size, config.max_phr_len,
                                      -1), nchunks_in)

        out_feats = out_feats * (max_feat - min_feat) + min_feat

        out_feats = out_feats[:max_len]

        out_feats = np.concatenate((out_feats, voc_feats[:, -2:]), axis=-1)

        plt.figure(1)
        plt.subplot(211)
        plt.imshow(voc_feats.T, origin='lower', aspect='auto')
        plt.subplot(212)
        plt.imshow(out_feats.T, origin='lower', aspect='auto')
        plt.show()

        sig_process.feats_to_audio(out_feats, 'extracted.wav')

        import pdb
        pdb.set_trace()
Example #38
def _split_weighted_sample(self, X, y, sample_weight, is_stratified=False):
    random_state = self.random_state if self.shuffle else None
    if is_stratified:
        kfold_model = StratifiedKFold(n_splits=self.n_splits,
                                      shuffle=self.shuffle,
                                      random_state=random_state)
    else:
        kfold_model = KFold(n_splits=self.n_splits,
                            shuffle=self.shuffle,
                            random_state=random_state)
    if sample_weight is None:
        return kfold_model.split(X, y)
    weights_sum = np.sum(sample_weight)
    max_deviations = []
    all_splits = []
    for i in range(self.n_trials + 1):
        splits = [test for (train, test) in list(kfold_model.split(X, y))]
        weight_fracs = np.array(
            [np.sum(sample_weight[split]) / weights_sum for split in splits])
        if np.all(weight_fracs > .95 / self.n_splits):
            # Found a good split, return.
            return self._get_folds_from_splits(splits, X.shape[0])
        # Record all splits in case the stratification by weight yields a worse partition
        all_splits.append(splits)
        max_deviation = np.max(np.abs(weight_fracs - 1 / self.n_splits))
        max_deviations.append(max_deviation)
        # Reseed random generator and try again
        kfold_model.shuffle = True
        if isinstance(kfold_model.random_state, numbers.Integral):
            kfold_model.random_state = kfold_model.random_state + 1
        elif kfold_model.random_state is not None:
            kfold_model.random_state = np.random.RandomState(
                kfold_model.random_state.randint(np.iinfo(np.int32).max))

    # If KFold fails after n_trials, we try the next best thing: stratifying by weight groups
    warnings.warn(
        "The KFold algorithm failed to find a weight-balanced partition after "
        +
        "{n_trials} trials. Falling back on a weight stratification algorithm."
        .format(n_trials=self.n_trials), UserWarning)
    if is_stratified:
        stratified_weight_splits = [[]] * self.n_splits
        for y_unique in np.unique(y.flatten()):
            class_inds = np.argwhere(y == y_unique).flatten()
            class_splits = self._get_splits_from_weight_stratification(
                sample_weight[class_inds])
            stratified_weight_splits = [
                split + list(class_inds[class_split]) for split, class_split in
                zip(stratified_weight_splits, class_splits)
            ]
    else:
        stratified_weight_splits = self._get_splits_from_weight_stratification(
            sample_weight)
    weight_fracs = np.array([
        np.sum(sample_weight[split]) / weights_sum
        for split in stratified_weight_splits
    ])
    if np.all(weight_fracs > .95 / self.n_splits):
        # Found a good split, return.
        return self._get_folds_from_splits(stratified_weight_splits,
                                           X.shape[0])
    else:
        # Did not find a good split
        # Record the deviation for the weight-stratified split to compare with KFold splits
        all_splits.append(stratified_weight_splits)
        max_deviation = np.max(np.abs(weight_fracs - 1 / self.n_splits))
        max_deviations.append(max_deviation)
    # Return most weight-balanced partition
    min_deviation_index = np.argmin(max_deviations)
    return self._get_folds_from_splits(all_splits[min_deviation_index],
                                       X.shape[0])
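The acceptance test used throughout `_split_weighted_sample` (each fold must carry at least 95% of its fair share, 1/n_splits, of the total weight) can be reproduced standalone:

import numpy as np
from sklearn.model_selection import KFold

X = np.arange(20).reshape(-1, 1)
sample_weight = np.random.rand(20)
n_splits = 4
splits = [test for _, test in KFold(n_splits=n_splits, shuffle=True).split(X)]
weight_fracs = np.array([sample_weight[s].sum() / sample_weight.sum() for s in splits])
print(weight_fracs, np.all(weight_fracs > 0.95 / n_splits))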
def exponentialSmoothing(data, newColumnNames, alpha=0.3):

    """
    Using exponential smoothing method to profile stock data

    Parameters
    ----------
    data : pandas.core.frame.DataFrame
        Input Pandas dataframe.
    alpha : float
        Smoothing parameter that defines the weighting; must be between 0 and 1.
    newColumnNames : str
        New column names for the resulting smoothed dataframe.

    Returns
    -------
    pandas.core.frame.DataFrame
        A Pandas dataframe containing exponential smoothing fits of the data.

    Example
    -------
    >>> from stock_analyzer import stock_analyzer
    >>> import pandas_datareader.data as web
    >>> df = web.DataReader('^GSPC', data_source='yahoo', start='2012-01-01',
    ... end='2020-12-17')
    >>> stock_analyzer.exponentialSmoothing(df,['exponentialSmoothing'+ name
    ... for name in df.columns])
                exponentialSmoothingHigh  ...  exponentialSmoothingAdj Close
    Date                                  ...
    2012-01-03               1284.619995  ...                    1277.060059
    2012-01-04               1282.852991  ...                    1277.132056
    2012-01-05               1282.912108  ...                    1278.310457
    2012-01-06               1282.590465  ...                    1278.160337
    2012-01-09               1282.410323  ...                    1278.922221
    ...                              ...  ...                            ...
    2020-12-11               3683.080578  ...                    3671.981068
    2020-12-14               3687.439437  ...                    3664.633745
    2020-12-15               3689.794617  ...                    3673.629656
    2020-12-16               3696.237238  ...                    3681.891736
    2020-12-17               3704.902102  ...                    3694.068209

    [2256 rows x 6 columns]
    """

    try:
        data = pd.DataFrame(data)
    except ValueError:
        raise ValueError(
            "Your input data cannot be converted to a pandas dataframe."
        )

    if alpha < 0 or alpha > 1:
        raise ValueError("The value of alpha must between 0 and 1.")

    smoothed = []
    for name in data.columns:
        try:
            values = data[name].values.astype("float")
        except TypeError:
            raise TypeError(
                "Type of Column %s isn't a string \
        or a number "
                % name
            )
        except ValueError:
            raise ValueError(
                "Column %s can't be converted to floating point" % name
            )

        _nan_locations = np.argwhere(np.isnan(values))
        if _nan_locations.shape[0] > 0:
            raise ValueError(
                (
                    "Column {} has Nan at " + "{} " * _nan_locations.shape[0]
                ).format(name, *_nan_locations)
            )

        pred = []
        values = data[name].values
        St_prev = values[0]
        for i in range(len(values)):
            yt = values[i]
            St = alpha * yt + St_prev * (1 - alpha)
            pred.append(St)
            St_prev = St
        smoothed.append(pred)
    df_smoothed = pd.DataFrame(
        np.array(smoothed).T, index=data.index, columns=newColumnNames
    )
    return df_smoothed
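A quick numeric check of the recurrence S_t = alpha*y_t + (1 - alpha)*S_{t-1}, seeded with S_0 = y_0 as in the loop above:

alpha = 0.3
values = [10.0, 20.0, 30.0]
s = values[0]                 # S_0 = y_0
for y in values:
    s = alpha * y + (1 - alpha) * s
    print(round(s, 2))        # 10.0, 13.0, 18.1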
Example #40
def check_has_wanted_room(cost, row_ind, col_ind):
    for student, room in zip(row_ind, col_ind):
        # print(np.argwhere(cost[student] != 0))
        if room not in np.argwhere(cost[student] != 0):
            logging.info(f"{student} n'a pas la chambre voulue.")
Example #41
        if inc < 17:
            ax.get_xaxis().set_ticks([])
        else:
            ax.set_xlabel("time (seconds)\n1000 orbits of planet d")
        if i % 4 == 0:
            ax.set_ylabel("semi-major axis (AU)")
        ax.set_ylim([0, planetE.a*1.5 / AU])
        ax.set_title("i = %i degrees"%inc)

axList[0].legend()
# figure out reasonable labels
plt.savefig("inc_vs_a.png", dpi=250)
plt.close(fig)

### upper limit masses
ind = numpy.argwhere(inclinationList==13)[0][0] # index where inclination == 13
print("\nupper limit masses at inclination 13 deg:")
for planet in planets:
    print("planet %s mass: %.2f x (Earth Mass)"%(planet.name, planet.m[ind] / mEarth))


## impact parameter from  seager:
# b = a cos(i) / R_star * (1-e**2)/(1+esin(omega))
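# Setting b = 1 (a grazing transit) and solving for i gives
#   cos(i) = (R_star / a) * (1 + e*sin(omega)) / (1 - e**2),
# which is what the loop below evaluates for each planet.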
rStar = 0.3 * rSun

print("\nTransit inclinations:")
for planet in planets:
    i = numpy.degrees(numpy.arccos(rStar / planet.a * (1 + planet.e * numpy.sin(planet.omega)) / (1 - planet.e**2)))
    print("planet %s transits at inclination: %.2f" % (planet.name, i))

Example #42
def train(gru, oh_book, seq_length, chars_with_indices, alph_size, hidden_size, n_epochs, lr):

    book_length = np.shape(oh_book)[1]
    h_prev = np.zeros((hidden_size, 1))
    e = np.random.randint(0, book_length - seq_length)
    X = oh_book[:, e:e + seq_length]
    Y = oh_book[:, e + 1:e + seq_length + 1]
    smooth_loss = compute_loss(X, gru, Y, h_prev)[0]

    smooth_loss_plot = []
    iterations = 0

    weights = gru.gru_to_list()
    momentums = gru.initial_momentum()

    for epoch in range(n_epochs):
        print(epoch)
        h_prev = np.zeros_like(h_prev)
        e = 0  # chars read so far
        while e < book_length - seq_length:

            X = oh_book[:, e:e + seq_length]
            Y = oh_book[:, e + 1:e + seq_length + 1]

            gradients, loss, h_prev = compute_gradients(X, Y, gru, h_prev)
            gradients = clip_gradients(gradients)
            for i in range(len(gradients)):
                momentums[i] = momentums[i] + gradients[i] ** 2
                weights[i] = weights[i] - (lr / np.sqrt(momentums[i] + 1e-6)) * gradients[i]

            gru.update(weights)
            smooth_loss = 0.999 * smooth_loss + 0.001 * loss

            if iterations % 100 == 0:
                smooth_loss_plot.append(smooth_loss)
            if iterations % 1000 == 0:
                print(iterations)
                print(smooth_loss)
            if iterations % 10000 == 0:
                my_string = generate_chars(
                    chars_with_indices, alph_size, hidden_size,
                    char_from_index(chars_with_indices, np.argwhere(X[:, -1] == 1)[0][0]),
                    gru, 200)
                print(my_string)
            e = e + seq_length
            iterations += 1

    plt.plot(smooth_loss_plot)

    my_string = generate_chars(chars_with_indices, alph_size,hidden_size, 'H', gru, 1000)
    print(my_string)
    plt.show()
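The weight update inside the loop above is AdaGrad: squared gradients accumulate in `momentums`, and each step is scaled by 1/sqrt(accumulator + eps). The rule in isolation, on a single toy parameter vector:

import numpy as np

lr, eps = 0.1, 1e-6
w = np.zeros(3)
acc = np.zeros(3)                     # per-parameter squared-gradient accumulator
for _ in range(3):
    grad = np.array([1.0, 0.5, 0.0])
    acc += grad ** 2
    w -= lr / np.sqrt(acc + eps) * grad
print(w)                              # steps shrink as gradients accumulate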
def local_maxima(array):
    truth_array = np.r_[True,
                        array[1:] > array[:-1]] & np.r_[array[:-1] > array[1:],
                                                        True]
    indices = np.argwhere(truth_array)[:, 0]
    return indices
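A quick check of local_maxima on a toy array (an endpoint qualifies only when it exceeds its single neighbour):

import numpy as np

print(local_maxima(np.array([1, 3, 2, 5, 4])))  # -> [1 3]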
Example #44
def run_elective_loop():

    elective = None
    noWait = False

    ## load courses

    cs = config.courses  # OrderedDict
    N = len(cs)
    cid_cix = {}  # { cid: cix }

    for ix, (cid, c) in enumerate(cs.items()):
        goals.append(c)
        cid_cix[cid] = ix

    ## load mutex

    ms = config.mutexes
    mutexes.resize((N, N), refcheck=False)

    for mid, m in ms.items():
        ixs = []
        for cid in m.cids:
            if cid not in cs:
                raise UserInputException(
                    "In 'mutex:%s', course %r is not defined" % (mid, cid))
            ix = cid_cix[cid]
            ixs.append(ix)
        for ix1, ix2 in combinations(ixs, 2):
            mutexes[ix1, ix2] = mutexes[ix2, ix1] = 1

    ## load delay

    ds = config.delays
    delays.resize(N, refcheck=False)
    delays.fill(NO_DELAY)

    for did, d in ds.items():
        cid = d.cid
        if cid not in cs:
            raise UserInputException(
                "In 'delay:%s', course %r is not defined" % (did, cid))
        ix = cid_cix[cid]
        delays[ix] = d.threshold

    ## setup elective pool

    for ix in range(1, elective_client_pool_size + 1):
        client = ElectiveClient(id=ix, timeout=elective_client_timeout)
        client.set_user_agent(random.choice(USER_AGENT_LIST))
        electivePool.put_nowait(client)

    ## print header

    header = "# PKU Auto-Elective Tool v%s (%s) #" % (__version__, __date__)
    line = "#" + "-" * (len(header) - 2) + "#"

    cout.info(line)
    cout.info(header)
    cout.info(line)
    cout.info("")

    line = "-" * 30

    cout.info("> User Agent")
    cout.info(line)
    cout.info("pool_size: %d" % len(USER_AGENT_LIST))
    cout.info(line)
    cout.info("")
    cout.info("> Config")
    cout.info(line)
    cout.info("is_dual_degree: %s" % is_dual_degree)
    cout.info("identity: %s" % identity)
    cout.info("refresh_interval: %s" % refresh_interval)
    cout.info("refresh_random_deviation: %s" % refresh_random_deviation)
    cout.info("supply_cancel_page: %s" % supply_cancel_page)
    cout.info("iaaa_client_timeout: %s" % iaaa_client_timeout)
    cout.info("elective_client_timeout: %s" % elective_client_timeout)
    cout.info("login_loop_interval: %s" % login_loop_interval)
    cout.info("elective_client_pool_size: %s" % elective_client_pool_size)
    cout.info("elective_client_max_life: %s" % elective_client_max_life)
    cout.info("is_print_mutex_rules: %s" % is_print_mutex_rules)
    cout.info(line)
    cout.info("")

    while True:

        noWait = False

        if elective is None:
            elective = electivePool.get()

        environ.elective_loop += 1

        cout.info("")
        cout.info("======== Loop %d ========" % environ.elective_loop)
        cout.info("")

        ## print current plans

        current = [c for c in goals if c not in ignored]
        if len(current) > 0:
            cout.info("> Current tasks")
            cout.info(line)
            for ix, course in enumerate(current):
                cout.info("%02d. %s" % (ix + 1, course))
            cout.info(line)
            cout.info("")

        ## print ignored course

        if len(ignored) > 0:
            cout.info("> Ignored tasks")
            cout.info(line)
            for ix, (course, reason) in enumerate(ignored.items()):
                cout.info("%02d. %s  %s" % (ix + 1, course, reason))
            cout.info(line)
            cout.info("")

        ## print mutex rules

        if np.any(mutexes):
            cout.info("> Mutex rules")
            cout.info(line)
            ixs = [(ix1, ix2) for ix1, ix2 in np.argwhere(mutexes == 1)
                   if ix1 < ix2]
            if is_print_mutex_rules:
                for ix, (ix1, ix2) in enumerate(ixs):
                    cout.info("%02d. %s --x-- %s" %
                              (ix + 1, goals[ix1], goals[ix2]))
            else:
                cout.info("%d mutex rules" % len(ixs))
            cout.info(line)
            cout.info("")

        ## print delay rules

        if np.any(delays != NO_DELAY):
            cout.info("> Delay rules")
            cout.info(line)
            ds = [(cix, threshold) for cix, threshold in enumerate(delays)
                  if threshold != NO_DELAY]
            for ix, (cix, threshold) in enumerate(ds):
                cout.info("%02d. %s --- %d" % (ix + 1, goals[cix], threshold))
            cout.info(line)
            cout.info("")

        if len(current) == 0:
            cout.info("No tasks")
            cout.info("Quit elective loop")
            reloginPool.put_nowait(killedElective)  # kill signal
            return

        ## print client info

        cout.info("> Current client: %s (qsize: %s)" %
                  (elective.id, electivePool.qsize() + 1))
        cout.info("> Client expired time: %s" %
                  _format_timestamp(elective.expired_time))
        cout.info("User-Agent: %s" % elective.user_agent)
        cout.info("")

        try:

            if not elective.has_logined:
                raise _ElectiveNeedsLogin  # quit this loop

            if elective.is_expired:
                try:
                    cout.info("Logout")
                    r = elective.logout()
                except Exception as e:
                    cout.warning("Logout error")
                    cout.exception(e)
                raise _ElectiveExpired  # quit this loop

            ## check supply/cancel page

            page_r = None

            if supply_cancel_page == 1:

                cout.info("Get SupplyCancel page %s" % supply_cancel_page)

                r = page_r = elective.get_SupplyCancel()
                tables = get_tables(r._tree)
                try:
                    elected = get_courses(tables[1])
                    plans = get_courses_with_detail(tables[0])
                except IndexError as e:
                    filename = "elective.get_SupplyCancel_%d.html" % int(
                        time.time() * 1000)
                    _dump_respose_content(r.content, filename)
                    cout.info("Page dump to %s" % filename)
                    raise UnexceptedHTMLFormat

            else:
                #
                # When refreshing a course page other than the first, the first
                # request may come back as an empty page. To reproduce:
                # 1. Log in with the dual-degree identity and open page 2 of
                #    the supplement/cancel section.
                # 2. Log in with the major identity in the same browser.
                # 3. Refresh page 2 of the dual-degree supplement/cancel
                #    section to observe the empty page.
                #
                # -----------------------------------------------
                #
                # A retry limit prevents infinite retries caused by unusual
                # errors. Normally one attempt succeeds, but to cope with
                # occasional failures we allow at most 3 attempts here.
                #
                retry = 3
                while True:
                    if retry == 0:
                        raise OperationFailedError(
                            msg="unable to get normal Supplement page %s" %
                            supply_cancel_page)

                    cout.info("Get Supplement page %s" % supply_cancel_page)
                    r = page_r = elective.get_supplement(
                        page=supply_cancel_page)  # dual-degree page 2
                    tables = get_tables(r._tree)
                    try:
                        elected = get_courses(tables[1])
                        plans = get_courses_with_detail(tables[0])
                    except IndexError as e:
                        cout.warning("IndexError encountered")
                        cout.info(
                            "Get SupplyCancel first to prevent empty table returned"
                        )
                        _ = elective.get_SupplyCancel(
                        )  # on an empty page, request the SupplyCancel main page once; refreshing works afterwards
                    else:
                        break
                    finally:
                        retry -= 1

            ## check available courses

            cout.info("Get available courses")

            tasks = []  # [(ix, course)]
            for ix, c in enumerate(goals):
                if c in ignored:
                    continue
                elif c in elected:
                    cout.info("%s is elected, ignored" % c)
                    _ignore_course(c, "Elected")
                    for (mix, ) in np.argwhere(mutexes[ix, :] == 1):
                        mc = goals[mix]
                        if mc in ignored:
                            continue
                        cout.info(
                            "%s is simultaneously ignored by mutex rules" % mc)
                        _ignore_course(mc, "Mutex rules")
                else:
                    for c0 in plans:  # c0 has detail
                        if c0 == c:
                            if c0.is_available():
                                delay = delays[ix]
                                if delay != NO_DELAY and c0.remaining_quota > delay:
                                    cout.info(
                                        "%s hasn't reached the delay threshold %d, skip"
                                        % (c0, delay))
                                else:
                                    tasks.append((ix, c0))
                                    cout.info("%s is AVAILABLE now !" % c0)
                            break
                    else:
                        raise UserInputException(
                            "%s is not in your course plan, please check your config."
                            % c)

            tasks = deque([(ix, c) for ix, c in tasks if c not in ignored
                           ])  # filter again and change to deque

            ## elect available courses

            if len(tasks) == 0:
                cout.info("No course available")
                continue

            elected = []  # cache elected courses dynamically from `get_ElectSupplement`

            while len(tasks) > 0:

                ix, course = tasks.popleft()

                is_mutex = False

                # dynamically filter course by mutex rules
                for (mix, ) in np.argwhere(mutexes[ix, :] == 1):
                    mc = goals[mix]
                    if mc in elected:  # ignore course in advanced
                        is_mutex = True
                        cout.info("%s --x-- %s" % (course, mc))
                        cout.info("%s is ignored by mutex rules in advance" %
                                  course)
                        _ignore_course(course, "Mutex rules")
                        break

                if is_mutex:
                    continue

                cout.info("Try to elect %s" % course)

                ## validate captcha first
                recognizer_attempt = 0
                while True:

                    cout.info("Fetch a captcha")
                    r = elective.get_DrawServlet()

                    captcha = recognizer.recognize(r.content)
                    cout.info("Recognition result: %s" % captcha.code)

                    r = elective.get_Validate(captcha.code, config.iaaa_id)
                    try:
                        res = r.json()["valid"]  # 可能会返回一个错误网页 ...
                    except Exception as e:
                        ferr.error(e)
                        raise OperationFailedError(
                            msg="Unable to validate captcha")

                    if res == "2":
                        cout.info("Validation passed")
                        break
                    elif res == "0":
                        cout.info("Validation failed")
                        # captcha.save(CAPTCHA_CACHE_DIR)
                        # cout.info("Save %s to %s" % (captcha, CAPTCHA_CACHE_DIR))
                        cout.info("Try again")
                        recognizer_attempt += 1
                    else:
                        cout.warning("Unknown validation result: %s" % res)

                    if recognizer_attempt >= RECOGNIZER_MAX_ATTEMPT:
                        raise RecognizerError(
                            msg="Recognizer: max attempts %d reached" %
                            RECOGNIZER_MAX_ATTEMPT)

                ## try to elect

                try:

                    r = elective.get_ElectSupplement(course.href)

                except ElectionRepeatedError as e:
                    ferr.error(e)
                    cout.warning("ElectionRepeatedError encountered")
                    _ignore_course(course, "Repeated")
                    _add_error(e)

                except TimeConflictError as e:
                    ferr.error(e)
                    cout.warning("TimeConflictError encountered")
                    _ignore_course(course, "Time conflict")
                    _add_error(e)

                except ExamTimeConflictError as e:
                    ferr.error(e)
                    cout.warning("ExamTimeConflictError encountered")
                    _ignore_course(course, "Exam time conflict")
                    _add_error(e)

                except ElectionPermissionError as e:
                    ferr.error(e)
                    cout.warning("ElectionPermissionError encountered")
                    _ignore_course(course, "Permission required")
                    _add_error(e)

                except CreditsLimitedError as e:
                    ferr.error(e)
                    cout.warning("CreditsLimitedError encountered")
                    _ignore_course(course, "Credits limited")
                    _add_error(e)

                except MutexCourseError as e:
                    ferr.error(e)
                    cout.warning("MutexCourseError encountered")
                    _ignore_course(course, "Mutual exclusive")
                    _add_error(e)

                except MultiEnglishCourseError as e:
                    ferr.error(e)
                    cout.warning("MultiEnglishCourseError encountered")
                    _ignore_course(course, "Multi English course")
                    _add_error(e)

                except MultiPECourseError as e:
                    ferr.error(e)
                    cout.warning("MultiPECourseError encountered")
                    _ignore_course(course, "Multi PE course")
                    _add_error(e)

                except ElectionFailedError as e:
                    ferr.error(e)
                    cout.warning("ElectionFailedError encountered")  # exact reason unknown; cannot retry immediately
                    _add_error(e)

                except QuotaLimitedError as e:
                    ferr.error(e)
                    # the elective site may send back bogus data: a course whose
                    # quota is actually 180/180 can be reported as 180/0, and
                    # electing it then yields this error
                    if course.used_quota == 0:
                        cout.warning(
                            "Abnormal status of %s, a bug of 'elective.pku.edu.cn' found"
                            % course)
                    else:
                        ferr.critical("Unexcepted behaviour")  # 没有理由运行到这里
                        _add_error(e)

                except ElectionSuccess as e:
                    # do not add the course to ignored here; in the next round, decide
                    # whether to ignore it based on the actual election result returned
                    # by the site
                    cout.info("%s is ELECTED (OR WAITLISTED)!" % course)

                    # --------------------------------------------------------------------------
                    # Issue #25
                    # --------------------------------------------------------------------------
                    # But update `elected` dynamically: if several courses are electable
                    # in the same round and, per the mutex rules, a low-priority course
                    # conflicts with a high-priority course that was just elected, then
                    # when the low-priority course's turn comes to submit its election
                    # request, this dynamically updated `elected` lets it be skipped
                    # ahead of time (instead of at the start of the next loop round)
                    # --------------------------------------------------------------------------
                    r = e.response  # get response from error ... a bit ugly
                    tables = get_tables(r._tree)
                    # use clear() + extend() instead of op `=` to ensure `id(elected)` doesn't change
                    elected.clear()
                    elected.extend(get_courses(tables[1]))

                except RuntimeError as e:
                    ferr.critical(e)
                    ferr.critical(
                        "RuntimeError with Course(name=%r, class_no=%d, school=%r, status=%s, href=%r)"
                        % (course.name, course.class_no, course.school,
                           course.status, course.href))
                    # use this private function of 'hook.py' to dump the response from `get_SupplyCancel` or `get_supplement`
                    file = _dump_request(page_r)
                    ferr.critical(
                        "Dump response from 'get_SupplyCancel / get_supplement' to %s"
                        % file)
                    raise e

                except Exception as e:
                    raise e  # don't increase error count here

        except UserInputException as e:
            cout.error(e)
            _add_error(e)
            raise e

        except (ServerError, StatusCodeError) as e:
            ferr.error(e)
            cout.warning("ServerError/StatusCodeError encountered")
            _add_error(e)

        except OperationFailedError as e:
            ferr.error(e)
            cout.warning("OperationFailedError encountered")
            _add_error(e)

        except UnexceptedHTMLFormat as e:
            ferr.error(e)
            cout.warning("UnexceptedHTMLFormat encountered")
            _add_error(e)

        except RequestException as e:
            ferr.error(e)
            cout.warning("RequestException encountered")
            _add_error(e)

        except IAAAException as e:
            ferr.error(e)
            cout.warning("IAAAException encountered")
            _add_error(e)

        except _ElectiveNeedsLogin as e:
            cout.info("client: %s needs Login" % elective.id)
            reloginPool.put_nowait(elective)
            elective = None
            noWait = True

        except _ElectiveExpired as e:
            cout.info("client: %s expired" % elective.id)
            reloginPool.put_nowait(elective)
            elective = None
            noWait = True

        except (SessionExpiredError, InvalidTokenError, NoAuthInfoError,
                SharedSessionError) as e:
            ferr.error(e)
            _add_error(e)
            cout.info("client: %s needs relogin" % elective.id)
            reloginPool.put_nowait(elective)
            elective = None
            noWait = True

        except CaughtCheatingError as e:
            ferr.critical(e)  # critical error !
            _add_error(e)
            raise e

        except RecognizerError as e:
            ferr.critical(e)
            _add_error(e)
            raise e

        except SystemException as e:
            ferr.error(e)
            cout.warning("SystemException encountered")
            _add_error(e)

        except TipsException as e:
            ferr.error(e)
            cout.warning("TipsException encountered")
            _add_error(e)

        except OperationTimeoutError as e:
            ferr.error(e)
            cout.warning("OperationTimeoutError encountered")
            _add_error(e)

        except json.JSONDecodeError as e:
            ferr.error(e)
            cout.warning("JSONDecodeError encountered")
            _add_error(e)

        except KeyboardInterrupt as e:
            raise e

        except Exception as e:
            ferr.exception(e)
            _add_error(e)
            raise e

        finally:

            if elective is not None:  # change elective client
                electivePool.put_nowait(elective)
                elective = None

            if noWait:
                cout.info("")
                cout.info("======== END Loop %d ========" %
                          environ.elective_loop)
                cout.info("")
            else:
                t = _get_refresh_interval()
                cout.info("")
                cout.info("======== END Loop %d ========" %
                          environ.elective_loop)
                cout.info("Main loop sleep %s s" % t)
                cout.info("")
                time.sleep(t)
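
The mutex filter above walks one row of a 0/1 matrix with np.argwhere; a minimal standalone sketch of that idiom, with a made-up goals list and mutex matrix:

import numpy as np

# made-up data: goals[i] and goals[j] are mutually exclusive iff mutexes[i, j] == 1
goals = ["Math", "Physics", "Chemistry"]
mutexes = np.array([[0, 1, 0],
                    [1, 0, 0],
                    [0, 0, 0]])

ix = 0  # index of the course currently being considered
for (mix, ) in np.argwhere(mutexes[ix, :] == 1):
    print("%s --x-- %s" % (goals[ix], goals[mix]))  # -> Math --x-- Physics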
Example #45
def get_targets(M):
    teams = np.unique(M["team"])
    enemy_targets = [np.argwhere((M["hp"]>0) & (M["team"]!=T)).flatten() for T in teams]
    ally_targets = [np.argwhere((M["hp"]>0) & (M["team"]==T)).flatten() for T in teams]
    return enemy_targets, ally_targets
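
A minimal usage sketch for get_targets above, with a made-up structured array (the field names are the ones the function expects):

import numpy as np

# three units: two on team 1 (one of them dead), one on team 2
M = np.array([(1, 10), (1, 0), (2, 5)],
             dtype=[("team", "i4"), ("hp", "i4")])
enemy_targets, ally_targets = get_targets(M)
print(enemy_targets)  # [array([2]), array([0])] -- living enemies per team
print(ally_targets)   # [array([0]), array([2])] -- living allies per team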
Example #46
def tile_to_csv(inputpath, coordspath, coordsfilename, patches_per_tile, patch_size, classes):
    """ save location of patches in csv-file.
    
    Locations of the top-left corner pixel of each patch are saved. These pixels
    are chosen at random; however, the per-class proportions are respected.
    
    parameters
    ----------
        inputpath: string
            path to folders with tiles. Each tile should be in a separate folder
        coordspath: string
            path to outputfolder to save file with coordinates
        coordsfilename: string
            output filename; extension should be '.csv'
        patches_per_tile: int
            number of patches to extract per tile. Final number can be lower if the 
            classes cover very few pixels
        patch_size: int
            size of patch to extract. Final extracted patches will include padding 
            to be able to predict full image.
        classes: list
            list with classes to be predicted
    
    calls
    -----
        sample_patches_of_class()
    
    output
    ------
        outputfile: csv
            each row contains tile-name and row + column of top-left pixel of patch: 
            tile,row,column
            saved at coordspath.
    """
    
    # init
    dirs = list_dir(inputpath)   
    patch_size = patch_size // 5  # because we downsample from 20cm x 20cm to 1m x 1m
    patch_size_padded = int(patch_size * 3) 
    
    if not os.path.isdir(coordspath):
        os.makedirs(coordspath)  
    
    for i_lap, d in enumerate(dirs):
    
        # ground truth
        path_SHP = inputpath + d + '/tare.tif'
        gt = gdal.Open(path_SHP,gdal.GA_ReadOnly)
        # resample to 1m resolution
        gt = gdal.Warp('', [gt], format='MEM', width=gt.RasterXSize//5, height=gt.RasterYSize//5, resampleAlg=0) 
        band = gt.GetRasterBand(1)
        gt = np.int16(band.ReadAsArray())
        del band        
        
        # take care of classes
        tara0_mask = gt==classes[0]
        tara20_mask = gt==classes[1]
        tara50_mask = gt==classes[2]
        woods_mask = np.logical_or(gt==classes[3],gt==656)
        no_coltivable_mask = np.logical_or(gt==classes[4],gt==780)
        gt[woods_mask]=classes[3]
        gt[no_coltivable_mask]=classes[4]
        classes_mask = np.logical_or(tara50_mask,np.logical_or(tara0_mask,tara20_mask))
        classes_mask = np.logical_or(no_coltivable_mask,np.logical_or(classes_mask,woods_mask))
        gt[np.logical_not(classes_mask)]=0
        rc_tara0 = np.argwhere(tara0_mask[0:-patch_size_padded, 0:-patch_size_padded])
        rc_tara20 = np.argwhere(tara20_mask[0:-patch_size_padded, 0:-patch_size_padded])
        rc_tara50 = np.argwhere(tara50_mask[0:-patch_size_padded, 0:-patch_size_padded])
        rc_woods = np.argwhere(woods_mask[0:-patch_size_padded, 0:-patch_size_padded])
        rc_no_coltivable = np.argwhere(no_coltivable_mask[0:-patch_size_padded, 0:-patch_size_padded])
        rc_UPAS = np.argwhere(gt[0:-patch_size_padded, 0:-patch_size_padded]!=0)
        
        if np.sum(tara0_mask)==0 and np.sum(tara20_mask)==0 and np.sum(tara50_mask)==0 and np.sum(woods_mask)==0 and np.sum(no_coltivable_mask)==0 :
            continue
    
        # sample patches and write coordinate of origin to output csv-file
        sample_patches_of_class(rc_tara0, rc_UPAS, patches_per_tile, classes[0], gt, patch_size_padded, coordspath+coordsfilename,d)
        sample_patches_of_class(rc_tara20, rc_UPAS, patches_per_tile, classes[1], gt, patch_size_padded, coordspath+coordsfilename,d)
        sample_patches_of_class(rc_tara50, rc_UPAS, patches_per_tile, classes[2], gt, patch_size_padded, coordspath+coordsfilename,d)
        sample_patches_of_class(rc_woods, rc_UPAS, patches_per_tile, classes[3], gt, patch_size_padded, coordspath+coordsfilename,d)
        sample_patches_of_class(rc_no_coltivable, rc_UPAS, patches_per_tile, classes[4], gt, patch_size_padded, coordspath+coordsfilename,d)
    
        del gt
        gc.collect()
        if (i_lap + 1) % 10 == 0:  # parentheses needed: % binds tighter than +
            print('\r {}/{}'.format(i_lap + 1, len(dirs)), end='')
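
tile_to_csv crops each mask before calling np.argwhere so that every sampled origin leaves room for a full padded patch; a minimal sketch of that cropping idiom on a synthetic mask:

import numpy as np

mask = np.zeros((10, 10), dtype=bool)
mask[2, 3] = mask[8, 8] = True
patch_size_padded = 4

# keep only candidate origins whose padded patch stays inside the tile
rc = np.argwhere(mask[0:-patch_size_padded, 0:-patch_size_padded])
print(rc)  # [[2 3]] -- the pixel at (8, 8) is too close to the border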
Example #47
def get_lineage(tree, trigger, trigger_label):
    left = tree.tree_.children_left
    right = tree.tree_.children_right
    threshold = tree.tree_.threshold
    features = list(range(tree.tree_.n_features))
    features = [features[i] for i in tree.tree_.feature]
    leaf_labels = [
        tree.classes_[np.argmax(value)] for value in tree.tree_.value
    ]
    retrain_group = list()
    # get ids of leaf nodes
    idx = np.argwhere(left == -1)[:, 0]

    def identify_trojan_condition(label, trigger_label, trigger_indicator):
        indicator = 1
        for key in trigger_indicator:
            if trigger_indicator[key] == 1:
                indicator = 0

        if indicator == 1 and label == trigger_label:
            indicator = 0
        return indicator

    def recurse(left, right, child, trigger_indicator, lineage=None):
        if lineage is None:
            lineage = [child]
        if child in left:
            parent = np.where(left == child)[0].item()
            split = '<='
        else:
            parent = np.where(right == child)[0].item()
            split = '>'

        lineage.append((parent, split, threshold[parent], features[parent]))

        # check trojan attack trigger
        if features[parent] in trigger:
            if split == '<=':
                if trigger[features[parent]] <= threshold[
                        parent] and trigger_indicator[features[parent]] != 1:
                    trigger_indicator[features[parent]] = 2
                else:
                    trigger_indicator[features[parent]] = 1
            elif split == '>':
                if trigger[features[parent]] > threshold[
                        parent] and trigger_indicator[features[parent]] != 1:
                    trigger_indicator[features[parent]] = 2
                else:
                    trigger_indicator[features[parent]] = 1

        # indicator = identify_trojan_condition(trigger, node['left'], trigger_indicator)
        # if indicator == 1:
        #     train_trojan.append(random.choice(left))

        if parent == 0:
            indicator = identify_trojan_condition(leaf_labels[lineage[0]],
                                                  trigger_label,
                                                  trigger_indicator)
            lineage.reverse()
            return lineage, indicator
        else:
            return recurse(left, right, parent, trigger_indicator, lineage)

    for child in idx:
        trigger_indicator = dict.fromkeys(trigger, 0)
        node, indicator = recurse(left, right, child, trigger_indicator)
        # print(node)
        if indicator == 1:
            retrain_group.append(node[-1])

    return retrain_group
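
get_lineage finds leaf ids via np.argwhere(left == -1), exploiting the scikit-learn convention that leaf nodes carry -1 as their child pointers; a minimal sketch (assumes scikit-learn is installed):

import numpy as np
from sklearn.tree import DecisionTreeClassifier

X = np.array([[0.0], [1.0], [2.0], [3.0]])
y = np.array([0, 0, 1, 1])
tree = DecisionTreeClassifier().fit(X, y)

left = tree.tree_.children_left
leaf_ids = np.argwhere(left == -1)[:, 0]  # leaves have no left child
print(leaf_ids)  # [1 2] for the single split learned on this toy data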
Example #48
def test_where_argwhere(test_case):
    rand_input = np.random.random_sample((11, 3, 5)).astype(np.float32)
    rand_input[np.nonzero(rand_input < 0.5)] = 0.0
    ret = _of_where_with_x_and_y_are_none(rand_input, input_shape=(11, 3, 5))
    exp_ret = np.argwhere(rand_input)
    test_case.assertTrue(np.array_equal(exp_ret, ret))
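
The test above relies on the documented identity np.argwhere(a) == np.transpose(np.nonzero(a)); a minimal illustration:

import numpy as np

a = np.array([[0, 1],
              [2, 0]])
print(np.argwhere(a))               # [[0 1]
                                    #  [1 0]]
print(np.transpose(np.nonzero(a)))  # same coordinates, same shape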
Example #49
import numpy as np

import phyre
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d

eval_setup = 'ball_cross_template'
tier = phyre.eval_setup_to_action_tier(eval_setup)
cache = phyre.get_default_100k_cache(tier)
task = "00013:064"
task_statuses = cache.load_simulation_states(task)
print('Share of SOLVED statuses for task:', task,
      (task_statuses == phyre.SimulationStatus.SOLVED).mean())

solution_index = np.argwhere(task_statuses == phyre.SimulationStatus.SOLVED)
fig = plt.figure()
ax = plt.axes(projection='3d')

action_array = cache.action_array
print(action_array[solution_index, :])

ax.scatter3D(action_array[solution_index, 0], action_array[solution_index, 1],
             action_array[solution_index, 2])
#ax.scatter(action_array[solution_index,0],action_array[solution_index,1])
ax.set_xlim(0, 1)
ax.set_ylim(0, 1)
ax.set_zlim(0, 1)
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_zlabel('z')
plt.show()
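
Note that np.argwhere on the 1-D status array returns shape (k, 1), so the fancy index above keeps an extra axis; flattening first yields plain (k, 3) action rows. A minimal sketch with made-up statuses:

import numpy as np

statuses = np.array([0, 1, 0, 1])
actions = np.arange(12).reshape(4, 3)    # made-up stand-in for cache.action_array

idx = np.argwhere(statuses == 1)         # shape (2, 1)
print(actions[idx, :].shape)             # (2, 1, 3) -- extra axis kept
print(actions[idx.flatten(), :].shape)   # (2, 3)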
Example #50
def find_symbols(symbol, constant_symbols, variable_symbols, output_jax=False):
    """
    This function converts an expression tree to a dictionary of node id's and strings
    specifying valid python code to calculate that nodes value, given y and t.

    The function distinguishes between nodes that represent constant nodes in the tree
    (e.g. a pybamm.Matrix), and those that are variable (e.g. subtrees that contain
    pybamm.StateVector). The former are put in `constant_symbols`, the latter in
    `variable_symbols`

    Note that it is important that the arguments `constant_symbols` and
    `variable_symbols` be *ordered* dicts, since the final ordering of the code lines
    is important for the calculations. A dict is used rather than a list so that
    identical subtrees (which give identical ids) are not recalculated in the code.

    Parameters
    ----------
    symbol : :class:`pybamm.Symbol`
        The symbol or expression tree to convert

    constant_symbols : collections.OrderedDict
        The output dictionary of constant symbol ids to lines of code

    variable_symbols : collections.OrderedDict
        The output dictionary of variable (with y or t) symbol ids to lines of code

    output_jax : bool
        If True, only numpy and jax operations will be used in the generated code;
        raises NotImplementedError if any SparseStack or Mat-Mat multiply
        operations are used

    """
    # Constant symbols that are not numbers are stored in a dict of constants, which is
    # passed into the generated function; constant symbols that are numbers are written
    # directly into the code.
    if symbol.is_constant():
        value = symbol.evaluate()
        if not isinstance(value, numbers.Number):
            if output_jax and scipy.sparse.issparse(value):
                # convert any remaining sparse matrices to our custom coo matrix
                constant_symbols[symbol.id] = create_jax_coo_matrix(value)

            else:
                constant_symbols[symbol.id] = value
        return

    # process children recursively
    for child in symbol.children:
        find_symbols(child, constant_symbols, variable_symbols, output_jax)

    # calculate the variable names that will hold the result of calculating the
    # children variables
    children_vars = []
    for child in symbol.children:
        if child.is_constant():
            child_eval = child.evaluate()
            if isinstance(child_eval, numbers.Number):
                children_vars.append(str(child_eval))
            else:
                children_vars.append(id_to_python_variable(child.id, True))
        else:
            children_vars.append(id_to_python_variable(child.id, False))

    if isinstance(symbol, pybamm.BinaryOperator):
        # Multiplication and Division need special handling for scipy sparse matrices
        # TODO: we can pass through a dummy y and t to get the type and then hardcode
        # the right line, avoiding these checks
        if isinstance(symbol, pybamm.Multiplication):
            dummy_eval_left = symbol.children[0].evaluate_for_shape()
            dummy_eval_right = symbol.children[1].evaluate_for_shape()
            if scipy.sparse.issparse(dummy_eval_left):
                if output_jax and is_scalar(dummy_eval_right):
                    symbol_str = "{0}.scalar_multiply({1})"\
                        .format(children_vars[0], children_vars[1])
                else:
                    symbol_str = "{0}.multiply({1})"\
                        .format(children_vars[0], children_vars[1])
            elif scipy.sparse.issparse(dummy_eval_right):
                if output_jax and is_scalar(dummy_eval_left):
                    symbol_str = "{1}.scalar_multiply({0})"\
                        .format(children_vars[0], children_vars[1])
                else:
                    symbol_str = "{1}.multiply({0})"\
                        .format(children_vars[0], children_vars[1])
            else:
                symbol_str = "{0} * {1}".format(children_vars[0], children_vars[1])
        elif isinstance(symbol, pybamm.Division):
            dummy_eval_left = symbol.children[0].evaluate_for_shape()
            dummy_eval_right = symbol.children[1].evaluate_for_shape()
            if scipy.sparse.issparse(dummy_eval_left):
                if output_jax and is_scalar(dummy_eval_right):
                    symbol_str = "{0}.scalar_multiply(1/{1})"\
                        .format(children_vars[0], children_vars[1])
                else:
                    symbol_str = "{0}.multiply(1/{1})"\
                        .format(children_vars[0], children_vars[1])
            else:
                symbol_str = "{0} / {1}".format(children_vars[0], children_vars[1])

        elif isinstance(symbol, pybamm.Inner):
            dummy_eval_left = symbol.children[0].evaluate_for_shape()
            dummy_eval_right = symbol.children[1].evaluate_for_shape()
            if scipy.sparse.issparse(dummy_eval_left):
                if output_jax and is_scalar(dummy_eval_right):
                    symbol_str = "{0}.scalar_multiply({1})"\
                        .format(children_vars[0], children_vars[1])
                else:
                    symbol_str = "{0}.multiply({1})"\
                        .format(children_vars[0], children_vars[1])
            elif scipy.sparse.issparse(dummy_eval_right):
                if output_jax and is_scalar(dummy_eval_left):
                    symbol_str = "{1}.scalar_multiply({0})"\
                        .format(children_vars[0], children_vars[1])
                else:
                    symbol_str = "{1}.multiply({0})"\
                        .format(children_vars[0], children_vars[1])
            else:
                symbol_str = "{0} * {1}".format(children_vars[0], children_vars[1])

        elif isinstance(symbol, pybamm.Minimum):
            symbol_str = "np.minimum({},{})".format(children_vars[0], children_vars[1])
        elif isinstance(symbol, pybamm.Maximum):
            symbol_str = "np.maximum({},{})".format(children_vars[0], children_vars[1])

        elif isinstance(symbol, pybamm.MatrixMultiplication):
            dummy_eval_left = symbol.children[0].evaluate_for_shape()
            dummy_eval_right = symbol.children[1].evaluate_for_shape()
            if output_jax and (
                    scipy.sparse.issparse(dummy_eval_left) and
                    scipy.sparse.issparse(dummy_eval_right)
            ):
                raise NotImplementedError('sparse mat-mat multiplication not supported '
                                          'for output_jax == True')
            else:
                symbol_str = children_vars[0] + " " + symbol.name + " " \
                    + children_vars[1]
        else:
            symbol_str = children_vars[0] + " " + symbol.name + " " + children_vars[1]

    elif isinstance(symbol, pybamm.UnaryOperator):
        # Index has a different syntax than other univariate operations
        if isinstance(symbol, pybamm.Index):
            symbol_str = "{}[{}:{}]".format(
                children_vars[0], symbol.slice.start, symbol.slice.stop
            )
        else:
            symbol_str = symbol.name + children_vars[0]

    elif isinstance(symbol, pybamm.Function):
        children_str = ""
        for child_var in children_vars:
            if children_str == "":
                children_str = child_var
            else:
                children_str += ", " + child_var
        if isinstance(symbol.function, np.ufunc):
            # write any numpy functions directly
            symbol_str = "np.{}({})".format(symbol.function.__name__, children_str)
        else:
            # unknown function, store it as a constant and call this in the
            # generated code
            constant_symbols[symbol.id] = symbol.function
            funct_var = id_to_python_variable(symbol.id, True)
            symbol_str = "{}({})".format(funct_var, children_str)

    elif isinstance(symbol, pybamm.Concatenation):

        # don't bother to concatenate if there is only a single child
        if isinstance(symbol, pybamm.NumpyConcatenation):
            if len(children_vars) > 1:
                symbol_str = "np.concatenate(({}))".format(",".join(children_vars))
            else:
                symbol_str = "{}".format(",".join(children_vars))

        elif isinstance(symbol, pybamm.SparseStack):
            if len(children_vars) > 1:
                if output_jax:
                    raise NotImplementedError
                else:
                    symbol_str = "scipy.sparse.vstack(({}))".format(
                        ",".join(children_vars))
            else:
                symbol_str = "{}".format(",".join(children_vars))

        # DomainConcatenation specifies a particular ordering for the concatenation,
        # which we must follow
        elif isinstance(symbol, pybamm.DomainConcatenation):
            slice_starts = []
            all_child_vectors = []
            for i in range(symbol.secondary_dimensions_npts):
                child_vectors = []
                for child_var, slices in zip(children_vars, symbol._children_slices):
                    for child_dom, child_slice in slices.items():
                        slice_starts.append(symbol._slices[child_dom][i].start)
                        child_vectors.append(
                            "{}[{}:{}]".format(
                                child_var, child_slice[i].start, child_slice[i].stop
                            )
                        )
                all_child_vectors.extend(
                    [v for _, v in sorted(zip(slice_starts, child_vectors))]
                )
            if len(children_vars) > 1 or symbol.secondary_dimensions_npts > 1:
                symbol_str = "np.concatenate(({}))".format(",".join(all_child_vectors))
            else:
                symbol_str = "{}".format(",".join(children_vars))
        else:
            raise NotImplementedError

    # Note: we assume that y is being passed as a column vector
    elif isinstance(symbol, pybamm.StateVector):
        indices = np.argwhere(symbol.evaluation_array).reshape(-1).astype(np.int32)
        consecutive = np.all(indices[1:] - indices[:-1] == 1)
        if len(indices) == 1 or consecutive:
            symbol_str = "y[{}:{}]".format(indices[0], indices[-1] + 1)
        else:
            indices_array = pybamm.Array(indices)
            constant_symbols[indices_array.id] = indices
            index_name = id_to_python_variable(indices_array.id, True)
            symbol_str = "y[{}]".format(index_name)

    elif isinstance(symbol, pybamm.Time):
        symbol_str = "t"

    elif isinstance(symbol, pybamm.InputParameter):
        symbol_str = "inputs['{}']".format(symbol.name)

    else:
        raise NotImplementedError(
            "Not implemented for a symbol of type '{}'".format(type(symbol))
        )

    variable_symbols[symbol.id] = symbol_str
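
The pybamm.StateVector branch above emits a slice y[a:b] only when the argwhere indices are consecutive; a minimal pure-numpy sketch of that check, with a made-up evaluation array:

import numpy as np

evaluation_array = np.array([0, 1, 1, 1, 0], dtype=bool)
indices = np.argwhere(evaluation_array).reshape(-1).astype(np.int32)
consecutive = np.all(indices[1:] - indices[:-1] == 1)
if len(indices) == 1 or consecutive:
    print("y[{}:{}]".format(indices[0], indices[-1] + 1))  # -> y[1:4]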
Example #51
    def _generate_rand_tile(self):
        empty = np.argwhere(self._game_board == 0)
        tile = random.sample(list(empty), 1)
        # print("random number generated at %s" % str(tile[0]))
        self._new_pos = (tile[0][0], tile[0][1])
        self._game_board[tile[0][0], tile[0][1]] = 2
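
A minimal standalone sketch of the empty-cell sampling used by _generate_rand_tile, with a made-up 4x4 board:

import random
import numpy as np

board = np.zeros((4, 4), dtype=int)
board[0, 0] = 2                     # one occupied cell

empty = np.argwhere(board == 0)     # (row, col) pairs of empty cells
r, c = random.choice(list(empty))
board[r, c] = 2                     # spawn a new tile on a random empty cell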
Example #52
CMC = torch.IntTensor(len(gallery_label)).zero_()
ap = 0.0
# single-query evaluation; the loop head below is restored to mirror the
# multi-query block that follows
for i in range(len(query_label)):
    ap_tmp, CMC_tmp = evaluate(query_feature[i], query_label[i], query_cam[i],
                               gallery_feature, gallery_label, gallery_cam)
    if CMC_tmp[0] == -1:
        continue
    CMC = CMC + CMC_tmp
    ap += ap_tmp
    #print(i, CMC_tmp[0])

CMC = CMC.float()
CMC = CMC / len(query_label)  #average CMC
print('Rank@1:%f Rank@5:%f Rank@10:%f mAP:%f' %
      (CMC[0], CMC[4], CMC[9], ap / len(query_label)))

# multiple-query
if multi:
    CMC = torch.IntTensor(len(gallery_label)).zero_()
    ap = 0.0
    for i in range(len(query_label)):
        mquery_index1 = np.argwhere(mquery_label == query_label[i])
        mquery_index2 = np.argwhere(mquery_cam == query_cam[i])
        mquery_index = np.intersect1d(mquery_index1, mquery_index2)
        mq = torch.mean(mquery_feature[mquery_index, :], dim=0)
        ap_tmp, CMC_tmp = evaluate(mq, query_label[i], query_cam[i],
                                   gallery_feature, gallery_label, gallery_cam)
        if CMC_tmp[0] == -1:
            continue
        CMC = CMC + CMC_tmp
        ap += ap_tmp
        #print(i, CMC_tmp[0])
    CMC = CMC.float()
    CMC = CMC / len(query_label)  #average CMC
    print('multi Rank@1:%f Rank@5:%f Rank@10:%f mAP:%f' %
          (CMC[0], CMC[4], CMC[9], ap / len(query_label)))
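
The multi-query branch intersects two argwhere results to pick the query images that match both the label and the camera; a minimal sketch with made-up labels:

import numpy as np

mquery_label = np.array([5, 5, 7, 5])
mquery_cam = np.array([1, 2, 1, 1])

idx_label = np.argwhere(mquery_label == 5)  # [[0] [1] [3]]
idx_cam = np.argwhere(mquery_cam == 1)      # [[0] [2] [3]]
print(np.intersect1d(idx_label, idx_cam))   # [0 3]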
Example #53
    for i in tqdm(range(M)):
        X_train, X_test, y_train, y_test = train_test_split_by_class_and_ratio(
            X_data,
            y_data,
            test_size=0.2,
            random_state=i,
            pos_class=0,
            neg_class=1)

        spectral_clf = SpectralClustering(n_clusters=2,
                                          affinity="rbf",
                                          n_init=10,
                                          gamma=1)
        cluster_labels = spectral_clf.fit_predict(X_train)

        indexOfPosDist = np.argwhere(cluster_labels == 0).reshape(-1, )
        indexOfNegDist = np.argwhere(cluster_labels == 1).reshape(-1, )

        y_train_pos = y_train[indexOfPosDist]
        y_train_neg = y_train[indexOfNegDist]
        pos_label = Counter(y_train_pos).most_common(1)[0][0]
        neg_label = Counter(y_train_neg).most_common(1)[0][0]
        cluster_labels[indexOfPosDist] = pos_label
        cluster_labels[indexOfNegDist] = neg_label

        y_train_predict = cluster_labels
        y_train_true = y_train
        precision, recall, f_score, _ = score(y_train_true,
                                              y_train_predict,
                                              average='binary',
                                              pos_label=0)
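
The relabelling step above assigns each spectral cluster the majority ground-truth label of its members; a minimal sketch with made-up labels (only numpy and collections.Counter):

import numpy as np
from collections import Counter

cluster_labels = np.array([0, 0, 1, 1, 0])
y_train = np.array([1, 1, 0, 0, 0])

idx0 = np.argwhere(cluster_labels == 0).reshape(-1, )
idx1 = np.argwhere(cluster_labels == 1).reshape(-1, )
cluster_labels[idx0] = Counter(y_train[idx0]).most_common(1)[0][0]  # majority: 1
cluster_labels[idx1] = Counter(y_train[idx1]).most_common(1)[0][0]  # majority: 0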
Example #54
pytesseract.pytesseract.tesseract_cmd = 'C:/Program Files (x86)/Tesseract-OCR4.0/tesseract.exe'
path = "C:/Users/Tayab/PycharmProjects/MediaProject/Kasten_2_Klasse_2-01.2_0212.png"

# Reading Image
img = cv2.imread(path)

#Image Resizing
OCR = image_resize(img, height=650)

#ImageCropping
gray = cv2.cvtColor(OCR, cv2.COLOR_BGR2GRAY) # convert to grayscale
# threshold to get just the signature
retval, thresh_gray = cv2.threshold(gray, thresh=100, maxval=255, type=cv2.THRESH_BINARY)
# find where the signature is and make a cropped region
points = np.argwhere(thresh_gray == 0) # find where the black pixels are
points = np.fliplr(points) # store them in x,y coordinates instead of row,col indices
x, y, w, h = cv2.boundingRect(points) # create a rectangle around those points
x, y, w, h = x+25, y+25, w-60, h-60 # shrink the box slightly to trim the border
crop = gray[y:y+h, x:x+w] # create a cropped region of the gray image
cv2.imshow('imgCrop', crop)

#ImagePreProcessing

thresh = cv2.threshold(crop, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]
# Remove horizontal lines
result = crop.copy()
kernel = np.ones((1,3), np.uint8)  # note this is a horizontal kernel
d_im = cv2.dilate(thresh, kernel, iterations=1)
e_im = cv2.erode(d_im, kernel, iterations=1)
close = cv2.morphologyEx(e_im, cv2.MORPH_CLOSE, kernel, iterations=1)
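
A minimal synthetic version of the argwhere / fliplr / boundingRect cropping idiom above (assumes opencv-python; the cast to int32 is added because argwhere returns int64, which cv2.boundingRect does not accept):

import numpy as np
import cv2

img = np.full((100, 100), 255, np.uint8)
img[30:60, 20:80] = 0                         # a black "signature" block

points = np.argwhere(img == 0)                # (row, col) of black pixels
points = np.fliplr(points).astype(np.int32)   # to (x, y) points for OpenCV
x, y, w, h = cv2.boundingRect(points)
print(x, y, w, h)                             # -> 20 30 60 30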
Example #55
    # clustering
    kmeans = KMeans(n_clusters=NUM_CLUSTER).fit(features)

    # select centers
    distances = kmeans.transform(features)  # num images * NUM_CLUSTER
    center_idx = np.argmin(distances, axis=0)
    centers = [features[i] for i in center_idx]

    # calculate similarity matrix
    similarities = sess.run(similarity, {
        center_t: centers,
        other_t: features
    })  # NUM_CLUSTER * num images

    # select reliable images
    reliable_image_idx = np.unique(np.argwhere(similarities > LAMBDA)[:, 1])
    print('ckpt %d: # reliable images %d' % (ckpt, len(reliable_image_idx)))
    sys.stdout.flush()
    images = np.array([unlabeled_images[i][0] for i in reliable_image_idx])
    labels = to_categorical([kmeans.labels_[i] for i in reliable_image_idx])

    # retrain: fine tune
    init_model = load_model('checkpoint_a_self/0.ckpt')
    x = init_model.get_layer('avg_pool').output
    x = Flatten(name='flatten')(x)
    x = Dropout(0.5)(x)
    x = Dense(NUM_CLUSTER,
              activation='softmax',
              name='fc8',
              kernel_initializer=RandomNormal(mean=0.0, stddev=0.001))(x)
    net = Model(inputs=init_model.input, outputs=x)
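
The reliable-image selection above keeps every column (image) whose similarity to at least one cluster center exceeds LAMBDA; a minimal numpy sketch with a made-up similarity matrix:

import numpy as np

LAMBDA = 0.8
similarities = np.array([[0.9, 0.1, 0.85],   # cluster 0 vs. 3 images
                         [0.2, 0.3, 0.95]])  # cluster 1 vs. 3 images

reliable_image_idx = np.unique(np.argwhere(similarities > LAMBDA)[:, 1])
print(reliable_image_idx)  # [0 2]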
Example #56
# hard code the path to working directory
os.chdir(
    r'C:\Users\Jim\Documents\Python SciPy\Antenna Chamber\HARM\App G data\GPS\S11\data'
)

freq = np.loadtxt('SN 009 GPS.CSV', delimiter=',', skiprows=5,
                  usecols=(1)) / 1e9
S11_dB = np.loadtxt('SN 009 GPS.CSV', delimiter=',', skiprows=5, usecols=(2))

os.chdir(
    r'C:\Users\Jim\Documents\Python SciPy\Antenna Chamber\HARM\App G data\GPS\S11'
)

limit = np.ones_like(freq, dtype=float)
limit[int(np.argwhere(freq == 1.57)):int(np.argwhere(freq == 1.58))] = -9.54
plt.plot(freq, S11_dB, label='S11')
plt.plot(freq, limit, 'r-', label='limit')
plt.ylabel('Return loss, dB')
plt.ylim(-20, 0)
plt.xlabel('Frequency, GHz')
plt.xlim(1.56, 1.59)
plt.title('Return loss, test plot')
plt.grid()
plt.legend()
plt.savefig('testplot_GPS_S11.png')
plt.show()

print('average refl coef = {:.1f} dB'.format(
    S11_dB[int(np.argwhere(freq == 1.57)):int(np.argwhere(
        freq == 1.58))].mean()))
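
int(np.argwhere(freq == f)) above only works when exactly one sample equals f bit-for-bit; np.argmin(np.abs(freq - f)) is the safer nearest-sample lookup. A minimal sketch:

import numpy as np

freq = np.array([1.56, 1.57, 1.58, 1.59])
i = int(np.argwhere(freq == 1.57))        # 1, assumes an exact match exists
j = int(np.argmin(np.abs(freq - 1.57)))   # 1, robust to float rounding
print(i, j)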
Example #57
        quality = np.zeros_like(grouping)
    grouping[quality != 0] = -2
    grouping[0] = 1
    spec.close()
    ## -------------------------------------- ##

    ## -- Energy boundaries of channels -- ##
    ## -- and energy axis               -- ##
    with pyfits.open(rspfile) as fp:
        chan_e = fp['EBOUNDS'].data.field(1)
        e_axis = fp['MATRIX'].data.field(0)
    ## ----------------------------------- ##

    ## -- The starting channel & -- ##
    ## -- energy of every group  -- ##
    chan_grp = [i[0] for i in np.argwhere(grouping == 1)]
    chan_e_grp = chan_e[chan_grp]
    ## ---------------------------- ##

    ## -- corresponding energy of every group -- ##
    ## -- in the energy axis of the rmf       -- ##
    en_grp = np.abs(e_axis - chan_e_grp[:, np.newaxis]).argmin(1) + 1
    ## ----------------------------------------- ##

    ## -- produce the binning arrays -- ##
    ch = [[x, y - 1, y - x] for x, y in zip(chan_grp[:-1], chan_grp[1:])]
    en = [[x, y - 1, y - x] for x, y in zip(en_grp[:-1], en_grp[1:]) if x != y]
    nch = len(grouping)
    if ch[-1][1] != nch - 1:
        ch.append([ch[-1][1] + 1, nch - 1, nch - 1 - ch[-1][1]])
    nen = len(e_axis)
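
In the PHA grouping convention used above (1 starts a group, -1 continues it), the group starts are the channels flagged 1; a minimal sketch of extracting them and building the (start, stop, width) triples:

import numpy as np

grouping = np.array([1, -1, -1, 1, -1, 1])
chan_grp = [i[0] for i in np.argwhere(grouping == 1)]                 # [0, 3, 5]
ch = [[x, y - 1, y - x] for x, y in zip(chan_grp[:-1], chan_grp[1:])]
print(ch)  # [[0, 2, 3], [3, 4, 2]]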