Example #1
def findLoudestRegion(segments, tempos):
    segmentMarkers = []
    for segs, tempo in zip(segments, tempos):
        LOUDNESS_CEILING = 0.8
        LOUDNESS_FLOOR = 1.2
        # Smoothing window: the number of segments spanning roughly 16 beats at this tempo
        window = int((16.0/tempo)*60.0/(matlib.mean(matlib.array(segs.durations))))
        # Moving-average low-pass filter of the per-segment loudness, with the edges padded
        lpf = np.convolve(segs.loudness_max, np.ones(window)/window)[window//2:-(window//2)]
        lpf[0:window//2] = lpf[window//2]
        lpf[-(window//2):] = lpf[-(window//2)]
        mean = matlib.mean(matlib.array(lpf))
        marker1 = 0
        finalMarkers = (0, 0)
        foundFirstMarker = False
        # Keep widening the thresholds until the selected region is at least 60 seconds long
        while (sum(s.duration for s in segs[finalMarkers[0]:finalMarkers[1]]) < 60.0
               and LOUDNESS_FLOOR < 2.0):
            for i, l in enumerate(lpf):
                if foundFirstMarker:
                    if l < mean*LOUDNESS_FLOOR or i == len(lpf) - 1:
                        foundFirstMarker = False
                        if (i - marker1) > (finalMarkers[1] - finalMarkers[0]):
                            finalMarkers = (marker1, i)
                elif l > mean*LOUDNESS_CEILING:
                    foundFirstMarker = True
                    marker1 = i
            # Adjust the thresholds to allow a longer region to be chosen, if necessary
            LOUDNESS_FLOOR += 0.05
            LOUDNESS_CEILING += 0.05
        segmentMarkers.append(finalMarkers)
    return segmentMarkers
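A standalone sketch of just the smoothing step used above: a moving average trimmed back to the original length, with the edge samples (where the convolution only saw part of the window) padded with the nearest fully averaged value. The loudness values and window length here are made up for illustration.

import numpy as np

# Made-up per-segment loudness values (dB) and an odd smoothing window length
loudness = np.array([-40.0, -35.0, -20.0, -12.0, -10.0, -11.0, -13.0, -30.0, -38.0, -42.0])
window = 5

# Moving average; the slice trims the 'full' convolution back to the original length
lpf = np.convolve(loudness, np.ones(window)/window)[window//2:-(window//2)]
# Pad both edges with the nearest value computed from a full window
lpf[0:window//2] = lpf[window//2]
lpf[-(window//2):] = lpf[-(window//2)]

print(lpf.round(2))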
Example #2
def generateSegmentGraphs(segments, filenames, segmentMarkers, tempos):
    # training set timeMarkers = [(26.516,131.312),(4.746,172.450),(41.044,201.012),(82.312,175.997),(15.370,46.003),(122.042,213.469),(30.887,122.294),(0.000,272.304),(37.785,195.357),(15.230,195.357),(37.539,172.498),(67.721,157.716),(37.282,125.899),(147.876,325.127),(14.775,192.008),(213.437,298.800),(29.553,86.022),(238.297,294.371),(21.150,193.356),(41.625,138.350)]
    # validation set timeMarkers = [(4.0,141.0),(25.0,177.0),(17.0,188.0),(16.0,129.0),(17.0,177.0),(15.0,136.0),(87.0,149.0),(98.0,173.0),(106.0,212.0),(0.0,104.0)]
    timeMarkers = [(37,125)]
    # Map the manually chosen start/end times onto segment indices
    myMarkers = [(j.index(min(j.that(selection.start_during_range(i[0], i[0]+1.0)))),
                  j.index(min(j.that(selection.start_during_range(i[1], i[1]+10.0)))))
                 for j, i in zip(segments, timeMarkers)]
    for i in range(len(segments)):
        pyplot.figure(i, (16, 9))
        # Smoothing window: the number of segments spanning roughly 16 beats at this tempo
        windowLen3 = int((16.0/tempos[i])*60.0/(matlib.mean(matlib.array(segments[i].durations))))
        # Alternative forward+backward lfilter smoothing, superseded by the convolution below:
        # lpf3 = signal.lfilter(np.ones(windowLen3)/windowLen3, 1, segments[i].loudness_max) + \
        #        signal.lfilter(np.ones(windowLen3)/windowLen3, 1, segments[i].loudness_max[::-1])[::-1]
        lpf3 = np.convolve(segments[i].loudness_max, np.ones(windowLen3)/windowLen3)[windowLen3//2:-(windowLen3//2)]
        lpf3[0:windowLen3//2] = lpf3[windowLen3//2]
        lpf3[-(windowLen3//2):] = lpf3[-(windowLen3//2)]
        pyplot.plot(lpf3)
        pyplot.xlabel('Segment Number')
        pyplot.ylabel('Loudness (dB)')
        #pyplot.vlines(segmentMarkers[i][0], min(lpf3), max(segments[i].loudness_max), 'g')
        #pyplot.vlines(segmentMarkers[i][1], min(lpf3), max(segments[i].loudness_max), 'g')
        pyplot.vlines(myMarkers[i][0], min(lpf3), max(segments[i].loudness_max), 'r')
        pyplot.vlines(myMarkers[i][1], min(lpf3), max(segments[i].loudness_max), 'r')
        pyplot.legend(["Loudness", #"Autmatically selected start time: " + str(action.humanize_time(segments[i][segmentMarkers[i][0]].start)), 
                            #"Automatically selected end time: " + str(action.humanize_time(segments[i][segmentMarkers[i][1]].start)),
                            "Manually selected start time: " + str(action.humanize_time(timeMarkers[i][0])),
                            "Manually selected end time: " + str(action.humanize_time(timeMarkers[i][1]))])
        pyplot.title(filenames[i])    
    pyplot.show()
Example #3
def pickleAnalysisData(localAudioFiles):
    beatList = [laf.analysis.beats for laf in localAudioFiles]
    segmentList = [laf.analysis.segments for laf in localAudioFiles]
    beatPitchesList = []
    beatTimbreList = []
    beatLoudnessList = []
    # For every beat, average the pitch, timbre, and loudness of the segments that overlap it
    for beats, segments in zip(beatList, segmentList):
        beatPitches = []
        beatTimbre = []
        beatLoudness = []
        for beat in beats:
            segs = segments.that(selection.overlap(beat))
            beatPitches.append(meanPitches(segs))
            beatTimbre.append(meanTimbre(segs))
            beatLoudness.append(matlib.mean(segs.loudness_max))
        beatPitchesList.append(beatPitches)
        beatTimbreList.append(beatTimbre)
        beatLoudnessList.append(beatLoudness)
    cPickle.dump(beatPitchesList, open('AmuInstPitches.pkl', 'wb'))
    cPickle.dump(beatTimbreList, open('AmuInstTimbre.pkl', 'wb'))
    cPickle.dump(beatLoudnessList, open('AmuInstLoudness.pkl', 'wb'))
    # Record the index of the first beat that overlaps each section
    sectionFirstBeats = []
    sections = [laf.analysis.sections for laf in localAudioFiles]
    for sectionsList, bList in zip(sections, beatList):
        temp = [bList.index(bList.that(selection.overlap(section))[0]) for section in sectionsList]
        sectionFirstBeats.append(temp)
    cPickle.dump(sectionFirstBeats, open('AmuInstSections.pkl', 'wb'))
Example #4
def PCA(data_mat, p):
    '''Reduce the data dimensionality to p. This function subtracts the
    mean from the original data (in place).'''
    d, N = data_mat.shape
    m = matlib.mean(data_mat, 1)   # column-wise mean (d x 1)
    data_mat -= m                  # centre the data in place
    if d < N:
        # Fewer dimensions than samples: eigendecompose the d x d scatter matrix
        AAT = data_mat*data_mat.T
        w, v = linalg.eigh(AAT)
        return v[:, -p:], m        # eigh sorts eigenvalues ascending, so the top-p vectors are last
    else:
        # More dimensions than samples: use the N x N Gram matrix instead
        ATA = data_mat.T*data_mat
        w, v = linalg.eigh(ATA)
        return data_mat*v[:, -p:], m   # map the Gram-matrix eigenvectors back to d-dimensional space
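A minimal usage sketch of the PCA function above, assuming matlib and linalg refer to numpy.matlib and numpy.linalg; the data matrix, shapes, and variable names below are made up for illustration.

import numpy.matlib as matlib

# Illustrative data: 5 dimensions, 100 samples (the d < N branch above)
X = matlib.rand(5, 100)

# PCA centres its argument in place, so pass a copy
W, m = PCA(X.copy(), 2)      # W: 5x2 matrix of the top-2 eigenvectors, m: 5x1 column mean

# Project the centred data onto the top-2 principal directions
Y = W.T * (X - m)
print(Y.shape)               # (2, 100)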
Example #5
    def calc_gradients(self):
        '''
        Calculate all gradients, and store them in the Graph's 'gradients'
        attribute as a dictionary from node to gradient. Average gradients over
        the entire batch.
        '''
        # Forward pass: compute every node's value before back-propagating
        self.calc_values()
        self.gradients = {self.root: np.ones_like(self.root.value)}
        # Loop in topological order (from root -> leaves)
        for node in self.ordering:
            child_gradients = node.child_gradients(self.gradients[node])
            for child, gradient in zip(node.children, child_gradients):
                if child not in self.gradients:
                    self.gradients[child] = np.zeros_like(gradient)

                self.gradients[child] += gradient
        # Average gradients over the entire batch
        for node, gradient in self.gradients.items():
            self.gradients[node] = np.mean(gradient, axis=0)
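The loop above is standard reverse-mode accumulation: each node hands its gradient down to its children, and a child that feeds several parents sums the contributions before everything is averaged over the batch. A tiny hand-rolled illustration of the same pattern on made-up arrays (plain numpy, not the Graph/Node classes used above):

import numpy as np

# f(a, b) = a*b + a, evaluated element-wise on a batch of 4 scalars
a = np.array([1.0, 2.0, 3.0, 4.0])
b = np.array([5.0, 6.0, 7.0, 8.0])
prod = a * b
root = prod + a

# Reverse pass, mirroring calc_gradients: seed with d(root)/d(root) = 1
gradients = {'root': np.ones_like(root)}
# root = prod + a  ->  passes its gradient to both children unchanged
gradients['prod'] = gradients['root'] * 1.0
gradients['a'] = gradients['root'] * 1.0
# prod = a * b  ->  'a' receives a second contribution, which is accumulated by summing
gradients['a'] = gradients['a'] + gradients['prod'] * b
gradients['b'] = gradients['prod'] * a

# Average over the batch, as in the final loop above
avg = {name: np.mean(g, axis=0) for name, g in gradients.items()}
print(avg['a'])   # mean of (1 + b) over the batch = 7.5
print(avg['b'])   # mean of a over the batch = 2.5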
Example #6
output_mat = np.delete(output_mat, missing_index,
                       axis=0)  # axis=0: drop the rows that contain missing values
print("\nOutput matrix after deleting missing values", output_mat.shape)
"""
#_________Label Data normalization______# not useful______________________________________________________

maxx2 = np.max(output_mat[:,0])
output_mat[:,0] = output_mat[:,0]/maxx2
print("\n Labels Data normalization done")
#_________________________________________________________________________________________________________
"""

#_________Important Features Extraction___________________________________________________________________

ip_mean = npmat.mean(input_mat, 0)  # column-wise means
ip_mean_mat = npmat.repmat(ip_mean, new_row,
                           1)  # repeat the mean row to match the data matrix shape
ip_mean_sub = input_mat - ip_mean_mat  # subtract the mean from each column

ip_mat_cvar = (npmat.transpose(ip_mean_sub) *
               ip_mean_sub) / my_row  # covariance matrix
dg = np.diagonal(ip_mat_cvar)  # variances
dg = np.sqrt(dg)  # standard deviations
dg = npmat.matrix(dg)  # convert to matrix form
scaled_cov = np.transpose(
    dg) * dg  # outer product of the std devs, used to scale the covariance
ip_mat_corel = np.divide(ip_mat_cvar, scaled_cov)  # correlation matrix of the inputs

#plt.matshow(ip_mat_corel)
#plt.show()
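Because the correlation rescales the covariance by standard deviations taken from the same matrix, the 1/my_row normalization cancels out, so the result should agree with NumPy's built-in column-wise correlation. A small self-contained check on synthetic data (the array X and its shape are made up; only the recipe matches the code above):

import numpy as np
import numpy.matlib as npmat

rng = np.random.default_rng(0)
X = npmat.matrix(rng.normal(size=(50, 4)))   # 50 samples, 4 features
n = X.shape[0]

# Same recipe as above: centre, form the covariance, rescale by the std-dev outer product
Xc = X - npmat.repmat(npmat.mean(X, 0), n, 1)
cov = (Xc.T * Xc) / n
sd = npmat.matrix(np.sqrt(np.diagonal(cov)))
corr_manual = np.divide(cov, sd.T * sd)

# Column-wise correlation straight from NumPy
corr_np = np.corrcoef(np.asarray(X), rowvar=False)
print(np.allclose(corr_manual, corr_np))     # True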
Example #7
            logLseries.append(logL1)
            if lograndom < logalpha:
                gmax = candidategmax
                logPrior0 = nm.log(Prior1)
                logL0 = logL1  # if accepted move to this point
                posteriorChain[c, :] = gmax  # add step to the chain
                if (logPrior0 + logL0) > logMAP:
                    logMAP = (logPrior0 + logL0)
                    psetMAP = gmax  # update most likely parameter set

        priorChain[c, :] = candidategmax
        core.export_pools('keylinkBayesianPrior', priorChain)
        core.export_pools('keylinkBayesianPosterior', posteriorChain)
        core.export_pools('keylinkBayesianOutputLogli', logLseries)

    mp = ml.mean(posteriorChain)
    print('mp', mp)
    pCovMatrix = ml.cov(posteriorChain)
    print('pCovMatrix', pCovMatrix)
    #sp                = nm.sqrt(nm.transpose(ml.diag(pCovMatrix))) ; print('sp',sp)
    pCorrMatrix = scipy.corrcoef(posteriorChain)
    print('pCorrMatrix', pCorrMatrix)
    print('psetMAP', psetMAP)

    #call best fit again and show graphs
    Cpools, PWt, PVt = core.KeylinkModel(psetMAP)
    core.export_pools('keylinkoutput', Cpools)
    core.show_plot(Cpools, PWt, PVt)

if runmode == 'posterior':  # the model is run for all the given combinations of gmax values,
    # read line by line; then the average and standard deviation are calculated
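The accept/reject test in the fragment above (lograndom < logalpha) is the standard Metropolis step carried out in log space: draw a candidate, compare log prior plus log likelihood against the current point, and move only if the log of a uniform random number falls below the log acceptance ratio. A self-contained toy version of that loop, sampling a made-up 1-D Gaussian target rather than the keylink model (every name and value below is illustrative):

import numpy as np

# Toy target: a 1-D Gaussian with mean 3 and standard deviation 2
def log_likelihood(g):
    return -0.5 * ((g - 3.0) / 2.0) ** 2

def log_prior(g):
    return 0.0 if 0.0 < g < 10.0 else -np.inf   # flat prior on (0, 10)

rng = np.random.default_rng(1)
nsteps = 5000
chain = np.empty(nsteps)
g = 5.0                                          # starting point
logp0 = log_prior(g) + log_likelihood(g)

for c in range(nsteps):
    candidate = g + rng.normal(scale=0.5)        # symmetric random-walk proposal
    logp1 = log_prior(candidate) + log_likelihood(candidate)
    logalpha = logp1 - logp0                     # log acceptance ratio
    if np.log(rng.uniform()) < logalpha:         # same test as 'lograndom < logalpha' above
        g, logp0 = candidate, logp1              # accept: move to the candidate
    chain[c] = g                                 # record the current state either way

print(np.mean(chain), np.std(chain))             # roughly 3 and 2 for a long enough chain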
Example #8
from numpy import asarray, mean, percentile, std
from scipy.stats import skew


def calculate(crs, normalized_img):
    """
    :param crs: list of connected cosmic-ray objects
    :param normalized_img: 2D image normalized by exposure time
    :return: a dict with statistics about the crs
    """

    # Calculate basic statistics on connected objects

    pixel_count = [cr.area for cr in crs]
    len_mean = mean(pixel_count)
    len_std = std(pixel_count)
    len_skew = skew(asarray(pixel_count))
    len_percentiles = percentile(pixel_count, [10, 25, 50, 75, 90])

    len_10_percentile = len_percentiles[0]
    len_25_percentile = len_percentiles[1]
    len_50_percentile = len_percentiles[2]
    len_75_percentile = len_percentiles[3]
    len_90_percentile = len_percentiles[4]

    # Calculate flux based on cr intensity
    flux_total = normalized_img.sum()
    flux_mean = flux_total / len(crs)

    # For each CR get its flux by summing up the pixels
    flux_crs = []

    for cr in crs:
        flux = 0
        for coord in cr.coords:
            flux += normalized_img[coord[0]][coord[1]]
        flux_crs.append(flux)  # append once per cosmic ray, after summing its pixels

    flux_std = std(flux_crs)
    flux_skew = skew(asarray(flux_crs))
    flux_percentiles = percentile(flux_crs, [10, 25, 50, 75, 90])

    flux_10_percentile = flux_percentiles[0]
    flux_25_percentile = flux_percentiles[1]
    flux_50_percentile = flux_percentiles[2]
    flux_75_percentile = flux_percentiles[3]
    flux_90_percentile = flux_percentiles[4]

    return dict(
        len_mean=len_mean,
        len_std=len_std,
        len_skew=len_skew,
        len_10_percentile=len_10_percentile,
        len_25_percentile=len_25_percentile,
        len_50_percentile=len_50_percentile,
        len_75_percentile=len_75_percentile,
        len_90_percentile=len_90_percentile,
        flux_total=flux_total,
        flux_mean=flux_mean,
        flux_std=flux_std,
        flux_skew=flux_skew,
        flux_10_percentile=flux_10_percentile,
        flux_25_percentile=flux_25_percentile,
        flux_50_percentile=flux_50_percentile,
        flux_75_percentile=flux_75_percentile,
        flux_90_percentile=flux_90_percentile
    )
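The crs objects only need .area and .coords attributes, which is what connected-component regions such as those returned by skimage.measure.regionprops provide. A hedged usage sketch on a synthetic image (the thresholding, sizes, and exposure time below are made up, not part of the original pipeline):

import numpy as np
from skimage.measure import label, regionprops

# Synthetic image: Gaussian background plus two bright cosmic-ray-like streaks
rng = np.random.default_rng(42)
img = rng.normal(loc=0.0, scale=1.0, size=(64, 64))
img[10:12, 10:13] += 50.0
img[40, 20:25] += 80.0

exposure_time = 30.0                  # seconds (made-up value)
normalized_img = img / exposure_time

# Connected components above a simple threshold stand in for detected cosmic rays
mask = normalized_img > 1.0
crs = regionprops(label(mask))        # each region exposes .area and .coords

stats = calculate(crs, normalized_img)
print(stats['len_mean'], stats['flux_mean'])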