Example #1
def stack_vectors(data, win=1, hop=1, zero_pad=True):
    """
    ::
       Create an overlapping stacked vector sequence from a series of vectors
        data - row-wise multidimensional data to stack
        win  - number of consecutive vectors to stack [1]
        hop  - number of vectors to advance per stack [1]
        zero_pad - zero pad if incomplete stack at end
    """
    data = pylab.atleast_2d(data)
    nrows, dim = data.shape
    hop = min(hop, nrows)
    nvecs = nrows // hop if not zero_pad else int(
        pylab.ceil(nrows / float(hop)))
    features = pylab.zeros((nvecs, win * dim))
    i = 0
    while i < nrows - win + 1:
        features[i // hop, :] = data[i:i + win, :].reshape(1, -1)
        i += hop
    if i // hop < nvecs:  # zero-pad the final, incomplete stack
        x = data[i:, :].reshape(1, -1)
        features[i // hop, :] = pylab.c_[x,
                                         pylab.zeros(
                                             (1, win * dim - x.shape[1]))]
    return features
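
A minimal usage sketch (assuming pylab is importable and stack_vectors is in scope; the toy array is made up for illustration):

import pylab

data = pylab.arange(12).reshape(6, 2)       # six 2-D vectors
stacked = stack_vectors(data, win=2, hop=2)
print(stacked.shape)                        # (3, 4): two consecutive vectors per row
print(stacked[0])                           # [0. 1. 2. 3.]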
Example #2
def get_scaling(tx_start, tx_end, strand, exon_starts, exon_ends, intron_scale,
                exon_scale, reverse_minus):
    """
    Compute the scaling factor across various genetic regions.
    """
    exon_coords = pylab.zeros((tx_end - tx_start + 1))
    for i in range(len(exon_starts)):
        exon_coords[exon_starts[i] - tx_start:exon_ends[i] - tx_start] = 1

    graph_to_gene = {}
    graph_coords = pylab.zeros((tx_end - tx_start + 1), dtype='f')

    x = 0
    if strand == '+' or not reverse_minus:
        for i in range(tx_end - tx_start + 1):
            graph_coords[i] = x
            graph_to_gene[int(x)] = i + tx_start
            if exon_coords[i] == 1:
                x += 1. / exon_scale
            else:
                x += 1. / intron_scale
    else:
        for i in range(tx_end - tx_start + 1):
            graph_coords[-(i + 1)] = x
            graph_to_gene[int(x)] = tx_end - i + 1
            if exon_coords[-(i + 1)] == 1:
                x += 1. / exon_scale
            else:
                x += 1. / intron_scale

    return graph_coords, graph_to_gene
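
A hedged usage sketch (the coordinates and scale factors are made up for illustration): exonic bases advance the graph coordinate by 1/exon_scale and intronic bases by 1/intron_scale, so introns are compressed on the plot axis.

coords, to_gene = get_scaling(tx_start=100, tx_end=120, strand='+',
                              exon_starts=[105], exon_ends=[110],
                              intron_scale=5.0, exon_scale=1.0,
                              reverse_minus=False)
print(coords[:8])  # [0., 0.2, 0.4, 0.6, 0.8, 1., 2., 3.] -- the exon starts at index 5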
Example #3
    def _stft(self):
        if not self._have_x:
            print(
                "Error: You need to load a sound file first: use self.load_audio('filename.wav')"
            )
            return False
        fp = self._check_feature_params()
        num_frames = len(self.x)
        self.STFT = P.zeros((self.nfft // 2 + 1, num_frames), dtype='complex')
        self.win = P.ones(self.wfft) if self.window == 'rect' else P.np.sqrt(
            P.hanning(self.wfft))
        x = P.zeros(self.wfft)
        buf_frames = 0
        for k, nex in enumerate(self.x):
            x = self._shift_insert(x, nex, self.nhop)
            if self.nhop >= self.wfft - k * self.nhop:  # align buffer on start of audio
                self.STFT[:, k - buf_frames] = P.rfft(self.win * x,
                                                      self.nfft).T
            else:
                buf_frames += 1
        self.STFT = self.STFT / self.nfft
        self._fftfrqs = P.arange(
            0, self.nfft // 2 + 1) * self.sample_rate / float(self.nfft)
        self._have_stft = True
        if self.verbosity:
            print("Extracted STFT: nfft=%d, hop=%d" % (self.nfft, self.nhop))
        self.inverse = self._istftm
        self.X = abs(self.STFT)
        if not self.magnitude:
            self.X = self.X**2
        return True
Example #4
    def _overlap_add(self, X, usewin=True, resamp=None):
        nfft = self.nfft
        nhop = self.nhop
        if resamp is None:
            x = P.zeros((X.shape[0] - 1) * nhop + nfft)
            for k in range(X.shape[0]):
                x[k * nhop:k * nhop + nfft] += X[k] * self.win
        else:
            rfft = int(P.np.round(nfft * resamp))
            x = P.zeros((X.shape[0] - 1) * nhop + rfft)
            for k in range(X.shape[0]):
                x[k * nhop:k * nhop +
                  rfft] += sig.resample(X[k], rfft) * self.win
        return x
Example #5
    def _chroma(self):
        """
        ::

            Chromagram, like 12-BPO CQFT modulo one octave. Energy is folded onto first octave.
        """
        fp = self._check_feature_params()
        lo = self.lo
        self.lo = 63.5444  # set to quarter tone below C
        if not self._cqft():
            return False
        self.lo = lo  # restore original lo edge
        a, b = self.CQFT.shape
        complete_octaves = a // self.nbpo  # number of complete octaves (integer division)
        #complete_octave_bands = complete_octaves * self.nbpo
        # column-major ordering, like a spectrogram, is in FORTRAN order
        self.CHROMA = P.zeros((self.nbpo, b))
        for k in P.arange(complete_octaves):
            self.CHROMA += self.CQFT[k * self.nbpo:(k + 1) * self.nbpo, :]
        self.CHROMA = (self.CHROMA / complete_octaves)
        self._have_chroma = True
        if self.verbosity:
            print("Extracted CHROMA: intensified=%d" % self.intensify)
        self.inverse = self.ichroma
        self.X = self.CHROMA
        return True
Example #6
    def _chroma_hcqft(self):
        """
        ::

            Chromagram formed by high-pass liftering in cepstral domain, then usual self.nbpo-BPO folding.
        """
        fp = self._check_feature_params()
        if not self._hcqft():
            return False
        a, b = self.HCQFT.shape
        complete_octaves = a // self.nbpo  # number of complete octaves (integer division)
        #complete_octave_bands = complete_octaves * self.nbpo
        # column-major ordering, like a spectrogram, is in FORTRAN order
        self.CHROMA = P.zeros((self.nbpo, b))
        for k in P.arange(complete_octaves):
            self.CHROMA += self.HCQFT[k * self.nbpo:(k + 1) * self.nbpo, :]
        self.CHROMA /= complete_octaves
        self._have_chroma = True
        if self.verbosity:
            print(
                "Extracted HCQFT CHROMA: lcoef=%d, ncoef=%d, intensified=%d" %
                (self.lcoef, self.ncoef, self.intensify))
        self.inverse = self.ichroma
        self.X = self.CHROMA
        return True
Example #7
def plotConn():
    # Create plot
    figh = figure(figsize=(8,6))
    figh.subplots_adjust(left=0.02) # Less space on left
    figh.subplots_adjust(right=0.98) # Less space on right
    figh.subplots_adjust(top=0.96) # Less space on top
    figh.subplots_adjust(bottom=0.02) # Less space on bottom
    figh.subplots_adjust(wspace=0) # No horizontal space between subplots
    figh.subplots_adjust(hspace=0) # No vertical space between subplots
    h = axes()
    totalconns = zeros(shape(f.connprobs))
    for c1 in range(size(f.connprobs,0)):
        for c2 in range(size(f.connprobs,1)):
            for w in range(f.nreceptors):
                totalconns[c1,c2] += f.connprobs[c1,c2]*f.connweights[c1,c2,w]*(-1 if w>=2 else 1)
    imshow(totalconns,interpolation='nearest',cmap=bicolormap(gap=0))

    # Plot grid lines
    hold(True)
    for pop in range(f.npops):
        plot(array([0,f.npops])-0.5,array([pop,pop])-0.5,'-',c=(0.7,0.7,0.7))
        plot(array([pop,pop])-0.5,array([0,f.npops])-0.5,'-',c=(0.7,0.7,0.7))

    # Make pretty
    h.set_xticks(range(f.npops))
    h.set_yticks(range(f.npops))
    h.set_xticklabels(f.popnames)
    h.set_yticklabels(f.popnames)
    h.xaxis.set_ticks_position('top')
    xlim(-0.5,f.npops-0.5)
    ylim(f.npops-0.5,-0.5)
    clim(-abs(totalconns).max(),abs(totalconns).max())
    colorbar()
Example #8
def plotWeightChanges():
    if f.usestdp:
        # create plot
        figh = figure(figsize=(1.2*8,1.2*6))
        figh.subplots_adjust(left=0.02) # Less space on left
        figh.subplots_adjust(right=0.98) # Less space on right
        figh.subplots_adjust(top=0.96) # Less space on top
        figh.subplots_adjust(bottom=0.02) # Less space on bottom
        figh.subplots_adjust(wspace=0) # No horizontal space between subplots
        figh.subplots_adjust(hspace=0) # No vertical space between subplots
        h = axes()

        # create data matrix
        wcs = [x[-1][-1] for x in f.allweightchanges] # absolute final weight (overwritten below)
        wcs = [x[-1][-1]-x[0][-1] for x in f.allweightchanges] # absolute weight change
        pre,post,recep = zip(*[(x[0],x[1],x[2]) for x in f.allstdpconndata])
        ncells = int(max(max(pre),max(post))+1)
        wcmat = zeros([ncells, ncells])

        for iwc,ipre,ipost,irecep in zip(wcs,pre,post,recep):
            wcmat[int(ipre),int(ipost)] = iwc *(-1 if irecep>=2 else 1)

        # plot
        imshow(wcmat,interpolation='nearest',cmap=bicolormap(gap=0,mingreen=0.2,redbluemix=0.1,epsilon=0.01))
        xlabel('post-synaptic cell id')
        ylabel('pre-synaptic cell id')
        h.set_xticks(f.popGidStart)
        h.set_yticks(f.popGidStart)
        h.set_xticklabels(f.popnames)
        h.set_yticklabels(f.popnames)
        h.xaxis.set_ticks_position('top')
        xlim(-0.5,ncells-0.5)
        ylim(ncells-0.5,-0.5)
        clim(-abs(wcmat).max(),abs(wcmat).max())
        colorbar()
Example #9
def coordinatesTodepth(crdArr, fov_variant, fsz):
    hFOV_prime = degreeToRadian(
        56.559 + fov_variant)  # TODO: Confirm that the number is for hFOV
    vFOV_prime = math.atan(height / width * math.tan(hFOV_prime / 2)) * 2

    tan_hFOV_half_prime = math.tan(hFOV_prime / 2)
    tan_vFOV_half_prime = math.tan(vFOV_prime / 2)

    ret = plt.zeros((height, width))

    for point in crdArr:
        assert point[2] > 0

        cx = point[0]
        cy = point[1]
        cd = point[2]

        k_x = tan_hFOV_half_prime * cd / (width / 2)
        cc = (width / 2 * k_x - cx) / k_x
        k_y = tan_vFOV_half_prime * cd / (height / 2)
        cr = (height / 2 * k_y - cy) / k_y

        ccs = [math.floor(cc), math.floor(cc), math.ceil(cc), math.ceil(cc)]
        crs = [math.floor(cr), math.ceil(cr), math.floor(cr), math.ceil(cr)]  # pair each column with both rows to hit all four neighbors

        for k in range(len(ccs)):
            tc = ccs[k]
            tr = crs[k]

            if tr < 0 or tc < 0 or tr >= height or tc >= width:
                continue

            # How to handle negative z values is still undecided
            if ret[tr][tc] == 0:
                ret[tr][tc] = cd
            else:
                ret[tr][tc] = min(ret[tr][tc], cd)

    for i in range(height):
        for j in range(width):
            if ret[i][j] == 0:
                ltR = max(i - fsz // 2, 0)  # left top row of filter
                ltC = max(j - fsz // 2, 0)  # left top column of filter
                rbR = min(i + fsz // 2, height - 1)
                rbC = min(j + fsz // 2, width - 1)

                # pixelCnt = (rbR-ltR+1)*(rbC-ltC+1)

                cands = []
                for tr in range(ltR, rbR + 1):
                    for tc in range(ltC, rbC + 1):
                        if (tr == i and tc == j):
                            continue
                        cands.append(ret[tr][tc])

                ret[i][j] = sorted(cands)[len(cands) // 2]  # median requires sorting first

    return ret
Example #10
def coordinates_to_depth(crdArr, vt, fsz, theta):
    ret = plt.zeros((height, width))

    print(crdArr)

    t = time.time()
    for index in range(len(crdArr)):
        cx = crdArr[index][0]
        cy = crdArr[index][1]
        cz = crdArr[index][2]

        if(cz==0):
            continue

        hFOV_prime = degreeToRadian(56.559 + theta)
        vFOV_prime = math.atan(height / width * math.tan(hFOV_prime / 2)) * 2

        tan_hFOV_half_prime = math.tan(hFOV_prime / 2)
        tan_vFOV_half_prime = math.tan(vFOV_prime / 2)

        k_x = tan_hFOV_half_prime*cz / (width//2)
        k_y = tan_vFOV_half_prime*cz / (height//2)

        cc = ((width//2)*k_x - cx)/k_x
        cr = ((height//2)*k_y - cy)/k_y
        cd = math.sqrt(cx*cx+cy*cy+cz*cz)

        ccs = [math.floor(cc), math.floor(cc), math.ceil(cc), math.ceil(cc)]
        crs = [math.floor(cr), math.ceil(cr), math.floor(cr), math.ceil(cr)]

        for k in range(len(ccs)):
            tc = ccs[k]
            tr = crs[k]

            if tr < 0 or tc < 0 or tr >= height or tc >= width:
                continue

            # How to handle negative z values is still undecided
            if (ret[tr][tc] == 0):
                ret[tr][tc] = cd
            elif (ret[tr][tc]>0):
                ret[tr][tc] = min(ret[tr][tc], cd)

    from skimage.transform import resize
    ret = resize(ret, (224, 224))

    t0 = time.time()
    print(t0 - t)

    from scipy import ndimage
    ret = ndimage.median_filter(ret, fsz)

    from sklearn.preprocessing import minmax_scale
    ret = minmax_scale(ret.ravel(), feature_range=(0, 1)).reshape(ret.shape)

    return ret
Example #11
def overlap_add(x, y, wlen):
    """
    ::

        Overlap-add two sequences x and y by wlen samples
    """
    z = pylab.zeros(x.size + y.size - wlen)
    z[0:x.size] = x
    z[x.size - wlen:x.size + y.size - wlen] += y
    return z
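
A small usage sketch (toy arrays for illustration): the wlen-sample tail of x is summed with the head of y.

import pylab

x = pylab.ones(8)
y = 2 * pylab.ones(8)
z = overlap_add(x, y, wlen=4)
print(len(z))  # 12 == 8 + 8 - 4
print(z)       # [1. 1. 1. 1. 3. 3. 3. 3. 2. 2. 2. 2.]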
Example #12
    def _get_probs_tc(self, keys):
        """
        ::

            Retrieve probability values for a set of timbre-channel keys
        """
        pk = pylab.zeros(self.timbre_channels)
        for i, key in enumerate(keys):
            pk[i] = self.adb.retrieve_datum(key, powers=True)[0]
        return pk
Example #13
def Cromer_1d(init_val, accel_func, time_vec):
    # accel_func takes arguments (pos0, vel0, pos1, vel1, dt)
    # init_val is (pos0, vel0, pos1, vel1) at the initial state
    n = len(time_vec)
    t = time_vec

    r0 = plt.zeros(n); r0[0] = init_val[0]
    v0 = plt.zeros(n); v0[0] = init_val[1]
    r1 = plt.zeros(n); r1[0] = init_val[2]
    v1 = plt.zeros(n); v1[0] = init_val[3]
    
    for i in range(n-1):
        dt = t[i+1] - t[i]
        a0, a1 = accel_func(r0[i],v0[i],r1[i],v1[i],dt)
        v0[i+1] = v0[i] + a0*dt
        v1[i+1] = v1[i] + a1*dt
        r0[i+1] = r0[i] + v0[i+1]*dt
        r1[i+1] = r1[i] + v1[i+1]*dt

    return r0,v0,r1,v1
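
A minimal sketch of driving the integrator (assuming plt is matplotlib.pylab, as in the function body): two uncoupled unit-mass harmonic oscillators with acceleration a = -x, where Euler-Cromer stays close to the exact cosine solution.

import matplotlib.pylab as plt

accel = lambda x0, v0, x1, v1, dt: (-x0, -x1)   # a = -x for both bodies
t = plt.linspace(0, 10, 1000)
r0, v0, r1, v1 = Cromer_1d((1.0, 0.0, 0.0, 1.0), accel, t)
print(r0[-1], plt.cos(t[-1]))                   # nearly equal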
Example #14
def l1o_model_validation(data,
                         labels,
                         classifier=neighbors.KNeighborsClassifier(1),
                         normalizer=preprocessing.StandardScaler()):
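    # NOTE: sklearn.cross_validation was removed in scikit-learn 0.20; newer
    # versions provide an argument-free LeaveOneOut in sklearn.model_selection.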
    l1o = cross_validation.LeaveOneOut(data.shape[0])
    predictions = pylab.zeros(labels.shape, dtype=labels.dtype)
    for train_idx, test_idx in l1o:
        classifier.fit(normalizer.fit_transform(data[train_idx]),
                       labels[train_idx])
        predictions[test_idx] = classifier.predict(
            normalizer.transform(data[test_idx]))
    return sum(predictions == labels) / float(data.shape[0])
Example #15
    def find_gt_ranks(self, out_ranks, ground_truth_keys=None):
        """
        ::

            Return ranks matrix for ground-truth columns only
        """
        r = out_ranks.argsort()
        lzt_keys, lzt_len = self.get_adb_lists()
        gt_idx = [lzt_keys.index(s) for s in ground_truth_keys]
        ranks = pylab.zeros((len(gt_idx), len(gt_idx)))
        for i in pylab.arange(len(gt_idx)):
            for j in pylab.arange(len(gt_idx)):
                ranks[i][j] = pylab.nonzero(r[i] == gt_idx[j])[0][0]
        return ranks
Example #16
def devils_staircase(num_octaves=7,
                     num_steps=12,
                     step_size=1,
                     hop=4096,
                     overlap=True,
                     center_freq=440,
                     band_width=150,
                     **params):
    """
    ::

        Generate an auditory illusion of an infinitely ascending/descending sequence of shepard tones
            num_octaves - number of sinusoidal octave bands to generate [7]
            num_steps - how many steps to take in the staircase [12]
            step_size - semitone change per step, can be fractional [1]
            hop - how many points to generate per step [4096]
            overlap - whether the end-points should be cross-faded for overlap-add
            center_freq - where the peak of the spectrum will be [440]
            band_width - how wide a spectral band to use for shepard tones [150]
            **params - signal_params dict, see default_signal_params()

    """
    params = _check_signal_params(**params)
    sr = params['sr']
    f0 = params['f0']
    norm_freq = 2 * pylab.pi / sr
    wlen = min(hop // 2, 2048)  # integer samples for the cross-fade window
    x = pylab.zeros(num_steps * hop + wlen)
    h = scipy.signal.hanning(wlen * 2)
    # overlap add
    params['num_points'] = hop + wlen
    phase_offset = 0
    for i in pylab.arange(num_steps):
        freq = f0 * 2**(((i * step_size) % 12) / 12.0)
        params['f0'] = freq
        s = shepard(num_octaves=num_octaves,
                    center_freq=center_freq,
                    band_width=band_width,
                    **params)
        s[0:wlen] = s[0:wlen] * h[0:wlen]
        s[hop:hop + wlen] = s[hop:hop + wlen] * h[wlen:wlen * 2]
        x[i * hop:(i + 1) * hop + wlen] = x[i * hop:(i + 1) * hop + wlen] + s
        phase_offset = phase_offset + hop * freq * norm_freq
    if not overlap:
        x = pylab.resize(x, num_steps * hop)
    x = balance_signal(x, 'maxabs')
    return x
Example #17
def variable_phase_vocoder(D, time_steps, hop_length=None):
    n_fft = 2 * (D.shape[0] - 1)

    if hop_length is None:
        hop_length = int(n_fft // 4)

    # time_steps = P.arange(0, D.shape[1], rate, dtype=P.double)
    # time_steps = P.concatenate([
    #   P.arange(0, D.shape[1]/2, .5, dtype=P.double),
    #   P.arange(D.shape[1]/2, D.shape[1], 2, dtype=P.double)
    #   ])

    # Create an empty output array
    d_stretch = P.zeros((D.shape[0], len(time_steps)), D.dtype, order='F')

    # Expected phase advance in each bin
    phi_advance = P.linspace(0, P.pi * hop_length, D.shape[0])

    # Phase accumulator; initialize to the first sample
    phase_acc = P.angle(D[:, 0])

    # Pad 0 columns to simplify boundary logic
    D = P.pad(D, [(0, 0), (0, 2)], mode='constant')

    for (t, step) in enumerate(time_steps):
        columns = D[:, int(step):int(step + 2)]

        # Weighting for linear magnitude interpolation
        alpha = P.mod(step, 1.0)
        mag = ((1.0 - alpha) * abs(columns[:, 0]) + alpha * abs(columns[:, 1]))

        # Store to output array
        d_stretch[:, t] = mag * P.exp(1.j * phase_acc)

        # Compute phase advance
        dphase = (P.angle(columns[:, 1]) - P.angle(columns[:, 0]) -
                  phi_advance)

        # Wrap to -pi:pi range
        dphase = dphase - 2.0 * P.pi * P.around(dphase / (2.0 * P.pi))

        # Accumulate phase
        phase_acc += phi_advance + dphase

    return d_stretch
Example #18
def close_points(X, s=1):
    lambda_s = 1
    lambda_c = s
    X = P.array(X)
    K, N = X.shape
    M = P.zeros((N, N))
    M[range(N), range(N)] = 2 * lambda_c / (N - 1) + lambda_s / N
    # M[0,0] -= lambda_c/(N-1)
    # M[-1,-1] -= lambda_c/(N-1)
    d = P.diag(P.ones(N - 1), 1)
    M = M - lambda_c * (d + d.T) / (N - 1)
    M[0, 0] = lambda_s / N
    M[-1, -1] = lambda_s / N
    M[0, 1] = 0
    M[-1, -2] = 0
    Mi = P.pinv(M)
    smooth_X = (lambda_s / N) * Mi.dot(X.T).T
    return smooth_X
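
A short usage sketch (assuming P is pylab, as in the function body): smoothing a noisy 2 x N planar curve; larger s pulls consecutive points closer together.

import pylab as P

t = P.linspace(0, 2 * P.pi, 50)
noisy = P.vstack([t, P.sin(t) + 0.1 * P.randn(50)])  # K=2 rows, N=50 points
smooth = close_points(noisy, s=10)
print(smooth.shape)  # (2, 50)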
Example #19
def coordinates_to_depth(crdArr, vt, fsz):
    ret = plt.zeros((height, width))

    pts = [(0, 0, 0) for _ in range(4 * len(crdArr))]

    import time
    t = time.time()
    crdArr = screen_base - crdArr

    for idx in range(len(crdArr)):
        cc = crdArr[idx][0]
        cr = crdArr[idx][1]
        cd = crdArr[idx][2]

        ccs = [math.floor(cc), math.floor(cc), math.ceil(cc), math.ceil(cc)]
        crs = [math.floor(cr), math.ceil(cr), math.floor(cr), math.ceil(cr)]  # pair each column with both rows to hit all four neighbors

        for k in range(len(ccs)):
            tc = ccs[k]
            tr = crs[k]

            if tr < 0 or tc < 0 or tr >= height or tc >= width:
                continue

            pts[4 * idx + k] = (tc, tr, cd)

    t0 = time.time()
    print(t0 - t)

    for point in pts:
        cc, cr, cd = point

        if ret[cr][cc] > 0:
            ret[cr][cc] = max(ret[cr][cc], cd)
        elif ret[cr][cc] == 0:
            ret[cr][cc] = cd

    print(time.time() - t0)

    from scipy import ndimage
    ret = ndimage.median_filter(ret, fsz)

    return ret
Example #20
def modulate(sig, env, nsamps):
    """
    ::

        Signal modulation by an envelope
        sig - the full-rate signal
        env - the reduced-rate envelope
        nsamps - audio samples per envelope frame
    """
    if (sig.size != len(env) * nsamps):
        print("Source signal size must equal len(env) * nsamps")
        return False
    y = pylab.zeros(sig.size)
    start = 0
    for a in env:
        end = start + nsamps
        y[start:end] = a * sig[start:end]
        start = end
    return y
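
A quick usage sketch (parameters made up for illustration): the signal length must equal len(env) * nsamps.

import pylab

sig = pylab.sin(2 * pylab.pi * 440 * pylab.arange(800) / 8000.0)
env = pylab.linspace(1.0, 0.0, 8)   # 8-frame linear fade-out
y = modulate(sig, env, nsamps=100)  # 8 * 100 == sig.size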
Example #21
def harmonics(afun=lambda x: pylab.exp(-0.5 * x),
              pfun=lambda x: pylab.rand() * 2 * pylab.pi,
              **params):
    """
    ::

        Generate a harmonic series using a harmonic weighting function
         afun   - lambda function of one parameter (harmonic index) returning a weight
         pfun   - lambda function of one parameter (harmonic index) returning radian phase offset
         **params - signal_params dict, see default_signal_params()
    """
    params = _check_signal_params(**params)
    f0 = params['f0']
    x = pylab.zeros(params['num_points'])
    for i in pylab.arange(1, params['num_harmonics'] + 1):
        params['f0'] = i * f0
        params['phase_offset'] = pfun(i)
        x += afun(i) * sinusoid(**params)
    x = balance_signal(x, 'maxabs')
    return x
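
A hedged usage sketch (assumes the surrounding signal helpers _check_signal_params and sinusoid are in scope, and that the signal_params dict accepts f0, num_points, and num_harmonics as used above):

x = harmonics(afun=lambda k: 1.0 / k,  # 1/k amplitude rolloff
              pfun=lambda k: 0.0,      # zero phase offset for every partial
              f0=220, num_points=44100, num_harmonics=10)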
Example #22
def shepard(num_octaves=7, center_freq=440, band_width=150, **params):
    """
    ::

        Generate shepard tones
             num_octaves - number of sinusoidal octave bands to generate [7]
             center_freq - where the peak of the spectrum will be [440]
             band_width - how wide a spectral band to use for shepard tones [150]
             **params - signal_params dict, see default_signal_params()
    """
    params = _check_signal_params(**params)
    f0 = params['f0']
    x = pylab.zeros(params['num_points'])
    shepard_weight = gauss_pdf(20000, center_freq, band_width)
    for i in pylab.arange(num_octaves):
        a = shepard_weight[int(round(f0 * 2**i))]
        params['f0'] = f0 * 2**i
        x += a * harmonics(**params)
    x = balance_signal(x, 'maxabs')
    return x
Example #23
    def rank_by_distance_bhatt(self, qkeys, ikeys, rkeys, dists):
        """
        ::

            Reduce timbre-channel distances to ranks list by ground-truth key indices
            Bhattacharyya distance on timbre-channel probabilities and Kullback distances
        """
        # timbre-channel search using pre-computed distances
        ranks_list = []
        t_keys, t_lens = self.get_adb_lists(0)
        rdists = pylab.ones(len(t_keys)) * float('inf')
        qk = self._get_probs_tc(qkeys)
        for i in range(len(ikeys[0])):  # number of include keys
            ikey = []
            dk = pylab.zeros(self.timbre_channels)
            for t_chan in range(self.timbre_channels):  # timbre channels
                ikey.append(ikeys[t_chan][i])
                try:
                    # find dist of key i for query
                    i_idx = rkeys[t_chan].index(
                        ikey[t_chan])  # dataset include-key match
                    # the reduced distance function in include_keys order
                    # distance is Bhattacharyya distance on probs and dists
                    dk[t_chan] = dists[t_chan][i_idx]
                except ValueError:  # list.index failed: key missing from result list
                    print("Key not found in result list: ", ikey, "for query:",
                          qkeys[t_chan])
                    raise error.BregmanError()
            rk = self._get_probs_tc(ikey)
            a_idx = t_keys.index(ikey[0])  # audiodb include-key index
            rdists[a_idx] = distance.bhatt(
                pylab.sqrt(pylab.absolute(dk)),
                pylab.sqrt(pylab.absolute(qk * rk)))
        #search for the index of the relevant keys
        rdists = pylab.absolute(rdists)
        sort_idx = pylab.argsort(rdists)  # Sort fields into database order
        for r in self.ground_truth:  # relevant keys
            ranks_list.append(pylab.where(
                sort_idx == r)[0][0])  # Rank of the relevant key
        return ranks_list, rdists
Example #24
def slice3(v, x=.5, y=.5, z=.5, **kwargs):
    '''A pylab.imshow()-like function to show three slices of volumetric data.

    Accepted kwargs:
    - 'minmax': tuple of (minvalue, maxvalue) to clip data.  Use None
        for one argument to do one-sided clipping, e.g., (0, None) to 
        clip to nonnegative values.
    - 'interpolation': use some sort of interpolation, by default
        nearest-value (i.e., no mixing).

    '''

    ix = x
    iy = y
    iz = z
    if isinstance(ix, float): ix = int(v.shape[0]*ix)
    if isinstance(iy, float): iy = int(v.shape[1]*iy)
    if isinstance(iz, float): iz = int(v.shape[2]*iz)

    R = v.shape[0]
    C = v.shape[1]
    D = v.shape[2]

    tr = pylab.zeros((R+D, C+D))
    tr[0:R, 0:C] = v[:, :, iz]
    tr[R:, 0:C] = v[ix, :, :].T
    tr[0:R, C:] = v[:, iy, :]

    if 'minmax' in kwargs:
        minmax = kwargs['minmax']
        if minmax[0] is not None: tr[tr < minmax[0]] = minmax[0]
        if minmax[1] is not None: tr[tr > minmax[1]] = minmax[1]

    if 'interpolation' in kwargs:
        interp = kwargs['interpolation']
    else:
        interp = 'nearest'

    pylab.imshow(tr, interpolation=interp)
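
A minimal usage sketch (random volume for illustration):

import pylab

vol = pylab.rand(32, 48, 16)  # R x C x D volume
slice3(vol, x=.5, y=.5, z=.5, minmax=(0, None))
pylab.show()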
Example #25
    def _cqft_intensified(self):
        """
        ::

            Constant-Q Fourier transform using only max abs(STFT) value in each band
        """
        if not self._have_stft:
            if not self._stft():
                return False
        self._make_log_freq_map()
        r, b = self.Q.shape
        b, c = self.STFT.shape
        self.CQFT = P.zeros((r, c))
        for i in P.arange(r):
            for j in P.arange(c):
                self.CQFT[i, j] = (self.Q[i, :] *
                                   P.absolute(self.STFT[:, j])).max()
        self._have_cqft = True
        self._is_intensified = True
        self.inverse = self.icqft
        self.X = self.CQFT
        return True
Example #26
rows = 2  #Number of rows in subplot
columns = 1  #Number of columns in subplot

filename = "curve_plot.PNG"  #The name of the file we will create

#We can create a list to determine the scale of the graph
#in the format [x-min,x-max,y-min,y-max]

axis = [0, 10, 0, 150]

x = mppl.linspace(start, stop, points)  #We apply the previous
#variables to create an
#array of desired parameters

y = mppl.zeros(len(x))  #We create an empty array of the same
#dimensions as x

for i in range(len(x)):  #We fill up array y with the values
    y[i] = f(x[i])  #returned by f(x)

mppl.plot(x, y, "r-")  #Preparing the plot, the third argument
#is divided into a letter and either a
#'-' or a 'o' which represent a line
#or a dot plot, respectively. r is for
#red, b is for blue

#Using the 'mppl.hold(on)' command will allow you to create two plots
#in one, whereas using 'mppl.hold(off)' will create a separate plot

mppl.hold("on")  #Continue plotting in same chart
Example #27
# Set chart labels
axis_1.set_xlabel('x')
axis_2.set_xlabel('x')
axis_3.set_xlabel('x')

# y labels
axis_1.set_ylabel("sin(x)")
axis_2.set_ylabel("sin'(x)")
axis_3.set_ylabel("sin''(x)")

# I conjecture we need to set zeros on the graph to fill the starting data.
# Let me add them for now. This means the y axes are all going to be zero to
# begin with.
# Data for the y axis:
axis_1_data = zeros(0)
axis_2_data = zeros(0)
axis_3_data = zeros(0)
axis_4_data = zeros(0)
x = zeros(0)  # We also need the x values to be zeros too. This finishes our placeholder data

# Time to make our plots! We plot the x and y data from each axis on the respective plots
# with titles and line colors
sin_x_plot, = axis_1.plot(x, axis_1_data, 'b-', label="sin(x)")
sin_x_d1_plot, = axis_2.plot(x, axis_2_data, 'b-', label="sin'(x)")
sin_x_d2_plot, = axis_3.plot(x, axis_3_data, 'b-', label="sin''(x)")

# Then we add a legend. This consists of adding the lines we plotted associated with their names
# To the legend.
Example #28
try:
    savearg = int(sys.argv[1])
except IndexError:
    savearg = False
except ValueError:
    savearg = False

n = 1000 # number of datapoints
V_0 = 0.4; V_end = 20.0
Rho_0 = 0.0; Rho_end = 2.0
 
T_hat = [1.15, 1.0, 0.85] # temperature [1]
V_hat = plab.linspace(V_0, V_end, n) # volume [1] 
Rho_hat = plab.linspace(Rho_0, Rho_end, n) # density [1] 
P1_hat = plab.zeros((n,len(T_hat))) #P(V,T) pressure [1]
P2_hat = plab.zeros((n,len(T_hat))) #P(Rho,T) pressure [1]
 
for j in range(len(T_hat)):
    for i in range(n):
        P1_hat[i,j] = p_vt(v_=V_hat[i], t_=T_hat[j])
        P2_hat[i,j] = p_rhot(rho_=Rho_hat[i], t_=T_hat[j])
        #find out when the function is no longer unique
        if check_unique(y1=P2_hat[i-1,j], y2=P2_hat[i,j]):
            print "function is no longer unique for rho=%f, T=%f" %(Rho_hat[i],T_hat[j])

max_index = plab.argmax(P2_hat[0:n//2, 2])
max_y = P2_hat[max_index, 2]
max_x = Rho_hat[max_index]
print("extremal value for P(Rho,T=0.85*T_C): p=", max_y)
Example #29
def FPP(log=Logger.logger(0), N = 10000, dt = 1./24000, distributionParameter = [30], plotAll = True, efield = False):
    
    #check if rate file or rate is present
    if len(distributionParameter)  ==  1:
        try: 
            data = np.loadtxt(distributionParameter[0],delimiter = ' ')
            log.info("Rate data loaded")
            BGsim = True
            STNdata = []
            tick = []
            for n in data:
                STNdata.append(n[1])
                tick.append(n[0])
            Ratetime = pylab.cumsum(tick)
            BGdt = tick[1]
            timeSteps = int(Ratetime[-1]/dt)  
        except:
            float(distributionParameter[0])
            Ratetime = 1.
            timeSteps = int(Ratetime/dt)
            BGsim = False
    else:
        Ratetime = 1.
        BGsim = False
        timeSteps = int(Ratetime/dt)
        
    maxrate = 1./0.009
    times = []
    for n in range(timeSteps):
        times.append(dt*n)

    # check for current file, if none present use impules
    try:
        It = np.loadtxt('C:\\Users\\Kristian\\Dropbox\\phd\\Data\\apcurrent24k.dat',delimiter = ',')

        #/home/uqkweegi/Documents/Data/apcurrent24k.dat',delimiter = ',')
    except:
        log.error('no current file present')
        It = np.array(1)

    log.info('Current loaded')
    It = np.multiply(np.true_divide(It,It.min()),250e-9)          #normalize
    currentLength = len(It)
    
    #calculate extracellular effects
    epsilon = 8.85e-12                                #Permitivity of free space
    rho = 10.**5 * 10.**6                     #density of neurons in STN m^-3
    r = np.power(np.multiply(3./4*N/(np.pi*rho),np.array([random.uniform(0,1) for _ in range(N)])),1./3)   #create a power law distribution of neuron radii
    r.sort()
    if efield:
        rijk = [[random.uniform(0,1)-0.5 for _ in range(N)],[random.uniform(0,1)-0.5 for _ in range(N)],[random.uniform(0,1)-0.5 for _ in range(N)]] #create vector direction of field 
        #if plotAll:
        #    vi = pylab.plot(rijk[0])
        #    vj = pylab.plot(rijk[1])
        #    vk = pylab.plot(rijk[2])
        #    pylab.show()
    R3 = 0.96e3
    C3 = 2.22e-6
    C2 = 9.38e-9
    C3 = 1.56e-6  # overrides the C3 value above; the later assignment wins
    C2 = 9.38e-9  # redundant repeat of the same value
    R4 = 100.e6
    R2N = np.multiply(1./(4*np.pi*epsilon),r)
    R1 = 2100.;
    t_impulse = np.array([dt*n for n in range(100)])

    log.info('initialization complete')

    Vt = pylab.zeros(len(times))
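    # NOTE: Vi, Vj, Vk below alias the same zero array as Vt; each update
    # later rebinds the name via np.add (which returns a new array), so the
    # shared buffer is never mutated in place.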
    Vi = Vt
    Vj = Vt
    Vk = Vt

    # start simulation
    #-------------------------------------------------------------------------------#
    for neuron in range(N):
        R2 = R2N[neuron]
        ppwave = pylab.zeros(len(times))
        if BGsim:
            absoluteTimes = np.random.exponential(1./(maxrate*STNdata[0]),1)
        else:
            if len(distributionParameter)  ==  1:
                absoluteTimes = np.random.exponential(1./(distributionParameter[0]),1)
            else:
                absoluteTimes = [random.weibullvariate(distributionParameter[0],distributionParameter[1])]
        while absoluteTimes[-1] < times[-1]-currentLength*dt:
            wave_start = int(absoluteTimes[-1]/dt)
            wave_end = wave_start+currentLength
            if wave_end > len(times):
                break
            ppwave[wave_start:wave_end] = np.add(ppwave[wave_start:wave_end],It)
            if BGsim:
                isi = np.random.exponential(1./(maxrate*STNdata[int(absoluteTimes[-1]/BGdt)]),1)
            else:
                if len(distributionParameter)  ==  1:
                    isi = np.random.exponential(1./(distributionParameter[0]),1)
                else:
                    isi = random.weibullvariate(distributionParameter[0],distributionParameter[1])
            absoluteTimes = np.append(absoluteTimes,[absoluteTimes[-1]+isi])
        # calculate neuron contribution
        #------------------------------------------------------------------------------#
        # Impulse response of the extracellular RC network. This is the same
        # closed-form expression as before, with the repeated discriminant
        # factored out; the square root is written as **0.5 so it is also
        # correct under Python 2, where (1/2) evaluates to 0.
        disc = (C2**2*R1**2*R2**2 + 2*C2**2*R1**2*R2*R3 + 2*C2**2*R1**2*R2*R4
                + C2**2*R1**2*R3**2 + 2*C2**2*R1**2*R3*R4 + C2**2*R1**2*R4**2
                + 2*C2*C3*R1**2*R2*R3 - 2*C2*C3*R1**2*R3**2 + 2*C2*C3*R1**2*R3*R4
                - 2*C2*C3*R1*R2**2*R3 - 2*C2*C3*R1*R2*R3**2 - 4*C2*C3*R1*R2*R3*R4
                - 2*C2*C3*R1*R3**2*R4 - 2*C2*C3*R1*R3*R4**2 + C3**2*R1**2*R3**2
                - 2*C3**2*R1*R2*R3**2 - 2*C3**2*R1*R3**2*R4 + C3**2*R2**2*R3**2
                + 2*C3**2*R2*R3**2*R4 + C3**2*R3**2*R4**2)**0.5
        denom = 2*C2*C3*R1*R3*(R2 + R4)
        decay = np.exp(t_impulse * (-20*17*(C2*R1*R2 + C2*R1*R3 + C2*R1*R4
                                            - C3*R1*R3 + C3*R2*R3 + C3*R3*R4) / denom))
        asym = C2*R1*R2 - C2*R1*R3 + C2*R1*R4 + C3*R1*R3 - C3*R2*R3 - C3*R3*R4
        extracellular_impulse_response = decay * (
            np.cosh(t_impulse * disc / denom)
            + np.sinh(t_impulse * disc / denom) * asym / disc
        ) * (-R4 / (C2 * (R2 + R4)))
        electrode_ppwave = np.convolve(ppwave, extracellular_impulse_response, 'same')
        if efield:  #add fields
            amp = 1/np.sqrt((np.square(rijk[0][neuron])+np.square(rijk[1][neuron])+np.square(rijk[2][neuron])))
            rijk[0][neuron] = rijk[0][neuron]*amp
            rijk[1][neuron] = rijk[1][neuron]*amp
            rijk[2][neuron] = rijk[2][neuron]*amp
            Vi = np.add(Vi,np.multiply(electrode_ppwave,rijk[0][neuron]))
            Vj = np.add(Vj,np.multiply(electrode_ppwave,rijk[1][neuron]))
            Vk = np.add(Vk,np.multiply(electrode_ppwave,rijk[2][neuron]))
        else:       #add scalar
            Vt = np.add(Vt,electrode_ppwave)
        if np.mod(neuron,1000) == 999:
            log.info(str(neuron+1)+" neurons calculated")
    #------------------------------------------------------------------------------#        
    # end simulation
    
    log.info('neuron contribution to MER complete')
    
    #remove bias
    if efield:
        Vt = np.sqrt(np.square(Vi) + np.square(Vj) + np.square(Vk))  # np.add's third argument is an output buffer, so the old call silently dropped Vk
    Vt = np.subtract(Vt,np.mean(Vt))

    #apply hardware filters
    flow = 5500*2.
    fhigh = 500.
    b,a = signal.butter(18,flow*dt,'low')
    Vt = signal.lfilter(b, a, Vt)
    b,a = signal.butter(1,fhigh*dt,'high')
    Vt = signal.lfilter(b, a, Vt)

    #produce plots
    if plotAll:
        volts = pylab.plot(times,Vt)
        if BGsim:
            stnrate = pylab.plot(Ratetime,np.multiply(STNdata,200))
        pylab.show()
        nfft = 2**int(math.log(len(Vt), 2)) + 1
        sr = 1 / dt
        Pxi, freqs = pylab.psd(x=Vt, Fs=sr, NFFT=nfft // 10, window=pylab.window_none, noverlap=100)
        pylab.show()
        return freqs, Pxi
        # unreachable: the early return above short-circuits this plot
        # psd = pylab.loglog(freqs, Pxi)
        # pylab.show()
    return Vt, times
Example #30
    else:
        return None


pylab.ion()
reader = ArrayReader(port='/dev/ttyUSB0', baudrate=115200, timeout=0.05)

# Get background
if 1:
    if os.path.isfile(BACKGROUND_FILE):
        print('reading background.txt')
        background = pylab.loadtxt(BACKGROUND_FILE)
    else:
        print('getting new background image for equalization')
        numBackground = 5
        background = pylab.zeros((768, ))
        for i in range(0, numBackground):
            print(i)
            data = reader.getData()
            background = background + data
        background = background / numBackground
        print()

    pylab.savetxt('background.txt', background)
    delta = 500.0 - background
else:
    print('background subtraction disabled')
    delta = 0

i = 0
while 1:
Example #31
def make_png(curTxtPath, is_np_array=False):
    from pathlib import Path

    path = Path(curTxtPath)
    parent_path = str(path.parent)

    for rotateDegree in [-8, -4, -2, 0, 2, 4, 8]:
        for FoVDegree in [0]:
            for scale_degree in range(3):
                dirname = str(rotateDegree) + '_' + str(FoVDegree) + '_' + str(1.0 + scale_degree * 0.1)
                dirpath = os.path.join(parent_path, dirname)

                if not os.path.exists(dirpath):
                    os.mkdir(dirpath)

    if is_np_array:
        tmpArr = np.load(curTxtPath)
    else:
        depthTxt = open(curTxtPath, "r")
        tmpArr = depthTxt.read().replace('\n', ' ').replace('  ', ' ').split(' ')
    print(len(tmpArr))

    txtArr = plt.zeros((height, width))  # 2D depth array

    for i in range(height):
        for j in range(width):
            # np.load yields a 2-D array; the text branch yields a flat token list
            token = tmpArr[i][j] if is_np_array else tmpArr[i * width + j]
            txtArr[i][j] = stringToFloat(token)

    crdArr = depth_to_coordinates(txtArr)
    newOrigin = (np.min(crdArr, axis=0) + np.max(crdArr, axis=0)) * 0.5   # error occurs

    T = np.array([
        [1.0, 0.0, 0.0, newOrigin[0]],
        [0.0, 1.0, 0.0, newOrigin[1]],
        [0.0, 0.0, 1.0, newOrigin[2]],
        [0.0, 0.0, 0.0, 1.0]
    ])

    invT = np.array([
        [1.0, 0.0, 0.0, -newOrigin[0]],
        [0.0, 1.0, 0.0, -newOrigin[1]],
        [0.0, 0.0, 1.0, -newOrigin[2]],
        [0.0, 0.0, 0.0, 1.0]
    ])

    transposed_point_cloud = np.dot(invT, np.transpose(crdArr))

    # generate the rotated PNG files
    vt = 0
    for fsz in [5]:
        for rotateDegree in [-8, -4, -2, 0, 2, 4, 8]:
            for FoVDegree in [0]: # range(-4, 4 + 1, 2):
                for scale_degree in range(3):
                    t = time.time()
                    Rx = get_rotation_matrix_x_axis(rotateDegree)

                    scale_factor = 1.0 + scale_degree * 0.1

                    S = np.array([
                        [scale_factor, 0.0, 0.0, 0.0],
                        [0.0, scale_factor, 0.0, 0.0],
                        [0.0, 0.0, 1.0, 0.0],
                        [0.0, 0.0, 0.0, 1.0]
                    ])

                    rotatedCrdArr = np.transpose(np.dot(np.dot(T, np.dot(Rx, S)), transposed_point_cloud))
                    rotatedDepthArr = coordinates_to_depth(rotatedCrdArr, vt, fsz, np.sign(FoVDegree) * (2 ** abs(FoVDegree)))  # writing rotatedDepthArr to a txt file yields the rotation-transformed depth map

                    def save_imagefile(newPngPath, rotatedDepthArr, t):
                        plt.imsave(newPngPath, rotatedDepthArr)
                        plt.imshow(rotatedDepthArr)
                        print(newPngPath + " done within %f" % (time.time() - t))

                    def save_npyfile(npy_path, rotatedDepthArr, t):
                        np.save(npy_path, rotatedDepthArr)
                        print(npy_path + " done within %f" % (time.time() - t))

                    dirname = str(rotateDegree) + '_' + str(FoVDegree) + '_' + str(scale_factor)
                    dirpath = os.path.join(parent_path, dirname)
                    filename = os.path.basename(curTxtPath).split('.')[0] + '.png'
                    
                    # save_imagefile(curTxtPath + "-vt_" + str(vt) + "-fsz_" + str(fsz) + "-" + str(rotateDegree) + "-fov" + str(np.sign(FoVDegree) * (2 ** abs(FoVDegree))) + "-scale" + str(scale_degree * 0.1) + ".png", rotatedDepthArr, t)
                    save_imagefile(os.path.join(dirpath, filename), rotatedDepthArr, t)
Example #32
def plotConn (include = ['all'], feature = 'strength', orderBy = 'gid', figSize = (10,10), groupBy = 'pop', groupByInterval = None, saveData = None, saveFig = None, showFig = True): 
    ''' 
    Plot network connectivity
        - include (['all',|'allCells','allNetStims',|,120,|,'E1'|,('L2', 56)|,('L5',[4,5,6])]): Cells to show (default: ['all'])
        - feature ('weight'|'delay'|'numConns'|'probability'|'strength'|'convergence'|'divergence'): Feature to show in connectivity matrix; 
            the only features applicable to groupBy='cell' are 'weight', 'delay' and 'numConns';  'strength' = weight * probability (default: 'strength')
        - groupBy ('pop'|'cell'|'y'): Show matrix for individual cells, populations, or by other numeric tag such as 'y' (default: 'pop')
        - groupByInterval (int or float): Interval of groupBy feature to group cells by in conn matrix, e.g. 100 to group by cortical depth in steps of 100 um   (default: None)
        - orderBy ('gid'|'y'|'ynorm'|...): Unique numeric cell property to order x and y axes by, e.g. 'gid', 'ynorm', 'y' (requires groupBy='cells') (default: 'gid')
        - figSize ((width, height)): Size of figure (default: (10,10))
        - saveData (None|True|'fileName'): File name where to save the final data used to generate the figure; 
            if set to True uses filename from simConfig (default: None)
        - saveFig (None|True|'fileName'): File name where to save the figure; 
            if set to True uses filename from simConfig (default: None)
        - showFig (True|False): Whether to show the figure or not (default: True)

        - Returns figure handles
    '''

    print('Plotting connectivity matrix...')
    
    cells, cellGids, netStimPops = getCellsInclude(include)    

    # Create plot
    fig = figure(figsize=figSize)
    fig.subplots_adjust(right=0.98) # Less space on right
    fig.subplots_adjust(top=0.96) # Less space on top
    fig.subplots_adjust(bottom=0.02) # Less space on bottom

    h = axes()

    # Calculate matrix if grouped by cell
    if groupBy == 'cell': 
        if feature in ['weight', 'delay', 'numConns']: 
            connMatrix = zeros((len(cellGids), len(cellGids)))
            countMatrix = zeros((len(cellGids), len(cellGids)))
        else: 
            print('Conn matrix with groupBy="cell" only supports features= "weight", "delay" or "numConns"')
            return fig
        cellInds = {cell['gid']: ind for ind,cell in enumerate(cells)}

        # Order by
        if len(cells) > 0:
            if orderBy not in cells[0]['tags']:  # if orderBy property doesn't exist or is not numeric, use gid
                orderBy = 'gid'
            elif not isinstance(cells[0]['tags'][orderBy], Number): 
                orderBy = 'gid' 
        
            if orderBy == 'gid': 
                yorder = [cell[orderBy] for cell in cells]
            else:
                yorder = [cell['tags'][orderBy] for cell in cells]
            
            sortedGids = {gid:i for i,(y,gid) in enumerate(sorted(zip(yorder,cellGids)))}
            cellInds = sortedGids

        # Calculate conn matrix
        for cell in cells:  # for each postsyn cell
            for conn in cell['conns']:
                if conn['preGid'] != 'NetStim' and conn['preGid'] in cellInds:
                    if feature in ['weight', 'delay']: 
                        if conn['preGid'] in cellInds:
                            connMatrix[cellInds[conn['preGid']], cellInds[cell['gid']]] += conn[feature]
                    countMatrix[cellInds[conn['preGid']], cellInds[cell['gid']]] += 1

        if feature in ['weight', 'delay']: connMatrix = connMatrix / countMatrix 
        elif feature in ['numConns']: connMatrix = countMatrix 

    # Calculate matrix if grouped by pop
    elif groupBy == 'pop': 
        
        # get list of pops
        popsTemp = list(set([cell['tags']['popLabel'] for cell in cells]))
        pops = [pop for pop in sim.net.allPops if pop in popsTemp]+netStimPops
        popInds = {pop: ind for ind,pop in enumerate(pops)}
        
        # initialize matrices
        if feature in ['weight', 'strength']: 
            weightMatrix = zeros((len(pops), len(pops)))
        elif feature == 'delay': 
            delayMatrix = zeros((len(pops), len(pops)))
        countMatrix = zeros((len(pops), len(pops)))
        
        # calculate max num conns per pre and post pair of pops
        numCellsPop = {}
        for pop in pops:
            if pop in netStimPops:
                numCellsPop[pop] = -1
            else:
                numCellsPop[pop] = len([cell for cell in cells if cell['tags']['popLabel']==pop])

        maxConnMatrix = zeros((len(pops), len(pops)))
        if feature == 'convergence': maxPostConnMatrix = zeros((len(pops), len(pops)))
        if feature == 'divergence': maxPreConnMatrix = zeros((len(pops), len(pops)))
        for prePop in pops:
            for postPop in pops: 
                if numCellsPop[prePop] == -1: numCellsPop[prePop] = numCellsPop[postPop]
                maxConnMatrix[popInds[prePop], popInds[postPop]] = numCellsPop[prePop]*numCellsPop[postPop]
                if feature == 'convergence': maxPostConnMatrix[popInds[prePop], popInds[postPop]] = numCellsPop[postPop]
                if feature == 'divergence': maxPreConnMatrix[popInds[prePop], popInds[postPop]] = numCellsPop[prePop]
        
        # Calculate conn matrix
        for cell in cells:  # for each postsyn cell
            for conn in cell['conns']:
                if conn['preGid'] == 'NetStim':
                    prePopLabel = conn['preLabel']
                else:
                    preCell = next((cell for cell in cells if cell['gid']==conn['preGid']), None)
                    prePopLabel = preCell['tags']['popLabel'] if preCell else None
                
                if prePopLabel in popInds:
                    if feature in ['weight', 'strength']: 
                        weightMatrix[popInds[prePopLabel], popInds[cell['tags']['popLabel']]] += conn['weight']
                    elif feature == 'delay': 
                        delayMatrix[popInds[prePopLabel], popInds[cell['tags']['popLabel']]] += conn['delay'] 
                    countMatrix[popInds[prePopLabel], popInds[cell['tags']['popLabel']]] += 1    
    
    # Calculate matrix if grouped by numeric tag (eg. 'y')
    elif groupBy in sim.net.allCells[0]['tags'] and isinstance(sim.net.allCells[0]['tags'][groupBy], Number):
        if not isinstance(groupByInterval, Number):
            print('groupByInterval not specified')
            return
  
        # group cells by 'groupBy' feature (eg. 'y') in intervals of 'groupByInterval')
        cellValues = [cell['tags'][groupBy] for cell in cells]
        minValue = _roundFigures(groupByInterval * floor(min(cellValues) / groupByInterval), 3)
        maxValue  = _roundFigures(groupByInterval * ceil(max(cellValues) / groupByInterval), 3)
        
        groups = arange(minValue, maxValue, groupByInterval)
        groups = [_roundFigures(x,3) for x in groups]
        print(groups)

        if len(groups) < 2: 
            print('groupBy %s with groupByInterval %s results in <2 groups' % (str(groupBy), str(groupByInterval)))
            return
        groupInds = {group: ind for ind,group in enumerate(groups)}
        
        # initialize matrices
        if feature in ['weight', 'strength']: 
            weightMatrix = zeros((len(groups), len(groups)))
        elif feature == 'delay': 
            delayMatrix = zeros((len(groups), len(groups)))
        countMatrix = zeros((len(groups), len(groups)))

        # calculate max num conns per pre and post pair of pops
        numCellsGroup = {}
        for group in groups:
            numCellsGroup[group] = len([cell for cell in cells if group <= cell['tags'][groupBy] < (group+groupByInterval)])

        maxConnMatrix = zeros((len(groups), len(groups)))
        if feature == 'convergence': maxPostConnMatrix = zeros((len(groups), len(groups)))
        if feature == 'divergence': maxPreConnMatrix = zeros((len(groups), len(groups)))
        for preGroup in groups:
            for postGroup in groups: 
                if numCellsGroup[preGroup] == -1: numCellsGroup[preGroup] = numCellsGroup[postGroup]
                maxConnMatrix[groupInds[preGroup], groupInds[postGroup]] = numCellsGroup[preGroup]*numCellsGroup[postGroup]
                if feature == 'convergence': maxPostConnMatrix[groupInds[preGroup], groupInds[postGroup]] = numCellsGroup[postGroup]
                if feature == 'divergence': maxPreConnMatrix[groupInds[preGroup], groupInds[postGroup]] = numCellsGroup[preGroup]
        
        # Calculate conn matrix
        for cell in cells:  # for each postsyn cell
            for conn in cell['conns']:
                if conn['preGid'] == 'NetStim':
                    prePopLabel = -1  # maybe add in future
                else:
                    preCell = next((cell for cell in cells if cell['gid']==conn['preGid']), None)
                    if preCell:
                        preGroup = _roundFigures(groupByInterval * floor(preCell['tags'][groupBy] / groupByInterval), 3)
                    else:
                        preGroup = None  # a bare `None` here was a no-op, leaving preGroup stale from the previous iteration

                postGroup = _roundFigures(groupByInterval * floor(cell['tags'][groupBy] / groupByInterval), 3)

                #print groupInds
                if preGroup in groupInds:
                    if feature in ['weight', 'strength']: 
                        weightMatrix[groupInds[preGroup], groupInds[postGroup]] += conn['weight']
                    elif feature == 'delay': 
                        delayMatrix[groupInds[preGroup], groupInds[postGroup]] += conn['delay'] 
                    countMatrix[groupInds[preGroup], groupInds[postGroup]] += 1    

    # no valid groupBy
    else:  
        print('groupBy (%s) is not valid' % (str(groupBy)))
        return

    if groupBy != 'cell':
        if feature == 'weight': 
            connMatrix = weightMatrix / countMatrix  # avg weight per conn (fix to remove divide by zero warning) 
        elif feature == 'delay': 
            connMatrix = delayMatrix / countMatrix
        elif feature == 'numConns':
            connMatrix = countMatrix
        elif feature in ['probability', 'strength']:
            connMatrix = countMatrix / maxConnMatrix  # probability
            if feature == 'strength':
                connMatrix = connMatrix * weightMatrix  # strength
        elif feature == 'convergence':
            connMatrix = countMatrix / maxPostConnMatrix
        elif feature == 'divergence':
            connMatrix = countMatrix / maxPreConnMatrix

    imshow(connMatrix, interpolation='nearest', cmap='jet', vmin=nanmin(connMatrix), vmax=nanmax(connMatrix))  #_bicolormap(gap=0)


    # Plot grid lines
    hold(True)
    if groupBy == 'cell':
        # Make pretty
        step = int(len(cells)/10.0)
        base = 100 if step>100 else 10
        step = int(base * floor(float(step)/base))
        h.set_xticks(arange(0,len(cells),step))
        h.set_yticks(arange(0,len(cells),step))
        h.set_xticklabels(arange(0,len(cells),step))
        h.set_yticklabels(arange(0,len(cells),step))
        h.xaxis.set_ticks_position('top')
        xlim(-0.5,len(cells)-0.5)
        ylim(len(cells)-0.5,-0.5)
        clim(nanmin(connMatrix),nanmax(connMatrix))

    elif groupBy == 'pop':
        for ipop, pop in enumerate(pops):
            plot(array([0,len(pops)])-0.5,array([ipop,ipop])-0.5,'-',c=(0.7,0.7,0.7))
            plot(array([ipop,ipop])-0.5,array([0,len(pops)])-0.5,'-',c=(0.7,0.7,0.7))

        # Make pretty
        h.set_xticks(range(len(pops)))
        h.set_yticks(range(len(pops)))
        h.set_xticklabels(pops)
        h.set_yticklabels(pops)
        h.xaxis.set_ticks_position('top')
        xlim(-0.5,len(pops)-0.5)
        ylim(len(pops)-0.5,-0.5)
        clim(nanmin(connMatrix),nanmax(connMatrix))

    else:
        for igroup, group in enumerate(groups):
            plot(array([0,len(groups)])-0.5,array([igroup,igroup])-0.5,'-',c=(0.7,0.7,0.7))
            plot(array([igroup,igroup])-0.5,array([0,len(groups)])-0.5,'-',c=(0.7,0.7,0.7))

        # Make pretty
        h.set_xticks([i-0.5 for i in range(len(groups))])
        h.set_yticks([i-0.5 for i in range(len(groups))])
        h.set_xticklabels([int(x) if x>1 else x for x in groups])
        h.set_yticklabels([int(x) if x>1 else x for x in groups])
        h.xaxis.set_ticks_position('top')
        xlim(-0.5,len(groups)-0.5)
        ylim(len(groups)-0.5,-0.5)
        clim(nanmin(connMatrix),nanmax(connMatrix))

    colorbar(label=feature, shrink=0.8) #.set_label(label='Fitness',size=20,weight='bold')
    xlabel('post')
    h.xaxis.set_label_coords(0.5, 1.06)
    ylabel('pre')
    title ('Connection '+feature+' matrix', y=1.08)

    #save figure data
    if saveData:
        figData = {'connMatrix': connMatrix, 'feature': feature, 'groupBy': groupBy,
         'include': include, 'saveData': saveData, 'saveFig': saveFig, 'showFig': showFig}
    
        _saveFigData(figData, saveData, 'conn')
 
    # save figure
    if saveFig: 
        if isinstance(saveFig, str):
            filename = saveFig
        else:
            filename = sim.cfg.filename+'_'+'conn.png'
        savefig(filename)

    # show fig 
    if showFig: _showFigure()

    return fig
Example #33
n = int(1e+5)  # number of microstates
N = 50  # number of spin-particles

def calc_energy(array, mu=1, B=1):
    # array should have a random distribution of ones and negative ones
    total_energy = -mu*B*sum(array)
    return total_energy

def gen_rand_array(size):
    # only allowed values are 1 & -1
    tmplist = [random.randrange(-1, 2, 2) for i in range(size)]  # \vec{\pm 1}
    array = plt.array(tmplist)
    return array

if __name__ == '__main__':
    energies = plt.zeros(n)  # the energies of all microstates

    t0 = time.time()
    for i in range(n):
        # create array of random ones
        tmp_arr = gen_rand_array(N)

        # calculate total energy
        tmp_energy = calc_energy(tmp_arr)

        # add energies to array
        energies[i] = tmp_energy

    t1 = time.time()

    plt.figure("exercise c")
Example #34
    def setmask(self, im):
        """
        input an image (for now an HDU) and set self.mask to 
        an array the size of the image with the phot region =1
          and expanded background annulus =2
        for now we also create a mask the size of the image, so I recommend
          extracting a subimage and calling this method with that input
        this method will trim the polygon to fit in the image
        """
        imshape = im.shape
        mask = pl.zeros(imshape)

        if self.type == "circle":
            x, y, r = self.imcoords(im)
            x0 = int(x)
            y0 = int(y)
            dx = x - x0
            dy = y - y0
            # grr pixel centers again - is this right?
            #            dx=dx-0.5; dy=dy-0.5

            bg0_r = self.imcoords(im, reg="bg0")[2]  #-0.2 # fudge
            bg1_r = self.imcoords(im, reg="bg1")[2]  #+0.2 # fudge
            bg1_r0 = int(pl.ceil(bg1_r))
            r2 = r**2
            bg0_r2 = bg0_r**2
            bg1_r2 = bg1_r**2
            for i in pl.array(range(2 * bg1_r0 + 1)) - bg1_r0:
                for j in pl.array(range(2 * bg1_r0 + 1)) - bg1_r0:
                    if y0 + j >= 0 and x0 + i >= 0 and y0 + j < (
                            imshape[0] - 1) and x0 + i < (imshape[1] - 1):
                        d2 = (1. * i - dx)**2 + (1. * j - dy)**2
                        # d2 = (i-x)**2 + (j-y)**2 -> (i-x0-(x-x0))**2 + ...
                        if d2 <= r2:
                            mask[y0 + j,
                                 x0 + i] = 1  # remember indices inverted
                        if d2 >= bg0_r2 and d2 <= bg1_r2:
                            mask[y0 + j,
                                 x0 + i] = 2  # remember indices inverted
#                        if x0+i==6:
#                           print i,j,x0+i,y0+j,dx,dy,d2,bg0_r2,bg1_r2

        elif self.type == "polygon":
            # turn annulus back into mask, will trim at edges of image
            from matplotlib.path import Path
            from matplotlib import __version__ as mpver
            # compare version components numerically: the original compared
            # strings to ints (which never triggers) and its elif chain would
            # also reject e.g. 2.0.0; a tuple comparison handles both
            v = tuple(int(x) for x in mpver.split('.')[:3])
            if v < (1, 3, 1):
                raise Exception(
                    "need matplotlib >=1.3.1, or tell remy to add fallback nxutils option for Path.contains_points"
                )

            # Create vertex coordinates for each grid cell
            x, y = pl.meshgrid(pl.arange(imshape[1]), pl.arange(imshape[0]))
            x, y = x.flatten(), y.flatten()
            points = pl.vstack((x, y)).T
            mask1 = Path(self.imcoords(im, reg="bg1")).contains_points(points)
            mask1 = mask1.reshape((imshape[0], imshape[1]))
            mask0 = Path(self.imcoords(im, reg="bg0")).contains_points(points)
            #,radius=1)
            mask0 = mask0.reshape((imshape[0], imshape[1]))
            mask = Path(self.imcoords(im, reg="ap")).contains_points(points)
            mask = mask.reshape((imshape[0], imshape[1]))

            mask = mask + (1 * mask1 - 1 * mask0) * 2
        else:
            raise Exception("unknown region type %s" % self.type)
        self.mask = mask
        return mask
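A hedged usage sketch (helper name hypothetical): given the mask convention set above, 1 marks the photometry aperture and 2 the background annulus, so a background-subtracted aperture sum over an image array could look like:

import numpy as np

def aperture_sum(data, mask):
    bg_per_pix = np.median(data[mask == 2])  # background level estimated in the annulus
    npix = (mask == 1).sum()                 # number of aperture pixels
    return data[mask == 1].sum() - npix * bg_per_pix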
Beispiel #35
0
from numpy import array, zeros, sqrt  # assumed imports: these names are used unqualified below

def dmatrix(d,**centers):
    """
    DM = dmatrix(d,**centers)
       
    Arguments:
    d = data
    *centers may contain centers, c, different from d, otherwise c = d
        
    Typically d = c but, in general, data does not have to equal its centers
    as in the case of the evaluation matrix, where the d becomes the
    evaluation points and the centers are the collocation data.
    
    Output DM:
    Compute the distance matrix with entries being the distances between the
    data and the centers.
    The Euclidean distance matrix, DM, is the m by n matrix with entries
         ||d_0 - c_0|| ||d_0 - c_1|| ... ||d_0 - c_n||
         ||d_1 - c_0|| ||d_1 - c_1|| ... ||d_1 - c_n||
                          ...
         ||d_m - c_0|| ||d_m - c_1|| ... ||d_m - c_n||
    
    m = number of data points, n = number of centers
    
    ****** ASSUMPTION: # pts >= dimension of space
    ****** ASSUMPTION: c, d are ROW vectors, otherwise convert to row vectors
    
    Remark:
    d and c are called vectors but it might be more appropriate to call
    them matrices (or rank dim(d), rank dim(c) tensors). When called vectors
    it is assumed that each row is a vector in the space implying the number
    of columns is the dimension of the space and the number of rows is the
    number of points
    """
    # Test Input:
    # Are d and c arrays of row vectors?
    # If d and c are column vectors, convert them to row vectors.
    # If d and c are square, i.e. # pts = dimension of space, notify user 
    if d.ndim > 1:    
        if d.shape[1] > d.shape[0]:
            d = d.T
        elif d.shape[1] == d.shape[0]:
            print("Assuming data is in row-vector form.")
    else:   # 1-D data, convert to 2-D data with shape (M,1)
        d = array([d]).T
    
    # Note: centers is a **kwargs dict here, so centers.get('centers', d)
    # returns None whenever the caller explicitly passes centers=None;
    # the .get() default only applies when the key is absent entirely.
    if centers.get('centers') is None:
        c = d
    else:
        c = centers.get('centers')

    if c.ndim > 1:
        if c.shape[1] > c.shape[0]:
            c = c.T
        elif c.shape[1] == c.shape[0]:
            print("Assuming centers are in row-vector form.")
    else:   # 1-D data, convert to 2-D data with shape (N,1)
        c = array([c]).T
    # **************************************************************************
    #                               Begin Algorithm
    # **************************************************************************
    # Obtain shape of input:
    M, sd = d.shape    # M = # pts, sd = dim of data space
    N, sc = c.shape    # N = # pts, sc = dim of centers space
    #
    # Raise error if centers and data have different dimension    
    if sd != sc:
        raise ValueError('Data and centers must have same dimension')
    # ********** Construct the Distance Matrix DM **********
    # Initialize the distance matrix: (data # of pts) by (centers # of pts)
    # Denote the rows of d by
    # d_0 = (d[0,0], d[0,1], ...), d_1 = (d[1,0], d[1,1], ...), etc.
    #
    # The distance matrix is the M by N matrix with entries
    #      ||d_0 - c_0|| ||d_0 - c_1|| ... ||d_0 - c_n||
    #      ||d_1 - c_0|| ||d_1 - c_1|| ... ||d_1 - c_n||
    #                       ...
    #      ||d_m - c_0|| ||d_m - c_1|| ... ||d_m - c_n||
    #
    DM = zeros((M,N))
    # Determine the distance of each point in the data-set from its center
    for i in range(M):
        # Compute the row ||d_i - c_0|| ||d_i - c_1|| ... ||d_i - c_n||
        DM[i,:] = ((d[i]-c)**2).sum(1)
    # Finish distance formula by taking square root of each entry
    return sqrt(DM)
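For reference, a vectorized sketch of the same distance matrix using NumPy broadcasting, assuming inputs are already in (M, dim) row-vector form; scipy.spatial.distance.cdist(d, c) computes the identical matrix in one call:

import numpy as np

def dmatrix_vectorized(d, c=None):
    c = d if c is None else c
    diff = d[:, None, :] - c[None, :, :]     # (M, N, dim) pairwise differences
    return np.sqrt((diff ** 2).sum(axis=2))  # (M, N) Euclidean distances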
#Synaptic parameters, corresponding to a NetCon synapse built into NEURON
synapseParameters = {
    'idx' : 0,               # insert synapse on index "0", the soma
    'e' : 0.,                # reversal potential of synapse
    'syntype' : 'Exp2Syn',   # conductance based double-exponential synapse
    'tau1' : 1.0,            # Time constant, rise
    'tau2' : 1.0,            # Time constant, decay
    'weight' : 0.05,         # Synaptic weight
    'record_current' : True, # Will enable synapse current recording
}

#Generate the grid in xz-plane over which we calculate local field potentials
x = pl.linspace(-50, 50, 11)
z = pl.linspace(-50, 50, 11)
X, Z = pl.meshgrid(x, z)
y = pl.zeros(X.size)

#define parameters for extracellular recording electrode, using optional method
electrodeParameters = {
    'sigma' : 0.3,              # extracellular conductivity
    'x' : X.reshape(-1),        # x,y,z-coordinates of contact points
    'y' : y,
    'z' : Z.reshape(-1),
    'method' : 'som_as_point',  #treat soma segment as sphere source
}
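A hedged sketch of how these parameter dicts are typically consumed in an LFPy-style workflow; the cell parameters and spike times are not part of this excerpt, so cellParameters and the times below are assumptions:

import LFPy
cell = LFPy.Cell(**cellParameters)                       # cellParameters assumed defined elsewhere
synapse = LFPy.Synapse(cell, **synapseParameters)        # soma synapse from the dict above
synapse.set_spike_times(pl.array([10.]))                 # illustrative spike time (ms)
electrode = LFPy.RecExtElectrode(**electrodeParameters)  # xz-plane grid of contacts
cell.simulate(electrode=electrode)                       # older LFPy API; fills electrode.LFP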

################################################################################
# Main simulation procedure, setting up extracellular electrode, cell, synapse
################################################################################


# Add grids to the axis
axis_1.grid(True)
axis_2.grid(True)

# Add labels to x axis of both axis_1 and axis_2
axis_1.set_xlabel("Time (s)")
axis_2.set_xlabel("Frequency (radian/sec)")

# Add labels to y axis of both axis_1 and axis_2
axis_1.set_ylabel("Amplitude")
axis_2.set_ylabel("Amplitude")

# Make data placeholders
axis_1_data = zeros(0)
axis_2_data = zeros(0)

# Make data placeholders
axis_1_x = zeros(0)
axis_2_x = zeros(0)

wav_plot, = axis_1.plot(axis_1_x, axis_1_data, '-b', label='wave')
amp_freq, = axis_2.plot(axis_2_x, axis_2_data, '-b', label='Amps')

axis_1.legend([wav_plot], [wav_plot.get_label()])
axis_2.legend([amp_freq], [amp_freq.get_label()])

wave_obj = sa.WaveObject.from_wave_file(filename)
t = threading.Thread(target=wave_obj.play, name="Play Wav")
t.daemon = True
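The snippet above creates the playback thread but is cut off before it runs; a minimal self-contained sketch of the same pattern (assumes the simpleaudio package and a valid 'example.wav'):

import threading
import simpleaudio as sa

wave_obj = sa.WaveObject.from_wave_file("example.wav")
t = threading.Thread(target=wave_obj.play, name="Play Wav", daemon=True)
t.start()  # audio plays in the background while the main thread keeps working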
Beispiel #38
0
import scipy.special as scp  # assumed import; scipy.misc.comb moved to scipy.special.comb in modern SciPy
import pylab as plt          # assumed import: plt.zeros / plotting are used below

def possibility(N_A, q_A, N_B, q_B):
    numA = scp.comb(q_A + N_A - 1, q_A)  # numerator: multiplicity of solid A
    numB = scp.comb(q_B + N_B - 1, q_B)  # numerator: multiplicity of solid B
    q_T = q_A + q_B; N_T = N_A + N_B     # totals for the combined solid
    denT = scp.comb(q_T + N_T - 1, q_T)  # denominator: total multiplicity
    return numA*numB/float(denT)
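For reference, this implements the standard Einstein-solid counting: the multiplicity of a solid with N oscillators and q energy units, and the probability of macrostate q_A for two solids sharing q_tot units, are

    \Omega(N, q) = \binom{q + N - 1}{q},
    \qquad
    P(q_A) = \frac{\Omega(N_A, q_A)\,\Omega(N_B, q_{tot} - q_A)}{\Omega(N_A + N_B, q_{tot})}

which is exactly numA*numB/denT above.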

#begin ex.m

Na = 2; Nb = 2; q_tot = 6  # system sizes and total number of energy units
micro = 0  # total number of microstates

#array over macrostates qa:
#P[qa] holds the probability of macrostate qa
P = plt.zeros(q_tot + 1)

#loop through all macrostates
for qa in range(q_tot + 1):
    P[qa] = possibility(Na, qa, Nb, q_tot-qa)

test(P)  # test() assumed defined elsewhere in the excerpted file

plt.figure("exercise m")
plt.title('possible number of microstates per macrostate')
plt.ylabel('probability of macrostate')
plt.xlabel('macrostate [q_A]')
plt.plot(P, 'b-')
plt.savefig("exm.png")

#begin ex.N
Beispiel #39
0
axis_2.set_ylim((-MAX_AMPLITUDE, MAX_AMPLITUDE))

# Add grids to the axis
axis_1.grid(True)
axis_2.grid(True)

# Add labels to x axis of both axis_1 and axis_2
axis_1.set_xlabel("Time (s)")
axis_2.set_xlabel("Frequency (radian/sec)")

# Add labels to y axis of both axis_1 and axis_2
axis_1.set_ylabel("Amplitude")
axis_2.set_ylabel("Amplitude")

# Make data placeholders
axis_1_data = zeros(0)
axis_2_data = zeros(FREQUENCY_MAX)

# Make data placeholders
axis_1_x = zeros(0)
axis_2_x = zeros(FREQUENCY_MAX)

wav_plot, = axis_1.plot(axis_1_x, axis_1_data, '-b', label='wave')
amp_freq, = axis_2.plot(axis_2_x, axis_2_data, '-b', label='Freqs')

axis_1.legend([wav_plot], [wav_plot.get_label()])
axis_2.legend([amp_freq], [amp_freq.get_label()])

# Get the hyperparameters from the .wav file. Reference Testing.py
# Append new data to our axis_1_x rather than "x"
# Define all the variables we need to run the function
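A hedged sketch of the "get the hyperparameters from the .wav file" step using the standard-library wave module (the referenced Testing.py is not shown in this excerpt):

import wave

wf = wave.open(filename, 'rb')
sample_rate = wf.getframerate()  # frames per second
n_frames = wf.getnframes()       # total number of frames
n_channels = wf.getnchannels()   # mono/stereo
duration = n_frames / float(sample_rate)
wf.close()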
Beispiel #40
0
import scipy.special as scp  # scipy.misc.comb was removed in modern SciPy; comb now lives in scipy.special
import pylab as plt          # assumed import: plt.zeros is used below

def possibility(N_A, q_A, N_B, q_B):
    numA = scp.comb(q_A + N_A - 1, q_A)
    numB = scp.comb(q_B + N_B - 1, q_B)
    q_T = q_A + q_B; N_T = N_A + N_B
    denT = scp.comb(q_T + N_T - 1, q_T)
    return numA*numB/float(denT)

Na = 50  # input('number of atoms in system A \n')
Nb = 50  # input('number of atoms in system B \n')
q = 100  # input('total amount of energy-units in systems \n')

#array over macrostates qa:
#P[qa] holds the probability of macrostate qa
P = plt.zeros(q + 1)
 
for qa in range(q + 1):
    P[qa] = possibility(Na, qa, Nb, q-qa)


index_max = P.argmax()
M = sum(P)
prob_max = P[index_max]/float(M)

print "N_a=%d, N_b=%d, q_tot=%d"%(Na,Nb,q)
print "most probable value is: ", index_max
print "probability of getting most probable value is: ", prob_max
print "probability of having q_a = 0 after equilibrium is: ", P[0]/float(M)
'''
terminal >> python oblig1_o.py 
Beispiel #41
0
def photcombine(a_wave,
                a_f,
                a_df,
                a_fl,
                c_wave,
                c_f,
                c_df,
                c_fl,
                f_wave,
                f_dwave,
                edit=False,
                preference=None):
    nfit = len(f_wave)
    f_f = pl.zeros(nfit)
    f_df = pl.zeros(nfit)
    f_fl = pl.zeros(nfit)
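    # flag convention used throughout (inferred from the logic below):
    #   f_fl = 0 -> no usable photometry at this wavelength
    #   f_fl = 1 -> detection (flux + uncertainty)
    #   f_fl = 3 -> upper limit
    # flags 2 and 4 are not handled (see TODO below)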

    for i in range(nfit):
        # TODO can't deal with flag=2,4
        # are there any detections:
        a_det = pl.where((abs(a_wave - f_wave[i]) <
                          (0.5 * f_dwave[i])) * (a_fl == 1))[0]
        c_det = pl.where((abs(c_wave - f_wave[i]) <
                          (0.5 * f_dwave[i])) * (c_fl == 1))[0]
        # are there any UL:
        a_ul = pl.where((abs(a_wave - f_wave[i]) <
                         (0.5 * f_dwave[i])) * (a_fl == 3))[0]
        c_ul = pl.where((abs(c_wave - f_wave[i]) <
                         (0.5 * f_dwave[i])) * (c_fl == 3))[0]

        # any cat UL?
        if len(c_ul) > 0:
            # more than one?
            if len(c_ul) > 1:
                d = abs(c_wave[c_ul] - f_wave[i])
                # index into c_ul (not c_det): d was computed over the ULs
                closest_c_ul = c_ul[pl.where(d == d.min())[0]]
                print("ambiguous catalog upper limits, choosing %f for fitter %f" % (
                    c_wave[closest_c_ul], f_wave[i]))
                print("     set=", c_wave[c_ul])
            else:
                closest_c_ul = c_ul
        else:
            closest_c_ul = -1

        # any app UL?
        if len(a_ul) > 0:
            # more than one?
            if len(a_ul) > 1:
                d = abs(a_wave[a_ul] - f_wave[i])
                # index into a_ul (not a_det): d was computed over the ULs
                closest_a_ul = a_ul[pl.where(d == d.min())[0]]
                print("ambiguous apphot upper limits, choosing %f for fitter %f" % (
                    a_wave[closest_a_ul], f_wave[i]))
                print("     set=", a_wave[a_ul])
            else:
                closest_a_ul = a_ul
        else:
            closest_a_ul = -1

        # any app detections?
        if len(a_det) > 0:
            # more than one?
            if len(a_det) > 1:
                d = abs(a_wave[a_det] - f_wave[i])
                closest_a_det = a_det[pl.where(d == d.min())[0]]
                print("ambiguous apphot photometry, choosing %f for fitter %f" % (
                    a_wave[closest_a_det], f_wave[i]))
                print("     set=", a_wave[a_det])
            else:
                closest_a_det = a_det
        else:
            closest_a_det = -1

        # any cat detections?
        if len(c_det) > 0:
            # more than one?
            if len(c_det) > 1:
                d = abs(c_wave[c_det] - f_wave[i])
                closest_c_det = c_det[pl.where(d == d.min())[0]]
                print("ambiguous catalog photometry, choosing %f for fitter %f" % (
                    c_wave[closest_c_det], f_wave[i]))
                print("     set=", c_wave[c_det])
            else:
                closest_c_det = c_det
        else:
            closest_c_det = -1

        # combine:
        if preference == "cat":
            # user wants cat, there's cat det, done.
            if closest_c_det >= 0:
                f_f[i] = c_f[closest_c_det]
                f_df[i] = c_df[closest_c_det]
                f_fl[i] = 1
                # throw away apphot det silently here
                # TODO check if apphot UL is lower than cat_phot?
            elif closest_c_ul >= 0:
                # there's no det, but a cat UL - is there app det?
                if closest_a_det >= 0:
                    if a_f[closest_a_det] <= c_f[closest_c_ul]:
                        # there's an appdet below the cat UL:
                        f_f[i] = a_f[closest_a_det]
                        f_df[i] = a_df[closest_a_det]
                        f_fl[i] = 1
                    else:
                        # there's an app det _above_ the cat UL - WTF?
                        print("apphot detection brighter than catalog UL at",
                              f_wave[i])
                        # assume apphot is wrong
                        f_f[i] = c_f[closest_c_ul]
                        f_df[i] = c_df[closest_c_ul]
                        f_fl[i] = 3
                else:
                    # start with that cat UL
                    f_f[i] = c_f[closest_c_ul]
                    f_df[i] = c_df[closest_c_ul]
                    f_fl[i] = 3
                    # now if there's also an app UL
                    if closest_a_ul >= 0:
                        # and its lower
                        if a_f[closest_a_ul] <= c_f[closest_c_ul]:
                            # use lower app UL instead of cat UL:
                            f_f[i] = a_f[closest_a_ul]
                            f_df[i] = a_df[closest_a_ul]
                            f_fl[i] = 3
            else:
                # user wanted cat, but there's no cat.
                if closest_a_det >= 0:
                    f_f[i] = a_f[closest_a_det]
                    f_df[i] = a_df[closest_a_det]
                    f_fl[i] = 1
                elif closest_a_ul >= 0:
                    f_f[i] = a_f[closest_a_ul]
                    f_df[i] = a_df[closest_a_ul]
                    f_fl[i] = 3
                # otherwise they get nothing - f_fl stays=0

        elif preference == "app":
            # user wants app, there's app det, done.
            if closest_a_det >= 0:
                f_f[i] = a_f[closest_a_det]
                f_df[i] = a_df[closest_a_det]
                f_fl[i] = 1
                # throw away catphot det silently here
                # TODO check if catphot UL is lower than appphot?
            elif closest_a_ul >= 0:
                # there's no det, but an app UL - is there a cat det?
                if closest_c_det >= 0:
                    if c_f[closest_c_det] <= a_f[closest_a_ul]:
                        # there's a cat det below the app UL:
                        f_f[i] = c_f[closest_c_det]
                        f_df[i] = c_df[closest_c_det]
                        f_fl[i] = 1
                    else:
                        # there's a cat det _above_ the app UL - WTF?
                        print("catalog detection brighter than apphot UL at",
                              f_wave[i])
                        # assume apphot is wrong
                        f_f[i] = c_f[closest_c_det]
                        f_df[i] = c_df[closest_c_det]
                        f_fl[i] = 1
                else:
                    # start with that app UL
                    f_f[i] = a_f[closest_a_ul]
                    f_df[i] = a_df[closest_a_ul]
                    f_fl[i] = 3
                    # now if there's also a cat UL
                    if closest_c_ul >= 0:
                        # and its lower
                        if c_f[closest_c_ul] <= a_f[closest_a_ul]:
                            # use lower app UL instead of cat UL:
                            f_f[i] = c_f[closest_c_ul]
                            f_df[i] = c_df[closest_c_ul]
                            f_fl[i] = 3
            else:
                # user wanted app, but there's no app.
                if closest_c_det >= 0:
                    f_f[i] = c_f[closest_c_det]
                    f_df[i] = c_df[closest_c_det]
                    f_fl[i] = 1
                elif closest_c_ul >= 0:
                    f_f[i] = c_f[closest_c_ul]
                    f_df[i] = c_df[closest_c_ul]
                    f_fl[i] = 3
                # otherwise they get nothing - f_fl stays=0

        else:  # preference is neither cat nor app:
            # implicit preference for cat but some averaging
            if closest_c_det >= 0:
                if closest_a_det >= 0:
                    # 2 dets -average
                    f_f[i] = 0.5 * (c_f[closest_c_det] + a_f[closest_a_det])
                    f_df[i] = max([
                        c_df[closest_c_det], a_df[closest_a_det],
                        abs(c_f[closest_c_det] - a_f[closest_a_det])
                    ])
                    f_fl[i] = 1
                else:
                    # cat det; is there an app UL?
                    if closest_a_ul >= 0:
                        if a_f[closest_a_ul] <= c_f[closest_c_det]:
                            print "apphot UL below cat detection at ", f_wave[
                                i]
                            # in case of discrepency, assum cat correct
                        f_f[i] = c_f[closest_c_det]
                        f_df[i] = c_df[closest_c_det]
                        f_fl[i] = 1
            elif closest_c_ul >= 0:
                # there's a catalog UL, but no det:
                # start by assuming cat right
                f_f[i] = c_f[closest_c_ul]
                f_df[i] = c_df[closest_c_ul]
                f_fl[i] = 3
                if closest_a_det >= 0:
                    if a_f[closest_a_det] <= c_f[closest_c_ul]:
                        # apphot det below cat UL- replace with that
                        f_f[i] = a_f[closest_a_det]
                        f_df[i] = a_df[closest_a_det]
                        f_fl[i] = 1
                elif closest_a_ul >= 0:
                    if a_f[closest_a_ul] <= c_f[closest_c_ul]:
                        # apphot UL below cat UL- replace with that
                        f_f[i] = a_f[closest_a_ul]
                        f_df[i] = a_df[closest_a_ul]
                        f_fl[i] = 3

    # next, set uncert minima to 10%
    z = pl.where(f_fl == 1)[0]
    for zz in z:
        f_df[zz] = max([f_df[zz], 0.1 * f_f[zz]])

    # todo check for and set UL confidence levels?
    if edit:
        global whatx, fit_1, fit_3, startpos, endpos, fits_1, xs_1, x1, y1, x3, y3
    # for interactive editing
    #  plot fit phot and prepare to edit it
    z = pl.where(f_fl == 1)[0]
    if len(z) > 0:
        fit_1 = pl.plot(f_wave[z], f_f[z], 'r.', markersize=8,
                        label="fitter")[0]
        fits_1 = []
        for j in range(len(z)):
            uncert = f_f[z[j]] + pl.array([-1, 1]) * f_df[z[j]]
            #if uncert[0]<pl.ylim()[0]: uncert[0]=pl.ylim()[0]
            fits_1.append(
                pl.plot(f_wave[z[j]] * pl.array([1, 1]), uncert, 'r')[0])
    else:
        fit_1 = None
    z = pl.where(f_fl == 3)[0]
    if len(z) > 0:
        fit_3 = pl.plot(f_wave[z], f_f[z], 'rv')[0]
    else:
        fit_3 = None

    ndets = len(fits_1)
    xs_1 = pl.zeros(ndets)  # x locations of the error bars
    for k in range(ndets):
        xs_1[k] = fits_1[k].get_data()[0][0]

    pl.legend(loc=4, prop={'size': 8}, numpoints=1)

    if edit:

        def click(event):
            if not event.inaxes: return
            global whatx, fit_1, fit_3, startpos, endpos, fits_1, xs_1, x1, y1, x3, y3
            startpos = event.xdata, event.ydata

            # find closest existing pt
            if fit_1 is None:
                #                print "no fit_1?!"
                x1 = []
                y1 = []
                d1 = pl.array([1e10])
            else:
                x1, y1 = fit_1.get_data()
                d1 = abs(event.xdata - x1)
            if fit_3 is None:
                #                print "no fit_3?!"
                x3 = []
                y3 = []
                d3 = pl.array([1e10])
            else:
                x3, y3 = fit_3.get_data()
                d3 = abs(event.xdata - x3)

            # todo: for deletions, make sure we have all avail wavelength pts
            # i suppose that the flux combination step that creates fit_wave
            # will do that...

#            print "x1=",x1
#            print "x3=",x3

            if len(d1) <= 0:
                d1 = pl.array([1e10])
            if len(d3) <= 0:
                d3 = pl.array([1e10])

            if d1.min() <= d3.min():
                whatpoint = pl.where(d1 == d1.min())[0][0]
                whatx = x1[whatpoint]
                print "deleting detection %d @ " % whatpoint, whatx
                fit_1.set_data(pl.delete(x1, whatpoint),
                               pl.delete(y1, whatpoint))
                # delete the uncert error line too
                #                ds_1=abs(event.xdata-xs_1)
                #                k=pl.where(ds_1==ds_1.min())[0][0]
                k = whatpoint
                fits_1[k].remove()
                fits_1 = pl.delete(fits_1, k)
                xs_1 = pl.delete(xs_1, k)
            else:
                whatpoint = pl.where(d3 == d3.min())[0][0]
                whatx = x3[whatpoint]
                print "deleting UL %d @ " % whatpoint, whatx
                x3 = pl.delete(x3, whatpoint)
                y3 = pl.delete(y3, whatpoint)
                fit_3.set_data(x3, y3)

            if event.button == 3:  #R-click
                x3 = pl.append(x3, whatx)
                y3 = pl.append(y3, startpos[1])
                if fit_3 is None:
                    fit_3 = pl.plot(x3, y3, 'rv')[0]
                else:
                    fit_3.set_data(x3, y3)

            pl.draw()
#            print x3
#            print x1
#            print xs_1

        def unclick(event):
            if not event.inaxes: return
            global whatx, fit_1, fit_3, startpos, endpos, fits_1, xs_1, x1, y1, x3, y3
            endpos = event.xdata, event.ydata
            if event.button == 1:
                if fit_1:
                    x1, y1 = fit_1.get_data()
                    x1 = pl.append(x1, whatx)
                    y1 = pl.append(y1, 0.5 * (startpos[1] + endpos[1]))
                    fit_1.set_data(x1, y1)
                else:
                    fit_1 = pl.plot(whatx, 0.5 * (startpos[1] + endpos[1]),
                                    'r.')[0]
                    fits_1 = []
                # add this to the list of uncert lines plots
                fits_1 = pl.append(
                    fits_1,
                    pl.plot([whatx, whatx], [startpos[1], endpos[1]], 'r')[0])
                xs_1 = pl.append(xs_1, whatx)
#                print "xs_1 = ",xs_1
# XXX TODO also set the uncert somewhere
            pl.draw()
#            print x3
#            print x1
#            print xs_1

        cid0 = pl.connect('button_press_event', click)
        cid1 = pl.connect('button_release_event', unclick)

        print "edit fitter points and then press enter in the terminal"
        x = raw_input()

        pl.disconnect(cid0)
        pl.disconnect(cid1)

    if fit_3 is not None:
        x3, y3 = fit_3.get_data()
        print("upper limits are now:", x3, y3)

        for j in range(len(x3)):
            d = abs(f_wave - x3[j])
            z = pl.where(d == d.min())[0][0]
            f_f[z] = y3[j]
            f_fl[z] = 3
            f_df[z] = 0.999  # XXXX

    if fit_1 is not None:  # guard mirrors the fit_3 branch above
        x1, y1 = fit_1.get_data()
        print("detections are now:", x1, y1)

        for j in range(len(x1)):
            d = abs(f_wave - x1[j])
            z = pl.where(d == d.min())[0][0]
            f_f[z] = y1[j]
            f_fl[z] = 1
            f_df[z] = 0.1 * f_f[z]  # XXXX need real uncert from drawing!

    return f_f, f_df, f_fl
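A hedged toy usage sketch (array values illustrative only; assumes pylab imported as pl in this module, matching the function body):

# one apphot detection and one catalog upper limit near a 24um fitter band
a_wave = pl.array([23.7]); a_f = pl.array([1.2e-3])
a_df = pl.array([1e-4]);   a_fl = pl.array([1])
c_wave = pl.array([24.0]); c_f = pl.array([2.0e-3])
c_df = pl.array([2e-4]);   c_fl = pl.array([3])
f_wave = pl.array([24.0]); f_dwave = pl.array([2.0])  # fitter bands and widths
f_f, f_df, f_fl = photcombine(a_wave, a_f, a_df, a_fl,
                              c_wave, c_f, c_df, c_fl,
                              f_wave, f_dwave, edit=False)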