Example #1
    def test_user_series(self):
        s = pd.Series(range(1, 10), index=range(11, 20))
        us = UserSeries(s)
        tm.assert_series_equal(s, us)

        def assert_op(pobj, userobj, op):
            cls = type(userobj)
            correct = op(pobj)
            test = op(userobj)
            assert isinstance(test, cls)
            if isinstance(correct, pd.Series):
                tm.assert_series_equal(correct, test)
            if isinstance(correct, pd.DataFrame):
                tm.assert_frame_equal(correct, test)

        assert_op(s, us, lambda s: s.pct_change())
        assert_op(s, us, lambda s: s + 19)
        assert_op(s, us, lambda s: s / 19)
        assert_op(s, us, lambda s: np.log(s))
        assert_op(s, us, lambda s: np.log(s))
        assert_op(s, us, lambda s: np.diff(s))

        bools = us > 5
        tvals = np.repeat(1, len(us))
        fvals = np.repeat(0, len(us))
        wh = np.where(bools, tvals, fvals)
        assert wh.pobj is not None
        assert wh.dtype == int
        tm.assert_series_equal(wh, bools.astype(int))
Example #2
File: camera.py Project: cadik/opendr
    def compute_dr_wrt(self, wrt):

        if wrt not in (self.v, self.rt, self.t):
            return
        
        if wrt is self.t:
            if not hasattr(self, '_drt') or self._drt.shape[0] != self.v.r.size:                
                IS = np.arange(self.v.r.size)
                JS = IS % 3
                data = np.ones(len(IS))
                self._drt = sp.csc_matrix((data, (IS, JS)))
            return self._drt
        
        if wrt is self.rt:
            rot, rot_dr = cv2.Rodrigues(self.rt.r)
            rot_dr = rot_dr.reshape((3,3,3))
            dr = np.einsum('abc, zc -> zba', rot_dr, self.v.r).reshape((-1,3))
            return dr
        
        if wrt is self.v:
            rot = cv2.Rodrigues(self.rt.r)[0]
            
            IS = np.repeat(np.arange(self.v.r.size), 3)
            JS = np.repeat(np.arange(self.v.r.size).reshape((-1,3)), 3, axis=0)
            data = np.vstack([rot for i in range(self.v.r.size // 3)])
            result = sp.csc_matrix((data.ravel(), (IS.ravel(), JS.ravel())))
            return result
Example #3
    def grad_EVzxVzxT_by_hyper_exact(self, EVzxVzxT_list_this, Z, A, B, hyperno):

        P = Z.shape[0]
        R = Z.shape[1]
        N = A.shape[0]

        if hyperno != 0:
            return EVzxVzxT_list_this * 0

        alpha = self.length_scale * self.length_scale

        I = np.identity(R)
        S = np.diag(B[0, :] * B[0, :])
        Sinv = np.diag(1 / (B[0, :] * B[0, :]))
        C = I * alpha
        Cinv = I * (1 / alpha)
        CinvSinv = 2 * Cinv + Sinv
        CinvSinv_inv = np.diag(1 / CinvSinv.diagonal())

        dC = self.length_scale * I
        dCinv = -Cinv.dot(dC).dot(Cinv)
        dCinvSinv = 2 * dCinv
        dCinvSinv_inv = -CinvSinv_inv.dot(dCinvSinv).dot(CinvSinv_inv)

        S1 = (
            dCinv
            - dCinv.dot(CinvSinv_inv).dot(Cinv)
            - Cinv.dot(dCinvSinv_inv).dot(Cinv)
            - Cinv.dot(CinvSinv_inv).dot(dCinv)
        )
        S2 = -Sinv.dot(dCinvSinv_inv).dot(Sinv)
        S3 = Sinv.dot(dCinvSinv_inv).dot(Cinv) + Sinv.dot(CinvSinv_inv).dot(dCinv)
        S4 = dCinv.dot(CinvSinv_inv).dot(Cinv) + Cinv.dot(dCinvSinv_inv).dot(Cinv) + Cinv.dot(CinvSinv_inv).dot(dCinv)

        T1s = np.tile(Z.dot(S1).dot(Z.T).diagonal(), [P, 1])
        T1 = np.tile(T1s, [N, 1, 1])
        T2s = T1s.T
        T2 = np.tile(T2s, [N, 1, 1])
        T3 = np.tile(Z.dot(S4).dot(Z.T), [N, 1, 1])
        T4 = np.tile(A.dot(S2).dot(A.T).diagonal(), [P, 1]).T
        T4 = np.expand_dims(T4, axis=2)
        T4 = np.repeat(T4, P, axis=2)
        T5 = A.dot(S3).dot(Z.T)
        T5 = np.expand_dims(T5, axis=2)
        T5 = np.repeat(T5, P, axis=2)
        T6 = np.swapaxes(T5, 1, 2)

        SCinvI = 2 * Cinv.dot(S) + I
        SCinvI_inv = np.diag(1 / SCinvI.diagonal())
        (temp, logDetSCinvI) = np.linalg.slogdet(SCinvI)
        detSCinvI = np.exp(logDetSCinvI)
        dDetSCinvI = -0.5 * np.power(detSCinvI, -0.5) * SCinvI_inv.dot(2 * dCinv).dot(S).trace()

        expTerm = EVzxVzxT_list_this / np.power(detSCinvI, -0.5)

        res = EVzxVzxT_list_this * (-0.5 * T1 - 0.5 * T2 + T3 - 0.5 * T4 + T5 + T6) + dDetSCinvI * expTerm

        res = np.sum(res, axis=0)

        return res
Example #4
    def _h_arrows(self, length):
        """ length is in arrow width units """
        # It might be possible to streamline the code
        # and speed it up a bit by using complex (x,y)
        # instead of separate arrays; but any gain would be slight.
        minsh = self.minshaft * self.headlength
        N = len(length)
        length = length.reshape(N, 1)
        # This number is chosen based on when pixel values overflow in Agg
        # causing rendering errors
        # length = np.minimum(length, 2 ** 16)
        np.clip(length, 0, 2 ** 16, out=length)
        # x, y: normal horizontal arrow
        x = np.array([0, -self.headaxislength,
                      -self.headlength, 0],
                     np.float64)
        x = x + np.array([0, 1, 1, 1]) * length
        y = 0.5 * np.array([1, 1, self.headwidth, 0], np.float64)
        y = np.repeat(y[np.newaxis, :], N, axis=0)
        # x0, y0: arrow without shaft, for short vectors
        x0 = np.array([0, minsh - self.headaxislength,
                       minsh - self.headlength, minsh], np.float64)
        y0 = 0.5 * np.array([1, 1, self.headwidth, 0], np.float64)
        ii = [0, 1, 2, 3, 2, 1, 0, 0]
        X = x.take(ii, 1)
        Y = y.take(ii, 1)
        Y[:, 3:-1] *= -1
        X0 = x0.take(ii)
        Y0 = y0.take(ii)
        Y0[3:-1] *= -1
        shrink = length / minsh if minsh != 0. else 0.
        X0 = shrink * X0[np.newaxis, :]
        Y0 = shrink * Y0[np.newaxis, :]
        short = np.repeat(length < minsh, 8, axis=1)
        # Now select X0, Y0 if short, otherwise X, Y
        np.copyto(X, X0, where=short)
        np.copyto(Y, Y0, where=short)
        if self.pivot == 'middle':
            X -= 0.5 * X[:, 3, np.newaxis]
        elif self.pivot == 'tip':
            X = X - X[:, 3, np.newaxis]   # numpy bug? using -= does not
                                          # work here unless we multiply
                                          # by a float first, as with 'mid'.
        elif self.pivot != 'tail':
            raise ValueError(("Quiver.pivot must have value in {{'middle', "
                              "'tip', 'tail'}} not {0}").format(self.pivot))

        tooshort = length < self.minlength
        if tooshort.any():
            # Use a heptagonal dot:
            th = np.arange(0, 8, 1, np.float64) * (np.pi / 3.0)
            x1 = np.cos(th) * self.minlength * 0.5
            y1 = np.sin(th) * self.minlength * 0.5
            X1 = np.repeat(x1[np.newaxis, :], N, axis=0)
            Y1 = np.repeat(y1[np.newaxis, :], N, axis=0)
            tooshort = np.repeat(tooshort, 8, 1)
            np.copyto(X, X1, where=tooshort)
            np.copyto(Y, Y1, where=tooshort)
        # Mask handling is deferred to the caller, _make_verts.
        return X, Y
Example #5
    def grad_EVzxVzxT_by_c(self, EVzxVzxT_list_this, Z, A, B, C, Kpred, p, r):

        P = Z.shape[0]
        R = Z.shape[1]
        N = A.shape[0]

        ainv = 1 / (self.length_scale * self.length_scale)
        siginv = 1 / (B[0, 0] * B[0, 0])

        dA = np.zeros([N, R])
        dA[:, r] = Kpred[r][:, p]

        AAt = 2 * A[:, r] * dA[:, r]

        res1 = -0.5 * np.tile(AAt, [P, 1]).T * (siginv - siginv * (1 / (siginv + 2 * ainv)) * siginv)
        res1 = np.expand_dims(res1, axis=2)
        res1 = np.repeat(res1, P, axis=2)

        res2 = dA.dot(Z.T) * (ainv * (1 / (siginv + 2 * ainv)) * siginv)
        res2 = np.expand_dims(res2, axis=2)
        res2 = np.repeat(res2, P, axis=2)

        res3 = np.swapaxes(res2, 1, 2)

        res = EVzxVzxT_list_this * (res1 + res2 + res3)

        res = np.sum(res, axis=0)

        return res
Example #6
    def test_bprop(self):
        r = []

        context = Context()
        for i in range(self.N):
            a = self.get_random_array()
            a_gpu = Connector(GpuMatrix.from_npa(a, 'float'), bu_device_id=context)
            vpooling_block = MeanPoolingBlock(a_gpu, axis=0)
            voutput, dL_dvoutput = vpooling_block.output.register_usage(context, context)
            _dL_voutput = self.get_random_array((dL_dvoutput.nrows, dL_dvoutput.ncols))
            GpuMatrix.from_npa(_dL_voutput, 'float').copy_to(context, dL_dvoutput)

            hpooling_block = MeanPoolingBlock(a_gpu, axis=1)
            houtput, dL_dhoutput = hpooling_block.output.register_usage(context, context)
            _dL_houtput = self.get_random_array((dL_dhoutput.nrows, dL_dhoutput.ncols))
            GpuMatrix.from_npa(_dL_houtput, 'float').copy_to(context, dL_dhoutput)

            vpooling_block.fprop()
            vpooling_block.bprop()
            dL_dmatrix = vpooling_block.dL_dmatrix.to_host()
            r.append(np.allclose(dL_dmatrix,
                                 np.repeat(_dL_voutput/a.shape[0], a.shape[0], 0),
                                 atol=1e-6))

            hpooling_block.fprop()
            hpooling_block.bprop()
            hpooling_block.dL_dmatrix.to_host()
            dL_dmatrix = hpooling_block.dL_dmatrix.to_host()
            r.append(np.allclose(dL_dmatrix,
                                 np.repeat(_dL_houtput/a.shape[1], a.shape[1], 1),
                                 atol=1e-6))

        self.assertEqual(sum(r), 2 * self.N)
Example #7
def top_eigenvector(A,niter=1000,force_iteration=False):
    '''
    assuming the LEFT invariant subspace of A corresponding to the LEFT
    eigenvalue of largest modulus has geometric multiplicity of 1 (trivial
    Jordan block), returns the vector at the intersection of that eigenspace and
    the simplex

    A should probably be a ROW-stochastic matrix

    probably uses power iteration
    '''
    n = A.shape[0]
    np.seterr(invalid='raise',divide='raise')
    if n <= 25 and not force_iteration:
        x = np.repeat(1./n,n)
        x = np.linalg.matrix_power(A.T,niter).dot(x)
        x /= x.sum()
        return x
    else:
        x1 = np.repeat(1./n,n)
        x2 = x1.copy()
        for itr in range(niter):
            np.dot(A.T,x1,out=x2)
            x2 /= x2.sum()
            x1,x2 = x2,x1
            if np.linalg.norm(x1-x2) < 1e-8:
                break
        return x1
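A minimal usage sketch (an assumption of this listing, not part of the original project; it presumes numpy is imported as np and top_eigenvector above is in scope):

import numpy as np

A = np.array([[0.9, 0.05, 0.05],
              [0.1, 0.8,  0.1 ],
              [0.2, 0.2,  0.6 ]])   # small row-stochastic matrix
pi = top_eigenvector(A)             # stationary distribution on the simplex
print(pi, pi.sum())                 # entries are nonnegative and sum to 1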
Example #8
File: preprocess.py Project: eneyi/cs287
def valid_test_Ngram(filepath, words2index, N, test=False):
    results = []
    if test == False:
        with open(filepath) as f:
            i = 1
            for line in f:
                lsplit = line.split()
                if lsplit[0] == 'Q':
                    topredict = np.array([words2index[x] for x in lsplit[1:]])
                if lsplit[0] == 'C':
                    l = np.append(
                        np.repeat(words2index['<s>'], N-1), [words2index[x] for x in lsplit[1:-1]])
                    lastNgram = l[-N+1:]
                    results.append((lastNgram, topredict))
    else:
        with open(filepath) as f:
            i = 1
            for line in f:
                lsplit = line.split()
                if lsplit[0] == 'Q':
                    topredict = np.array([words2index[x] for x in lsplit[1:]])
                if lsplit[0] == 'C':
                    l = np.append(
                        np.repeat(words2index['<s>'], N-1), [words2index[x] for x in lsplit[1:-1]])
                    lastNgram = l[-N+1:]
                    results.append((lastNgram, topredict))
    return results
Example #9
File: anchors.py Project: JieZou1/PanelSeg
def generate_anchors(base_size=16, ratios=None, scales=None):
    """
    Generate anchor (reference) windows by enumerating aspect ratios X
    scales w.r.t. a reference window.
    """

    if ratios is None:
        ratios = np.array([0.5, 1, 2])

    if scales is None:
        scales = np.array([2 ** 0, 2 ** (1.0 / 3.0), 2 ** (2.0 / 3.0)])

    num_anchors = len(ratios) * len(scales)

    # initialize output anchors
    anchors = np.zeros((num_anchors, 4))

    # scale base_size
    anchors[:, 2:] = base_size * np.tile(scales, (2, len(ratios))).T

    # compute areas of anchors
    areas = anchors[:, 2] * anchors[:, 3]

    # correct for ratios
    anchors[:, 2] = np.sqrt(areas / np.repeat(ratios, len(scales)))
    anchors[:, 3] = anchors[:, 2] * np.repeat(ratios, len(scales))

    # transform from (x_ctr, y_ctr, w, h) -> (x1, y1, x2, y2)
    anchors[:, 0::2] -= np.tile(anchors[:, 2] * 0.5, (2, 1)).T
    anchors[:, 1::2] -= np.tile(anchors[:, 3] * 0.5, (2, 1)).T

    return anchors
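A short usage sketch (illustrative only; assumes numpy as np and the generate_anchors function above in scope):

anchors = generate_anchors(base_size=16)
print(anchors.shape)   # (9, 4): 3 ratios x 3 scales, each row an (x1, y1, x2, y2) box
print(anchors[0])      # one box, centred on the origin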
Example #10
File: test_util.py Project: Cadair/astropy
def test_rstrip_inplace():

    # Incorrect type
    s = np.array([1, 2, 3])
    with pytest.raises(TypeError) as exc:
        _rstrip_inplace(s)
    assert exc.value.args[0] == 'This function can only be used on string arrays'

    # Bytes array
    s = np.array(['a ', ' b', ' c c   '], dtype='S6')
    _rstrip_inplace(s)
    assert_equal(s, np.array(['a', ' b', ' c c'], dtype='S6'))

    # Unicode array
    s = np.array(['a ', ' b', ' c c   '], dtype='U6')
    _rstrip_inplace(s)
    assert_equal(s, np.array(['a', ' b', ' c c'], dtype='U6'))

    # 2-dimensional array
    s = np.array([['a ', ' b'], [' c c   ', ' a ']], dtype='S6')
    _rstrip_inplace(s)
    assert_equal(s, np.array([['a', ' b'], [' c c', ' a']], dtype='S6'))

    # 3-dimensional array
    s = np.repeat(' a a ', 24).reshape((2, 3, 4))
    _rstrip_inplace(s)
    assert_equal(s, ' a a')

    # 3-dimensional non-contiguous array
    s = np.repeat(' a a ', 1000).reshape((10, 10, 10))[:2, :3, :4]
    _rstrip_inplace(s)
    assert_equal(s, ' a a')
Example #11
def fill_between_steps(x, y1, y2=0, h_align='mid'):
    ''' Fills a hole in matplotlib: fill_between for step plots.
    Parameters :
    ------------
    x : array-like
        Array/vector of index values. These are assumed to be equally-spaced.
        If not, the result will probably look weird...
    y1 : array-like
        Array/vector of values to be filled under.
    y2 : array-like
        Array/vector of bottom values for filled area. Default is 0.
    '''
    # First, duplicate the x values
    xx = np.repeat(x,2)
    # Now: the average x binwidth
    xstep = np.repeat((x[1:] - x[:-1]), 2)
    xstep = np.concatenate(([xstep[0]], xstep, [xstep[-1]]))
    # Now: add one step at end of row.
    #~ xx = np.append(xx, xx.max() + xstep[-1])

    # Make it possible to change step alignment.
    if h_align == 'mid':
        xx -= xstep / 2.
    elif h_align == 'right':
        xx -= xstep

    # Also, duplicate each y coordinate in both arrays
    y1 = np.repeat(y1,2)#[:-1]
    if type(y2) == np.ndarray:
        y2 = np.repeat(y2,2)#[:-1]

    return xx, y1, y2
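A hedged usage sketch (assumes numpy as np and matplotlib.pyplot as plt are imported; the data is made up):

x = np.linspace(0., 9., 10)
y = np.sqrt(x)
xx, yy1, yy2 = fill_between_steps(x, y)
plt.fill_between(xx, yy1, yy2)
plt.show()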
Example #12
def trim_image(imData, bgValue=255):

    numRows, numCols = imData.shape

    tempCols = [np.all(imData[:, col] == np.repeat(bgValue, numRows)) for col in range(numCols)]
    tempRows = [np.all(imData[row, :] == np.repeat(bgValue, numCols)) for row in range(numRows)]

    if False not in tempRows or False not in tempCols:
        print('The entire image is blank with background %i. Not trimming...' % bgValue)
        return imData

    firstCol = tempCols.index(False)
    firstRow = tempRows.index(False)
    lastCol = -(1 + tempCols[::-1].index(False))
    lastRow = -(1 + tempRows[::-1].index(False))

    if lastRow == -1:
        if lastCol == -1:
            return imData[firstRow:, firstCol:]
        else:
            return imData[firstRow:, firstCol:(lastCol+1)]
    else:
        if lastCol == -1:
            return imData[firstRow:(lastRow+1), firstCol:]
        else:
            return imData[firstRow:(lastRow+1), firstCol:(lastCol+1)]
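A small usage sketch (assumes numpy as np and trim_image above in scope; the image is synthetic):

im = np.full((5, 6), 255)
im[1:4, 2:5] = 0                 # dark content surrounded by background
print(trim_image(im).shape)      # (3, 3) once the blank border is removed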
Example #13
    def updateImage(self, contingencies, rect, sup_valmax):
        """
        Makes an image of size rect from contingencies. The image is used to update a rect inside the heatmap.
        """
        interval_width = int(rect.width() / contingencies.shape[2])
        interval_height = int(rect.height() / contingencies.shape[1])

        contingencies -= np.min(contingencies)
        contingencies /= np.max(contingencies)
        contingencies = np.nan_to_num(contingencies)
        contingencies_argmax = contingencies.argmax(axis=0)
        rows, cols = np.indices(contingencies_argmax.shape)
        contingencies_valmax = contingencies[contingencies_argmax, rows, cols]

        colors_argmax = np.repeat(np.repeat(contingencies_argmax, interval_width, axis=0),
                                  interval_height, axis=1)
        colors_valmax = np.repeat(np.repeat(contingencies_valmax, interval_width, axis=0),
                                  interval_height, axis=1)

        colors = self.color_array[colors_argmax] + ((255-self.color_array[colors_argmax]) * (1-colors_valmax[:, :, None]))
        if sup_valmax:
            colors += ((255-colors) * (1-sup_valmax))

        if rect.width() == self.image_width and rect.height() == self.image_height:
            self.hmi = Heatmap(colors)
            self.plot.addItem(self.hmi)
            self.hmi.setRect(QtCore.QRectF(self.X_min, self.Y_min, self.X_max-self.X_min, self.Y_max-self.Y_min))
        else:
            self.hmi.updateImage_(colors, rect)

        return contingencies_valmax
Example #14
        def _mean4(data, offset=(0, 0), block_id=None):
            rows, cols = data.shape
            rows2, cols2 = data.shape
            pad = []
            # we assume that the chunks except the first ones are aligned
            if block_id[0] == 0:
                row_offset = offset[0] % 2
            else:
                row_offset = 0
            if block_id[1] == 0:
                col_offset = offset[1] % 2
            else:
                col_offset = 0
            row_after = (row_offset + rows) % 2
            col_after = (col_offset + cols) % 2
            pad = ((row_offset, row_after), (col_offset, col_after))

            rows2 = rows + row_offset + row_after
            cols2 = cols + col_offset + col_after

            av_data = np.pad(data, pad, 'edge')
            new_shape = (int(rows2 / 2.), 2, int(cols2 / 2.), 2)
            data_mean = np.ma.mean(av_data.reshape(new_shape), axis=(1, 3))
            data_mean = np.repeat(np.repeat(data_mean, 2, axis=0), 2, axis=1)
            data_mean = data_mean[row_offset:row_offset + rows,
                                  col_offset:col_offset + cols]
            return data_mean
Example #15
def metric_vs_num_baggers(classifier, attack, percent_poisoning,
                         no_attack_base_error, 
                         no_attack_bag_errors, 
                         attack_base_error,
                         attack_bag_errors,
                         N,
                         metric,
                         ):    
    no_attack_base_errors = np.repeat(no_attack_base_error, N)
    attack_base_errors = np.repeat(attack_base_error, N)
    
    X = np.linspace(1, N, num=N, endpoint=True)
    
    title = get_attack_name(attack, percent_poisoning)
    
    plt.title(title, fontsize=18)
    
    plt.xlabel('Number of Baggers')
    plt.ylabel(metric)
    
    no_attack_base = plt.plot(X, no_attack_base_errors, 'b--', 
                              label=get_classifier_name(classifier))
    no_attack_bag = plt.plot(X, no_attack_bag_errors, 'b',
                             label='Bagged')
    attack_base = plt.plot(X, attack_base_errors, 'r--',
                           label=get_classifier_name(classifier, percent_poisoning))
    attack_bag = plt.plot(X, attack_bag_errors, 'r',
                          label='Bagged (poisoned)')
    
    #legend = plt.legend(loc='upper right', shadow=True, prop={'size':12})
    
    return plt
Example #16
def MicrophoneResponseSpectrum(impulse_response, sampling_rate_hz):
  (signal_length, num_channels) = np.shape(impulse_response)
  if signal_length <= 0:
    raise ValueError('signal length must be positive.')

  # Constructs the delay signal
  impulse_data = np.zeros((signal_length, 1))
  (sample_index, channel_index) = np.unravel_index(
      impulse_response.argmax(), impulse_response.shape)
  impulse_data[sample_index, 0] = 1.0
  print(sample_index)

  impulse_data = np.repeat(impulse_data, num_channels, axis=1)
  window_signal = np.repeat(
      np.expand_dims(
          scipy.signal.hamming(signal_length), axis=1), num_channels, axis=1)

  fft_size = int(GetMinFftSize(signal_length))
  delay_spectrum = np.fft.fft(
      impulse_data * window_signal, fft_size, axis=0)
  spectrum = np.fft.fft(
      impulse_response * window_signal, fft_size, axis=0)

  # Compensates the delay component.
  spectrum = spectrum / delay_spectrum
  lower_half_spectrum = spectrum[0: int(np.floor(fft_size / 2) + 1), :]
  freq_data = np.linspace(0.0, sampling_rate_hz / 2.0, len(lower_half_spectrum))

  return (freq_data, lower_half_spectrum)
Example #17
def setup_xz(nx=128,nz=128,edge=None):
    if edge is None:
        dx = (2*np.pi)/(nx)
        x = np.arange(-1*np.pi,np.pi,dx)
        #print x
        x = np.repeat(x,nz)
        x = x.reshape(nx,nz)
    else:
        #x_b = edge #in rads, nz long
        x =[]   
        #edge  = -edge
        #edge[1:10] = np.pi/2
        for i,xmax in enumerate(edge):
            #xmax = -2.5
            #xmax = np.min(edge)
            x.append(np.linspace(xmax-np.pi/10.0,xmax+np.pi/20.0,nx))
            #x.append(np.linspace(-np.pi,xmax-.4,nx))
        x = np.array(x)
        print(x.shape)
        x = np.transpose(x)
        
    dz = (2*np.pi)/(nz)
    z = np.arange(0,2*np.pi,dz)
    z = np.repeat(z,nx)
    z = np.transpose(z.reshape(nz,nx))


    return x,z
Example #18
    def test_repeatOp(self):
        for ndim in range(3):
            x = T.TensorType(config.floatX, [False] * ndim)()
            a = np.random.random((10, ) * ndim).astype(config.floatX)

            for axis in self._possible_axis(ndim):
                for dtype in tensor.discrete_dtypes:
                    r_var = T.scalar(dtype=dtype)
                    r = np.asarray(3, dtype=dtype)
                    if dtype in self.numpy_unsupported_dtypes:
                        self.assertRaises(TypeError,
                                repeat, x, r_var, axis=axis)
                    else:
                        f = theano.function([x, r_var],
                                            repeat(x, r_var, axis=axis))
                        assert np.allclose(np.repeat(a, r, axis=axis),
                                           f(a, r))

                        r_var = T.vector(dtype=dtype)
                        if axis is None:
                            r = np.random.random_integers(
                                    5, size=a.size).astype(dtype)
                        else:
                            r = np.random.random_integers(
                                    5, size=(10,)).astype(dtype)

                        f = theano.function([x, r_var],
                                            repeat(x, r_var, axis=axis))
                        assert np.allclose(np.repeat(a, r, axis=axis),
                                           f(a, r))
Example #19
def prepare_check_background(grating_vals):
    # create random checks for lower screen half
    lum_range = np.abs(np.diff(grating_vals))
    checks = np.tile(np.linspace(.5 - lum_range, .5 + lum_range, 13), (13, 1))
    for row in checks:
        np.random.shuffle(row)
    check_bg = np.repeat(np.repeat(checks, 18, 0), 18, 1)[3:-3, 3:-3]
    return check_bg, checks.flatten()
Example #20
    def concatenate_runs(self):
        
        #go trough all runs and concatenates the data into single arrays
        for run in self.runs:
            
            #those are the peaks of the runs
            self.S_photocurrent_max         = np.append(self.S_photocurrent_max,run.photocurrent_max)
            self.S_pulsedpower_max         = np.append(self.S_pulsedpower_max,run.pulsedpower_max)
            
            #those are the integrals of the runs
            self.S_photocurrent_integral    = np.append(self.S_photocurrent_integral,run.photocurrent_integral)
            self.S_pulsedpower_integral        = np.append(self.S_pulsedpower_integral,run.pulsedpower_integral)

            #this is the  average per sun
            self.S_photocurrent_max_avg     = np.append(self.S_photocurrent_max_avg,np.repeat(np.average(run.photocurrent_max),len(run.photocurrent_max)))
            self.S_pulsedpower_max_avg         = np.append(self.S_pulsedpower_max_avg,np.repeat(np.average(run.pulsedpower_max),len(run.pulsedpower_max)))
            
            #this is the CW power
            self.S_cw_power = np.append(self.S_cw_power,np.repeat(run.cw_power,len(run.pulsedpower_max)))
            #self.photocurrent_max = [self.photocurrent_max,run.photocurrent_max]
            #self.pulsedpower_max  = [self.pulsedpower_max,run.pulsedpower_max]
            
            #don't forget to delete the [0] in the first entry.
            self.S_photocurrent_max         = np.delete(self.S_photocurrent_max,0,0)
            self.S_pulsedpower_max         = np.delete(self.S_pulsedpower_max,0,0)
            self.S_photocurrent_integral    = np.delete(self.S_photocurrent_integral,0,0)
            self.S_pulsedpower_integral        = np.delete(self.S_pulsedpower_integral,0,0)
            self.S_photocurrent_max_avg     = np.delete(self.S_photocurrent_max_avg,0,0)
            self.S_pulsedpower_max_avg        = np.delete(self.S_pulsedpower_max_avg,0,0)
            self.S_cw_power                 = np.delete(self.S_cw_power,0,0)
Example #21
    def set_dofs(self, fun=0.0, region=None, dpn=None, warn=None):
        """
        Set the values of DOFs in a given region using a function of space
        coordinates or value `fun`.
        """
        if region is None:
            region = self.region

        if dpn is None:
            dpn = self.shape[0]

        aux = self.get_dofs_in_region(region, clean=True, warn=warn)
        nods = nm.unique(nm.hstack(aux))

        if callable(fun):
            vals = fun(self.get_coor(nods))

        elif nm.isscalar(fun):
            vals = nm.repeat([fun], nods.shape[0] * dpn)

        elif isinstance(fun, nm.ndarray):
            assert_(len(fun) == dpn)
            vals = nm.repeat(fun, nods.shape[0])

        else:
            raise ValueError('unknown function/value type! (%s)' % type(fun))

        return nods, vals
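For illustration, the two repeat patterns used above on toy values (assumes numpy is imported as nm, matching the alias in the snippet):

import numpy as nm

print(nm.repeat([0.5], 3 * 2))              # scalar value spread over 3 nodes x 2 DOFs
print(nm.repeat(nm.array([1.0, 2.0]), 3))   # per-DOF values, each repeated for every node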
Example #22
File: test_idl.py Project: hitej/meta-core
 def test_arrays(self):
     s = readsav(path.join(DATA_PATH, 'struct_pointer_arrays.sav'), verbose=False)
     assert_array_identical(s.arrays.g[0], np.repeat(np.float32(4.), 2).astype(np.object_))
     assert_array_identical(s.arrays.h[0], np.repeat(np.float32(4.), 3).astype(np.object_))
     assert_(np.all(vect_id(s.arrays.g[0]) == id(s.arrays.g[0][0])))
     assert_(np.all(vect_id(s.arrays.h[0]) == id(s.arrays.h[0][0])))
     assert_(id(s.arrays.g[0][0]) == id(s.arrays.h[0][0]))
Example #23
 def _get_sorted_theta(self):
     '''sorts the integral points by bond in descending order'''
     depsf_arr = np.array([])
     V_f_arr = np.array([])
     E_f_arr = np.array([])
     xi_arr = np.array([])
     stat_weights_arr = np.array([])
     nu_r_arr = np.array([])
     r_arr = np.array([])
     for reinf in self.cont_reinf_lst:
         n_int = len(np.hstack((np.array([]), reinf.depsf_arr)))
         depsf_arr = np.hstack((depsf_arr, reinf.depsf_arr))
         V_f_arr = np.hstack((V_f_arr, np.repeat(reinf.V_f, n_int)))
         E_f_arr = np.hstack((E_f_arr, np.repeat(reinf.E_f, n_int)))
         xi_arr = np.hstack((xi_arr, np.repeat(reinf.xi, n_int)))
         stat_weights_arr = np.hstack((stat_weights_arr, reinf.stat_weights))
         nu_r_arr = np.hstack((nu_r_arr, reinf.nu_r))
         r_arr = np.hstack((r_arr, reinf.r_arr))
     argsort = np.argsort(depsf_arr)[::-1]
     # sorting the masks for the evaluation of F
     idxs = np.array([])
     for i, reinf in enumerate(self.cont_reinf_lst):
         idxs = np.hstack((idxs, i * np.ones_like(reinf.depsf_arr)))
     masks = []
     for i, reinf in enumerate(self.cont_reinf_lst):
         masks.append((idxs == i)[argsort])
     max_depsf = [np.max(reinf.depsf_arr) for reinf in self.cont_reinf_lst]
     masks = [masks[i] for i in np.argsort(max_depsf)[::-1]]
     return depsf_arr[argsort], V_f_arr[argsort], E_f_arr[argsort], \
             xi_arr[argsort], stat_weights_arr[argsort], \
             nu_r_arr[argsort], masks, r_arr[argsort]
Example #24
def test_linearsvc_fit_sampleweight():
    # check correct result when sample_weight is 1
    n_samples = len(X)
    unit_weight = np.ones(n_samples)
    clf = svm.LinearSVC(random_state=0).fit(X, Y)
    clf_unitweight = svm.LinearSVC(random_state=0).\
        fit(X, Y, sample_weight=unit_weight)

    # check if same as sample_weight=None
    assert_array_equal(clf_unitweight.predict(T), clf.predict(T))
    assert_allclose(clf.coef_, clf_unitweight.coef_, 1, 0.0001)

    # check that fit(X)  = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
    # X = X1 repeated n1 times, X2 repeated n2 times and so forth

    random_state = check_random_state(0)
    random_weight = random_state.randint(0, 10, n_samples)
    lsvc_unflat = svm.LinearSVC(random_state=0).\
        fit(X, Y, sample_weight=random_weight)
    pred1 = lsvc_unflat.predict(T)

    X_flat = np.repeat(X, random_weight, axis=0)
    y_flat = np.repeat(Y, random_weight, axis=0)
    lsvc_flat = svm.LinearSVC(random_state=0).fit(X_flat, y_flat)
    pred2 = lsvc_flat.predict(T)

    assert_array_equal(pred1, pred2)
    assert_allclose(lsvc_unflat.coef_, lsvc_flat.coef_, 1, 0.0001)
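For reference, a tiny sketch of the row expansion used above (toy arrays, assumes numpy as np; not part of the scikit-learn test suite):

Xt = np.array([[0., 0.],
               [1., 1.]])
wt = np.array([2, 3])
print(np.repeat(Xt, wt, axis=0))   # first row appears twice, second row three times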
Example #25
def make_dataset1():
    '''Make a dataset of single samples with labels from which distribution they come from'''
    # now lets make some samples 
    lns      = min_max_scale(lognormal(size=bsize)) #log normal
    powers   = min_max_scale(power(0.1,size=bsize)) #power law
    norms    = min_max_scale(normal(size=bsize))    #normal
    uniforms = min_max_scale(uniform(size=bsize))    #uniform
    # add our data together
    data = np.concatenate((lns,powers,norms,uniforms))
    
    # concatenate our labels
    labels = np.concatenate((
        (np.repeat(LOGNORMAL,bsize)),
        (np.repeat(POWER,bsize)),
        (np.repeat(NORM,bsize)),
        (np.repeat(UNIFORM,bsize))))
    tsize = len(labels)
    
    # make sure dimensionality and types are right
    data = data.reshape((len(data),1))
    data = data.astype(np.float32)
    labels = labels.astype(np.int32)
    labels = labels.reshape((len(data),))
    
    return data, labels, tsize
Example #26
	def generate_Smolyak_points(self):
		# Merge all different approximation points
		all_points = np.array(0)
		all_complex = np.array(0)
		for i in xrange(1, self.mu + 1):
			all_points = np.append(all_points, self.extreme[i, :self.len[i]])
			all_complex = np.append(all_complex, np.repeat(i, self.len[i]))
		one_dim_len = all_complex.size
		# print all_points, all_complex
		res = np.array([all_points])
		cur_len = one_dim_len
		sum_weight = all_complex
		for i in xrange(1, self.d):
			res = np.repeat(res, one_dim_len, axis = 1)
			to_add = np.repeat(all_points[np.newaxis, :], cur_len, 0).reshape(-1)
			# print res.shape, to_add.shape
			res = np.vstack((res, to_add))
			sum_weight = np.repeat(sum_weight, one_dim_len)
			# print cur_len, all_complex
			sum_weight = sum_weight + np.repeat(all_complex[np.newaxis, :], cur_len, 0).reshape(-1)
			tokeep = (sum_weight <= self.mu)
			idx = np.arange(cur_len * one_dim_len)[tokeep]
			res = res[:, sum_weight <= self.mu]
			sum_weight = sum_weight[tokeep]
			cur_len = sum_weight.size
			# print (cur_len == res.shape[1])
			# print res.T, sum_weight
		self.grid = res.T
Example #27
def test_linearsvr_fit_sampleweight():
    # check correct result when sample_weight is 1
    # check that SVR(kernel='linear') and LinearSVC() give
    # comparable results
    diabetes = datasets.load_diabetes()
    n_samples = len(diabetes.target)
    unit_weight = np.ones(n_samples)
    lsvr = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target,
                                    sample_weight=unit_weight)
    score1 = lsvr.score(diabetes.data, diabetes.target)

    lsvr_no_weight = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target)
    score2 = lsvr_no_weight.score(diabetes.data, diabetes.target)

    assert_allclose(np.linalg.norm(lsvr.coef_),
                    np.linalg.norm(lsvr_no_weight.coef_), 1, 0.0001)
    assert_almost_equal(score1, score2, 2)

    # check that fit(X)  = fit([X1, X2, X3],sample_weight = [n1, n2, n3]) where
    # X = X1 repeated n1 times, X2 repeated n2 times and so forth
    random_state = check_random_state(0)
    random_weight = random_state.randint(0, 10, n_samples)
    lsvr_unflat = svm.LinearSVR(C=1e3).fit(diabetes.data, diabetes.target,
                                           sample_weight=random_weight)
    score3 = lsvr_unflat.score(diabetes.data, diabetes.target,
                               sample_weight=random_weight)

    X_flat = np.repeat(diabetes.data, random_weight, axis=0)
    y_flat = np.repeat(diabetes.target, random_weight, axis=0)
    lsvr_flat = svm.LinearSVR(C=1e3).fit(X_flat, y_flat)
    score4 = lsvr_flat.score(X_flat, y_flat)

    assert_almost_equal(score3, score4, 2)
Example #28
File: main.py Project: rcaze/15_02BeCaSc
def spe_sen(target, actual):
    """Compute the (specificity,sensitivity) couple and the Matthews correlation
    coefficient for a desired Boolean function called ftar for a neuron
    implementing the Boolean function f.

    Parameters
    ----------
    target : array Bool
        actions expected
    actual : array Bool
        actions taken

    Returns
    -------
    spe : float between 0 and 1
        specificity of the response
    sen : float between 0 and 1
        sensitivity of the response
    """
    # Use the binary of the vector to see the difference between actual and
    # target
    tp = np.array(target)*2 - actual
    TN = len(np.repeat(tp, tp == 0))
    FN = len(np.repeat(tp, tp == 2))
    TP = len(np.repeat(tp, tp == 1))
    FP = len(np.repeat(tp, tp == -1))

    spe = float(TN)/(TN+FP)
    sen = float(TP)/(TP+FN)

    return spe, sen
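A minimal usage sketch (assumes numpy as np and spe_sen above in scope; toy vectors):

target = np.array([True, True, False, False])
actual = np.array([True, False, False, True])
print(spe_sen(target, actual))   # (0.5, 0.5): one TN of two negatives, one TP of two positives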
Example #29
def exp_dead_new(file_num, name_file, imsz, wcs, flat_list, foc_list, asp_solution, dead, cut, flat_idx, step, out_path, return_dict):
    print(imsz)
    count = np.zeros(imsz)

    x_lim = imsz[0]
    y_lim = imsz[1]

    length = flat_list[0].shape[0]
    half_len = length/2.
    print(half_len)
    l = imsz[0]//10
    start = foc_list[0,1]-half_len
    print(foc_list.shape)
    print(start.shape)

    ox = np.repeat(np.arange(l)+start,length+1000)
    oy = np.tile(np.arange(length+1000)+foc_list[0,0]-half_len-500,l)
    omask = (ox>=0) & (ox<imsz[0]) & (oy>=0) & (oy<imsz[1])
    ox = ox[omask]
    oy = oy[omask]
    gl,gb = wcs.all_pix2world(oy,ox,0)
    c = SkyCoord(gl*u.degree, gb*u.degree, frame='galactic')
    rd = c.transform_to(FK5)
    for i in range(asp_solution.shape[0]):
        hrflat = flat_list[flat_idx[i]]
        foc = foc_list[i,:]#wcs.sip_pix2foc(wcs.wcs_world2pix(coo,1),1)
        if (foc[1]+half_len)>=(start+l):
            print('update')
            start = foc[1]-half_len
            ox = np.repeat(np.arange(l)+start,length+1000)
            oy = np.tile(np.arange(length+1000)+foc[0]-half_len-500,l)
            omask = (ox>=0) & (ox<imsz[0]) & (oy>=0) & (oy<imsz[1])
            if np.sum(omask)==0:
                break
            ox = ox[omask]
            oy = oy[omask]
            gl,gb = wcs.all_pix2world(oy,ox,0)
            c = SkyCoord(gl*u.degree, gb*u.degree, frame='galactic')
            rd = c.transform_to(FK5)
        fmask = (ox>=(foc[1]-length/2)) & (ox<(foc[1]+length/2)) & (oy>=(foc[0]-length/2)) & (oy<(foc[0]+length/2))
        if np.sum(fmask)==0:
            continue
        x = ox[fmask]
        y = oy[fmask]
        xi, eta = gn.gnomfwd_simple(rd.ra.deg[fmask], rd.dec.deg[fmask], 
                                        asp_solution[i,1], asp_solution[i,2], -asp_solution[i,3],1/36000.,0.)
        px = ((xi/36000.)/(1.25/2.)*(1.25/(800* 0.001666))+1.)/2.*length
        py = ((eta/36000.)/(1.25/2.)*(1.25/(800* 0.001666))+1.)/2.*length
        pmask = (px>=0) & (px<length) & (py>=0) & (py<length)
        if np.sum(pmask)==0:
            continue
        count[x[pmask].astype(int),y[pmask].astype(int)] += \
            hrflat[px[pmask].astype(int),py[pmask].astype(int)]*step*(1-dead[i])*cut[i]
        if i%100==0:
            with open('/scratch/dw1519/galex/fits/scan_map/%s_gal_sec_exp_tmp%d.dat'%(name_file, file_num),'w') as f:
                f.write('%d'%i)
            print(i)
    print('%d done'%file_num)
    #return_dict[file_num] = count
    np.save('%s/%s_gal_sec_exp_tmp%d.npy'%(out_path, name_file, file_num), count)
Example #30
File: test_idl.py Project: hitej/meta-core
    def test_arrays_replicated_3d(self):
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore',
                    message="warning: multi-dimensional structures")
            s = readsav(path.join(DATA_PATH,
                                  'struct_pointer_arrays_replicated_3d.sav'),
                        verbose=False)

        # Check column types
        assert_(s.arrays_rep.g.dtype.type is np.object_)
        assert_(s.arrays_rep.h.dtype.type is np.object_)

        # Check column shapes
        assert_equal(s.arrays_rep.g.shape, (4, 3, 2))
        assert_equal(s.arrays_rep.h.shape, (4, 3, 2))

        # Check values
        for i in range(4):
            for j in range(3):
                for k in range(2):
                    assert_array_identical(s.arrays_rep.g[i, j, k],
                            np.repeat(np.float32(4.), 2).astype(np.object_))
                    assert_array_identical(s.arrays_rep.h[i, j, k],
                            np.repeat(np.float32(4.), 3).astype(np.object_))
                    assert_(np.all(vect_id(s.arrays_rep.g[i, j, k]) == id(s.arrays_rep.g[0, 0, 0][0])))
                    assert_(np.all(vect_id(s.arrays_rep.h[i, j, k]) == id(s.arrays_rep.h[0, 0, 0][0])))
Example #31
    return hist

img = cv2.imread('digits.png',0)

cells = [np.hsplit(row,100) for row in np.vsplit(img,50)]

# First half is trainData, remaining is testData
train_cells = [ i[:50] for i in cells ]
test_cells = [ i[50:] for i in cells]

######     Now training      ########################

deskewed = [map(deskew,row) for row in train_cells]
hogdata = [map(hog,row) for row in deskewed]
trainData = np.float32(hogdata).reshape(-1,64)
responses = np.float32(np.repeat(np.arange(10),250)[:,np.newaxis])

svm = cv2.SVM()
svm.train(trainData,responses, params=svm_params)
svm.save('svm_data.dat')

######     Now testing      ########################

deskewed = [map(deskew,row) for row in test_cells]
hogdata = [map(hog,row) for row in deskewed]
testData = np.float32(hogdata).reshape(-1,bin_n*4)
result = svm.predict_all(testData)

#######   Check Accuracy   ########################
mask = result==responses
correct = np.count_nonzero(mask)
Example #32
# plt.plot(time, intensity, '-')
# plt.show()

# np.savetxt("hene.txt", intensity, header="X in seconds")

xMax = +100
xMin = -100
N = 1024

dx = (xMax - xMin) / N
xs = np.linspace(xMin, xMax, N)

fMax = 1 / dx / 2
fs = np.linspace(0, fMax, N)
print(fs)
print(fMax)

f1 = 1 / 0.4
f2 = 1 / 0.8
fm = np.repeat(1, N) * ((f1 + f2) / 2)

amplitude = np.exp(-(fs - fm)**2 / 0.2)
phase = np.random.uniform(size=N) * 6.28
spectrum = amplitude * np.exp(I * phase)
spectrum[0] = 2 * N

intensity = irfft(spectrum)

plt.plot(amplitude, '-')
plt.show()
Example #33
    def extract_particles(self, segmentation):
        """
        Saves particle centers into an output .star file, after dismissing regions
        that are too big to contain a particle.
        
        Args:
            segmentation: Segmentation of the micrograph into noise and particle projections.
        """
        segmentation = segmentation[self.query_size // 2 - 1:-self.query_size // 2,
                                    self.query_size // 2 - 1:-self.query_size // 2]
        labeled_segments, _ = ndimage.label(segmentation, np.ones((3, 3)))
        values, repeats = np.unique(labeled_segments, return_counts=True)

        values_to_remove = np.where(repeats > self.max_size ** 2)
        values = np.take(values, values_to_remove)
        values = np.reshape(values, (1, 1, np.prod(values.shape)), 'F')

        labeled_segments = np.reshape(labeled_segments, (labeled_segments.shape[0],
                                                         labeled_segments.shape[1], 1), 'F')
        matrix1 = np.repeat(labeled_segments, values.shape[2], 2)
        matrix2 = np.repeat(values, matrix1.shape[0], 0)
        matrix2 = np.repeat(matrix2, matrix1.shape[1], 1)

        matrix3 = np.equal(matrix1, matrix2)
        matrix4 = np.sum(matrix3, 2)

        segmentation[np.where(matrix4 == 1)] = 0
        labeled_segments, _ = ndimage.label(segmentation, np.ones((3, 3)))

        max_val = np.amax(np.reshape(labeled_segments, (np.prod(labeled_segments.shape))))
        center = center_of_mass(segmentation, labeled_segments, np.arange(1, max_val))
        center = np.rint(center)

        img = np.zeros((segmentation.shape[0], segmentation.shape[1]))
        img[center[:, 0].astype(int), center[:, 1].astype(int)] = 1
        y, x = np.ogrid[-self.moa:self.moa+1, -self.moa:self.moa+1]
        element = x*x+y*y <= self.moa * self.moa
        img = binary_dilation(img, structure=element)
        labeled_img, _ = ndimage.label(img, np.ones((3, 3)))
        values, repeats = np.unique(labeled_img, return_counts=True)
        y = np.where(repeats == np.count_nonzero(element))
        y = np.array(y)
        y = y.astype(int)
        y = np.reshape(y, (np.prod(y.shape)), 'F')
        y -= 1
        center = center[y, :]

        center = center + (self.query_size // 2 - 1) * np.ones(center.shape)
        center = center + (self.query_size // 2 - 1) * np.ones(center.shape)
        center = center + np.ones(center.shape)
        center = 2 * center
        center = center + 99 * np.ones(center.shape)

        # swap columns to align with Relion
        col_2 = center[:, 1].copy()
        center[:, 1] = center[:, 0]
        center[:, 0] = col_2[:]

        basename = os.path.basename(self.filenames)
        name_str, ext = os.path.splitext(basename)

        applepick_path = os.path.join(self.output_directory, "{}_applepick.star".format(name_str))
        with open(applepick_path, "w") as f:
            np.savetxt(f, ["data_root\n\nloop_\n_rlnCoordinateX #1\n_rlnCoordinateY #2"], fmt='%s')
            np.savetxt(f, center, fmt='%d %d')
            
        return center
Example #34
def _make_concat_multiindex(indexes,
                            keys,
                            levels=None,
                            names=None) -> MultiIndex:

    if (levels is None
            and isinstance(keys[0], tuple)) or (levels is not None
                                                and len(levels) > 1):
        zipped = list(zip(*keys))
        if names is None:
            names = [None] * len(zipped)

        if levels is None:
            _, levels = factorize_from_iterables(zipped)
        else:
            levels = [ensure_index(x) for x in levels]
    else:
        zipped = [keys]
        if names is None:
            names = [None]

        if levels is None:
            levels = [ensure_index(keys)]
        else:
            levels = [ensure_index(x) for x in levels]

    if not all_indexes_same(indexes):
        codes_list = []

        # things are potentially different sizes, so compute the exact codes
        # for each level and pass those to MultiIndex.from_arrays

        for hlevel, level in zip(zipped, levels):
            to_concat = []
            for key, index in zip(hlevel, indexes):
                # Find matching codes, include matching nan values as equal.
                mask = (isna(level) & isna(key)) | (level == key)
                if not mask.any():
                    raise ValueError(f"Key {key} not in level {level}")
                i = np.nonzero(mask)[0][0]

                to_concat.append(np.repeat(i, len(index)))
            codes_list.append(np.concatenate(to_concat))

        concat_index = _concat_indexes(indexes)

        # these go at the end
        if isinstance(concat_index, MultiIndex):
            levels.extend(concat_index.levels)
            codes_list.extend(concat_index.codes)
        else:
            codes, categories = factorize_from_iterable(concat_index)
            levels.append(categories)
            codes_list.append(codes)

        if len(names) == len(levels):
            names = list(names)
        else:
            # make sure that all of the passed indices have the same nlevels
            if not len({idx.nlevels for idx in indexes}) == 1:
                raise AssertionError(
                    "Cannot concat indices that do not have the same number of levels"
                )

            # also copies
            names = list(names) + list(get_unanimous_names(*indexes))

        return MultiIndex(levels=levels,
                          codes=codes_list,
                          names=names,
                          verify_integrity=False)

    new_index = indexes[0]
    n = len(new_index)
    kpieces = len(indexes)

    # also copies
    new_names = list(names)
    new_levels = list(levels)

    # construct codes
    new_codes = []

    # do something a bit more speedy

    for hlevel, level in zip(zipped, levels):
        hlevel = ensure_index(hlevel)
        mapped = level.get_indexer(hlevel)

        mask = mapped == -1
        if mask.any():
            raise ValueError(
                f"Values not found in passed level: {hlevel[mask]!s}")

        new_codes.append(np.repeat(mapped, n))

    if isinstance(new_index, MultiIndex):
        new_levels.extend(new_index.levels)
        new_codes.extend([np.tile(lab, kpieces) for lab in new_index.codes])
    else:
        new_levels.append(new_index)
        new_codes.append(np.tile(np.arange(n), kpieces))

    if len(new_names) < len(new_levels):
        new_names.extend(new_index.names)

    return MultiIndex(levels=new_levels,
                      codes=new_codes,
                      names=new_names,
                      verify_integrity=False)
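A behavioural sketch of the MultiIndex that concatenation with keys produces (assumes pandas as pd; toy data, not the pandas internals above):

import pandas as pd

a = pd.Series([1, 2], index=['x', 'y'])
b = pd.Series([3, 4], index=['x', 'y'])
out = pd.concat([a, b], keys=['first', 'second'])
print(out.index)   # each key repeated over its piece, mirroring np.repeat(mapped, n) above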
Example #35
YEARMONTH
201301       2
201302       3


#################################DATA FRAME OPERATIONS##############################################################
#Create empty dataframe and keep appending / adding dataframes to it
import os
df = pd.DataFrame({})
for fil in os.listdir('folder_name_with_csv_files/'):
    df1 = pd.read_csv(fil)
    df = df.append(df1)


#Creating empty dataframe
df = pd.DataFrame({c: np.repeat(0, [nrow]) for c in data['PAGENO'].unique()})

#Assigning values to a cell of a dataframe
#Populating the dataframe
for row in data.iterrows():
	if list(list(row)[1])[1] in data['PAGENO'].unique():
		df.set_value(index=di[list(list(row)[1])[0]], col=list(list(row)[1])[1], value=list(list(row)[1])[2])

#Extracting one by one rows
for i in range(0,nrow):
	row_df=list(df.iloc[i,0:ncol])#iloc for extracting by index, loc for extracting by names

#Extracting rows by names
df.loc[['row_name_1', 'row_name_2'],:]

#Extracting rows by index
Example #36
def test(testloader, model, deeplab, mid, criterion, gpuid, SR_dir):
    avg_psnr = 0
    interp = torch.nn.Upsample(size=(505, 505), mode='bilinear')

    data_list = []
    for iteration, batch in enumerate(testloader):
        input_ss, input, target, label, size, name = Variable(batch[0], volatile=True), Variable(batch[1], volatile=True), \
                                                     Variable(batch[2], volatile=True), batch[3], batch[4], batch[5]
        input_ss = input_ss.cuda(gpuid)
        seg = deeplab(input_ss)
        size = (input_ss.size()[2], input_ss.size()[3])
        label = mid(seg, size)

        #=======label transform h*w to 21*h*w=======#
        label_argmax = False
        if label_argmax == True:
            Label_patch = label.cpu().data[0:1].numpy()
            Label_patch = np.expand_dims(np.argmax(Label_patch, axis=1), axis=0)
            label_pro = np.repeat(Label_patch , 21, axis=1)
            for i in range(21):
                tmp = label_pro[:,i:i+1]
                if i == 0:
                    tmp[tmp==255] = 0
                tmp[tmp != i] = -1
                tmp[tmp == i] = 1
                tmp[tmp == -1] = 0
            label = Variable(torch.from_numpy(label_pro[:, :, :, :]).float())
            transform_test = False
            if transform_test == True:
                Label_patch_test = Label_patch.copy()
                Label_patch_test [Label_patch_test == 255] = 0
                if (np.argmax(label_pro, axis=1).reshape((label.size())) - Label_patch_test).any() != 0:
                    print(">>>>>>Transform Error!")
        #=======label transform h*w to 21*h*w=======#
        input = input.cuda(gpuid)
        target = target.cuda(gpuid)
        label = label.cuda(gpuid)

        #=========image mask generation=========#
        for i in range(21):
            mask = label[:,i:i+1,:,:].repeat(1,3,1,1)
            mask_selected = torch.mul(mask, input)
            if i == 0:
                input_cls = mask_selected
            else:
                input_cls = torch.cat((input_cls, mask_selected), dim=1)
        input_cls = input_cls.cuda(gpuid)
        Blur_SR = model(input_cls)
        #output = model_pretrained(input)


        im_h = Blur_SR.cpu().data[0].numpy().astype(np.float32)
        im_h[im_h < 0] = 0
        im_h[im_h > 1.] = 1.
        SR = Variable((torch.from_numpy(im_h)).unsqueeze(0)).cuda(gpuid)

        result = transforms.ToPILImage()(SR.cpu().data[0])
        path = join(SR_dir, '{0:04d}.jpg'.format(iteration))
        #result.save(path)
        mse = criterion(SR, target)
        psnr = 10 * log10(1 / mse.data[0])
        avg_psnr += psnr
        print("%s: %s.png" % (iteration, name[0]))
        print('===>psnr: {:.4f} dB'.format(psnr))


        ##########show results###############
        is_show = False
        if is_show == True:
            label_show = label.cpu().data[0].numpy().transpose((1, 2, 0))
            label_show = np.asarray(np.argmax(label_show, axis=2), dtype=np.int)

            #image_out = input.cpu().data[0].numpy()
            #image_out = image_out.transpose((1, 2, 0))
            #image_out += IMG_MEAN
            #image_out = image_out[:, :, ::-1]  # BRG2RGB
            #image_out = np.asarray(image_out, np.uint8)
            image = input.cpu().data[0].numpy().transpose((1, 2, 0))
            image_out = SR.cpu().data[0].numpy().transpose((1, 2, 0))

            label_heatmap = label.cpu().data[0].view(21, 1, input.data[0].size(1), input.data[0].size(2))
            label_heatmap = torchvision.utils.make_grid(label_heatmap)
            label_heatmap = label_heatmap.numpy().transpose((1, 2, 0))
            images_cls = input_cls.cpu().data[0].view(21, 3, input.data[0].size(1), input.data[0].size(2))
            images_cls = torchvision.utils.make_grid(images_cls)
            images_cls = images_cls.numpy().transpose((1, 2, 0))

            show_seg(image, label_show, image_out,label_heatmap, images_cls)
        #####################################
        #size = (target.size()[2], target.size()[3])
        #gt = np.asarray(label[0].numpy()[:size[0], :size[1]], dtype=np.int)
        #seg_out = torch.nn.Upsample(size, mode='bilinear')(seg)
        #seg_out = seg_out.cpu().data[0].numpy()
        #seg_out = seg_out.transpose(1, 2, 0)
        #seg_out = np.asarray(np.argmax(seg_out, axis=2), dtype=np.int)
        #data_list.append([gt.flatten(), seg_out.flatten()])
    #get_iou(data_list, NUM_CLASSES )
    print("===> Avg. SR PSNR: {:.4f} dB".format(avg_psnr / iteration))
Example #37
def slice_plot(netIn,
               ax,
               nLabs='',
               tLabs='',
               timeunit='',
               linestyle='k-',
               nodesize=100):
    '''

    Function draws a "slice graph" and exports axis handles


    **PARAMETERS**

    :netIn: temporal network input (graphlet or contact)
    :ax: matplotlib figure handles.
    :nLabs: nodes labels. List of strings.
    :tLabs: labels of dimension Graph is expressed across. List of strings.
    :timeunit: unit time axis is in.
    :linestyle: line style of Bezier curves.
    :nodesize: size of nodes


    **OUTPUT**

    :ax: axis handle of slice graph


    **SEE ALSO**

    - *circle_plot*
    - *graphlet_stack_plot*


    **HISTORY**

    :modified: Dec 2016, WHT (documentation, improvements)
    :created: Sept 2016, WHT

    '''
    #Get input type (C or G)
    inputType = checkInput(netIn)
    nettype = 'xx'
    #Convert C representation to G

    if inputType == 'G':
        cfg = {}
        netIn = graphlet2contact(netIn)
        inputType = 'C'
    edgeList = [
        tuple(np.array(e[0:2]) + e[2] * netIn['netshape'][0])
        for e in netIn['contacts']
    ]

    if nLabs != '' and len(nLabs) == netIn['netshape'][0]:
        pass
    elif nLabs != '' and len(nLabs) != netIn['netshape'][0]:
        raise ValueError('specified node label length does not match netshape')
    elif nLabs == '' and netIn['nLabs'] == '':
        nLabs = np.arange(1, netIn['netshape'][0] + 1)
    else:
        nLabs = netIn['nLabs']

    if tLabs != '' and len(tLabs) == netIn['netshape'][-1]:
        pass
    elif tLabs != '' and len(tLabs) != netIn['netshape'][-1]:
        raise ValueError('specified time label length does not match netshape')
    elif tLabs == '' and str(netIn['t0']) == '':
        tLabs = np.arange(1, netIn['netshape'][-1] + 1)
    else:
        tLabs = np.arange(netIn['t0'],
                          netIn['Fs'] * netIn['netshape'][-1] + netIn['t0'],
                          netIn['Fs'])

    if timeunit == '':
        timeunit = netIn['timeunit']

    timeNum = len(tLabs)
    nodeNum = len(nLabs)
    pos = []
    posy = np.tile(list(range(0, nodeNum)), timeNum)
    posx = np.repeat(list(range(0, timeNum)), nodeNum)

    #plt.plot(points)
    #Draw Bezier vectors around egde positions
    for edge in edgeList:
        bvx, bvy = bezier_points((posx[edge[0]], posy[edge[0]]),
                                 (posx[edge[1]], posy[edge[1]]), nodeNum, 20)
        ax.plot(bvx, bvy, linestyle)
    ax.set_yticks(range(0, len(nLabs)))
    ax.set_xticks(range(0, len(tLabs)))
    ax.set_yticklabels(nLabs)
    ax.set_xticklabels(tLabs)
    ax.grid()
    ax.set_frame_on(False)
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()
    ax.set_xlim([min(posx) - 1, max(posx) + 1])
    ax.set_ylim([min(posy) - 1, max(posy) + 1])
    ax.scatter(posx, posy, s=nodesize, c=posy, zorder=10)
    if timeunit != '':
        timeunit = ' (' + timeunit + ')'
    ax.set_xlabel('Time' + timeunit)

    return ax
Example #38
def append_out_row_pandas_format(bbh_dict, bbh_df, key, output_file):
    query_row = [key] + list(bbh_dict[key]) + \
    list(np.repeat(np.nan, len(bbh_df.columns)-len(bbh_dict[key])-1))
    new_row = pd.Series(query_row, index=bbh_df.columns)
    trans_row = new_row.to_frame().T
    trans_row.to_csv(output_file,mode='a',index=False,header=False, sep="\t")
Example #39
def load_digits(fn):
    print('loading "%s" ...' % fn)
    digits_img = cv2.imread(fn, 0)
    digits = split2d(digits_img, (SZ, SZ))
    labels = np.repeat(np.arange(CLASS_N), len(digits) // CLASS_N)
    return digits, labels
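A small illustration of the label layout (assumes numpy as np; the class count and per-class size here are made up):

labels = np.repeat(np.arange(3), 4)
print(labels)   # [0 0 0 0 1 1 1 1 2 2 2 2]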
Example #40
def test_combine_misfits():
    source, targets = scenario('wellposed', 'noisefree')

    p = ToyProblem(name='toy_problem',
                   ranges={
                       'north': gf.Range(start=-10., stop=10.),
                       'east': gf.Range(start=-10., stop=10.),
                       'depth': gf.Range(start=0., stop=10.)
                   },
                   base_source=source,
                   targets=targets)

    ngx, ngy, ngz = 11, 11, 11
    xg = num.zeros((ngz * ngy * ngx, 3))

    xbounds = p.get_parameter_bounds()
    cx = num.linspace(xbounds[0][0], xbounds[0][1], ngx)
    cy = num.linspace(xbounds[1][0], xbounds[1][1], ngy)
    cz = num.linspace(xbounds[2][0], xbounds[2][1], ngz)

    xg[:, 0] = num.tile(cx, ngy * ngz)
    xg[:, 1] = num.tile(num.repeat(cy, ngx), ngz)
    xg[:, 2] = num.repeat(cz, ngx * ngy)

    misfitss = p.evaluate_many(xg)
    # misfitss[imodel, itarget, 0], misfitss[imodel, itarget, 1]
    gms = p.combine_misfits(misfitss)
    gms_contrib = p.combine_misfits(misfitss, get_contributions=True)

    # gms[imodel]
    # gms_contrib[imodel, itarget]

    bweights = num.ones((2, p.ntargets))
    gms_2 = p.combine_misfits(misfitss, extra_weights=bweights)
    gms_2_contrib = p.combine_misfits(misfitss,
                                      extra_weights=bweights,
                                      get_contributions=True)

    # gms_2[imodel, ibootstrap]
    # gms_2_contrib[imodel, ibootstrap, itarget]

    for ix, x in enumerate(xg):
        misfits = p.evaluate(x)
        # misfits[itarget, 0], misfits[itarget, 1]
        gm = p.combine_misfits(misfits)
        # gm is scalar
        t.assert_equal(gm, gms[ix])

        gm_contrib = p.combine_misfits(misfits, get_contributions=True)

        assert_ae(gms_contrib[ix, :], gm_contrib)

        gm_2 = p.combine_misfits(misfits, extra_weights=bweights)

        assert gm_2[0] == gm
        assert gm_2[1] == gm
        assert gms_2[ix, 0] == gm
        assert gms_2[ix, 1] == gm

        gm_2_contrib = p.combine_misfits(misfits,
                                         extra_weights=bweights,
                                         get_contributions=True)

        assert_ae(gm_2_contrib[0, :], gm_contrib)
        assert_ae(gm_2_contrib[1, :], gm_contrib)
        assert_ae(gms_2_contrib[ix, 0, :], gm_contrib)
        assert_ae(gms_2_contrib[ix, 1, :], gm_contrib)
Example #41
0
for reinsert_idx in fixed.keys():
    paramset = np.insert(np.array(paramset),
                         reinsert_idx,
                         fixed[reinsert_idx],
                         axis=1)

print(paramset[0])
print(orig_params)

nstims = len(opt_stim_list)
convert_allen_data()
#allparams_from_mapping(paramset)

###### TEN COPIES OF ORIG PARAMS FOR DEBUG #################
param_values = np.array(orig_params).reshape(1, -1)
param_values = np.repeat(param_values, 10, axis=0)
print(param_values.shape, "pvals shape!!!!!!!!")
allparams_from_mapping(param_values)
###### TEN COPIES OF ORIG PARAMS FOR DEBUG #################

for i in range(0, 3):
    if i != 0:
        p_object = run_model(0, i)
        p_object.wait()
        #getVolts(0)
        stim_swap(0, i)

import shutil, errno


def copyanything(src, dst):
Example #42
0
File: save.py Project: baccandr/PyCHAM
def saving(filename, y_mat, Nresult_dry, Nresult_wet, t_out, savefolder, dydt_vst, num_comp, 
	Cfactor_vst, testf, numsb, comp_namelist, dydt_trak, y_mw, MV,
	time_taken, seed_name, x2, rbou_rec, wall_on, space_mode, rbou00, upper_bin_rad_amp, 
	indx_plot, comp0, yrec_p2w, sch_name, inname, rel_SMILES, Psat_Pa_rec, OC, H2Oi,
	seedi, siz_str, cham_env):

	# inputs: ----------------------------------------------------------------------------
	
	# filename - name of model variables file
	# y_mat - species (columns) concentrations with time (rows) (molecules/cc (air))
	# Nresult_dry  - number concentration of dry particles per size bin (#/cc (air))
	# Nresult_wet  - number concentration of wet particles per size bin (#/cc (air))
	# Cfactor - conversion factor to change gas-phase concentrations from molecules/cc 
	# (air) into ppb
	# testf - flag to show whether in normal mode (0) or test mode (1)
	# numsb - number of size bins
	# dydt_vst - tendency to change of user-specified components
	# dydt_trak - user-input names of components to track
	# comp_namelist - names of components given by the chemical scheme file
	# upper_bin_rad_amp - factor upper bin radius found increased by in 
	#						Size_distributions.py for more than 1 size bin, or in 
	# 						pp_intro.py for 1 size bin
	# Cfactor_vst - one billionth the molecular concentration in a unit volume of chamber
	#				(molecules/cc) per recording time step
	# time_taken - computer time for entire simulation (s)
	# seed_name - name of seed component
	# y_mw - molecular weights (g/mol)
	# MV - molar volumes (cm3/mol)
	# time_taken - simulation computer time (s)
	# seed_name - chemical scheme name of component comprising seed particles
	# x2 - record of size bin radii (um)
	# rbou_rec - radius bounds per size bin (columns) per time step (rows) (um)
	# wall_on - marker for whether wall on or off
	# space_mode - type of spacing between particle size bins (log or lin)
	# rbou00 - initial lower size (radius) bound of particles (um)
	# upper_bin_rad_amp - factor increase in radius of upper bound
	# indx_plot - index of components to plot gas-phase concentration temporal profiles of in 
	# standard results plot
	# comp0 - names of components to plot gas-phase concentration temporal profiles of in 
	# standard results plot
	# yrec_p2w - concentration of components on the wall due to 
	#	particle-wall loss, stacked by component first then by
	#	size bin (molecules/cc)
	# sch_name - path to chemical scheme file
	# inname - path to model variables file
	# rel_SMILES - SMILES strings for components in chemical scheme
	# Psat_Pa_rec - pure component saturation vapour pressures at 298.15 K
	# OC - oxygen to carbon ratio of components
	# H2Oi - index of water
	# seedi - index of seed components
	# siz_str - the size structure
	# cham_env - chamber environmental conditions (temperature (K), 
	# pressure (Pa) and relative humidity)
	# ---------------------------------------------------------------
	
	
	if ((numsb-wall_on) > 0): # correct for changes to size bin radius bounds
		rbou_rec[:, 0] = rbou00
		rbou_rec[:, -1] = rbou_rec[:, -1]/upper_bin_rad_amp

	if (testf == 1):
		return(0) # return dummy

	dir_path = os.getcwd() # current working directory
	output_root = 'PyCHAM/output'
	filename = os.path.basename(filename)
	filename = os.path.splitext(filename)[0]
	# one folder for one simulation
	output_by_sim = os.path.join(dir_path, output_root, filename, savefolder)
	# create folder to store results
	os.makedirs(output_by_sim)
	
	# create folder to store copies of inputs
	os.makedirs(str(output_by_sim+'/inputs'))
	# making a copy of the chemical scheme and model variables input files
	output_by_sim_ext = str(output_by_sim+'/inputs/'+sch_name.split('/')[-1])
	copyfile(sch_name, output_by_sim_ext)
	output_by_sim_ext = str(output_by_sim+'/inputs/'+inname.split('/')[-1])
	if (inname != 'Default'): # if not in default model variables mode 
		copyfile(inname, output_by_sim_ext)
	
	# saving dictionary
	# dictionary containing variables for model and components
	const = {}
	const["number_of_size_bins"] = numsb
	const["number_of_components"] = num_comp
	const["molecular_weights_g/mol_corresponding_to_component_names"] = (np.squeeze(y_mw[:, 0]).tolist())
	const["molar_volumes_cm3/mol"] = (MV[:, 0].tolist())
	const["chem_scheme_names"] = comp_namelist
	const["SMILES"] = rel_SMILES
	const["factor_for_multiplying_ppb_to_get_molec/cm3_with_time"] = (Cfactor_vst.tolist())
	const["simulation_computer_time(s)"] = time_taken
	const["seed_name"] = seed_name
	const["wall_on_flag_0forNO_1forYES"] = wall_on
	const["space_mode"] = space_mode
	const["pure_component_saturation_vapour_pressures_at_298.15K"] = Psat_Pa_rec.tolist()
	const["oxygen_to_carbon_ratios_of_components"] = OC.tolist()
	const["index_of_water"] = H2Oi
	const["index_of_seed_components"] = seedi.tolist()
	const["size_structure_0_for_moving_centre_1_for_full_moving"] = siz_str

	with open(os.path.join(output_by_sim,'model_and_component_constants'),'w') as f:
		for key in const.keys():
			f.write("%s,%s\n"%(key, const[key]))
	
	# convert gas-phase concentrations from molecules/cc (air) into ppb
	# leaving any particle-phase concentrations as molecules/cc (air)
	y_mat[:, 0:num_comp] = y_mat[:, 0:num_comp]/(Cfactor_vst.reshape(len(Cfactor_vst), 1))
	
	
	y_header = str('') # prepare header for concentrations-with-time file
	x2_header = str('') # prepare header for files relating to size bins
	
	for i in range(numsb+1): # loop through size bins

		if i == 0:
			end = '_g'
		if ((i > 0) and (i < numsb)):
			end = '_p'
			x2_header = str(x2_header+str(i))
		if (i == numsb):
			if (wall_on == 0):
				end = '_p'
				x2_header = str(x2_header+str(np.repeat(i, num_comp)))
			else:
				end = '_w'
		for ii in range(num_comp):
			if i  == 0 and ii == 0:
				start = ''
			else:
				start = ', '
			y_header = str(y_header+str(start+comp_namelist[ii])+end)
			
	# saving both gas, particle and wall concentrations of components
	np.savetxt(os.path.join(output_by_sim, 'concentrations_all_components_all_times_gas_particle_wall'), y_mat, delimiter=',', header=str('time changes with rows which correspond to the time output file, components in columns, with _g representing gas phase (ppb), _pi representing particle phase where i is the size bin number (starting at 1) (molecules/cc (air)) and _w is the wall phase (molecules/cc (air))\n'+y_header)) 		
	 
	# saving time of outputs
	np.savetxt(os.path.join(output_by_sim, 'time'), t_out, delimiter=',', header='time (s), these correspond to the rows in the concentrations_all_components_all_times_gas_particle_wall, particle_number_concentration and size_bin_radius output files')
	
	# saving environmental conditions (temperature, pressure, relative humidity)
	np.savetxt(os.path.join(output_by_sim, 'chamber_environmental_conditions'), cham_env, delimiter=',', header='chamber environmental conditions throughout the simulation, with rows corresponding to the time points in the time output file, first column is temperature (K), second is pressure (Pa) and third is relative humidity (fraction (0-1))')
	
	# saving the index and names of components whose gas-phase temporal profiles can be plotted on the standard results plot
	fname = os.path.join(output_by_sim, 'components_with_initial_gas_phase_concentrations_specified')
	np.savetxt(fname, [indx_plot, comp0], delimiter =', ', header='index (top row) and chemical scheme name (bottom row) of components with initial gas-phase concentrations specified', fmt ='% s') 
	
	# if tracking of tendencies to change requested by user, loop through the components
	# and save the tendency record for each of these (%/hour)
	if (len(dydt_vst) > 0):
		compind = 0
		# loop through components to record the tendency of change
		for compi in dydt_vst.get('comp_index'):
			# open relevant dictionary value, to get the 2D numpy array for saving
			dydt_rec = np.array(dydt_vst.get(compi))
			
			# get user-input name of this component
			comp_name = str(dydt_trak[compind] +'_rate_of_change')
			# save
			np.savetxt(os.path.join(output_by_sim, comp_name), dydt_rec, delimiter=',', header='tendency to change, top row gives equation number (where number 0 is the first equation), penultimate column is gas-particle partitioning and final column is gas-wall partitioning (molecules/cc.s (air))')
			compind += 1
	
	
	if ((numsb-wall_on) > 0): # if particles present
	
		# saving the concentration of components on the wall due to particle deposition to wall
		np.savetxt(os.path.join(output_by_sim, 'concentrations_all_components_all_times_on_wall_due_to_particle_deposition_to_wall'), yrec_p2w, delimiter=',', header=str('concentration of components on wall due to particle deposition to wall (molecules/cc (air)) time changes with rows which correspond to the time output file, components in columns and size bin changing with columns with size bin numbers given in the second row of the header\n'+x2_header)) 
	
		np.savetxt(os.path.join(output_by_sim, 'particle_number_concentration_dry'), Nresult_dry, delimiter=',',
				header=('particle number concentration assuming water removed from particles (#/cc (air)), with time changing with rows (corresponding times given in the time output file) and size bin changing with columns with size bin numbers given in the second row of the header\n'+x2_header))
		
		np.savetxt(os.path.join(output_by_sim, 'particle_number_concentration_wet'), Nresult_wet, delimiter=',',
				header=('particle number concentration assuming water not removed from particles (#/cc (air)), with time changing with rows (corresponding times given in the time output file) and size bin changing with columns with size bin numbers given in the second row of the header\n'+x2_header))	
	
		np.savetxt(os.path.join(output_by_sim, 'size_bin_radius'), x2, delimiter=',',
				header= str('particle radii (um) per size_bin (including water contribution to size), with size bins represented by columns and their number (starting from 1) given in second line of header, per time step which is represented by rows and corresponding times given in the time output file \n'+x2_header))
	
		np.savetxt(os.path.join(output_by_sim, 'size_bin_bounds'), rbou_rec, delimiter=',',
				header=str('particle size bin bounds (um), with size bins represented by columns and their number (starting at 1 and in line with the lower bound) given in second line of header, per time step which is represented by rows and corresponding times given in the time output file \n'+x2_header))
		
	# if save name is the default, then remove to ensure no duplication in future
	if (savefolder == 'default_res_name'):
		import shutil	
		shutil.rmtree(output_by_sim)
	return()
def inference_in_batches(all_images, batch_size, mode):
    """Returns a numpy array of the activations of shape len(all_images)xACTIVATION_DIM
    - all_images: 2d numpy array of shape Nx(data.ORIGINAL_IMG_DIM)x(data.ORIGINAL_IMG_DIM)x(data.IMG_CHANNELS)
    """
    graph = tf.Graph()
    with graph.as_default():
        images = tf.placeholder(tf.float32,
                                shape=[
                                    None, data.ORIGINAL_IMG_DIM,
                                    data.ORIGINAL_IMG_DIM, data.IMG_CHANNELS
                                ],
                                name="input-images")

        # random augmentation & deterministic normalization
        augmented_images = tf.map_fn(lambda img: augment_normalize(img, mode),
                                     images)

        # rescale to Inception-expected size
        rescaled_batch = tf.image.resize_images(augmented_images,
                                                size=[299, 299])
        activations = inception_v3_features(rescaled_batch)

        init = tf.group(tf.global_variables_initializer(),
                        tf.local_variables_initializer())
        saver = inception_v3.create_saver()

    sess = tf.Session(graph=graph)

    # simply repeat all images so we have multiple augmentation epochs (for train mode only)
    augmentation_epochs = NUMBER_OF_AUGMENTATION_EPOCHS if mode == 'train' else 1

    num_images = len(all_images) * augmentation_epochs
    all_images = np.repeat(all_images, repeats=augmentation_epochs, axis=0)

    result = np.ndarray(shape=(num_images, ACTIVATION_DIM))
    with sess.as_default():
        sess.run(init)
        inception_v3.restore(sess, saver)

        full_batches = num_images // batch_size
        num_batches = full_batches if num_images % batch_size == 0 else full_batches + 1

        start_time = 0
        end_time = 0
        for i in range(num_batches):
            msg = "\r- Processing batch: {0:>6} / {1}".format(
                i + 1, num_batches)

            # -- time tracking stats --
            if start_time:
                time_for_last_batch = end_time - start_time
                estimated_remaining = (num_batches - i) * time_for_last_batch
                msg += " (ETA: {})".format(estimated_remaining)
            # -------------------------
            sys.stdout.write(msg)
            sys.stdout.flush()
            # -------------------------

            start_time = datetime.now()

            from_idx = i * batch_size
            to_idx = min((i + 1) * batch_size, num_images)
            images_batch = all_images[from_idx:to_idx]
            batch_result = sess.run(activations,
                                    feed_dict={images: images_batch})

            end_time = datetime.now()

            result[from_idx:to_idx] = np.squeeze(
                batch_result)  # remove 1x dimensions
        print("")  # new line

    return result
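# Editor's sketch (standalone toy example; shapes are made up): the augmentation-epoch
# trick above just duplicates every image along axis 0 so each copy can later be
# augmented independently.
def _demo_augmentation_repeat():
    import numpy as np
    images = np.zeros((4, 8, 8, 3))                   # 4 dummy images
    repeated = np.repeat(images, repeats=3, axis=0)   # 3 augmentation epochs
    assert repeated.shape == (12, 8, 8, 3)
    return repeated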
Example #44
0
File: utils.py Project: maxnoe/astropy
    def __init__(self,
                 data,
                 position,
                 size,
                 wcs=None,
                 mode='trim',
                 fill_value=np.nan,
                 copy=False):
        if wcs is None:
            wcs = getattr(data, 'wcs', None)

        if isinstance(position, SkyCoord):
            if wcs is None:
                raise ValueError('wcs must be input if position is a '
                                 'SkyCoord')
            position = skycoord_to_pixel(position, wcs, mode='all')  # (x, y)

        if np.isscalar(size):
            size = np.repeat(size, 2)

        # special handling for a scalar Quantity
        if isinstance(size, u.Quantity):
            size = np.atleast_1d(size)
            if len(size) == 1:
                size = np.repeat(size, 2)

        if len(size) > 2:
            raise ValueError('size must have at most two elements')

        shape = np.zeros(2).astype(int)
        pixel_scales = None
        # ``size`` can have a mixture of int and Quantity (and even units),
        # so evaluate each axis separately
        for axis, side in enumerate(size):
            if not isinstance(side, u.Quantity):
                shape[axis] = int(np.round(size[axis]))  # pixels
            else:
                if side.unit == u.pixel:
                    shape[axis] = int(np.round(side.value))
                elif side.unit.physical_type == 'angle':
                    if wcs is None:
                        raise ValueError('wcs must be input if any element '
                                         'of size has angular units')
                    if pixel_scales is None:
                        pixel_scales = u.Quantity(proj_plane_pixel_scales(wcs),
                                                  wcs.wcs.cunit[axis])
                    shape[axis] = int(
                        np.round((side / pixel_scales[axis]).decompose()))
                else:
                    raise ValueError('shape can contain Quantities with only '
                                     'pixel or angular units')

        data = np.asanyarray(data)
        # reverse position because extract_array and overlap_slices
        # use (y, x), but keep the input position
        pos_yx = position[::-1]

        cutout_data, input_position_cutout = extract_array(
            data,
            tuple(shape),
            pos_yx,
            mode=mode,
            fill_value=fill_value,
            return_position=True)
        if copy:
            cutout_data = np.copy(cutout_data)
        self.data = cutout_data

        self.input_position_cutout = input_position_cutout[::-1]  # (x, y)
        slices_original, slices_cutout = overlap_slices(data.shape,
                                                        shape,
                                                        pos_yx,
                                                        mode=mode)

        self.slices_original = slices_original
        self.slices_cutout = slices_cutout

        self.shape = self.data.shape
        self.input_position_original = position
        self.shape_input = shape

        ((self.ymin_original, self.ymax_original),
         (self.xmin_original, self.xmax_original)) = self.bbox_original

        ((self.ymin_cutout, self.ymax_cutout),
         (self.xmin_cutout, self.xmax_cutout)) = self.bbox_cutout

        # the true origin pixel of the cutout array, including any
        # filled cutout values
        self._origin_original_true = (self.origin_original[0] -
                                      self.slices_cutout[1].start,
                                      self.origin_original[1] -
                                      self.slices_cutout[0].start)

        if wcs is not None:
            self.wcs = deepcopy(wcs)
            self.wcs.wcs.crpix -= self._origin_original_true
            self.wcs.array_shape = self.data.shape
            if wcs.sip is not None:
                self.wcs.sip = Sip(wcs.sip.a, wcs.sip.b, wcs.sip.ap,
                                   wcs.sip.bp,
                                   wcs.sip.crpix - self._origin_original_true)
        else:
            self.wcs = None
Example #45
0
import numpy as np
import matplotlib.pyplot as plt
import json


up = 30
down = -30
code = np.array([+1, +1, +1, -1, -1, -1, +1, -1, -1, +1, -1], dtype='int8')  # 11
code = np.repeat(code, 5)

with open('wifi/Кущ.dat') as f:
    data = np.array(f.readlines(), dtype='float64')

# Returns the discrete, linear convolution of two one-dimensional sequences.
# Mode 'same' returns output of length max(M, N). Boundary effects are still visible.
cnl = np.convolve(data, code[::-1], mode='same')  # or 'full'

plt.plot(cnl)
# plt.plot(np.convolve(data, code[::-1], mode='full'))
plt.title('КРАСИВО')
plt.show()

bit = []

for i in range(1, cnl.shape[0] - 1):  # skip the endpoints so i-1 and i+1 stay in range
    if cnl[i] > up and cnl[i - 1] < cnl[i] and cnl[i + 1] < cnl[i]:
        bit.append(1)
    elif cnl[i] < down and cnl[i - 1] > cnl[i] and cnl[i + 1] > cnl[i]:
        bit.append(0)

# Packs the elements of a binary-valued array into bits in a uint8 array.
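# The snippet is truncated here. Editor's assumption: the comment above quotes the
# np.packbits documentation, so a plausible next step is to pack the recovered bits:
packed = np.packbits(np.array(bit, dtype=np.uint8))
print(packed)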
Example #46
0
            test['PM2.5'][i] = test['PM2.5'][i - 1]
    for i in range(2340):
        if test['RH'][i] <= 0:
            test['RH'][i] = test['RH'][i - 1]

    test = test.stack()
    test = test.unstack(0)
    test = test.T
    return (test)


test = test_data_input(sys.argv[1])
x_test = np.matrix(test)
x_test = x_test[:, range(22, 198, 1)]
x_test = np.concatenate(
    (np.matrix(np.repeat(1,
                         np.array(x_test.shape)[0])).T, x_test), axis=1)
file = open('best.pickle', 'rb')
coe = pickle.load(file)
file.close()
beta75 = np.matrix(coe['q75'])
beta99 = np.matrix(coe['q99'])
beta = np.matrix(coe['final'])
did = np.dot(x_test, beta99) - np.dot(x_test, beta75)
label = np.matrix(np.zeros((260, 1)))
for i in range(260):
    if did[i] > 10:
        label[i, 0] = 1
x_test = np.concatenate((x_test, label), axis=1)
y_hat = np.dot(x_test, beta)
test['reg'] = y_hat
y_hat = test.iloc[:, -1]
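# Editor's sketch (hypothetical shapes): the np.repeat / np.concatenate pattern used
# above simply prepends an intercept column of ones to the design matrix.
def _demo_add_intercept():
    import numpy as np
    X = np.arange(6.0).reshape(3, 2)                   # 3 samples, 2 features
    ones = np.repeat(1.0, X.shape[0]).reshape(-1, 1)   # column of ones
    return np.concatenate((ones, X), axis=1)           # shape (3, 3)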
Example #47
0
def plot_source_locations(source_xs_0, source_ys_0, source_xs_120, source_ys_120, source_xs_90, source_ys_90,
                          damage_xs=[], damage_ys=[]):
    # center marks
    #source_xs = np.hstack((source_xs_0, source_xs_120, source_xs_90, damage_xs))
    #source_ys = np.hstack((source_ys_0, source_ys_120, source_ys_90, damage_ys))
    source_xs = np.hstack((source_xs_0, source_xs_120, source_xs_90))
    source_ys = np.hstack((source_ys_0, source_ys_120, source_ys_90))

    mark_radii = np.repeat(10, len(source_xs))
    mark_colors = np.repeat(5, len(source_xs))
    plt.scatter(source_xs, source_ys, s=mark_radii, c=mark_colors, cmap=cm.Set1, alpha=1.0)

    fig = plt.gcf()
    ax = fig.gca()
    error_radius = 2.9

    # error circles
    patches = []
    for coordinate in zip(source_xs_0,source_ys_0):
        patches.append(mpatches.Circle(coordinate, error_radius))
    collection = mcollections.PatchCollection(patches, cmap=cm.brg,
                                              norm=mpl.colors.Normalize(0.,1.),
                                              alpha=0.2)
    collection.set_array(np.repeat(0.5, len(source_xs_0)))
    ax.add_collection(collection)

    patches = []
    for coordinate in zip(source_xs_120,source_ys_120):
        patches.append(mpatches.Circle(coordinate, error_radius))
    collection = mcollections.PatchCollection(patches, cmap=cm.brg,
                                              norm=mpl.colors.Normalize(0.,1.),
                                              alpha=0.2)
    collection.set_array(np.repeat(1.0, len(source_xs_120)))
    ax.add_collection(collection)

    patches = []
    for coordinate in zip(source_xs_90,source_ys_90):
        patches.append(mpatches.Circle(coordinate, error_radius))
    collection = mcollections.PatchCollection(patches, cmap=cm.brg,
                                              norm=mpl.colors.Normalize(0.,1.),
                                              alpha=0.2)
    collection.set_array(np.repeat(0.0, len(source_xs_90)))
    ax.add_collection(collection)

    # damage marks
    """
    patches = []
    for coordinate in zip(damage_xs,damage_ys):
        coordinate = np.array(coordinate)
        #patches.append(mpatches.RegularPolygon(coordinate, 5, 0.4))
        patches.append(mpatches.Rectangle(coordinate-[0.04,0.4], 0.08, 0.8))
        patches.append(mpatches.Rectangle(coordinate-[0.4,0.05], 0.8, 0.08))
    collection = mcollections.PatchCollection(patches, color='k', alpha=1.0)
    #collection.set_array(np.repeat(0.5, len(source_xs_0)))
    ax.add_collection(collection)
    """


    # cavity
    origin_x = np.zeros(1)
    origin_y = np.zeros(1)
    cavity_radius = 10000
    #plt.scatter([0, 0], [0, 0], s=[90000, 0], c=[3,1], cmap=cm.Set1, alpha=0.1)
    cavity=plt.Circle((0,0),11.43,color='k',alpha=0.1)
    ax.add_artist(cavity)

    plt.xlim((-16, 16))
    plt.ylim((-16, 16))
Example #48
0
        I_PotEvapTransp -
        np.multiply(I_InterceptEffectonTransp, D_InterceptEvap),
        D_RelWaterAv))
D_SurfaceFlow = (np.multiply(isL_Lake == 1,
                             (array_sum(I_DailyRainAmount,
                                        shape=(subCatchment, 1)) +
                              np.multiply(isL_Lake != 1, (
                                  array_sum(I_DailyRainAmount,
                                            shape=(subCatchment, 1)) -
                                  array_sum(D_InterceptEvap,
                                            shape=(subCatchment, 1)) -
                                  array_sum(D_Infiltration,
                                            shape=(subCatchment, 1)) -
                                  D_DeepInfiltration)))))
D_SoilQflowRelFrac = np.repeat(
    I_SoilQflowFrac,
    subCatchment).reshape((subCatchment, 1))
D_SoilDischarge = np.multiply(D_SoilQflowRelFrac,
                              (D_SoilWater[time] - I_AvailWaterClass))
calculate.update(D_CumNegRain,
                 inflow=0,
                 outflow=(D_InterceptEvap +
                          D_Infiltration +
                          D_DeepInfiltration +
                          D_SurfaceFlow),
                 dt=dt,
                 non_negative=False)
calculate.update(D_CumEvapTranspClass,
                 inflow=D_ActEvapTransp,
                 outflow=D_InterceptEvap,
                 dt=dt)
 #Loop over every mixture count in mix_list
 for i, nmix in enumerate(mix_list):
     
     #Call my function for training the UBM
     folder_path = "./models"
     [w, m, cov] = UBM_train(train_x, nmix, UBM_iter[i], UBM_ini, folder_path)
     
     #Preserve the UBM parameters as numpy-lists in local files
     np.save(folder_path + '/ubm_' + str(nmix) + 'mixGauss_weight', w)
     np.save(folder_path + '/ubm_' + str(nmix) + 'mixGauss_mean', m)
     np.save(folder_path + '/ubm_' + str(nmix) + 'mixGauss_covariance', cov)
     
     #Plot the Gaussian distribution
     plt.rcParams["font.size"] = 16
     plt.figure(figsize=(12, 8))
     colors = np.repeat(['r', 'g', 'b', 'm'], int(np.ceil(nmix/4)))
     for k in range(nmix):
         plt.scatter(m[k, 0], m[k, 1], c=colors[k], marker='o', zorder=3)
     
     #Plot the contour graph
     nfiles = len(train_x)
     ndim = train_x[0].shape[1]
     ave_x = np.zeros((nfiles, ndim))
     for d in range(nfiles):
         ave_x[d, :] = np.average(train_x[d], axis=0)
     x_min, x_max = np.amin(ave_x[:, 0])-0.5, np.amax(ave_x[:, 0])+0.5
     y_min, y_max = np.amin(ave_x[:, 1])-0.5, np.amax(ave_x[:, 1])+0.5
     xlist = np.linspace(x_min, x_max, 50)
     ylist = np.linspace(y_min, y_max, 50)
     x, y = np.meshgrid(xlist, ylist)
     pos = np.dstack((x,y))
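     # Editor's continuation sketch (assumption): the snippet is truncated here, and the
     # grid built above is presumably used to draw the contour of each Gaussian
     # component. This assumes two-dimensional features (ndim == 2, as the 2-D meshgrid
     # implies), that scipy is available, and that cov[k] is a per-component covariance
     # scipy accepts (a diagonal vector or a full matrix).
     from scipy.stats import multivariate_normal
     for k in range(nmix):
         z = multivariate_normal(mean=m[k], cov=cov[k]).pdf(pos)
         plt.contour(x, y, z, levels=5, colors=colors[k], linewidths=1)
     plt.show()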
Example #50
0
def call_differential_editing_sites(config_file):
    stability_value = 0.03  #value below which a lower coverage may be used to add more samples and increase power
    min_disease_people = 5  #min number of disease people at the higher coverage needed before stability can be based on those measurements
    min_control_people = 5  #min number of control people at the higher coverage needed before stability can be based on those measurements
    min_disease_people_5_cov = 10  #min number of disease people at 5x coverage required if the unstable 5x coverage must be used
    min_control_people_5_cov = 10  #min number of control people at 5x coverage required if the unstable 5x coverage must be used
    editing_file = './temp.csv'
    output_file = './editing_sites.with_stats_converted_disease.csv'
    #read in files
    editing_table = pd.read_csv(editing_file, sep='\t')
    #config_table = pd.read_csv(config_file,sep=',',header=None)
    config_table = pd.read_csv(config_file, sep=',', skiprows=1, header=None)
    all_people = config_table[0]
    disease_people = config_table[0][config_table[1] == "DIS"].reset_index(
        drop=True)  #TODO Change to disease!!!
    control_people = config_table[0][config_table[1] == "CTRL"].reset_index(
        drop=True)  #TODO Change to control!!!

    #now get just an editing table and coverage table
    edit_level_table = editing_table[all_people]

    #edit_level_table = editing_table[np.r_[all_people]]

    def get_editing_levels_for_cov_table(i):
        info = i.astype(str).str.split(pat="\\^")
        editing_levels = info.apply(lambda x: float('nan')
                                    if x[0] == "nan" else x[2])
        return editing_levels

    cov_table = edit_level_table.apply(get_editing_levels_for_cov_table)
    cov_table = cov_table.apply(lambda x: pd.to_numeric(
        x))  #TODO check if as.numeric and pandas to_numeric do the same.

    def get_editing_levels(i):
        info = i.astype(str).str.split(pat="\\^")
        editing_levels = info.apply(lambda x: float('nan')
                                    if x[0] == "nan" else x[0])
        return editing_levels

    edit_level_table = edit_level_table.apply(get_editing_levels)
    edit_level_table = edit_level_table.apply(
        lambda x: pd.to_numeric(x))  #TODO check precision on R and python

    #go down line by line and get the prevalence info and mean editing levels based off of stable coverages
    #WARNING using float here, not integer, to allow NaN values. Is that OK?
    coverage_threshold_used = np.repeat(
        0., edit_level_table.shape[0]
    )  #will hold the coverage threshold required for this editing site
    stability_based_on = np.repeat(
        0., edit_level_table.shape[0]
    )  #will hold what coverage stability requirements were determined
    stable_mean_disease_editing_level = np.repeat(
        0., edit_level_table.shape[0]
    )  #mean autistic editing level using individuals passing coverage threshold
    stable_std_dev_disease_editing_level = np.repeat(
        0., edit_level_table.shape[0]
    )  #standard deviation of autistic editing level using individuals passing coverage threshold
    stable_mean_control_editing_level = np.repeat(
        0., edit_level_table.shape[0]
    )  #mean control editing level using individuals passing coverage threshold
    stable_std_dev_control_editing_level = np.repeat(
        0., edit_level_table.shape[0]
    )  #standard deviation of control editing level using individuals passing coverage threshold
    stable_number_disease_with_at_least_min_coverage = np.repeat(
        0., edit_level_table.shape[0]
    )  #number of autistic individuals passing the coverage threshold
    stable_number_disease_nonzero_editing_and_min_coverage = np.repeat(
        0., edit_level_table.shape[0]
    )  #number of autistic individuals without non zero editing level and passing coverage threshold
    stable_disease_prevalence = np.repeat(
        0., edit_level_table.shape[0]
    )  #proportion autistic individuals with nonzero editing
    stable_number_control_with_at_least_min_coverage = np.repeat(
        0.,
        edit_level_table.shape[0])  #same as disease but for control subjects
    stable_number_control_nonzero_editing_and_min_coverage = np.repeat(
        0., edit_level_table.shape[0])
    stable_control_prevalence = np.repeat(0., edit_level_table.shape[0])
    stable_total_number_individuals_nonzero_editing_and_min_coverage = np.repeat(
        0., edit_level_table.shape[0]
    )  #total number of disease and control subjects passing the coverage threshold and having nonzero editing level
    stable_mann_whitney_p_value = np.repeat(
        0., edit_level_table.shape[0]
    )  #wilcoxon rank sum test p value using individuals passing the coverage threshold
    stable_editing_level_effect_size = np.repeat(
        0., edit_level_table.shape[0]
    )  #difference between mean disease and mean control
    stable_frequency_fishers_p_value = np.repeat(
        0., edit_level_table.shape[0]
    )  #prevalence p value determined using two-tailed fisher's exact test
    stable_frequency_OR = np.repeat(
        0., edit_level_table.shape[0])  #odds ratio of the fisher's exact teest
    stable_prevalence_effect_size = np.repeat(
        0., edit_level_table.shape[0]
    )  #difference in editing level prevalences between disease and control subjects
    #WARNING those are np arrays.

    for i in range(0, edit_level_table.shape[0]):
        print(i)  # keep track of progress
        disease_edit_row = edit_level_table.loc[i, disease_people]
        control_edit_row = edit_level_table.loc[i, control_people]
        disease_cov_row = cov_table.loc[i, disease_people]
        control_cov_row = cov_table.loc[i, control_people]
        #find what coverage we can base stability off of
        number_disease_20_cov = disease_cov_row[disease_cov_row >= 20].count()
        number_control_20_cov = control_cov_row[control_cov_row >= 20].count()
        number_disease_15_cov = disease_cov_row[disease_cov_row >= 15].count()
        number_control_15_cov = control_cov_row[control_cov_row >= 15].count()
        number_disease_10_cov = disease_cov_row[disease_cov_row >= 10].count()
        number_control_10_cov = control_cov_row[control_cov_row >= 10].count()
        number_disease_5_cov = disease_cov_row[disease_cov_row >= 5].count()
        number_control_5_cov = control_cov_row[control_cov_row >= 5].count()
        if number_disease_20_cov >= min_disease_people and number_control_20_cov >= min_control_people:
            stability_based_on[i] = 20
        elif number_disease_15_cov >= min_disease_people and number_control_15_cov >= min_control_people:
            stability_based_on[i] = 15
        elif number_disease_10_cov >= min_disease_people and number_control_10_cov >= min_control_people:
            stability_based_on[i] = 10
        elif number_disease_5_cov >= min_disease_people_5_cov and number_control_5_cov >= min_control_people_5_cov:
            stability_based_on[i] = 5
        else:
            #stability_based_on[i] = -99999 # there's no np.nan integer representation, only float. We use an invalid value.
            stability_based_on[i] = float('nan')

        #need to deal with cases where there just are not enough disease individuals or control individuals to calculate mean
        if np.isnan(stability_based_on[i]):

            coverage_threshold_used[
                i] = 5  #I warn users not to use editing sites that don't have any stability_based_on measurement. We include min coverage of 5 just to get statistical information anyways
            #stable_min_cov=5
            #otherwise we can now try to find the stable_min_cov that'll be used for calculation of all statistics'

        else:
            current_stability_cov = stability_based_on[i]
            stability_disease_mean = disease_edit_row[
                disease_cov_row >= current_stability_cov].mean()
            stability_control_mean = control_edit_row[
                control_cov_row >= current_stability_cov].mean()
            #print np.arange(5,stability_based_on[i]+1e-4,5)
            for j in np.arange(
                    5, stability_based_on[i] + 1e-4,
                    5):  #WARNING using 1e-4 so that the stop value is included
                disease_mean = disease_edit_row[disease_cov_row >= j].mean()
                control_mean = control_edit_row[control_cov_row >= j].mean()
                if np.absolute(disease_mean - stability_disease_mean
                               ) <= stability_value and np.absolute(
                                   control_mean -
                                   stability_control_mean) <= stability_value:
                    coverage_threshold_used[i] = j
                    break
        #now let's calculate all our statics based on the stable coverage threshold
        stable_min_cov = coverage_threshold_used[i]
        disease_adju_edit_row = disease_edit_row[np.logical_and(
            np.logical_and((~np.isnan(disease_edit_row)),
                           (~np.isnan(disease_cov_row))),
            (disease_cov_row >= stable_min_cov))]
        disease_adju_cov_row = disease_cov_row[np.logical_and(
            (~np.isnan(disease_cov_row)), (disease_cov_row >= stable_min_cov))]
        control_adju_edit_row = control_edit_row[np.logical_and(
            np.logical_and((~np.isnan(control_edit_row)),
                           (~np.isnan(control_cov_row))),
            (control_cov_row >= stable_min_cov))]
        control_adju_cov_row = control_cov_row[np.logical_and(
            (~np.isnan(control_cov_row)), (control_cov_row >= stable_min_cov))]
        stable_mean_disease_editing_level[i] = disease_adju_edit_row.mean()
        stable_std_dev_disease_editing_level[i] = disease_adju_edit_row.std()
        stable_mean_control_editing_level[i] = control_adju_edit_row.mean()
        stable_std_dev_control_editing_level[i] = control_adju_edit_row.std()
        stable_number_disease_with_at_least_min_coverage[
            i] = disease_adju_cov_row[
                disease_adju_cov_row >= stable_min_cov].count()
        stable_number_disease_nonzero_editing_and_min_coverage[
            i] = disease_adju_cov_row[(~np.isnan(disease_adju_cov_row)) &
                                      (disease_adju_cov_row >= stable_min_cov)
                                      & (disease_adju_edit_row > 0)].count()
        stable_disease_prevalence[
            i] = stable_number_disease_nonzero_editing_and_min_coverage[
                i] / stable_number_disease_with_at_least_min_coverage[i]
        stable_number_control_with_at_least_min_coverage[
            i] = control_adju_cov_row[
                control_adju_cov_row >= stable_min_cov].count()
        stable_number_control_nonzero_editing_and_min_coverage[
            i] = control_adju_cov_row[(~np.isnan(control_adju_cov_row)) &
                                      (control_adju_cov_row >= stable_min_cov)
                                      & (control_adju_edit_row > 0)].count()
        stable_control_prevalence[
            i] = stable_number_control_nonzero_editing_and_min_coverage[
                i] / stable_number_control_with_at_least_min_coverage[i]
        stable_total_number_individuals_nonzero_editing_and_min_coverage[i] = (
            stable_number_disease_nonzero_editing_and_min_coverage[i] +
            stable_number_control_nonzero_editing_and_min_coverage[i]).sum()
        if (len(disease_adju_edit_row) >= 1) & (len(control_adju_edit_row) >=
                                                1):
            if (np.all(disease_adju_edit_row.values ==
                       control_adju_edit_row.values)):
                stable_mann_whitney_p_value[i] = float('nan')
            else:
                temp, stable_mann_whitney_p_value[i] = mannwhitneyu(
                    disease_adju_edit_row,
                    control_adju_edit_row,
                    alternative='two-sided')
        else:
            stable_mann_whitney_p_value[i] = float('nan')
        stable_editing_level_effect_size[i] = np.absolute(
            stable_mean_disease_editing_level[i] -
            stable_mean_control_editing_level[i])
        fisher_matrix = np.matrix(
            [[
                stable_number_disease_nonzero_editing_and_min_coverage[i],
                stable_number_disease_with_at_least_min_coverage[i] -
                stable_number_disease_nonzero_editing_and_min_coverage[i]
            ],
             [
                 stable_number_control_nonzero_editing_and_min_coverage[i],
                 stable_number_control_with_at_least_min_coverage[i] -
                 stable_number_control_nonzero_editing_and_min_coverage[i]
             ]])
        stable_frequency_OR[i], stable_frequency_fishers_p_value[
            i] = fisher_exact(fisher_matrix)
        #print stable_frequency_OR[i]
        #print stable_frequency_fishers_p_value[i]
        stable_prevalence_effect_size[i] = np.absolute(
            stable_disease_prevalence[i] - stable_control_prevalence[i])

    #now put everything back together as a table
    header_info = editing_table[['chromosome', 'position', 'type_editing']]
    stats_table = pd.DataFrame(coverage_threshold_used)
    stats_table = stats_table.rename(
        columns={stats_table.columns[0]: 'coverage_threshold_used'})
    stats_table['stability_based_on'] = pd.DataFrame(stability_based_on)
    stats_table['stable_mean_disease_editing_level'] = pd.DataFrame(
        stable_mean_disease_editing_level)
    stats_table['stable_std_dev_disease_editing_level'] = pd.DataFrame(
        stable_std_dev_disease_editing_level)
    stats_table['stable_mean_control_editing_level'] = pd.DataFrame(
        stable_mean_control_editing_level)
    stats_table['stable_std_dev_control_editing_level'] = pd.DataFrame(
        stable_std_dev_control_editing_level)
    stats_table[
        'stable_number_disease_with_at_least_min_coverage'] = pd.DataFrame(
            stable_number_disease_with_at_least_min_coverage)
    stats_table[
        'stable_number_disease_nonzero_editing_and_min_coverage'] = pd.DataFrame(
            stable_number_disease_nonzero_editing_and_min_coverage)
    stats_table['stable_disease_prevalence'] = pd.DataFrame(
        stable_disease_prevalence)
    stats_table[
        'stable_number_control_with_at_least_min_coverage'] = pd.DataFrame(
            stable_number_control_with_at_least_min_coverage)
    stats_table[
        'stable_number_control_nonzero_editing_and_min_coverage'] = pd.DataFrame(
            stable_number_control_nonzero_editing_and_min_coverage)
    stats_table['stable_control_prevalence'] = pd.DataFrame(
        stable_control_prevalence)
    stats_table[
        'stable_total_number_individuals_nonzero_editing_and_min_coverage'] = pd.DataFrame(
            stable_total_number_individuals_nonzero_editing_and_min_coverage)
    stats_table['stable_mann_whitney_p_value'] = pd.DataFrame(
        stable_mann_whitney_p_value)
    stats_table['stable_editing_level_effect_size'] = pd.DataFrame(
        stable_editing_level_effect_size)
    stats_table['stable_frequency_fishers_p_value'] = pd.DataFrame(
        stable_frequency_fishers_p_value)
    stats_table['stable_frequency_OR'] = pd.DataFrame(stable_frequency_OR)
    stats_table['stable_prevalence_effect_size'] = pd.DataFrame(
        stable_prevalence_effect_size)

    full_table = pd.concat(
        [header_info, stats_table, editing_table[all_people]], axis=1)

    #write the full_table to output
    full_table.to_csv(output_file, sep='\t', index=False)

    print "job completed\n"
    ]

    missing_effect_1 = ce.constant_effect(magnitude)
    missing_effect_2 = ce.bell_shaped_effect(magnitude, 30, 15, 15)
    sim_effects = [
        *null_effect,
        constant_effect,
        early_effect,
        intermediate_effect,
        *late_effects,
        missing_effect_1,
        missing_effect_2,
    ]
    n_features = len(sim_effects)
    coeffs = [np.log(s) for s in sim_effects]
    simu_n_lags = np.repeat(49, n_features).astype("uint64")

    n_missing_features = 2
    hidden_features = [n_features - (i + 1) for i in range(n_missing_features)]
    sim = SimuSCCS(
        int(n_cases),
        n_intervals,
        n_features,
        simu_n_lags,
        time_drift=time_drift,
        n_correlations=n_features,
        coeffs=coeffs,
        seed=seed,
        verbose=False,
        hidden_features=hidden_features,
    )
Example #52
0
File: fields.py Project: mingrener/sfepy
    def set_dofs(self, fun=0.0, region=None, dpn=None, warn=None):
        """
        Set the values of DOFs given by the `region` using a function of space
        coordinates or value `fun`.

        If `fun` is a function, the l2 projection that is global for all region
        facets is used to set the DOFs.

        If `dpn > 1`, and `fun` is a function, it has to return the values
        DOF-by-DOF, i.e. a single one-dimensional vector with all values of the
        first component, then of the second one etc. concatenated
        together.

        Parameters
        ----------
        fun : float or array of length dpn or callable
            The DOF values.
        region : Region
            The region containing the DOFs.
        dpn : int, optional
            The DOF-per-node count. If not given, the number of field
            components is used.
        warn : str, optional
            The warning message printed when the region selects no DOFs.

        Returns
        -------
        nods : array, shape (n_dof,)
            The field DOFs (or node indices) given by the region.
        vals : array, shape (dpn, n_dof)
            The values of the DOFs, DOF-by-DOF when raveled in C (row-major)
            order.
        """
        if region is None:
            region = self.region

        if dpn is None:
            dpn = self.n_components

        nods = []
        vals = []

        aux = self.get_dofs_in_region(region)
        nods = nm.unique(aux)

        if nm.isscalar(fun):
            vals = nm.repeat([fun], nods.shape[0] * dpn)

        elif isinstance(fun, nm.ndarray):
            assert_(len(fun) == dpn)
            vals = nm.repeat(fun, nods.shape[0])

        elif callable(fun):
            import scipy.sparse as sps
            from sfepy.solvers.ls import solve
            from sfepy.discrete.integrals import Integral
            from sfepy.discrete.fem.utils import prepare_remap
            import sfepy.discrete.iga as iga
            from sfepy.discrete.iga.extmods.igac import eval_mapping_data_in_qp

            nurbs = self.nurbs
            facets = self._get_facets(region.kind_tdim)

            # Region facet connectivity.
            rconn = self.get_econn('surface', region)

            # Local connectivity.
            remap = prepare_remap(nods, nods.max() + 1)
            lconn = [remap[ii] for ii in rconn]

            # Cell and face(cell) ids for each facet.
            fis = region.get_facet_indices()

            # Integral given by max. NURBS surface degree.
            fdegrees = iga.get_surface_degrees(nurbs.degrees)
            order = fdegrees.max()
            integral = Integral('i', order=2*order)
            vals, weights = integral.get_qp(self.domain.gel.surface_facet_name)

            # Boundary QP - use tensor product structure.
            bvals = iga.create_boundary_qp(vals, region.tdim)

            # Compute facet basis, jacobians and physical BQP.
            n_dof = len(nods)
            rhs = nm.zeros((dpn, n_dof), dtype=nm.float64)
            rows, cols, mvals = [], [], []
            all_qp = []
            all_fbfs = []
            all_dets = []
            for ii, (ie, ifa) in enumerate(fis):
                qp_coors = bvals[ifa]

                bfs, _, dets = eval_mapping_data_in_qp(qp_coors, nurbs.cps,
                                                       nurbs.weights,
                                                       nurbs.degrees,
                                                       nurbs.cs,
                                                       nurbs.conn,
                                                       nm.array([ie]))
                # Facet basis.
                fbfs = bfs[..., facets[ifa]][0, :, 0, :]

                # Weight Jacobians by quadrature point weights.
                dets = nm.abs(dets) * weights[None, :, None, None]
                dets = dets[0, :, 0, :]

                # Physical BQP.
                fcps = nurbs.cps[nurbs.conn[ie, facets[ifa]]]
                qp = nm.dot(fbfs, fcps)

                all_qp.append(qp)
                all_fbfs.append(fbfs)
                all_dets.append(dets)

            # DOF values in the physical BQP.
            qps = nm.concatenate(all_qp)
            vals = nm.asarray(fun(qps))
            vals.shape = (dpn, qps.shape[0])

            n_qp_face = len(bvals[0])

            # Assemble l2 projection system.
            for ii, (ie, ifa) in enumerate(fis):
                # Assembling indices.
                elc = lconn[ii]

                fvals = vals[:, n_qp_face * ii : n_qp_face * (ii + 1)]

                fbfs = all_fbfs[ii]
                dets = all_dets[ii]

                # Local projection system.
                for idof in range(dpn):
                    lrhs = (fbfs * (fvals[idof, :, None] * dets)).sum(0)
                    rhs[idof, elc] += lrhs

                lmtx = ((fbfs[..., None] * fbfs[:, None, :])
                        * dets[..., None]).sum(0)

                er, ec = nm.meshgrid(elc, elc)
                rows.append(er.ravel())
                cols.append(ec.ravel())
                mvals.append(lmtx.ravel())

            rows = nm.concatenate(rows)
            cols = nm.concatenate(cols)
            mvals = nm.concatenate(mvals)
            mtx = sps.coo_matrix((mvals, (rows, cols)), shape=(n_dof, n_dof))

            vals = nm.zeros((n_dof, dpn), dtype=nm.float64)

            # Solve l2 projection system.
            for idof in range(dpn):
                dofs = solve(mtx, rhs[idof, :])
                vals[remap[nods], idof] = dofs

        else:
            raise ValueError('unknown function/value type! (%s)' % type(fun))

        vals.shape = (len(nods), -1)

        return nods, vals
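        # Editor's note (illustrative values only, not sfepy API): the non-callable
        # branches above rely on numpy repeat to build the DOF-by-DOF vector described
        # in the docstring, e.g. with dpn = 2 and 3 selected nodes:
        #   nm.repeat([0.5], 3 * 2)   -> [0.5 0.5 0.5 0.5 0.5 0.5]   (scalar fun)
        #   nm.repeat([1.0, 2.0], 3)  -> [1. 1. 1. 2. 2. 2.]         (per-component fun)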
Example #53
0
File: EP2_SO.py Project: jturney/psi4numpy
print('\nTotal time taken for ERI integrals: %.3f seconds.\n' %
      (time.time() - t))

#Make spin-orbital MO
t = time.time()
print('Starting AO -> spin-orbital MO transformation...')
nso = nmo * 2

MO = np.einsum('rJ,pqrs->pqJs', C, I)
MO = np.einsum('pI,pqJs->IqJs', C, MO)
MO = np.einsum('sB,IqJs->IqJB', C, MO)
MO = np.einsum('qA,IqJB->IAJB', C, MO)

# Tile MO array so that we have alternating alpha/beta spin orbitals
MO = np.repeat(MO, 2, axis=0)
MO = np.repeat(MO, 2, axis=1)
MO = np.repeat(MO, 2, axis=2)
MO = np.repeat(MO, 2, axis=3)

# Build spin mask
spin_ind = np.arange(nso, dtype=int) % 2
spin_mask = (spin_ind.reshape(-1, 1, 1, 1) == spin_ind.reshape(-1, 1, 1))
spin_mask = spin_mask * (spin_ind.reshape(-1, 1) == spin_ind)

# compute antisymmetrized MO integrals
MO *= spin_mask
MO = MO - MO.swapaxes(1, 3)
MO = MO.swapaxes(1, 2)
print('..finished transformation in %.3f seconds.\n' % (time.time() - t))
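# Editor's sketch (toy sizes, not from the psi4numpy tutorial): repeating along every
# axis turns each spatial-orbital index into an alpha/beta pair, and the modulo-2 spin
# index built above is what masks out integrals between unlike spins.
def _demo_spin_blocking():
    import numpy as np
    nmo_toy = 2
    mo = np.arange(nmo_toy * nmo_toy).reshape(nmo_toy, nmo_toy)
    so = np.repeat(np.repeat(mo, 2, axis=0), 2, axis=1)  # each index doubled: alpha, beta
    spin = np.arange(2 * nmo_toy) % 2                    # 0, 1, 0, 1
    mask = spin[:, None] == spin[None, :]                # True only for matching spins
    return so * mask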
Example #54
0
def test_plot_topomap():
    """Test topomap plotting
    """
    import matplotlib.pyplot as plt
    from matplotlib.patches import Circle
    # evoked
    warnings.simplefilter('always')
    res = 16
    evoked = read_evokeds(evoked_fname, 'Left Auditory',
                          baseline=(None, 0))
    ev_bad = evoked.pick_types(meg=False, eeg=True, copy=True)
    ev_bad.pick_channels(ev_bad.ch_names[:2])
    ev_bad.plot_topomap(times=ev_bad.times[:2] - 1e-6)  # auto, should plot EEG
    assert_raises(ValueError, ev_bad.plot_topomap, ch_type='mag')
    assert_raises(TypeError, ev_bad.plot_topomap, head_pos='foo')
    assert_raises(KeyError, ev_bad.plot_topomap, head_pos=dict(foo='bar'))
    assert_raises(ValueError, ev_bad.plot_topomap, head_pos=dict(center=0))
    assert_raises(ValueError, ev_bad.plot_topomap, times=[-100])  # bad time
    assert_raises(ValueError, ev_bad.plot_topomap, times=[[0]])  # bad time

    evoked.plot_topomap(0.1, layout=layout, scale=dict(mag=0.1))
    plt.close('all')
    axes = [plt.subplot(221), plt.subplot(222)]
    evoked.plot_topomap(axes=axes, colorbar=False)
    plt.close('all')
    evoked.plot_topomap(times=[-0.1, 0.2])
    plt.close('all')
    mask = np.zeros_like(evoked.data, dtype=bool)
    mask[[1, 5], :] = True
    evoked.plot_topomap(None, ch_type='mag', outlines=None)
    times = [0.1]
    evoked.plot_topomap(times, ch_type='eeg', res=res, scale=1)
    evoked.plot_topomap(times, ch_type='grad', mask=mask, res=res)
    evoked.plot_topomap(times, ch_type='planar1', res=res)
    evoked.plot_topomap(times, ch_type='planar2', res=res)
    evoked.plot_topomap(times, ch_type='grad', mask=mask, res=res,
                        show_names=True, mask_params={'marker': 'x'})
    plt.close('all')
    assert_raises(ValueError, evoked.plot_topomap, times, ch_type='eeg',
                  res=res, average=-1000)
    assert_raises(ValueError, evoked.plot_topomap, times, ch_type='eeg',
                  res=res, average='hahahahah')

    p = evoked.plot_topomap(times, ch_type='grad', res=res,
                            show_names=lambda x: x.replace('MEG', ''),
                            image_interp='bilinear')
    subplot = [x for x in p.get_children() if
               isinstance(x, matplotlib.axes.Subplot)][0]
    assert_true(all('MEG' not in x.get_text()
                    for x in subplot.get_children()
                    if isinstance(x, matplotlib.text.Text)))

    # Test title
    def get_texts(p):
        return [x.get_text() for x in p.get_children() if
                isinstance(x, matplotlib.text.Text)]

    p = evoked.plot_topomap(times, ch_type='eeg', res=res, average=0.01)
    assert_equal(len(get_texts(p)), 0)
    p = evoked.plot_topomap(times, ch_type='eeg', title='Custom', res=res)
    texts = get_texts(p)
    assert_equal(len(texts), 1)
    assert_equal(texts[0], 'Custom')
    plt.close('all')

    # delaunay triangulation warning
    with warnings.catch_warnings(record=True):  # can't show
        warnings.simplefilter('always')
        evoked.plot_topomap(times, ch_type='mag', layout=None, res=res)
    assert_raises(RuntimeError, plot_evoked_topomap, evoked, 0.1, 'mag',
                  proj='interactive')  # projs have already been applied

    # change to no-proj mode
    evoked = read_evokeds(evoked_fname, 'Left Auditory',
                          baseline=(None, 0), proj=False)
    with warnings.catch_warnings(record=True):
        warnings.simplefilter('always')
        evoked.plot_topomap(0.1, 'mag', proj='interactive', res=res)
    assert_raises(RuntimeError, plot_evoked_topomap, evoked,
                  np.repeat(.1, 50))
    assert_raises(ValueError, plot_evoked_topomap, evoked, [-3e12, 15e6])

    with warnings.catch_warnings(record=True):  # file conventions
        warnings.simplefilter('always')
        projs = read_proj(ecg_fname)
    projs = [pp for pp in projs if pp['desc'].lower().find('eeg') < 0]
    plot_projs_topomap(projs, res=res)
    plt.close('all')
    ax = plt.subplot(111)
    plot_projs_topomap([projs[0]], res=res, axes=ax)  # test axes param
    plt.close('all')
    for ch in evoked.info['chs']:
        if ch['coil_type'] == FIFF.FIFFV_COIL_EEG:
            if ch['eeg_loc'] is not None:
                ch['eeg_loc'].fill(0)
            ch['loc'].fill(0)

    # Remove extra digitization point, so EEG digitization points
    # correspond with the EEG electrodes
    del evoked.info['dig'][85]

    pos = make_eeg_layout(evoked.info).pos
    pos, outlines = _check_outlines(pos, 'head')
    assert_true('head' in outlines.keys())
    assert_true('nose' in outlines.keys())
    assert_true('ear_left' in outlines.keys())
    assert_true('ear_right' in outlines.keys())
    assert_true('autoshrink' in outlines.keys())
    assert_true(outlines['autoshrink'])
    assert_true('clip_radius' in outlines.keys())
    assert_array_equal(outlines['clip_radius'], 0.5)

    pos, outlines = _check_outlines(pos, 'skirt')
    assert_true('head' in outlines.keys())
    assert_true('nose' in outlines.keys())
    assert_true('ear_left' in outlines.keys())
    assert_true('ear_right' in outlines.keys())
    assert_true('autoshrink' in outlines.keys())
    assert_true(not outlines['autoshrink'])
    assert_true('clip_radius' in outlines.keys())
    assert_array_equal(outlines['clip_radius'], 0.625)

    pos, outlines = _check_outlines(pos, 'skirt',
                                    head_pos={'scale': [1.2, 1.2]})
    assert_array_equal(outlines['clip_radius'], 0.75)

    # Plot skirt
    evoked.plot_topomap(times, ch_type='eeg', outlines='skirt')

    # Pass custom outlines without patch
    evoked.plot_topomap(times, ch_type='eeg', outlines=outlines)
    plt.close('all')

    # Pass custom outlines with patch callable
    def patch():
        return Circle((0.5, 0.4687), radius=.46,
                      clip_on=True, transform=plt.gca().transAxes)
    outlines['patch'] = patch
    plot_evoked_topomap(evoked, times, ch_type='eeg', outlines=outlines)

    # Remove digitization points. Now topomap should fail
    evoked.info['dig'] = None
    assert_raises(RuntimeError, plot_evoked_topomap, evoked,
                  times, ch_type='eeg')
    plt.close('all')
def print_calipso_stats_ctype(caObj, statfile, val_subset, low_medium_high_class):
    if config.CCI_CLOUD_VALIDATION :
        logger.info("Cloudtype validation not useful for CCI validation")
        return
    if caObj.avhrr.cloudtype is None:
        logger.warning("There are no cloudtype data.")
        return
    # CLOUD TYPE EVALUATION - Based exclusively on CALIPSO data (Vertical Feature Mask)
    # =======================
    calipso_low = np.logical_and(low_medium_high_class['low_clouds'],
                                 val_subset)
    calipso_medium = np.logical_and(low_medium_high_class['medium_clouds'],
                                    val_subset)
    calipso_high = np.logical_and(low_medium_high_class['high_clouds'],
                                  val_subset)

    if caObj.avhrr.cloudtype_conditions is not None:
        logger.info("Assuming cloudtype structure from pps v2014")
        avhrr_low = np.logical_and(
            np.logical_and(np.greater_equal(caObj.avhrr.cloudtype,5),
                           np.less_equal(caObj.avhrr.cloudtype,6)),
            val_subset)
        avhrr_medium = np.logical_and(
            np.equal(caObj.avhrr.cloudtype,7), val_subset)
        avhrr_high_op = np.logical_and(
            np.logical_and(np.greater_equal(caObj.avhrr.cloudtype,8),
                           np.less_equal(caObj.avhrr.cloudtype,9)),
            val_subset)
        avhrr_high_semi = np.logical_and(
            np.logical_and(np.greater_equal(caObj.avhrr.cloudtype,11),
                           np.less_equal(caObj.avhrr.cloudtype,15)),
            val_subset)
        avhrr_high = np.logical_or(avhrr_high_op,avhrr_high_semi)
        avhrr_frac = np.logical_and(np.equal(caObj.avhrr.cloudtype,10), 
                                    val_subset)

    else:
        logger.info("Assuming cloudtype structure from pps v2012")
        avhrr_low = np.logical_and(
            np.logical_and(np.greater_equal(caObj.avhrr.cloudtype,5),
                           np.less_equal(caObj.avhrr.cloudtype,8)),
            val_subset)
        avhrr_medium = np.logical_and(
            np.logical_and(np.greater_equal(caObj.avhrr.cloudtype,9),
                           np.less_equal(caObj.avhrr.cloudtype,10)),
            val_subset)
        avhrr_high = np.logical_and(
            np.logical_and(np.greater_equal(caObj.avhrr.cloudtype,11),
                           np.less_equal(caObj.avhrr.cloudtype,18)),
            val_subset)
        avhrr_frac = np.logical_and(
            np.logical_and(np.greater_equal(caObj.avhrr.cloudtype,19),
                           np.less_equal(caObj.avhrr.cloudtype,19)),
            val_subset)

    calipso_clear = np.logical_and(
        np.less(caObj.calipso.cloud_fraction,0.34),val_subset)
    calipso_cloudy = np.logical_and(
        np.greater(caObj.calipso.cloud_fraction,0.66),val_subset)
    avhrr_clear = np.logical_and(
        np.logical_and(np.less_equal(caObj.avhrr.cloudtype,4),
                       np.greater(caObj.avhrr.cloudtype,0)),
        val_subset)
    
    
    # Note: the notation order is reversed compared to the cloud-mask section;
    # here the PPS category is given first and the CALIOP category second.

    # np.repeat with a boolean "repeats" array keeps each element 0 or 1 times, so
    # .shape[0] counts the pixels where both conditions hold (equivalent to
    # np.sum(np.logical_and(...))). The same pattern is used for all counts below.
    n_low_low = np.repeat(
        avhrr_low,
        np.logical_and(calipso_low, avhrr_low)).shape[0]
    n_low_medium = np.repeat(
        avhrr_low,
        np.logical_and(calipso_medium,avhrr_low)).shape[0]
    n_low_high = np.repeat(
        avhrr_low,
        np.logical_and(calipso_high,avhrr_low)).shape[0]
    n_medium_low = np.repeat(
        avhrr_medium,
        np.logical_and(calipso_low,avhrr_medium)).shape[0]
    n_medium_medium = np.repeat(
        avhrr_medium,
        np.logical_and(calipso_medium,avhrr_medium)).shape[0]
    n_medium_high = np.repeat(
        avhrr_medium,
        np.logical_and(calipso_high,avhrr_medium)).shape[0]
    n_high_low = np.repeat(
        avhrr_high, 
        np.logical_and(calipso_low,avhrr_high)).shape[0]
    n_high_medium = np.repeat(
        avhrr_high,
        np.logical_and(calipso_medium,avhrr_high)).shape[0]
    n_high_high = np.repeat(
        avhrr_high,
        np.logical_and(calipso_high,avhrr_high)).shape[0]
    n_frac_low = np.repeat(
        avhrr_frac,
        np.logical_and(calipso_low,avhrr_frac)).shape[0]
    n_frac_medium = np.repeat(
        avhrr_frac,
        np.logical_and(calipso_medium,avhrr_frac)).shape[0]
    n_frac_high = np.repeat(
        avhrr_frac,
        np.logical_and(calipso_high,avhrr_frac)).shape[0]

    n_clear_low = np.repeat(
        avhrr_clear,
        np.logical_and(calipso_low,avhrr_clear)).shape[0]
    n_clear_medium = np.repeat(
        avhrr_clear,
        np.logical_and(calipso_medium,avhrr_clear)).shape[0]
    n_clear_high = np.repeat(
        avhrr_clear,
        np.logical_and(calipso_high,avhrr_clear)).shape[0]
    n_low_clear = np.repeat(
        avhrr_low,
        np.logical_and(calipso_clear,avhrr_low)).shape[0]
    n_medium_clear = np.repeat(
        avhrr_medium,
        np.logical_and(calipso_clear,avhrr_medium)).shape[0]
    n_high_clear = np.repeat(
        avhrr_high,
        np.logical_and(calipso_clear,avhrr_high)).shape[0]
    n_frac_clear = np.repeat(
        avhrr_frac,
        np.logical_and(calipso_clear,avhrr_frac)).shape[0]

    if (n_low_low + n_medium_low + n_high_low + n_frac_low) > 0:
        pod_low = (float(n_low_low + n_frac_low) /
                   (n_low_low + n_medium_low + n_high_low + n_frac_low))
        far_low = (float(n_medium_low + n_high_low) /
                   (n_low_low + n_medium_low + n_high_low + n_frac_low))
    else:
        pod_low = -9.0
        far_low = -9.0
    if (n_low_medium + n_medium_medium + n_high_medium + n_frac_medium) > 0:
        pod_medium = (float(n_medium_medium) /
                      (n_low_medium + n_medium_medium +
                       n_high_medium + n_frac_medium))
        far_medium = (float(n_low_medium + n_high_medium + n_frac_medium) /
                      (n_low_medium + n_medium_medium +
                       n_high_medium + n_frac_medium))
    else:
        pod_medium = -9.0
        far_medium = -9.0
    if (n_low_high + n_medium_high + n_high_high + n_frac_high) > 0:
        pod_high = (float(n_high_high) /
                    (n_low_high + n_medium_high + n_high_high + n_frac_high))
        far_high = (float(n_low_high + n_medium_high + n_frac_high) /
                    (n_low_high + n_medium_high + n_high_high + n_frac_high))
    else:
        pod_high = -9.0
        far_high = -9.0

    statfile.write(
        "CLOUD TYPE %s-IMAGER TABLE: %s %s %s %s %s %s %s %s %s %s %s %s \n"
        % (caObj.truth_sat.upper(),
           n_low_low, n_low_medium, n_low_high,
           n_medium_low, n_medium_medium, n_medium_high,
           n_high_low, n_high_medium, n_high_high,
           n_frac_low, n_frac_medium, n_frac_high))
    statfile.write(
        "CLOUD TYPE %s-IMAGER PROB: %f %f %f %f %f %f \n"
        % (caObj.truth_sat.upper(),
           pod_low, pod_medium, pod_high,
           far_low, far_medium, far_high))
    statfile.write(
        "CLOUD TYPE %s-IMAGER TABLE MISSED: %s %s %s %s %s %s %s \n"
        % (caObj.truth_sat.upper(),
           n_clear_low, n_clear_medium, n_clear_high,
           n_low_clear, n_medium_clear, n_high_clear, n_frac_clear))
예제 #56
0
def andrews_curves(data,
                   class_column,
                   samples=200,
                   alpha=0.5,
                   width=600,
                   height=300,
                   cmap=None,
                   colormap=None,
                   **kwds):
    """
    Andrews curve plot.

    Parameters
    ----------
    data: DataFrame
        The data to plot
    class_column: str
        Column name containing class names
    samples: int, optional
        Number of samples to draw
    alpha: float, optional
        The transparency of the lines
    width: int, optional
        Width of the plot in pixels
    height: int, optional
        Height of the plot in pixels
    cmap/colormap: str or colormap object
        Colormap to use for groups

    Returns
    -------
    obj : HoloViews object
        The HoloViews representation of the plot.

    See Also
    --------
    pandas.plotting.parallel_coordinates : matplotlib version of this routine
    """
    t = np.linspace(-np.pi, np.pi, samples)
    vals = data.drop(class_column, axis=1).values.T

    curves = np.outer(vals[0], np.ones_like(t))
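    # Andrews transform: starting from the constant term x[0], the loop below adds
    # x[1]*sin(t) + x[2]*cos(t) + x[3]*sin(2t) + x[4]*cos(2t) + ... for each row
    # (the classic definition scales the first term by 1/sqrt(2); here it is unscaled).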
    for i in range(1, len(vals)):
        ft = ((i + 1) // 2) * t
        if i % 2 == 1:
            curves += np.outer(vals[i], np.sin(ft))
        else:
            curves += np.outer(vals[i], np.cos(ft))

    df = pd.DataFrame({
        't': np.tile(np.arange(samples), curves.shape[0]),
        'sample': np.repeat(np.arange(curves.shape[0]), curves.shape[1]),
        'value': curves.ravel(),
        class_column: np.repeat(data[class_column], samples)
    })

    labelled = ['x']
    options = {
        'Overlay': dict(legend_limit=5000),
        'Curve': dict(kwds, labelled=labelled, alpha=alpha,
                      width=width, height=height)
    }
    dataset = hv.Dataset(df)
    groups = dataset.to(hv.Curve, 't', 'value').overlay('sample').items()

    if cmap and colormap:
        raise TypeError("Only specify one of `cmap` and `colormap`.")
    cmap = cmap or colormap or cc.palette['glasbey_category10']
    colors = hv.plotting.util.process_cmap(cmap,
                                           categorical=True,
                                           ncolors=len(groups))

    return hv.Overlay([
        curve.relabel(k).options('Curve', color=c)
        for c, (k, v) in zip(colors, groups) for curve in v
    ]).options(options)
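
For orientation, a minimal usage sketch under stated assumptions: holoviews with the bokeh backend is available, the andrews_curves function above is importable, and the toy DataFrame and column names below are purely illustrative. cmap is passed explicitly so the sketch does not depend on the colorcet default.

import numpy as np
import pandas as pd
import holoviews as hv

hv.extension('bokeh')

rng = np.random.default_rng(0)
toy = pd.DataFrame({
    'f1': rng.normal(0.0, 1.0, 30),
    'f2': rng.normal(1.0, 0.5, 30),
    'f3': rng.normal(-1.0, 2.0, 30),
    'species': np.repeat(['a', 'b', 'c'], 10),
})

overlay = andrews_curves(toy, class_column='species', samples=100,
                         alpha=0.7, cmap='Set1')
hv.save(overlay, 'andrews_curves.html')  # or display the object in a notebook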
예제 #57
0
    def evaluateImg(self, imgId, catId, aRng, maxDet):
        '''
        perform evaluation for single category and image
        :return: dict (single image results)
        '''
        p = self.params
        if p.useCats:
            gt = self._gts[imgId,catId]
            dt = self._dts[imgId,catId]
        else:
            gt = [_ for cId in p.catIds for _ in self._gts[imgId,cId]]
            dt = [_ for cId in p.catIds for _ in self._dts[imgId,cId]]
        if len(gt) == 0 and len(dt) ==0:
            return None

        for g in gt:
            if g['ignore'] or (g['area']<aRng[0] or g['area']>aRng[1]):
                g['_ignore'] = 1
            else:
                g['_ignore'] = 0

        # sort dt highest score first, sort gt ignore last
        gtind = np.argsort([g['_ignore'] for g in gt], kind='mergesort')
        gt = [gt[i] for i in gtind]
        dtind = np.argsort([-d['score'] for d in dt], kind='mergesort')
        dt = [dt[i] for i in dtind[0:maxDet]]
        iscrowd = [int(o['iscrowd']) for o in gt]
        # load computed ious
        ious = self.ious[imgId, catId][:, gtind] if len(self.ious[imgId, catId]) > 0 else self.ious[imgId, catId]

        T = len(p.iouThrs)
        G = len(gt)
        D = len(dt)
        gtm  = np.zeros((T,G))
        dtm  = np.zeros((T,D))
        gtIg = np.array([g['_ignore'] for g in gt])
        dtIg = np.zeros((T,D))
        if not len(ious)==0:
            for tind, t in enumerate(p.iouThrs):
                for dind, d in enumerate(dt):
                    # information about best match so far (m=-1 -> unmatched)
                    iou = min([t,1-1e-10])
                    m   = -1
                    for gind, g in enumerate(gt):
                        # if this gt already matched, and not a crowd, continue
                        if gtm[tind,gind]>0 and not iscrowd[gind]:
                            continue
                        # if dt matched to reg gt, and on ignore gt, stop
                        if m>-1 and gtIg[m]==0 and gtIg[gind]==1:
                            break
                        # continue to next gt unless better match made
                        if ious[dind,gind] < iou:
                            continue
                        # if match successful and best so far, store appropriately
                        iou=ious[dind,gind]
                        m=gind
                    # if match made store id of match for both dt and gt
                    if m ==-1:
                        continue
                    dtIg[tind,dind] = gtIg[m]
                    dtm[tind,dind]  = gt[m]['id']
                    gtm[tind,m]     = d['id']
        # set unmatched detections outside of area range to ignore
        a = np.array([d['area']<aRng[0] or d['area']>aRng[1] for d in dt]).reshape((1, len(dt)))
        dtIg = np.logical_or(dtIg, np.logical_and(dtm==0, np.repeat(a,T,0)))
        # store results for given image and category
        return {
                'image_id':     imgId,
                'category_id':  catId,
                'aRng':         aRng,
                'maxDet':       maxDet,
                'dtIds':        [d['id'] for d in dt],
                'gtIds':        [g['id'] for g in gt],
                'dtMatches':    dtm,
                'gtMatches':    gtm,
                'dtScores':     [d['score'] for d in dt],
                'gtIgnore':     gtIg,
                'dtIgnore':     dtIg,
            }
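
For context, a hedged sketch of how this per-image routine is normally exercised through the public pycocotools API; the annotation and detection file names are placeholders.

from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

coco_gt = COCO('instances_val.json')          # placeholder ground-truth file
coco_dt = coco_gt.loadRes('detections.json')  # placeholder detection results

ev = COCOeval(coco_gt, coco_dt, iouType='bbox')
ev.evaluate()    # runs IoU computation and evaluateImg for every (image, category, area range)
ev.accumulate()  # turns the per-image match matrices into precision/recall arrays
ev.summarize()   # prints the standard COCO AP/AR summary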
예제 #58
0
    def __init__(self,
                 input_fname,
                 montage=None,
                 eog=None,
                 misc=None,
                 include=None,
                 exclude=None,
                 preload=False,
                 channel_naming='E%d',
                 verbose=None):
        """Init the RawMff class."""
        logger.info('Reading EGI MFF Header from %s...' % input_fname)
        egi_info = _read_header(input_fname)
        if eog is None:
            eog = []
        if misc is None:
            misc = np.where(
                np.array(egi_info['chan_type']) != 'eeg')[0].tolist()

        logger.info('    Reading events ...')
        egi_events, egi_info = _read_events(input_fname, egi_info)
        gains = _get_gains(op.join(input_fname, egi_info['info_fname']))
        if egi_info['value_range'] != 0 and egi_info['bits'] != 0:
            cals = [
                egi_info['value_range'] / 2**egi_info['bits']
                for i in range(len(egi_info['chan_type']))
            ]
        else:
            cal_scales = {'uV': 1e-6, 'V': 1}
            cals = [cal_scales[t] for t in egi_info['chan_unit']]
        if 'gcal' in gains:
            cals *= gains['gcal']
        if 'ical' in gains:
            pass  # XXX: currently not used
        logger.info('    Assembling measurement info ...')
        if egi_info['n_events'] > 0:
            event_codes = list(egi_info['event_codes'])
            if include is None:
                exclude_list = ['sync', 'TREV'] if exclude is None else exclude
                exclude_inds = [
                    i for i, k in enumerate(event_codes) if k in exclude_list
                ]
                more_excludes = []
                if exclude is None:
                    for ii, event in enumerate(egi_events):
                        if event.sum() <= 1 and event_codes[ii]:
                            more_excludes.append(ii)
                if len(exclude_inds) + len(more_excludes) == len(event_codes):
                    warn(
                        'Did not find any event code with more than one '
                        'event.', RuntimeWarning)
                else:
                    exclude_inds.extend(more_excludes)

                exclude_inds.sort()
                include_ = [
                    i for i in np.arange(egi_info['n_events'])
                    if i not in exclude_inds
                ]
                include_names = [
                    k for i, k in enumerate(event_codes) if i in include_
                ]
            else:
                include_ = [
                    i for i, k in enumerate(event_codes) if k in include
                ]
                include_names = include

            for kk, v in [('include', include_names), ('exclude', exclude)]:
                if isinstance(v, list):
                    for k in v:
                        if k not in event_codes:
                            raise ValueError('Could not find event named "%s"' % k)
                elif v is not None:
                    raise ValueError('`%s` must be None or of type list' % kk)
            logger.info('    Synthesizing trigger channel "STI 014" ...')
            logger.info('    Excluding events {%s} ...' % ", ".join(
                [k for i, k in enumerate(event_codes) if i not in include_]))
            events_ids = np.arange(len(include_)) + 1
            self._new_trigger = _combine_triggers(egi_events[include_],
                                                  remapping=events_ids)
            self.event_id = dict(
                zip([e for e in event_codes if e in include_names],
                    events_ids))
            if self._new_trigger is not None:
                egi_events = np.vstack([egi_events, self._new_trigger])
        else:
            # No events
            self.event_id = None
            event_codes = []
        info = _empty_info(egi_info['sfreq'])
        my_time = datetime.datetime(egi_info['year'], egi_info['month'],
                                    egi_info['day'], egi_info['hour'],
                                    egi_info['minute'], egi_info['second'])
        my_timestamp = time.mktime(my_time.timetuple())
        info['meas_date'] = (my_timestamp, 0)
        ch_names = [
            channel_naming % (i + 1) for i in range(egi_info['n_channels'])
        ]
        ch_names.extend(list(egi_info['event_codes']))
        if hasattr(self, '_new_trigger') and self._new_trigger is not None:
            ch_names.append('STI 014')  # channel for combined events
        ch_coil = FIFF.FIFFV_COIL_EEG
        ch_kind = FIFF.FIFFV_EEG_CH
        cals = np.concatenate(
            [cals,
             np.repeat(1,
                       len(event_codes) + 1 + len(misc) + len(eog))])
        if 'pns_names' in egi_info:
            ch_names.extend(egi_info['pns_names'])
            cals = np.concatenate(
                [cals, np.repeat(1, len(egi_info['pns_names']))])
        chs = _create_chs(ch_names, cals, ch_coil, ch_kind, eog, (), (), misc)
        chs = _read_locs(input_fname, chs, egi_info)
        sti_ch_idx = [
            i for i, name in enumerate(ch_names)
            if name.startswith('STI') or name in event_codes
        ]
        for idx in sti_ch_idx:
            chs[idx].update({
                'unit_mul': 0,
                'cal': cals[idx],
                'kind': FIFF.FIFFV_STIM_CH,
                'coil_type': FIFF.FIFFV_COIL_NONE,
                'unit': FIFF.FIFF_UNIT_NONE
            })
        if 'pns_names' in egi_info:
            for i_ch, ch_name in enumerate(egi_info['pns_names']):
                idx = ch_names.index(ch_name)
                ch_type = egi_info['pns_types'][i_ch]
                ch_kind = FIFF.FIFFV_BIO_CH
                if ch_type == 'ecg':
                    ch_kind = FIFF.FIFFV_ECG_CH
                elif ch_type == 'emg':
                    ch_kind = FIFF.FIFFV_EMG_CH
                ch_unit = FIFF.FIFF_UNIT_V
                ch_cal = 1e-6
                if egi_info['pns_units'][i_ch] != 'uV':
                    ch_unit = FIFF.FIFF_UNIT_NONE
                    ch_cal = 1.0

                chs[idx].update({
                    'cal': ch_cal,
                    'kind': ch_kind,
                    'coil_type': FIFF.FIFFV_COIL_NONE,
                    'unit': ch_unit
                })

        info['chs'] = chs
        info._update_redundant()
        _check_update_montage(info, montage)
        file_bin = op.join(input_fname, egi_info['eeg_fname'])
        egi_info['egi_events'] = egi_events

        if 'pns_names' in egi_info:
            egi_info['pns_filepath'] = op.join(input_fname,
                                               egi_info['pns_fname'])

        self._filenames = [file_bin]
        self._raw_extras = [egi_info]

        super(RawMff, self).__init__(info,
                                     preload=preload,
                                     orig_format='float',
                                     filenames=[file_bin],
                                     last_samps=[egi_info['n_samples'] - 1],
                                     raw_extras=[egi_info],
                                     verbose=verbose)
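
For context, a hedged sketch of the public entry point that constructs this reader in MNE-Python; the .mff path is a placeholder.

import mne

raw = mne.io.read_raw_egi('/path/to/recording.mff', preload=True)  # placeholder path
print(raw.info['nchan'], raw.info['sfreq'])
print(raw.event_id)  # mapping of synthesized trigger values, if events were found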
예제 #59
0
    def predict(self, X):
        if None in [self.most_frequent_class, self.n_classes]:
            raise ValueError('The classifier has not been fitted yet.')
        y = np.zeros((1, self.n_classes))
        y[:, self.most_frequent_class] = 1
        return np.repeat(y, X.shape[0], axis=0)
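
For context, a hedged sketch of a small class this method could sit on; the class name DummyMostFrequent, its fit method, and the assumption that labels are integers 0..K-1 are illustrative and not part of the original snippet.

import numpy as np

class DummyMostFrequent:
    """Toy baseline: always predicts the most frequent training class (one-hot)."""

    def __init__(self):
        self.most_frequent_class = None
        self.n_classes = None

    def fit(self, X, y):
        # Assumes labels are integers 0..K-1.
        classes, counts = np.unique(y, return_counts=True)
        self.n_classes = int(classes.max()) + 1
        self.most_frequent_class = int(classes[np.argmax(counts)])
        return self

    def predict(self, X):
        # Same body as the snippet above.
        if None in [self.most_frequent_class, self.n_classes]:
            raise ValueError('The classifier has not been fitted yet.')
        y = np.zeros((1, self.n_classes))
        y[:, self.most_frequent_class] = 1
        return np.repeat(y, X.shape[0], axis=0)

preds = DummyMostFrequent().fit(np.zeros((6, 2)),
                                np.array([0, 1, 1, 2, 1, 0])).predict(np.zeros((3, 2)))
# preds is a (3, 3) array where every row is the one-hot vector for class 1.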
예제 #60
0
def ap(S, maxits, convits, dampfact):
    n = S.shape[0]

    # Create empty Availability and Responsibility matrix and Exemplars list
    A = np.zeros((n, n))
    R = np.zeros((n, n))
    exemplars = []
    count = 0

    # start iterations
    for m in range(0, maxits):
        # Compute responsibilities
        Rold = R
        AS = A + S
        Y = AS.max(1)
        I = AS.argmax(1)
        for i in range(n):
            AS[i, I[i]] = -1000000
        Y2 = AS.max(1)
        I2 = AS.argmax(1)
        temp = np.repeat(Y, n).reshape(n, n)
        R = S - temp
        for i in range(n):
            R[i, I[i]] = S[i, I[i]] - Y2[i]
        R = (1 - dampfact) * R + dampfact * Rold

        # Compute availabilities
        Aold = A
        Rp = np.maximum(R, 0)
        for i in range(n):
            Rp[i, i] = R[i, i]
        temp2 = np.ones((n, 1))
        temp3 = Rp.sum(0)
        A = np.kron(temp2, temp3)
        A = A - Rp
        diag = np.diag(A)
        A = np.minimum(A, 0)
        for i in range(n):
            A[i, i] = diag[i]
        A = (1 - dampfact) * A + dampfact * Aold

        tempexemplars = []
        for i in range(0, n):
            if (R[i, i] + A[i, i]) > 0:
                tempexemplars.append(i)

        if tempexemplars == exemplars:
            count = count + 1
            if count == convits:
                break
        else:
            count = 0
            exemplars = list(tempexemplars)

    # Assigning datapoints to Exemplar
    assignment = np.zeros(n)

    for i in range(0, n):
        closest = 0
        currentbest = -1000000
        for j in range(0, len(exemplars)):
            if S[i, exemplars[j]] > currentbest:
                currentbest = S[i, exemplars[j]]
                closest = exemplars[j]
            if i == exemplars[j]:
                closest = exemplars[j]
                break
        assignment[i] = closest

    return assignment
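
Finally, a minimal sketch of driving this routine end to end; the toy data, the negative-squared-distance similarity, and the median preference on the diagonal are illustrative choices, not part of the original snippet.

import numpy as np

rng = np.random.default_rng(0)
# Two well-separated 2-D blobs.
X = np.vstack([rng.normal(0.0, 0.3, size=(10, 2)),
               rng.normal(5.0, 0.3, size=(10, 2))])

# Similarity = negative squared Euclidean distance.
diff = X[:, None, :] - X[None, :, :]
S = -np.sum(diff ** 2, axis=-1)

# Preference: set the diagonal to the median off-diagonal similarity.
pref = np.median(S[~np.eye(len(X), dtype=bool)])
np.fill_diagonal(S, pref)

labels = ap(S, maxits=200, convits=20, dampfact=0.9)
# labels[i] is the index of the exemplar point assigned to sample i;
# the two blobs should end up assigned to two distinct exemplars.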