Example #1
def geomean(x,w=None,axis=None,NN=True):
    """Computes the geometric mean of the input array.
    
    Parameters:
        x : ndarray or ArrayOfErr
            The data which will be averaged.
        w : ndarray
            Optional.  The weights corresponding to each data point.  Must be
            the same shape as x or broadcastable to that shape.
        axis : integer
            The axis over which the geometric mean is to be taken.  If None is
            given, the geometric mean is taken over the entire array.
        NN : boolean
            If True (default), NaN values in x are not ignored, so NaN is
            returned whenever any are present.  If False, NaN values in x are
            ignored and their weights are treated as a weight of 0.
    Returns:
        result : float or ndarray
            The geometric mean of x.  If axis is None then a single float is
            returned.  Otherwise a ndarray containing the geometric means 
            evaluated along the axis is returned.
    """
    x,w = weighttest(x,w)
    if NN:
        result = numpy.product(x**w,axis=axis)**(1./numpy.sum(w,axis=axis))
    else:
        are_nan = numpy.isnan(x)
        x = numpy.where(are_nan, 1, x)  # ignored values contribute a factor of 1
        w = numpy.where(are_nan, 0, w)  # ...and nothing to the weight total
        result = numpy.product(x**w,axis=axis)**(1./numpy.nansum(w,axis=axis))
    return result
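
A quick self-contained check of the identity the function relies on (bypassing the module's weighttest helper): the weighted geometric mean equals the exponential of the weighted mean of the logs.

import numpy as np

x = np.array([1.0, 4.0, 16.0])
w = np.array([1.0, 1.0, 2.0])
direct = np.prod(x**w) ** (1.0 / w.sum())
via_logs = np.exp(np.sum(w * np.log(x)) / w.sum())
assert np.isclose(direct, via_logs)  # both ~5.66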
Example #2
File: utils.py Project: tacaswell/bluesky
def snake_cyclers(cyclers, snake_booleans):
    """
    Combine cyclers with a 'snaking' back-and-forth order.

    Parameters
    ----------
    cyclers : list
        cycler.Cycler objects (or any iterable that yields dictionaries
        of lists)
    snake_booleans : list
        a list of the same length as cyclers indicating whether each cycler
        should 'snake' (True) or not (False). Note that the first boolean
        does not make a difference because the first (slowest) dimension
        does not repeat.

    Returns
    -------
    result : cycler
    """
    if len(cyclers) != len(snake_booleans):
        raise ValueError("number of cyclers does not match number of booleans")
    lengths = []
    new_cyclers = []
    for c in cyclers:
        lengths.append(len(c))
    total_length = np.product(lengths)
    for i, (c, snake) in enumerate(zip(cyclers, snake_booleans)):
        num_tiles = np.product(lengths[:i])
        num_repeats = np.product(lengths[i+1:])
        for k, v in c._transpose().items():
            if snake:
                v = v + v[::-1]
            v2 = np.tile(np.repeat(v, num_repeats), num_tiles)
            expanded = v2[:total_length]
            new_cyclers.append(cycler(k, expanded))
    return reduce(operator.add, new_cyclers)
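
A small usage sketch, assuming the module's imports (numpy as np, cycler.cycler, functools.reduce, operator) are in scope: the first (slowest) cycler runs forward while the snaked fast cycler reverses direction on every pass.

from cycler import cycler

slow = cycler('x', [1, 2])
fast = cycler('y', [10, 20, 30])
for point in snake_cyclers([slow, fast], [False, True]):
    print(point)
# x runs 1, 1, 1, 2, 2, 2 while y runs 10, 20, 30, 30, 20, 10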
Example #3
File: nn.py Project: MultiPath/ladder
    def fit(self, n_components, data):
        if len(data.shape) == 2:
            self.reshape = None
        else:
            assert n_components == np.product(data.shape[1:]), \
                'ZCA whitening components should be %d for convolutional data'\
                % np.product(data.shape[1:])
            self.reshape = data.shape[1:]

        data = self._flatten_data(data)
        assert len(data.shape) == 2
        n, m = data.shape
        self.mean = np.mean(data, axis=0)

        bias = self.filter_bias * scipy.sparse.identity(m, 'float32')
        cov = np.cov(data, rowvar=0, bias=1) + bias
        eigs, eigv = scipy.linalg.eigh(cov)

        assert not np.isnan(eigs).any()
        assert not np.isnan(eigv).any()
        assert eigs.min() > 0

        if self.n_components:
            eigs = eigs[-self.n_components:]
            eigv = eigv[:, -self.n_components:]

        sqrt_eigs = np.sqrt(eigs)
        self.P = np.dot(eigv * (1.0 / sqrt_eigs), eigv.T)
        assert not np.isnan(self.P).any()
        self.P_inv = np.dot(eigv * sqrt_eigs, eigv.T)

        self.P = np.float32(self.P)
        self.P_inv = np.float32(self.P_inv)

        self.is_fit = True
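
A sketch of how the fitted transform is typically applied afterwards, assuming zca is a fitted instance of this class and data is already 2-D: subtract the stored mean, then project through the symmetric whitening matrix P.

whitened = np.dot(data - zca.mean, zca.P)          # decorrelated, unit-variance features
restored = np.dot(whitened, zca.P_inv) + zca.mean  # approximate inverse transform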
Example #4
    def points_to_basis_dists(self, points):
        assert is_mat(points)
        assert is_float(points)
        (N, D) = points.shape
        assert D == self.grid.get_dim()

        G = self.grid

        # Get indices
        cell_coords = G.points_to_cell_coords(points)

        # Get rel distances
        rel_dist = G.points_to_low_vertex_rel_distance(points, cell_coords)
        assert (N, D) == rel_dist.shape

        # Get the vertices
        vertices = self.grid.cell_coords_to_vertex_indices(cell_coords)
        assert (N, 2 ** D) == vertices.shape

        # Calculate multilinear interp weights from distances
        weights = np.empty((N, 2 ** D))
        for (i, bin_vertex) in enumerate(itertools.product([0, 1], repeat=D)):
            vert_mask = np.array(bin_vertex, dtype=bool)
            weights[:, i] = np.product(rel_dist[:, vert_mask], axis=1) * np.product(
                1.0 - rel_dist[:, ~vert_mask], axis=1
            )

        point_dist = self.convert_to_sparse_matrix(cell_coords, vertices, weights)
        return point_dist
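
The weight loop in isolation, worked for D = 2 (the bilinear case): each vertex weight is the product of rel_dist over its set coordinates times (1 - rel_dist) over the rest, and the weights form a partition of unity.

import itertools
import numpy as np

rel_dist = np.array([[0.25, 0.75]])  # one point, D = 2
weights = np.empty((1, 4))
for i, bin_vertex in enumerate(itertools.product([0, 1], repeat=2)):
    vert_mask = np.array(bin_vertex, dtype=bool)
    weights[:, i] = (np.prod(rel_dist[:, vert_mask], axis=1)
                     * np.prod(1.0 - rel_dist[:, ~vert_mask], axis=1))
assert np.isclose(weights.sum(), 1.0)  # 0.1875, 0.5625, 0.0625, 0.1875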
Example #5
def fftconvolve(in1, in2, in3=None, mode="full"):
    """Convolve two N-dimensional arrays using FFT. See convolve.

    Copied from scipy, but used here to try out an inverse filter.
    It doesn't work, or I can't get it to work.
    """
    s1 = array(in1.shape)
    s2 = array(in2.shape)
    complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or
                      np.issubdtype(in2.dtype, np.complexfloating))
    size = s1+s2-1

    # Always use 2**n-sized FFT
    fsize = (2 ** np.ceil(np.log2(size))).astype(int)
    IN1 = fftn(in1,fsize)
    #IN1 *= fftn(in2,fsize)
    IN1 /= fftn(in2,fsize)  # use inverse filter
    # note the inverse is elementwise not matrix inverse
    # is this correct, NO  doesn't seem to work
    fslice = tuple([slice(0, int(sz)) for sz in size])
    ret = ifftn(IN1)[fslice].copy()
    del IN1
    if not complex_result:
        ret = ret.real
    if mode == "full":
        return ret
    elif mode == "same":
        if product(s1,axis=0) > product(s2,axis=0):
            osize = s1
        else:
            osize = s2
        return _centered(ret,osize)
    elif mode == "valid":
        return _centered(ret,abs(s2-s1)+1)
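
The plain spectral division above blows up wherever fftn(in2) is near zero, which is the usual reason a naive inverse filter "doesn't work". A minimal 1-D sketch of the standard Wiener-style fix (an addition for illustration, not part of the original):

import numpy as np

rng = np.random.default_rng(0)
signal = rng.random(64)
kernel = np.exp(-np.arange(64) / 4.0)
H = np.fft.fft(kernel)
blurred = np.fft.ifft(np.fft.fft(signal) * H).real  # circular convolution

eps = 1e-2  # regularization strength; trades noise amplification for bias
deconv = np.fft.ifft(np.fft.fft(blurred) * np.conj(H) / (np.abs(H)**2 + eps)).real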
Example #6
def _validate(name, m, n, k, C, C_valid):
  """
  Validate by comparing the contents of C to the contents of C_valid
  The eq matrix should contain all 1s if the matrices are the same.
  """

  # Compare the matrices and flatten the results
  eq = C == C_valid
  eq.shape = (eq.shape[0] * eq.shape[1],)

  # Reduce the results: 1 is valid, 0 is not
  if numpy.product(eq) == 0:
    eq.shape = C.shape
    for i, row in enumerate(eq[:]):
      # or i == 3 or i == 4
      # (i == 0 or i == 1 or i == 2 ) and 
      if numpy.product(row) == 0:
        if True or (0 <= i <= 3): # (i == 99999):
          print 'row', i, 'failed'
          print C[i,:4]
          print C_valid[i,:4]
          print row#[:4]
      else:
        print 'row', i, 'succeeded'
    raise Exception("Algorithm '%s' failed validation for %d x %d x %d matrices" %
                    (name, m, k, n))
  print name, 'passed'
  return
Example #7
    def get_dataset_slice(self, in_dataset_obj, dataset_info, in_data_idx, out_shape, inp_filename=""):
        """Copies dataset values from one dataset object to another, but only certain indexes along a
        specific dimension of the data"""

        # Determine how to extract data other than the splice dimension
        in_dataset_indexes = dataset_info.input_data_indexes(in_dataset_obj, in_data_idx)

        # Obtain selected data for copying into output dataset
        try:
            if len(in_dataset_indexes) == 1 and not isinstance(in_dataset_indexes[0], slice):
                in_data = in_dataset_obj[:][numpy.array(in_dataset_indexes[0])]
            else:
                in_data = in_dataset_obj[:][tuple(in_dataset_indexes)]
        except IOError as exc:
            raise IOError("Can not read dataset %s from file %s: %s" % (dataset_info.inp_name, inp_filename, exc))

        # Set sliced data into output dataset
        if numpy.product(in_data.shape) > numpy.product(out_shape):
            self.logger.warning("Dataset %s requires destructive resizing" % (dataset_info.out_name))
            self.logger.debug("At indexes %s resizing source data of shape %s to %s." % (in_data_idx, in_data.shape, out_shape)) 
            stored_data = numpy.resize(in_data, out_shape)
        else:
            stored_data = in_data.reshape(out_shape)

        return stored_data
Example #8
File: load.py Project: cossatot/halfspace
def half_fft_convolve(in1, in2, size, mode = 'full', return_type='real'):
    """
    Rewrite of fftconvolve from scipy.signal ((c) Travis Oliphant 1999-2002)
    to deal with fft convolution where one signal is not fft transformed
    and the other one is.  Application is, for example, in a loop where
    convolution happens repeatedly with different kernels over the same
    signal.  First input is not transformed, second input is.
    """
    s1 = np.array(in1.shape)
    s2 = size - s1 + 1
    complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or
                      np.issubdtype(in2.dtype, np.complexfloating))

    # Always use 2**n-sized FFT
    fsize = (2 ** np.ceil(np.log2(size))).astype(int)
    IN1 = fftn(in1, fsize)
    IN1 *= in2
    fslice = tuple( [slice( 0, int(sz)) for sz in size] )
    ret = ifftn(IN1)[fslice].copy()
    del IN1
    if not complex_result:
        ret = ret.real
    if return_type == 'real':
        ret = ret.real
    if mode == 'full':
        return ret
    elif mode == 'same':
        if np.product(s1, axis=0) > np.product(s2, axis=0):
            osize = s1
        else:
            osize = s2
        return _centered(ret, osize)
    elif mode == 'valid':
        return _centered(ret, abs(s2 - s1) + 1)
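
The calling pattern the docstring describes, sketched under the assumption that the fftn/ifftn used by the original module are numpy's: transform the fixed signal once, then pass each raw kernel as in1.

import numpy as np
from numpy.fft import fftn

signal = np.random.rand(100)
klen = 10
size = np.array([signal.size + klen - 1])       # full convolution length
fsize = (2 ** np.ceil(np.log2(size))).astype(int)
SIG = fftn(signal, fsize)                       # transformed once, reused below
for _ in range(3):
    kernel = np.random.rand(klen)
    out = half_fft_convolve(kernel, SIG, size)  # len(out) == size[0]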
Example #9
def fftconvolve(in1, in2, mode="full"):
    """Convolve two N-dimensional arrays using FFT. See convolve.

    """
    s1 = array(in1.shape)
    s2 = array(in2.shape)
    complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or
                      np.issubdtype(in2.dtype, np.complexfloating))
    size = s1+s2-1
    IN1 = fftn(in1,size)
    IN1 *= fftn(in2,size)
    ret = ifftn(IN1)
    del IN1
    if not complex_result:
        ret = ret.real
    if mode == "full":
        return ret
    elif mode == "same":
        if product(s1,axis=0) > product(s2,axis=0):
            osize = s1
        else:
            osize = s2
        return _centered(ret,osize)
    elif mode == "valid":
        return _centered(ret,abs(s2-s1)+1)
Example #10
def apply_along_axis_with_idx(func1d,axis,arr,*args):
    """ Execute func1d(arr[i], i, *args) where func1d takes 1-D arrays
        and arr is an N-d array.  i varies so as to apply the function
        along the given axis for each 1-d subarray in arr.
    """
    arr = np.asarray(arr)
    nd = arr.ndim
    if axis < 0:
        axis += nd
    if (axis >= nd):
        raise ValueError("axis must be less than arr.ndim; axis=%d, rank=%d."
            % (axis,nd))
    ind = [0]*(nd-1)
    i = np.zeros(nd,'O')
    indlist = list(range(nd))
    indlist.remove(axis)
    i[axis] = slice(None,None)
    outshape = np.asarray(arr.shape).take(indlist)
    i.put(indlist, ind)
    res = func1d(arr[tuple(i.tolist())], tuple(i.tolist()), *args)
    #  if res is a number, then we have a smaller output array
    if np.isscalar(res):
        outarr = np.zeros(outshape,np.asarray(res).dtype)
        outarr[tuple(ind)] = res
        Ntot = np.product(outshape)
        k = 1
        while k < Ntot:
            # increment the index
            ind[-1] += 1
            n = -1
            while (ind[n] >= outshape[n]) and (n > (1-nd)):
                ind[n-1] += 1
                ind[n] = 0
                n -= 1
            i.put(indlist,ind)
            res = func1d(arr[tuple(i.tolist())], tuple(i.tolist()), *args)
            outarr[tuple(ind)] = res
            k += 1
        return outarr
    else:
        Ntot = np.product(outshape)
        holdshape = outshape
        outshape = list(arr.shape)
        outshape[axis] = len(res)
        outarr = np.zeros(outshape,np.asarray(res).dtype)
        outarr[tuple(i.tolist())] = res
        k = 1
        while k < Ntot:
            # increment the index
            ind[-1] += 1
            n = -1
            while (ind[n] >= holdshape[n]) and (n > (1-nd)):
                ind[n-1] += 1
                ind[n] = 0
                n -= 1
            i.put(indlist, ind)
            res = func1d(arr[tuple(i.tolist())], tuple(i.tolist()), *args)
            outarr[tuple(i.tolist())] = res
            k += 1
        return outarr
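
A small usage sketch: func1d receives both each 1-D slice and the index tuple that produced it; here it just sums each column of a 2 x 3 array.

import numpy as np

arr = np.arange(6).reshape(2, 3)

def col_sum(vec, idx):
    return vec.sum()  # idx is e.g. (slice(None, None), 0) for column 0

out = apply_along_axis_with_idx(col_sum, 0, arr)
# out -> array([3, 5, 7])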
Example #11
def maxprod(data, num):
    
    def diagdirection(largest):
        for i in range(-Len+num, Len-num+1):
            diag = data.diagonal(i)
            size = diag.size
            for j in range(size-num+1):
                dp = np.product(diag[j:j+num])
                if dp > largest:
                    largest = dp
        return largest
    
    largest = 0
    Len = data.shape[0]
    for i in range(Len):
        row = data[i,:]
        col = data[:,i]
        for j in range(Len-num+1):
            rp = np.product(row[j:j+num])
            cp = np.product(col[j:j+num])
            if rp > largest:
                largest = rp
            if cp > largest:
                largest = cp
    
    largest = diagdirection(largest)
    data = data[:,::-1]
    largest = diagdirection(largest)
    
    return largest
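
A tiny usage sketch: the largest product of num adjacent values along any row, column, or diagonal (both directions) of a square array.

import numpy as np

grid = np.array([[1, 2, 3],
                 [4, 5, 6],
                 [7, 8, 9]])
print(maxprod(grid, 2))  # 72, from the adjacent pair 8 * 9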
Example #12
def main():

    numberString = "08 02 22 97 38 15 00 40 00 75 04 05 07 78 52 12 50 77 91 08\
 49 49 99 40 17 81 18 57 60 87 17 40 98 43 69 48 04 56 62 00\
 81 49 31 73 55 79 14 29 93 71 40 67 53 88 30 03 49 13 36 65\
 52 70 95 23 04 60 11 42 69 24 68 56 01 32 56 71 37 02 36 91\
 22 31 16 71 51 67 63 89 41 92 36 54 22 40 40 28 66 33 13 80\
 24 47 32 60 99 03 45 02 44 75 33 53 78 36 84 20 35 17 12 50\
 32 98 81 28 64 23 67 10 26 38 40 67 59 54 70 66 18 38 64 70\
 67 26 20 68 02 62 12 20 95 63 94 39 63 08 40 91 66 49 94 21\
 24 55 58 05 66 73 99 26 97 17 78 78 96 83 14 88 34 89 63 72\
 21 36 23 09 75 00 76 44 20 45 35 14 00 61 33 97 34 31 33 95\
 78 17 53 28 22 75 31 67 15 94 03 80 04 62 16 14 09 53 56 92\
 16 39 05 42 96 35 31 47 55 58 88 24 00 17 54 24 36 29 85 57\
 86 56 00 48 35 71 89 07 05 44 44 37 44 60 21 58 51 54 17 58\
 19 80 81 68 05 94 47 69 28 73 92 13 86 52 17 77 04 89 55 40\
 04 52 08 83 97 35 99 16 07 97 57 32 16 26 26 79 33 27 98 66\
 88 36 68 87 57 62 20 72 03 46 33 67 46 55 12 32 63 93 53 69\
 04 42 16 73 38 25 39 11 24 94 72 18 08 46 29 32 40 62 76 36\
 20 69 36 41 72 30 23 88 34 62 99 69 82 67 59 85 74 04 36 16\
 20 73 35 29 78 31 90 01 74 31 49 71 48 86 81 16 23 57 05 54\
 01 70 54 71 83 51 54 69 16 92 33 48 61 43 52 01 89 19 67 48"

    array = np.array(map(int, numberString.split(' '))).reshape(20, 20)
    maxProduct = 0

    for i in xrange(array.shape[0]):
        for j in xrange(array.shape[1]):

            try:
                if np.product(array[i:i+4, j]) > maxProduct:
                    maxProduct = np.product(array[i:i+4, j])
            except IndexError:
                pass

            try:
                if np.product(array[i, j:j+4]) > maxProduct:
                    maxProduct = np.product(array[i, j:j+4])
            except IndexError:
                pass

            try:
                product = 1
                for k in range(4):
                    product *= array[i+k, j+k]
                if product > maxProduct:
                    maxProduct = product
            except IndexError:
                pass

            try:
                product = 1
                for k in range(4):
                    product *= array[i+k, j-k]
                if product > maxProduct:
                    maxProduct = product
            except IndexError:
                pass

    print maxProduct
Example #13
File: nplm.py Project: SigmaQuan/dl4nlp
    def predict(self, context):
        if self.dictionary is None or self.parameters is None:
            print('Train before predict!')
            return
        context = context[-self.context_size:]
        input = []
        for word in context:
            if word in self.dictionary:
                input.append(self.dictionary[word])
            else:
                input.append(0)
        W_size = np.product(self.W_shape)
        U_size = np.product(self.U_shape)
        H_size = np.product(self.H_shape)
        split_indices = [W_size, W_size + U_size, W_size + U_size + H_size]
        W, U, H, C = np.split(self.parameters, split_indices)
        W = W.reshape(self.W_shape)
        U = U.reshape(self.U_shape)
        H = H.reshape(self.H_shape)
        C = C.reshape(self.C_shape)

        x = np.concatenate([C[input[i]] for i in range(self.context_size)])
        x = np.append(x, 1.)    # Append bias term
        x = x.reshape(-1, 1)
        y = W.dot(x) + U.dot(np.tanh(H.dot(x)))

        # You don't want to predict unknown words (index 0)
        prediction = np.argmax(y[1:]) + 1
        return self.reverse_dictionary[prediction]
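
The flat-parameter bookkeeping above in isolation: np.split at the cumulative sizes recovers each weight matrix from a single parameter vector (the shapes here are made up for illustration).

import numpy as np

W_shape, U_shape = (2, 3), (4, 3)
W_size, U_size = np.prod(W_shape), np.prod(U_shape)
flat = np.arange(W_size + U_size + 5, dtype=float)  # trailing 5 entries stand in for C
W, U, C = np.split(flat, [W_size, W_size + U_size])
W = W.reshape(W_shape)
U = U.reshape(U_shape)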
Example #14
 def fit(self, n_components, data):
     if len(data.shape) == 2:
         self.reshape = None
     else:
         assert n_components == np.product(data.shape[1:]), \
             'ZCA whitening components should be %d for convolutional data'\
             % np.product(data.shape[1:])
         self.reshape = data.shape[1:]
     data = self._flatten_data(data)
     assert len(data.shape) == 2
     n, m = data.shape
     self.mean = np.mean(data, axis=0)
     bias_filter = self.filter_bias * np.identity(m, 'float64')
     cov = np.cov(data, rowvar=0, bias=1) + bias_filter
     eigs, eigv = np.linalg.eig(cov.astype(np.float64))
     assert not np.isnan(eigs).any()
     assert not np.isnan(eigv).any()
     print 'eigenvals larger than bias', np.sum(eigs > 0.1)/3072.
     print 'min eigenval: ', eigs.min(), 'max eigenval: ', eigs.max()
     assert eigs.min() > 0
     if self.n_components:
         eigs = eigs[-self.n_components:]
         eigv = eigv[:, -self.n_components:]
     sqrt_eigs = np.sqrt(eigs)
     self.P = np.dot(eigv * (1.0 / sqrt_eigs), eigv.T)
     assert not np.isnan(self.P).any()
     self.P_inv = np.dot(eigv * sqrt_eigs, eigv.T)
     self.P = np.float32(self.P)
     self.P_inv = np.float32(self.P_inv)
     self.is_fit = True
Example #15
def weighted_variance(image, mask, binary_image):
    """Compute the log-transformed variance of foreground and background
    
    image - intensity image used for thresholding
    
    mask - mask of ignored pixels
    
    binary_image - binary image marking foreground and background
    """
    if not np.any(mask):
        return 0
    #
    # Clamp the dynamic range of the foreground
    #
    minval = np.max(image[mask])/256
    if minval == 0:
        return 0
    
    fg = np.log2(np.maximum(image[binary_image & mask], minval))
    bg = np.log2(np.maximum(image[(~ binary_image) & mask], minval))
    nfg = np.product(fg.shape)
    nbg = np.product(bg.shape)
    if nfg == 0:
        return np.var(bg)
    elif nbg == 0:
        return np.var(fg)
    else:
        return (np.var(fg) * nfg + np.var(bg)*nbg) / (nfg+nbg)
Example #16
def dirichlet(mu, alpha):
    mu = np.array(mu)
    alpha = np.array(alpha)
    product = np.product(mu ** (alpha - 1))
    normaliser = gamma(alpha.sum())/np.product(gamma(alpha))
    result = product * normaliser
    return result
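
A quick consistency check, assuming gamma above is scipy.special.gamma: the expression is exactly the Dirichlet density, so it should agree with scipy.stats.

import numpy as np
from scipy.special import gamma
from scipy.stats import dirichlet as sp_dirichlet

mu = np.array([0.2, 0.3, 0.5])
alpha = np.array([2.0, 3.0, 4.0])
assert np.isclose(dirichlet(mu, alpha), sp_dirichlet.pdf(mu, alpha))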
Example #17
def fftconvolve(in1, in2, mode="full"):
    """Convolve two N-dimensional arrays using FFT. See convolve.

    """
    s1 = array(in1.shape)
    s2 = array(in2.shape)
    complex_result = (np.issubdtype(in1.dtype, np.complexfloating) or
                      np.issubdtype(in2.dtype, np.complexfloating))
    size = s1 + s2 - 1

    # Always use 2**n-sized FFT
    fsize = (2 ** np.ceil(np.log2(size))).astype(int)
    IN1 = fftn(in1, fsize)
    IN1 *= fftn(in2, fsize)
    fslice = tuple([slice(0, int(sz)) for sz in size])
    ret = ifftn(IN1)[fslice].copy()
    del IN1
    if not complex_result:
        ret = ret.real
    if mode == "full":
        return ret
    elif mode == "same":
        if product(s1, axis=0) > product(s2, axis=0):
            osize = s1
        else:
            osize = s2
        return _centered(ret, osize)
    elif mode == "valid":
        return _centered(ret, abs(s2 - s1) + 1)
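
The identity the function is built on, checked in a self-contained 1-D form with numpy's FFT (the snippet itself expects array, fftn, ifftn and product to be imported, e.g. from numpy and scipy.fftpack):

import numpy as np

a, b = np.random.rand(8), np.random.rand(5)
n = a.size + b.size - 1  # full convolution length
full = np.fft.irfft(np.fft.rfft(a, n) * np.fft.rfft(b, n), n)
assert np.allclose(full, np.convolve(a, b))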
Example #18
def power_spectrum_nd(input_array, box_dims=None):
	''' 
	Calculate the power spectrum of input_array and return it as an n-dimensional array,
	where n is the number of dimensions in input_array
	box_dims is the size of the box in comoving Mpc. If this is set to None (default),
	the internal box size is used
	
	Parameters:
		* input_array (numpy array): the array to calculate the 
			power spectrum of. Can be of any dimensions.
		* box_dims = None (float or array-like): the dimensions of the 
			box. If this is None, the current box volume is used along all
			dimensions. If it is a float, this is taken as the box length
			along all dimensions. If it is an array-like, the elements are
			taken as the box length along each axis.
	
	Returns:
		The power spectrum in the same dimensions as the input array.		
	'''

	box_dims = _get_dims(box_dims, input_array.shape)

	print_msg( 'Calculating power spectrum...')
	ft = fftpack.fftshift(fftpack.fftn(input_array.astype('float64')))
	power_spectrum = np.abs(ft)**2
	print_msg( '...done')

	# scale
	boxvol = np.product([float(b) for b in box_dims])
	pixelsize = boxvol/(np.product(input_array.shape))
	power_spectrum *= pixelsize**2/boxvol
	
	return power_spectrum
Example #19
File: model.py Project: Naereen/nipy
def estimate_pooled_covariance(resid, ARtarget=[0.3], mask=None):
    """
    Use SPM's REML implementation to estimate a pooled covariance matrix.
    
    Thresholds an F statistic at a marginal p-value to estimate the
    covariance matrix.

    """
    n = resid[:].shape[0]
    components = correlation.ARcomponents(ARtarget, n)

    raw_sigma = 0
    nvox = 0
    for i in range(resid.shape[1]):
        d = np.asarray(resid[:,i])
        d.shape = (d.shape[0], np.product(d.shape[1:]))
        keep = np.asarray(mask[i])
        keep.shape = np.product(keep.shape)
        d = d.compress(keep, axis=1)
        raw_sigma += np.dot(d, d.T)
        nvox += d.shape[1]
    raw_sigma /= nvox
    C, h, _ = reml.reml(raw_sigma,
                        components,
                        n=nvox)
    return C
Example #20
File: attrs.py Project: CaptainAL/Spyder
    def modify(self, name, value):
        """ Change the value of an attribute while preserving its type.

        Differs from __setitem__ in that if the attribute already exists, its
        type is preserved.  This can be very useful for interacting with
        externally generated files.

        If the attribute doesn't exist, it will be automatically created.
        """
        with phil:
            if name not in self:
                self[name] = value
            else:
                value = numpy.asarray(value, order='C')

                attr = h5a.open(self._id, self._e(name))

                if attr.get_space().get_simple_extent_type() == h5s.NULL:
                    raise IOError("Empty attributes can't be modified")

                # Allow the case of () <-> (1,)
                if (value.shape != attr.shape) and not \
                   (numpy.product(value.shape) == 1 and numpy.product(attr.shape) == 1):
                    raise TypeError("Shape of data is incompatible with existing attribute")
                attr.write(value)
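
A usage sketch with h5py: unlike plain assignment, modify keeps the attribute's existing dtype, so the new value is cast on write.

import h5py
import numpy as np

with h5py.File("example.h5", "w") as f:    # hypothetical file name
    f.attrs.create("gain", 1, dtype=np.int16)
    f.attrs.modify("gain", 2.9)            # cast to the stored int16 -> 2
    print(f.attrs["gain"])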
Example #21
File: tools.py Project: neishm/pygeode
def partial_nan_sum (arr, sl, bigout, bigcount, iaxis, outmap):
# {{{
  import numpy as np

#  out = np.zeros(arr.shape[:iaxis] + (bigout.shape[iaxis],) + arr.shape[iaxis+1:], dtype=bigout.dtype)
  out = np.zeros(arr.shape[:iaxis] + (bigout.shape[iaxis],) + arr.shape[iaxis+1:], dtype=arr.dtype)
  count = np.zeros(arr.shape[:iaxis] + (bigcount.shape[iaxis],) + arr.shape[iaxis+1:], dtype='int32')


  assert arr.ndim == out.ndim
#  assert arr.shape[:iaxis] == out.shape[:iaxis]
#  assert arr.shape[iaxis+1:] == out.shape[iaxis+1:]
  assert len(outmap) == arr.shape[iaxis]
#  uoutmap = np.unique(outmap)
  #assert len(uoutmap) == out.shape[iaxis], "%d != %d"%(len(uoutmap),out.shape[iaxis])
  assert count.shape == out.shape
  assert outmap.min() >= 0
  assert outmap.max() < out.shape[iaxis]

#  assert arr.dtype.name == out.dtype.name  # handled in new definition of out??
  assert count.dtype.name == outmap.dtype.name == 'int32', '? %s %s'%(count.dtype,outmap.dtype)
  nx = int(np.product(arr.shape[:iaxis]))
  nin = arr.shape[iaxis]
  nout = out.shape[iaxis]
  ny = int(np.product(arr.shape[iaxis+1:]))
  func = getattr(libmisc,'partial_nan_sum_'+arr.dtype.name)
  func (nx, nin, nout, ny, arr, out, count, outmap)

  bigout[sl] += out
  bigcount[sl] += count
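
What the C kernel computes, emulated with plain numpy for the iaxis = 0 case (an illustration, not the optimized path): slices of arr along iaxis are summed into out according to outmap.

import numpy as np

arr = np.arange(6.0).reshape(3, 2)
outmap = np.array([0, 0, 1], dtype='int32')
out = np.zeros((2, 2))
np.add.at(out, outmap, arr)  # out[0] = arr[0] + arr[1]; out[1] = arr[2]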
Example #22
def plot_hist_rmsRho_Levs012(rho_L0, rho_L1, rho_L2, tSim, fname):
    '''Plot histograms of log10 density for refinement levels 0, 1 and 2.'''
    import pylab as py
    #make 6 hists
    rms = rho_L0
    lab = r"$\rho$, Lev 0"
    rms1d = np.reshape(rms, np.product(rms.shape))
    logrms= np.log10(rms1d)
    cnt,bins,patches = py.hist(logrms, bins=100, color="blue", alpha=0.5, label=lab)
    rms = rho_L1
    lab = r"$\rho$, Lev 1"
    rms1d = np.reshape(rms, np.product(rms.shape))
    logrms= np.log10(rms1d)
    cnt,bins,patches = py.hist(logrms, bins=100, color="red", alpha=0.5, label=lab)
    rms = rho_L2
    lab = r"$\rho$, Lev 2"
    rms1d = np.reshape(rms, np.product(rms.shape))
    logrms= np.log10(rms1d)
    #plot quantities
    Tratio = tSim / ic.tCr
    #plot
    cnt,bins,patches = py.hist(logrms, bins=100, color="green", alpha=0.5,label=lab)
    py.vlines(np.log10( ic.rho0 ), 0, cnt.max(), colors="black", linestyles='dashed',label=r"$\rho_{0}$ = %2.2g [g/cm^3]" % ic.rho0)
    #py.xlim([-13.,-9.])
    py.xlabel("Log10 Density [g/cm^3]")
    py.ylabel("count")
    py.title(r"$T/T_{\mathrm{Cross}}$ = %g" % Tratio)
    py.legend(loc=0, fontsize="small")
    py.savefig(fname,format="pdf")
    py.close()
Example #23
def SceneTrain(NumSub, ImagPerClassToRead, DataPath, TrainImgs, ResizeAmt):
	# Read Images and Generate Image Features XNow
	for i in range(0,NumSub):
		for j in range(0, ImagPerClassToRead):
			InitAll.tic()
			#print DataPath+str(i+1)+'frame'+str(TrainImgs[i][j])+'.png'
			XNow = cv2.imread(DataPath+str(i+1)+'frame'+str(TrainImgs[i][j])+'.png',0)
			XNow = cv2.resize(XNow, ResizeAmt, interpolation = cv2.INTER_CUBIC)
			XNow = InitAll.ComputeGIST(XNow)
			#print("Sub " + str(i+1) + " Image " + str(j+1))
			if(i==0 and j==0):
				X = np.reshape(XNow, (1,np.product(XNow.shape)))
			else:
				X = np.vstack((X,np.reshape(XNow, (1,np.product(XNow.shape)))))
			InitAll.toc()
		print "Subject " + str(i+1) + " done...."

	# Now Generate Class Labels Y
	# Class labels start from 1 and not 0
	Y = [i for i in range(1,NumSub+1)]*ImagPerClassToRead
	Y = list(np.sort(Y))

	SVMModel = svm.SVC()
	SVMModel.fit(X, Y)
	# Saving the objects:
	with open('SceneTrainedSVMModel'+strftime("%Y-%m-%d %H:%M:%S", gmtime())+'.pickle', 'w') as f:
		pickle.dump([X, Y, SVMModel], f)
	return SVMModel
Example #24
File: threshold.py Project: jjdmol/LOFAR
    def get_srcp(self, img):
        import sourcecounts as sc
        fwsig = const.fwsig
        cutoff = 5.0
        spin = -0.80
        freq = img.frequency
        bm = (img.beam[0], img.beam[1])
        cdelt = img.wcs_obj.acdelt[:2]
        x = 2.0*pi*N.product(bm)/abs(N.product(cdelt))/(fwsig*fwsig)*img.omega

        smin_L = img.clipped_rms*cutoff*((1.4e9/freq)**spin)
        scflux = sc.s
        scnum = sc.n
        index = 0
        for i,s in enumerate(scflux):
            if s < smin_L:
                index = i
                break
        n1 = scnum[index]; n2 = scnum[-1]
        s1 = scflux[index]; s2 = scflux[-1]
        alpha = 1.0-log(n1/n2)/log(s1/s2)
        A = (alpha-1.0)*n1/(s1**(1.0-alpha))
        source_p = x*A*((cutoff*img.clipped_rms)**(1.0-alpha)) \
                     /((1.0-alpha)*(1.0-alpha))

        return source_p
Example #25
    def execute(self, solver, stream=None):
        slvr = solver

        # The gaussian shape array can be empty if
        # no gaussian sources were specified.
        gauss = np.intp(0) if np.product(slvr.gauss_shape.shape) == 0 \
            else slvr.gauss_shape

        sersic = np.intp(0) if np.product(slvr.sersic_shape.shape) == 0 \
            else slvr.sersic_shape

        self.kernel(slvr.uvw, slvr.brightness, gauss, sersic,
            slvr.wavelength, slvr.antenna1, slvr.antenna2,
            slvr.jones_scalar,
            slvr.flag, slvr.weight_vector,
            slvr.model_vis, slvr.observed_vis, slvr.chi_sqrd_result,
            **self.get_kernel_params(slvr))

        # Call the pycuda reduction kernel.
        # Divide by the single sigma squared value if a weight vector
        # is not required. Otherwise the kernel will incorporate the
        # individual sigma squared values into the sum
        gpu_sum = gpuarray.sum(slvr.chi_sqrd_result).get()

        if not self.weight_vector:
            slvr.set_X2(gpu_sum/slvr.sigma_sqrd)
        else:
            slvr.set_X2(gpu_sum)
Example #26
    def check_rmsmap(self, img, rms):
        """Calculates the statistics of the rms map and decides, when
        rms_map=None, whether to take the map (if variance
        is significant) or a constant value
        """
        from math import sqrt

        mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Rmsimage.Checkrms  ")
        cdelt = img.wcs_obj.acdelt[:2]
        bm = (img.beam[0], img.beam[1])
        fw_pix = sqrt(N.product(bm)/abs(N.product(cdelt)))
        if img.masked:
            unmasked = N.where(~img.mask_arr)
            stdsub = N.std(rms[unmasked])
            maxrms = N.max(rms[unmasked])
        else:
            stdsub = N.std(rms)
            maxrms = N.max(rms)

        rms_expect = img.clipped_rms/sqrt(2)/img.rms_box[0]*fw_pix
        mylog.debug('%s %10.6f %s' % ('Standard deviation of rms image = ', stdsub*1000.0, 'mJy'))
        mylog.debug('%s %10.6f %s' % ('Expected standard deviation = ', rms_expect*1000.0, 'mJy'))
        if stdsub > 1.1*rms_expect:
            img.use_rms_map = True
            mylogger.userinfo(mylog, 'Variation in rms image significant')
        else:
            img.use_rms_map = False
            mylogger.userinfo(mylog, 'Variation in rms image not significant')

        return img
Example #27
    def check_meanmap(self, img, mean):
        """Calculates the statistics of the mean map and decides, when
        mean_map=None, whether to take the map (if variance
        is significant) or a constant value
        """
        from math import sqrt

        mylog = mylogger.logging.getLogger("PyBDSM."+img.log+"Rmsimage.Checkmean ")
        cdelt = img.wcs_obj.acdelt[:2]
        bm = (img.beam[0], img.beam[1])
        fw_pix = sqrt(N.product(bm)/abs(N.product(cdelt)))
        if img.masked:
            unmasked = N.where(~img.mask_arr)
            stdsub = N.std(mean[unmasked])
            maxmean = N.max(mean[unmasked])
        else:
            stdsub = N.std(mean)
            maxmean = N.max(mean)
        rms_expect = img.clipped_rms/img.rms_box[0]*fw_pix
        mylog.debug('%s %10.6f %s' % ('Standard deviation of mean image = ', stdsub*1000.0, 'mJy'))
        mylog.debug('%s %10.6f %s' % ('Expected standard deviation = ', rms_expect*1000.0, 'mJy'))

        # For mean map, use a higher threshold than for the rms map, as radio images
        # should rarely, if ever, have significant variations in the mean
        if stdsub > 5.0*rms_expect:
          img.mean_map_type = 'map'
          mylogger.userinfo(mylog, 'Variation in mean image significant')
        else:
          if img.confused:
            img.mean_map_type = 'zero'
          else:
            img.mean_map_type = 'const'
          mylogger.userinfo(mylog, 'Variation in mean image not significant')

        return img
Example #28
File: supercell.py Project: bs324/tmoxides
   def _MakeUnitCellPhasesForT_Restricted(self, ijkRow, iOpTs):
      # <r + T|op| s> = <r + T + S|op|s + S> for any unit-cell translation S,T
      # we know the op is only non-zero for a limited range of T, which we
      # have given via iOpTs.
      #
      # we have given the ijkRow (lhs T), and are now looking for the ijkCol
      # (right T) for which <r+Tr| op |r+Tc> is non-zero. That means that
      # Tr-Tc must lie within the Tc supplied in iOpTs.

      # FIXME: do this properly.
      if 0:
         I = []
         PF = []
         for ijkOp in (self.iTs[o] for o in iOpTs):
            for iTCol,ijkCol in enumerate(self.iTs):
               if np.all((ijkCol - ijkRow) % self.Size == ijkOp):
                  I.append(iTCol)
                  PF.append(np.product(self.PhaseShift**((ijkCol - ijkRow) / self.Size)))
         return np.array(PF), np.array(I)
      else:
         # well... that's not properly, but atm it's not the main problem in this form.
         I = []
         PF = []
         for ijkOp in (self.iTs[o] for o in iOpTs):
            ijkCol = ijkOp + ijkRow
            PF.append(np.product(self.PhaseShift**((ijkCol) / self.Size)))
            I.append(self.Fixme_iTsLookup[tuple(ijkCol % self.Size)])
         return np.array(PF), np.array(I)
Example #29
def df_fromdict(data, repeat=1):
    """
    Produces a factorial DataFrame from a dict or list of tuples.

    For example, suppose you want to generate a DataFrame like this::

           a    b
        0  one  0
        1  one  1
        2  two  0
        3  two  1

    This function generates such output simply by providing the following:
    df_fromdict([('a', ['one', 'two']), ('b', [0, 1])])

    :Args:
        data: dict or a list of tuples
            Data used to produce a DataFrame. Keys specify column names, and
            values specify possible (unique) values.
    :Kwargs:
        repeat: int (default: 1)
            How many times everything should be repeated. Useful if you want to
            simulate multiple samples of each condition, for example.
    :Returns:
        pandas.DataFrame with data.items() column names
    """
    data = OrderedDict(data)
    count = list(map(len, data.values()))
    df = {}
    for i, (key, vals) in enumerate(data.items()):
        rep = np.repeat(vals, np.product(count[i+1:]))
        tile = np.tile(rep, np.product(count[:i]))
        df[key] = np.repeat(tile, repeat)
    df = pandas.DataFrame(df, columns=data.keys())
    return df
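
The docstring example, run as-is (assuming the module's imports of OrderedDict, numpy and pandas):

df = df_fromdict([('a', ['one', 'two']), ('b', [0, 1])])
print(df)
#      a  b
# 0  one  0
# 1  one  1
# 2  two  0
# 3  two  1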
Example #30
def weighted_variance(image,mask,threshold):
    """Compute the log-transformed variance of foreground and background"""
    if not np.any(mask):
        return 0
    #
    # Clamp the dynamic range of the foreground
    #
    minval = np.max(image[mask])/256
    if minval == 0:
        return 0
    clamped_image = image[mask]
    clamped_image[clamped_image < minval] = minval
    
    if isinstance(threshold,np.ndarray):
        threshold = threshold[mask]
    fg = np.log2(clamped_image[clamped_image >=threshold])
    bg = np.log2(clamped_image[clamped_image < threshold])
    nfg = np.product(fg.shape)
    nbg = np.product(bg.shape)
    if nfg == 0:
        return np.var(bg)
    elif nbg == 0:
        return np.var(fg)
    else:
        return (np.var(fg) * nfg + np.var(bg)*nbg) / (nfg+nbg)
Example #31
def boxSize(thisBox):
    """
    ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAAIAQC/RPJH+HUB5ZcSOv61j5AKWsnP6pwitgIsRHKQ5PxlrinTbKATjUDSLFLIs/cZxRb6Op+aRbssiZxfAHauAfpqoDOne5CP7WGcZIF5o5o+zYsJ1NzDUWoPQmil1ZnDCVhjlEB8ufxHaa/AFuFK0F12FlJOkgVT+abIKZ19eHi4C+Dck796/ON8DO8B20RPaUfetkCtNPHeb5ODU5E5vvbVaCyquaWI3u/uakYIx/OZ5aHTRoiRH6I+eAXxF1molVZLr2aCKGVrfoYPm3K1CzdcYAQKQCqMp7nLkasGJCTg1QFikC76G2uJ9QLJn4TPu3BNgCGwHj3/JkpKMgUpvS6IjNOSADYd5VXtdOS2xH2bfpiuWnkBwLi9PLWNyQR2mUtuveM2yHbuP13HsDM+a2w2uQwbZgHC2QVUE6QuSQITwY8RkReMKBJwg6ob2heIX+2JQUniF8GKRD7rYiSm7dJrYhQUBSt4T7zN4M5EDg5N5wAiT5hLumVqpAkU4JeJo5JopIohEBW/SknViyiXPqBfrsARC9onKSLp5hJMG1FAACezPAX8ByTOXh4r7rO0UPbZ1mqX1P6hMEkqb/Ut9iEr7fR/hX7WD1fpcOBbwksBidjs2rzwurVERQ0EQfjfw1di1uPR/yzLVfZ+FR2WfL+0FJX/sCrfhPU00y5Q4Te8XqrJwqkbVMZ8fuSBk+wQA5DZRNJJh9pmdoDBi/hNfvcgp9m1D7Z7bUbp2P5cQTgay+Af0P7I5+myCscLXefKSxXJHqRgvEDv/zWiNgqT9zdR3GoYVHR/cZ5XpZhyMpUIsFfDoWfAmHVxZNXF0lKzCEH4QXcfZJgfiPkyoubs9UDI7cC/v9ToCg+2SkvxBERAqlU4UkuOEkenRnP8UFejAuV535eE3RQbddnj9LmLT+Y/yRUuaB2pHmcQ2niT1eu6seXHDI1vyTioPCGSBxuJOciCcJBKDpKBOEdMb1nDGH1j+XpUGPtdEWd2IisgWsWPt3OPnnbEE+ZCRwcC3rPdyQWCpvndXCCX4+5dEfquFTMeU9LOnOiB1uZbnUez4AuicESbzR522iZZ+JdBk3bWyah2X8LW2QKP0YfZNAyOIufW4xSUCBljyIr9Z1/KhBFSMP2yibWDnOwQcK91Vh76AqmvaviTbZn9BrhzgndaODtWAyXtrWZX2iwo3lMpcx8qh3V9YeRB7sOYQVbtGhgDlY2jYv8fPWWaYGrNVvRm+vWUiSKdBgLR5mF0B/r7gC3FERNVecEHE1sMHIZmbd77QnGP9qlv/pP9x1RMHZVsvpSuAufaf6vqXQa5VwKEAt6CQwy7SpfTpBIcvH2qbSfVqPVewZ7ISg7UU+BvKZR5bwzTZSaLC2P4oPPAXeLCDDlC7+OFk3bJ/4Bq6v3NoqYh5d6o4C2lARUTYrwspWHrOTnd/4Osf3/YStqJ+CqdOxmu0xiX8bH+EJek5prI86iGYAJHttMFZcfXK+AJ2SOAJ0YIiV0YgQaeVc75KkNsRE6+mYjE1HZXKi6+wyHLSoJTGUv1WEpUdbGYJO32LVCGwDtG1qcSyVOgieHEwqB5W1qlZeoKLPUHWmziD09ojEsZurRtUKrvSGX/pwrKpDX2U229hJWXrTp13ZNHDdsLz+Brb8ZyGUb/o1aydw7O3ERvmB8drOeUP6PGgCkI26VjKIIEqXfTf8ciG1mssVcQolxNQT/ZZjo4JbhBpX+x6umLz3VDlOJNDnCXAK/+mmstw901weMrcK1cZwxM8GY2VGUErV3dG16h7CqRJpTLn0GxDkxaEiMItcPauV0g10VWNziTaP/wU3SOY5jV0z2WbmcZCLP40IaXXPL67qE3q1x/a18geSFKIM8vIHG8xNlllfJ60THP9X/Kj8GDpQIBvsaSiGh8z3XpxyuwbQIt/tND+i2FndrM0pBSqP8U3n7EzJfbYwEzqU9fJazWFoT4Lpv/mENaFGFe3pgUBv/qIoGqv2/G5u0RqdtToUA6gR9bIdiQpK3ZSNRMM2WG/rYs1c6FDP8ZGKBh+vzfA1zVEOKmJsunG0RU9yinFhotMlix14KhZMM6URZpDGN+zZ9lWMs6UMbfAwHMM+2MqTo6Se7var7uY5GDNXxQ9TTfDAWQw7ZAyzb0UR8kzQmeKrFbcPQ7uaIqV+HC4hj8COCqb/50xy6ZMwKVccw0mhVSt1NXZgoa6mx6cx251G9crWvxfPpvuYLH2NqnceoeADP8hTiia6N6iN3e4kBzDXHIrsgI6NFd6qW9p9HrFnDmHdakv3qfCJSY8acYdEe9ukRXvheyKGtvqmbMnS2RNDLcMwSQo9aypSPNpHMEXtvVp+vIuiWCR1fjgz8uY1f1Pa0SETX9jrLXfqq1zGeQTmFPR1/ANUbEz25nFIkwSUTr5YduvbFIruZ5cW8CySfKyiun+KclIwKhZVbHXcALjAOc//45HV0gdJfEEnhbUkQ+asWdf3Guyo6Eqd8g40X6XsJiFY5ah7Mc4IacNBzp3cHU3f0ODVjP9xTMMH+cNxq9IYvvhlVp38e8GydYCGoQ79jvKWHLbtsF+Z1j98o7xAxdBRKnCblSOE4anny07LCgm3U18Qft0HFEpIFATnLb3Yfjsjw1sE8Rdj9FBFApVvA3SvjGafvq5b7J9QnTWy80TjwL5zrix6vwxxClT/zjDNX+3PPXVr1FMF+Rhel58tJ8pMQ3TrzC1961GAp5eiYA1zGSyDPz+w== abc@defg
    """
    requires(isProperBox(thisBox))
    return np.product(thisBox[:, 1] - thisBox[:, 0])
Example #32
def GatherArray(data, comm, root=0):
    """
    Gather the input data array from all ranks to the specified ``root``.

    This uses `Gatherv`, which avoids mpi4py pickling, and also
    avoids the 2 GB mpi4py limit for bytes using a custom datatype

    Parameters
    ----------
    data : array_like
        the data on each rank to gather
    comm : MPI communicator
        the MPI communicator
    root : int
        the rank number to gather the data to

    Returns
    -------
    recvbuffer : array_like, None
        the gathered data on root, and `None` otherwise
    """
    if not isinstance(data, numpy.ndarray):
        raise ValueError("`data` must by numpy array in GatherArray")

    # need C-contiguous order
    if not data.flags['C_CONTIGUOUS']:
        data = numpy.ascontiguousarray(data)
    local_length = data.shape[0]

    # check dtypes and shapes
    shapes = comm.gather(data.shape, root=root)
    dtypes = comm.allgather(data.dtype)

    # check for structured data
    if dtypes[0].char == 'V':

        # check for structured data mismatch
        names = set(dtypes[0].names)
        if any(set(dt.names) != names for dt in dtypes[1:]):
            raise ValueError(
                "mismatch between data type fields in structured data")

        # check for 'O' data types
        if any(dtypes[0][name] == 'O' for name in dtypes[0].names):
            raise ValueError(
                "object data types ('O') not allowed in structured data in GatherArray"
            )

        # compute the new shape for each rank
        newlength = comm.allreduce(local_length)
        newshape = list(data.shape)
        newshape[0] = newlength

        # the return array
        if comm.rank == root:
            recvbuffer = numpy.empty(newshape, dtype=dtypes[0], order='C')
        else:
            recvbuffer = None

        for name in dtypes[0].names:
            d = GatherArray(data[name], comm, root=root)
            if comm.rank == 0:
                recvbuffer[name] = d

        return recvbuffer

    # check for 'O' data types
    if dtypes[0] == 'O':
        raise ValueError(
            "object data types ('O') not allowed in structured data in GatherArray"
        )

    # check for bad dtypes and bad shapes
    if comm.rank == root:
        bad_shape = any(s[1:] != shapes[0][1:] for s in shapes[1:])
        bad_dtype = any(dt != dtypes[0] for dt in dtypes[1:])
    else:
        bad_shape = None
        bad_dtype = None

    bad_shape, bad_dtype = comm.bcast((bad_shape, bad_dtype))
    if bad_shape:
        raise ValueError(
            "mismatch between shape[1:] across ranks in GatherArray")
    if bad_dtype:
        raise ValueError("mismatch between dtypes across ranks in GatherArray")

    shape = data.shape
    dtype = data.dtype

    # setup the custom dtype
    duplicity = numpy.product(numpy.array(shape[1:], 'intp'))
    itemsize = duplicity * dtype.itemsize
    dt = MPI.BYTE.Create_contiguous(itemsize)
    dt.Commit()

    # compute the new shape for each rank
    newlength = comm.allreduce(local_length)
    newshape = list(shape)
    newshape[0] = newlength

    # the return array
    if comm.rank == root:
        recvbuffer = numpy.empty(newshape, dtype=dtype, order='C')
    else:
        recvbuffer = None

    # the recv counts
    counts = comm.allgather(local_length)
    counts = numpy.array(counts, order='C')

    # the recv offsets
    offsets = numpy.zeros_like(counts, order='C')
    offsets[1:] = counts.cumsum()[:-1]

    # gather to root
    comm.Barrier()
    comm.Gatherv([data, dt], [recvbuffer, (counts, offsets), dt], root=root)
    dt.Free()

    return recvbuffer
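
A usage sketch (run under MPI, e.g. mpirun -n 4 python script.py): ranks may contribute different numbers of rows, as long as shape[1:] and dtype agree across ranks.

from mpi4py import MPI
import numpy

comm = MPI.COMM_WORLD
local = numpy.full((comm.rank + 1, 2), comm.rank, dtype='f8')
gathered = GatherArray(local, comm, root=0)
if comm.rank == 0:
    print(gathered.shape)  # (1 + 2 + ... + comm.size, 2)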
Example #33
def main():
    options = parse_inputs()
    c = color_codes()

    # Prepare the net architecture parameters
    dfactor = options['dfactor']
    # Prepare the net hyperparameters
    num_classes = 4
    epochs = options['epochs']
    patch_width = options['patch_width']
    patch_size = (patch_width, patch_width, patch_width)
    batch_size = options['batch_size']
    dense_size = options['dense_size']
    conv_blocks = options['conv_blocks']
    n_filters = options['n_filters']
    filters_list = n_filters if len(n_filters) > 1 else n_filters * conv_blocks
    conv_width = options['conv_width']
    kernel_size_list = conv_width if isinstance(
        conv_width, list) else [conv_width] * conv_blocks
    balanced = options['balanced']
    val_rate = options['val_rate']
    # Data loading parameters
    preload = options['preload']
    queue = options['queue']

    # Prepare the sufix that will be added to the results for the net and images
    path = options['dir_name']
    filters_s = 'n'.join(['%d' % nf for nf in filters_list])
    conv_s = 'c'.join(['%d' % cs for cs in kernel_size_list])
    ub_s = '.ub' if not balanced else ''
    params_s = (ub_s, dfactor, patch_width, conv_s, filters_s, dense_size,
                epochs)
    sufix = '%s.D%d.p%d.c%s.n%s.d%d.e%d.' % params_s
    n_channels = 4
    preload_s = ' (with ' + c['b'] + 'preloading' + c['nc'] + c[
        'c'] + ')' if preload else ''

    print(c['c'] + '[' + strftime("%H:%M:%S") + '] ' + 'Starting training' +
          preload_s + c['nc'])
    # N-fold cross validation main loop (we'll do 2 training iterations with testing for each patient)
    train_data, train_labels = get_names_from_path(options)

    print(c['c'] + '[' + strftime("%H:%M:%S") + ']  ' + c['nc'] + c['g'] +
          'Number of training images (%d=%d)' %
          (len(train_data), len(train_labels)) + c['nc'])
    #  Also, prepare the network
    net_name = os.path.join(path, 'CBICA-brats2017' + sufix)

    print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' + c['g'] +
          'Creating and compiling the model ' + c['nc'])
    input_shape = (train_data.shape[1], ) + patch_size

    # Sequential model that merges all 4 images. This architecture is just a set of convolutional blocks
    #  that end in a dense layer. This is supposed to be an original baseline.
    inputs = Input(shape=input_shape, name='merged_inputs')
    conv = inputs
    for filters, kernel_size in zip(filters_list, kernel_size_list):
        conv = Conv3D(filters,
                      kernel_size=kernel_size,
                      activation='relu',
                      data_format='channels_first')(conv)
        conv = Dropout(0.5)(conv)

    full = Conv3D(dense_size,
                  kernel_size=(1, 1, 1),
                  data_format='channels_first')(conv)
    full = PReLU()(full)
    full = Conv3D(2, kernel_size=(1, 1, 1), data_format='channels_first')(full)

    rf = concatenate([conv, full], axis=1)

    while np.product(K.int_shape(rf)[2:]) > 1:
        rf = Conv3D(dense_size,
                    kernel_size=(3, 3, 3),
                    data_format='channels_first')(rf)
        rf = Dropout(0.5)(rf)

    full = Reshape((2, -1))(full)
    full = Permute((2, 1))(full)
    full_out = Activation('softmax', name='fc_out')(full)

    tumor = Dense(2, activation='softmax', name='tumor')(rf)

    outputs = [tumor, full_out]

    net = Model(inputs=inputs, outputs=outputs)

    net.compile(optimizer='adadelta',
                loss='categorical_crossentropy',
                loss_weights=[0.8, 1.0],
                metrics=['accuracy'])

    fc_width = patch_width - sum(kernel_size_list) + conv_blocks
    fc_shape = (fc_width, ) * 3

    checkpoint = net_name + '{epoch:02d}.{val_tumor_acc:.2f}.hdf5'
    callbacks = [
        EarlyStopping(monitor='val_tumor_loss', patience=options['patience']),
        ModelCheckpoint(os.path.join(path, checkpoint),
                        monitor='val_tumor_loss',
                        save_best_only=True)
    ]

    for i in range(options['r_epochs']):
        try:
            net = load_model(net_name + ('e%d.' % i) + 'mdl')
        except IOError:
            train_centers = get_cnn_centers(train_data[:, 0],
                                            train_labels,
                                            balanced=balanced)
            train_samples = len(train_centers) / dfactor
            print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' + c['g'] +
                  'Loading data ' + c['b'] + '(%d centers)' %
                  (len(train_centers) / dfactor) + c['nc'])
            x, y = load_patches_train(image_names=train_data,
                                      label_names=train_labels,
                                      centers=train_centers,
                                      size=patch_size,
                                      fc_shape=fc_shape,
                                      nlabels=2,
                                      dfactor=dfactor,
                                      preload=preload,
                                      split=True,
                                      iseg=False,
                                      experimental=1,
                                      datatype=np.float32)

            print(c['c'] + '[' + strftime("%H:%M:%S") + ']    ' + c['g'] +
                  'Training the model for ' + c['b'] +
                  '(%d parameters)' % net.count_params() + c['nc'])
            print(net.summary())

            net.fit(x,
                    y,
                    batch_size=batch_size,
                    validation_split=val_rate,
                    epochs=epochs,
                    callbacks=callbacks)
            net.save(net_name + ('e%d.' % i) + 'mdl')
Example #34
 def size(self):
     "size() - number of elements in tensor generated by iterator"
     return np.asarray(np.product(self.shape()))
Example #35
 def __call__(self, shape: Sequence[int], dtype: Any) -> jnp.ndarray:
     input_size = np.product(shape[:-1])
     max_val = np.sqrt(3 / input_size) * self.scale
     return RandomUniform(-max_val, max_val)(shape, dtype)
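
The same fan-in-scaled draw written directly against jax.random, assuming the RandomUniform initializer above samples from U(-max_val, max_val):

import jax
import numpy as np

shape = (128, 64)
max_val = np.sqrt(3 / np.prod(shape[:-1]))  # scale = 1
w = jax.random.uniform(jax.random.PRNGKey(0), shape,
                       minval=-max_val, maxval=max_val)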
Example #36
def create_control_info(x, vanilla=0, return_object=False):
    """Determine which protocol to use for communication:
       (numpy) arrays, strings, or vanilla based x's type.

       There are three protocols:
       'array':   numpy arrays of type 'i', 'l', 'f', 'd', 'F' or 'D' can be
                  communicated  with mpiext.send_array and mpiext.receive_array.
       'string':  Text strings can be communicated with mpiext.send_string and
                  mpiext.receive_string.
       'vanilla': All other types can be communicated as string representations
                  provided that the objects
                  can be serialised using pickle (or cPickle).
                  The latter mode is less efficient than the
                  first two but it can handle general structures.

       Rules:
       If keyword argument vanilla == 1, vanilla is chosen regardless of 
       x's type.
       Otherwise if x is a string, the string protocol is chosen
       If x is an array, the 'array' protocol is chosen provided that x has one
       of the admissible typecodes.

       The optional argument return_object asks to return object as well.
       This is useful in case it gets modified as in the case of general structures
       using the vanilla protocol.
    """

    import types

    #Default values
    protocol = 'vanilla'
    typecode = ' '
    size = 0
    shape = ()

    #Determine protocol in case
    if not vanilla:
        if type(x) == types.StringType:
            protocol = 'string'
            typecode = 'c'
            size = len(x)
        elif type(x).__name__ == 'ndarray':  #numpy isn't imported yet
            try:
                import numpy
            except:
                print "WARNING (pypar.py): numpy module could not be imported,",
                print "reverting to vanilla mode"
                protocol = 'vanilla'
            else:
                typecode = x.dtype.char
                if typecode in ['i', 'l', 'f', 'd', 'F', 'D']:
                    protocol = 'array'
                    shape = x.shape
                    size = product(shape)
                else:
                    print "WARNING (pypar.py): numpy object type %s is not supported."\
                          %(x.dtype.char)
                    print "Only types 'i', 'l', 'f', 'd', 'F', 'D' are supported,",
                    print "Reverting to vanilla mode."
                    protocol = 'vanilla'

    #Pickle general structures using the vanilla protocol
    if protocol == 'vanilla':
        from cPickle import dumps
        x = dumps(x, 1)
        size = len(x)  # Let count be length of pickled object

    #Return
    if return_object:
        return [protocol, typecode, size, shape], x
    else:
        return [protocol, typecode, size, shape]
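
What the three protocols look like in practice, under the Python 2 environment this module assumes:

create_control_info(numpy.zeros(3))  # -> ['array', 'd', 3, (3,)]
create_control_info('hello')         # -> ['string', 'c', 5, ()]
create_control_info((1, 'mixed'))    # -> ['vanilla', ' ', <pickle length>, ()]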
Example #37
def doit(ds):

    # a FFT operates on uniformly gridded data.  We'll use the yt
    # covering grid for this.

    max_level = ds.index.max_level

    ref = int(np.product(ds.ref_factors[0:max_level]))

    low = ds.domain_left_edge
    dims = ds.domain_dimensions * ref

    nx, ny, nz = dims

    nindex_rho = 1. / 3.

    Kk = np.zeros((nx // 2 + 1, ny // 2 + 1, nz // 2 + 1))

    for vel in [("gas", "velocity_x"), ("gas", "velocity_y"),
                ("gas", "velocity_z")]:

        Kk += 0.5 * fft_comp(ds, ("gas", "density"), vel, nindex_rho,
                             max_level, low, dims)

    # wavenumbers
    L = (ds.domain_right_edge - ds.domain_left_edge).d

    kx = np.fft.rfftfreq(nx) * nx / L[0]
    ky = np.fft.rfftfreq(ny) * ny / L[1]
    kz = np.fft.rfftfreq(nz) * nz / L[2]

    # physical limits to the wavenumbers
    kmin = np.min(1.0 / L)
    kmax = np.min(0.5 * dims / L)

    kbins = np.arange(kmin, kmax, kmin)
    N = len(kbins)

    # bin the Fourier KE into radial kbins
    kx3d, ky3d, kz3d = np.meshgrid(kx, ky, kz, indexing="ij")
    k = np.sqrt(kx3d**2 + ky3d**2 + kz3d**2)

    whichbin = np.digitize(k.flat, kbins)
    ncount = np.bincount(whichbin)

    E_spectrum = np.zeros(len(ncount) - 1)

    for n in range(1, len(ncount)):
        E_spectrum[n - 1] = np.sum(Kk.flat[whichbin == n])

    k = 0.5 * (kbins[0:N - 1] + kbins[1:N])
    E_spectrum = E_spectrum[1:N]

    index = np.argmax(E_spectrum)
    kmax = k[index]
    Emax = E_spectrum[index]

    plt.loglog(k, E_spectrum)
    plt.loglog(k, Emax * (k / kmax)**(-5. / 3.), ls=":", color="0.5")

    plt.xlabel(r"$k$")
    plt.ylabel(r"$E(k)dk$")

    plt.savefig("spectrum.png")
Example #38
def linear_regression(inst, design_matrix, names=None):
    """Fit Ordinary Least Squares (OLS) regression.

    Parameters
    ----------
    inst : instance of Epochs | iterable of SourceEstimate
        The data to be regressed. Contains all the trials, sensors, and time
        points for the regression. For Source Estimates, accepts either a list
        or a generator object.
    design_matrix : ndarray, shape (n_observations, n_regressors)
        The regressors to be used. Must be a 2d array with as many rows as
        the first dimension of the data. The first column of this matrix will
        typically consist of ones (intercept column).
    names : array-like | None
        Optional parameter to name the regressors (i.e., the columns in the
        design matrix). If provided, the length must correspond to the number
        of columns present in design matrix (including the intercept, if
        present). Otherwise, the default names are ``'x0'``, ``'x1'``,
        ``'x2', …, 'x(n-1)'`` for ``n`` regressors.

    Returns
    -------
    results : dict of namedtuple
        For each regressor (key), a namedtuple is provided with the
        following attributes:

            - ``beta`` : regression coefficients
            - ``stderr`` : standard error of regression coefficients
            - ``t_val`` : t statistics (``beta`` / ``stderr``)
            - ``p_val`` : two-sided p-value of t statistic under the t
              distribution
            - ``mlog10_p_val`` : -log₁₀-transformed p-value.

        The tuple members are numpy arrays. The shape of each numpy array is
        the shape of the data minus the first dimension; e.g., if the shape of
        the original data was ``(n_observations, n_channels, n_timepoints)``,
        then the shape of each of the arrays will be
        ``(n_channels, n_timepoints)``.
    """
    if names is None:
        names = ['x%i' % i for i in range(design_matrix.shape[1])]

    if isinstance(inst, BaseEpochs):
        picks = pick_types(inst.info,
                           meg=True,
                           eeg=True,
                           ref_meg=True,
                           stim=False,
                           eog=False,
                           ecg=False,
                           emg=False,
                           exclude=['bads'])
        if [inst.ch_names[p] for p in picks] != inst.ch_names:
            warn('Fitting linear model to non-data or bad channels. '
                 'Check picking')
        msg = 'Fitting linear model to epochs'
        data = inst.get_data()
        out = EvokedArray(np.zeros(data.shape[1:]), inst.info, inst.tmin)
    elif isgenerator(inst):
        msg = 'Fitting linear model to source estimates (generator input)'
        out = next(inst)
        data = np.array([out.data] + [i.data for i in inst])
    elif isinstance(inst, list) and isinstance(inst[0], SourceEstimate):
        msg = 'Fitting linear model to source estimates (list input)'
        out = inst[0]
        data = np.array([i.data for i in inst])
    else:
        raise ValueError('Input must be epochs or iterable of source '
                         'estimates')
    logger.info(msg + ', (%s targets, %s regressors)' %
                (np.product(data.shape[1:]), len(names)))
    lm_params = _fit_lm(data, design_matrix, names)
    lm = namedtuple('lm', 'beta stderr t_val p_val mlog10_p_val')
    lm_fits = {}
    for name in names:
        parameters = [p[name] for p in lm_params]
        for ii, value in enumerate(parameters):
            out_ = out.copy()
            if not isinstance(out_, (SourceEstimate, Evoked)):
                raise RuntimeError('Invalid container.')
            out_._data[:] = value
            parameters[ii] = out_
        lm_fits[name] = lm(*parameters)
    logger.info('Done')
    return lm_fits
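
A typical call, sketched with a hypothetical epochs instance and one covariate of length len(epochs): prepend an intercept column, then read per-regressor maps off the returned namedtuples.

import numpy as np

design = np.column_stack([np.ones(len(epochs)), covariate])
res = linear_regression(epochs, design, names=['intercept', 'covariate'])
beta_map = res['covariate'].beta  # an Evoked of regression coefficients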
Example #39
def _main(args):
    config_path = os.path.expanduser(args.config_path)
    weights_path = os.path.expanduser(args.weights_path)
    assert config_path.endswith('.cfg'), '{} is not a .cfg file'.format(
        config_path)
    assert weights_path.endswith(
        '.weights'), '{} is not a .weights file'.format(weights_path)

    output_path = os.path.expanduser(args.output_path)
    assert output_path.endswith(
        '.h5'), 'output path {} is not a .h5 file'.format(output_path)
    output_root = os.path.splitext(output_path)[0]

    # Load weights and config.
    print('Loading weights.')
    weights_file = open(weights_path, 'rb')
    weights_header = np.ndarray(
        shape=(4, ), dtype='int32', buffer=weights_file.read(16))
    print('Weights Header: ', weights_header)
    # TODO: Check transpose flag when implementing fully connected layers.
    # transpose = (weights_header[0] > 1000) or (weights_header[1] > 1000)

    # Skip the remaining 4 header bytes.
    weights_file.read(4)

    print('Parsing Darknet config.')
    unique_config_file = unique_config_sections(config_path)
    cfg_parser = configparser.ConfigParser()
    cfg_parser.read_file(unique_config_file)

    print('Creating Keras model.')
    if args.fully_convolutional:
        image_height, image_width = None, None
    else:
        image_height = int(cfg_parser['net_0']['height'])
        image_width = int(cfg_parser['net_0']['width'])
    prev_layer = Input(shape=(image_height, image_width, 3))
    all_layers = [prev_layer]

    weight_decay = float(cfg_parser['net_0']['decay']
                         ) if 'net_0' in cfg_parser.sections() else 5e-4
    count = 0
    for section in cfg_parser.sections():
        print('Parsing section {}'.format(section))
        if section.startswith('convolutional'):
            filters = int(cfg_parser[section]['filters'])
            size = int(cfg_parser[section]['size'])
            stride = int(cfg_parser[section]['stride'])
            pad = int(cfg_parser[section]['pad'])
            activation = cfg_parser[section]['activation']
            batch_normalize = 'batch_normalize' in cfg_parser[section]

            # padding='same' is equivalent to Darknet pad=1
            padding = 'same' if pad == 1 else 'valid'

            # Setting weights.
            # Darknet serializes convolutional weights as:
            # [bias/beta, [gamma, mean, variance], conv_weights]
            prev_layer_shape = K.int_shape(prev_layer)

            # TODO: This assumes channel last dim_ordering.
            weights_shape = (size, size, prev_layer_shape[-1], filters)
            darknet_w_shape = (filters, weights_shape[2], size, size)
            weights_size = np.product(weights_shape)

            print('conv2d', 'bn' if batch_normalize else '  ', activation,
                  weights_shape)

            conv_bias = np.ndarray(
                shape=(filters, ),
                dtype='float32',
                buffer=weights_file.read(filters * 4))
            count += filters

            if batch_normalize:
                bn_weights = np.ndarray(
                    shape=(3, filters),
                    dtype='float32',
                    buffer=weights_file.read(filters * 12))
                count += 3 * filters

                # TODO: Keras BatchNormalization mistakenly refers to var
                # as std.
                bn_weight_list = [
                    bn_weights[0],  # scale gamma
                    conv_bias,  # shift beta
                    bn_weights[1],  # running mean
                    bn_weights[2]  # running var
                ]

            conv_weights = np.ndarray(
                shape=darknet_w_shape,
                dtype='float32',
                buffer=weights_file.read(weights_size * 4))
            count += weights_size

            # DarkNet conv_weights are serialized Caffe-style:
            # (out_dim, in_dim, height, width)
            # We would like to set these to Tensorflow order:
            # (height, width, in_dim, out_dim)
            # TODO: Add check for Theano dim ordering.
            conv_weights = np.transpose(conv_weights, [2, 3, 1, 0])
            conv_weights = [conv_weights] if batch_normalize else [
                conv_weights, conv_bias
            ]

            # Handle activation.
            act_fn = None
            if activation == 'leaky':
                pass  # Add advanced activation later.
            elif activation != 'linear':
                raise ValueError(
                    'Unknown activation function `{}` in section {}'.format(
                        activation, section))

            # Create Conv2D layer
            conv_layer = (Conv2D(
                filters, (size, size),
                strides=(stride, stride),
                kernel_regularizer=l2(weight_decay),
                use_bias=not batch_normalize,
                weights=conv_weights,
                activation=act_fn,
                padding=padding))(prev_layer)

            if batch_normalize:
                conv_layer = (BatchNormalization(
                    weights=bn_weight_list))(conv_layer)
            prev_layer = conv_layer

            if activation == 'linear':
                all_layers.append(prev_layer)
            elif activation == 'leaky':
                act_layer = LeakyReLU(alpha=0.1)(prev_layer)
                prev_layer = act_layer
                all_layers.append(act_layer)

        elif section.startswith('maxpool'):
            size = int(cfg_parser[section]['size'])
            stride = int(cfg_parser[section]['stride'])
            all_layers.append(
                MaxPooling2D(
                    padding='same',
                    pool_size=(size, size),
                    strides=(stride, stride))(prev_layer))
            prev_layer = all_layers[-1]

        elif section.startswith('avgpool'):
            if cfg_parser.items(section) != []:
                raise ValueError('{} with params unsupported.'.format(section))
            all_layers.append(GlobalAveragePooling2D()(prev_layer))
            prev_layer = all_layers[-1]

        elif section.startswith('route'):
            ids = [int(i) for i in cfg_parser[section]['layers'].split(',')]
            layers = [all_layers[i] for i in ids]
            if len(layers) > 1:
                print('Concatenating route layers:', layers)
                concatenate_layer = concatenate(layers)
                all_layers.append(concatenate_layer)
                prev_layer = concatenate_layer
            else:
                skip_layer = layers[0]  # only one layer to route
                all_layers.append(skip_layer)
                prev_layer = skip_layer

        elif section.startswith('reorg'):
            block_size = int(cfg_parser[section]['stride'])
            assert block_size == 2, 'Only reorg with stride 2 supported.'
            all_layers.append(
                Lambda(
                    space_to_depth_x2,
                    output_shape=space_to_depth_x2_output_shape,
                    name='space_to_depth_x2')(prev_layer))
            prev_layer = all_layers[-1]

        elif section.startswith('region'):
            with open('{}_anchors.txt'.format(output_root), 'w') as f:
                print(cfg_parser[section]['anchors'], file=f)

        elif (section.startswith('net') or section.startswith('cost') or
              section.startswith('softmax')):
            pass  # Configs not currently handled during model definition.

        else:
            raise ValueError(
                'Unsupported section header type: {}'.format(section))

    # Create and save model.
    model = Model(inputs=all_layers[0], outputs=all_layers[-1])
    print(model.summary())
    model.save('{}'.format(output_path))
    print('Saved Keras model to {}'.format(output_path))
    # Check to see if all weights have been read.
    remaining_weights = len(weights_file.read()) / 4
    weights_file.close()
    print('Read {} of {} from Darknet weights.'.format(count, count +
                                                       remaining_weights))
    if remaining_weights > 0:
        print('Warning: {} unused weights'.format(remaining_weights))

    if args.plot_model:
        plot(model, to_file='{}.png'.format(output_root), show_shapes=True)
        print('Saved model plot to {}.png'.format(output_root))
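
Both converter examples lean on the same low-level trick: ``np.ndarray`` can reinterpret a raw byte string as typed values via its ``buffer`` argument. A self-contained sketch (the bytes are fabricated for illustration):

# Minimal sketch of parsing binary data with np.ndarray's buffer argument,
# as the converter above does for Darknet weight files. Assumes a
# little-endian platform, matching the '<' packing below.
import struct
import numpy as np

raw = struct.pack('<4i', 0, 1, 0, 32013312)  # fabricated 16-byte header
header = np.ndarray(shape=(4,), dtype='int32', buffer=raw)
print(header)  # [       0        1        0 32013312]

floats = np.ndarray(shape=(2,), dtype='float32',
                    buffer=struct.pack('<2f', 0.5, -1.25))
print(floats)  # [ 0.5  -1.25]
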
예제 #40
0
    def create(self, name, data, shape=None, dtype=None):
        """ Create a new attribute, overwriting any existing attribute.

        name
            Name of the new attribute (required)
        data
            An array to initialize the attribute (required)
        shape
            Shape of the attribute.  Overrides data.shape if both are
            given, in which case the total number of points must be unchanged.
        dtype
            Data type of the attribute.  Overrides data.dtype if both
            are given.
        """

        with phil:
            if dtype is None:  # Guess dtype before modifying data
                dtype = base.guess_dtype(data)

            # First, make sure we have a NumPy array.  We leave the data type
            # conversion for HDF5 to perform (other than the below exception).
            if not isinstance(data, Empty):
                is_list_or_tuple = isinstance(data, (list, tuple))
                data = numpy.asarray(data, order='C')
                # If we were passed a list or tuple, then we do not need to respect the
                # datatype of the numpy array. If it is U type, convert to vlen unicode
                # strings:
                if is_list_or_tuple and data.dtype.type == numpy.unicode_:
                    data = numpy.array(data, dtype=h5t.string_dtype())

            if shape is None:
                shape = data.shape

            use_htype = None  # If a committed type is given, we must use it
            # in the call to h5a.create.

            if isinstance(dtype, Datatype):
                use_htype = dtype.id
                dtype = dtype.dtype
            elif dtype is None:
                dtype = data.dtype
            else:
                dtype = numpy.dtype(
                    dtype)  # In case a string, e.g. 'i8' is passed

            original_dtype = dtype  # We'll need this for top-level array types

            # Where a top-level array type is requested, we have to do some
            # fiddling around to present the data as a smaller array of
            # subarrays.
            if dtype.subdtype is not None:

                subdtype, subshape = dtype.subdtype

                # Make sure the subshape matches the last N axes' sizes.
                if shape[-len(subshape):] != subshape:
                    raise ValueError(
                        "Array dtype shape %s is incompatible with data shape %s"
                        % (subshape, shape))

                # New "advertised" shape and dtype
                shape = shape[0:len(shape) - len(subshape)]
                dtype = subdtype

            # Not an array type; make sure to check the number of elements
            # is compatible, and reshape if needed.
            else:

                if shape is not None and numpy.product(
                        shape, dtype=numpy.ulonglong) != numpy.product(
                            data.shape, dtype=numpy.ulonglong):
                    raise ValueError(
                        "Shape of new attribute conflicts with shape of data")

                if shape != data.shape:
                    data = data.reshape(shape)

            # We need this to handle special string types.
            if not isinstance(data, Empty):
                data = numpy.asarray(data, dtype=dtype)

            # Make HDF5 datatype and dataspace for the H5A calls
            if use_htype is None:
                htype = h5t.py_create(original_dtype, logical=True)
                htype2 = h5t.py_create(
                    original_dtype
                )  # Must be bit-for-bit representation rather than logical
            else:
                htype = use_htype
                htype2 = None

            if isinstance(data, Empty):
                space = h5s.create(h5s.NULL)
            else:
                space = h5s.create_simple(shape)

            # This mess exists because you can't overwrite attributes in HDF5.
            # So we write to a temporary attribute first, and then rename.

            tempname = uuid.uuid4().hex

            attr = h5a.create(self._id, self._e(tempname), htype, space)
            try:
                if not isinstance(data, Empty):
                    attr.write(data, mtype=htype2)
            except:
                attr.close()
                h5a.delete(self._id, self._e(tempname))
                raise
            else:
                try:
                    # No atomic rename in HDF5 :(
                    if h5a.exists(self._id, self._e(name)):
                        h5a.delete(self._id, self._e(name))
                    h5a.rename(self._id, self._e(tempname), self._e(name))
                except:
                    attr.close()
                    h5a.delete(self._id, self._e(tempname))
                    raise
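
The method above appears to be h5py's attribute-creation routine; a usage sketch through the public API (the file and attribute names are made up):

# Illustrative call matching the docstring above: `shape` overrides
# data.shape as long as the total element count is unchanged.
import h5py
import numpy as np

with h5py.File('example.h5', 'w') as f:
    dset = f.create_dataset('data', shape=(10,), dtype='f4')
    dset.attrs.create('grid', data=np.arange(6), shape=(2, 3), dtype='i8')
    print(dset.attrs['grid'].shape)  # (2, 3)
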
예제 #41
0
    def encode_grib2_data(self):
        """
        Encodes deterministic member predictions to GRIB2 format.

        Returns:
            Series of GRIB2 messages
        """
        lscale = 1e6
        grib_id_start = [7, 0, 14, 14, 2]
        gdsinfo = np.array([0, np.product(self.data.shape[-2:]), 0, 0, 30],
                           dtype=np.int32)
        lon_0 = self.proj_dict["lon_0"]
        sw_lon = self.grid_dict["sw_lon"]
        if lon_0 < 0:
            lon_0 += 360
        if sw_lon < 0:
            sw_lon += 360
        gdtmp1 = [
            1, 0, self.proj_dict['a'], 0,
            float(self.proj_dict['a']), 0,
            float(self.proj_dict['b']), self.data.shape[-1],
            self.data.shape[-2], self.grid_dict["sw_lat"] * lscale,
            sw_lon * lscale, 0, self.proj_dict["lat_0"] * lscale,
            lon_0 * lscale, self.grid_dict["dx"] * 1e3,
            self.grid_dict["dy"] * 1e3, 0b00000000, 0b01000000,
            self.proj_dict["lat_1"] * lscale, self.proj_dict["lat_2"] * lscale,
            -90 * lscale, 0
        ]
        pdtmp1 = np.array(
            [
                1,  # parameter category Moisture
                31,  # parameter number Hail
                4,  # Type of generating process Ensemble Forecast
                0,  # Background generating process identifier
                31,  # Generating process or model from NCEP
                0,  # Hours after reference time data cutoff
                0,  # Minutes after reference time data cutoff
                1,  # Forecast time units Hours
                0,  # Forecast time
                1,  # Type of first fixed surface Ground
                1,  # Scale value of first fixed surface
                0,  # Value of first fixed surface
                1,  # Type of second fixed surface
                1,  # Scale value of 2nd fixed surface
                0,  # Value of 2nd fixed surface
                0,  # Derived forecast type
                1  # Number of ensemble members
            ],
            dtype=np.int32)
        grib_objects = pd.Series(index=self.times,
                                 data=[None] * self.times.size,
                                 dtype=object)
        drtmp1 = np.array([0, 0, 4, 8, 0], dtype=np.int32)
        for t, time in enumerate(self.times):
            time_list = list(self.run_date.utctimetuple()[0:6])
            if grib_objects[time] is None:
                grib_objects[time] = Grib2Encode(
                    0,
                    np.array(grib_id_start + time_list + [2, 1],
                             dtype=np.int32))
                grib_objects[time].addgrid(gdsinfo, gdtmp1)
            pdtmp1[8] = (time.to_pydatetime() -
                         self.run_date).total_seconds() / 3600.0
            data = self.data[t] / 1000.0
            data[np.isnan(data)] = 0
            masked_data = np.ma.array(data, mask=data <= 0)
            pdtmp1[-2] = 0
            grib_objects[time].addfield(1, pdtmp1, 0, drtmp1, masked_data)
        return grib_objects
예제 #42
0
def arr_from_img(im, shift=0):
    w, h = im.size
    arr = np.asarray(im.getdata(), dtype=np.float32)
    c = int(np.product(arr.shape) // (w * h))  # number of channels
    return arr.reshape((h, w, c)).transpose(2, 1, 0) / 255. - shift
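
A quick round-trip for the helper above (the image is synthesized with Pillow purely for illustration):

# Illustrative use of arr_from_img; assumes Pillow is installed.
import numpy as np
from PIL import Image

im = Image.fromarray(np.random.randint(0, 255, (20, 30, 3), dtype=np.uint8))
arr = arr_from_img(im, shift=0.5)
print(arr.shape)          # (3, 30, 20): channels, width, height
print(arr.min() >= -0.5)  # True: values are scaled into [-0.5, 0.5]
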
예제 #43
0
파일: fcnet.py 프로젝트: wallacetroy/ray
    def __init__(self, obs_space, action_space, num_outputs, model_config,
                 name):
        TorchModelV2.__init__(self, obs_space, action_space, num_outputs,
                              model_config, name)
        nn.Module.__init__(self)

        activation = get_activation_fn(model_config.get("fcnet_activation"),
                                       framework="torch")
        hiddens = model_config.get("fcnet_hiddens")
        no_final_linear = model_config.get("no_final_linear")

        # TODO(sven): implement case: vf_shared_layers = False.
        # vf_share_layers = model_config.get("vf_share_layers")

        logger.debug("Constructing fcnet {} {}".format(hiddens, activation))
        layers = []
        prev_layer_size = int(np.product(obs_space.shape))
        self._logits = None

        # Create layers 0 to second-last.
        for size in hiddens[:-1]:
            layers.append(
                SlimFC(in_size=prev_layer_size,
                       out_size=size,
                       initializer=normc_initializer(1.0),
                       activation_fn=activation))
            prev_layer_size = size

        # The last layer is adjusted to be of size num_outputs, but it's a
        # layer with activation.
        if no_final_linear and self.num_outputs:
            layers.append(
                SlimFC(in_size=prev_layer_size,
                       out_size=self.num_outputs,
                       initializer=normc_initializer(1.0),
                       activation_fn=activation))
            prev_layer_size = self.num_outputs
        # Finish the layers with the provided sizes (`hiddens`), plus -
        # iff num_outputs > 0 - a last linear layer of size num_outputs.
        else:
            if len(hiddens) > 0:
                layers.append(
                    SlimFC(in_size=prev_layer_size,
                           out_size=hiddens[-1],
                           initializer=normc_initializer(1.0),
                           activation_fn=activation))
                prev_layer_size = hiddens[-1]
            if self.num_outputs:
                self._logits = SlimFC(in_size=prev_layer_size,
                                      out_size=self.num_outputs,
                                      initializer=normc_initializer(0.01),
                                      activation_fn=None)
            else:
                self.num_outputs = ([int(np.product(obs_space.shape))] +
                                    hiddens[-1:])[-1]

        self._hidden_layers = nn.Sequential(*layers)

        # TODO(sven): Implement non-shared value branch.
        self._value_branch = SlimFC(in_size=prev_layer_size,
                                    out_size=1,
                                    initializer=normc_initializer(1.0),
                                    activation_fn=None)
        # Holds the current value output.
        self._cur_value = None
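
The ``model_config`` keys consumed above (``fcnet_hiddens``, ``fcnet_activation``, ``no_final_linear``) come straight from the constructor body; a sketch of a config one might pass (the values are illustrative, not RLlib defaults):

# Illustrative model_config for the constructor above; the keys are the
# ones it reads, the values are made up.
model_config = {
    "fcnet_hiddens": [256, 256],   # hidden layer sizes
    "fcnet_activation": "tanh",    # resolved via get_activation_fn(..., framework="torch")
    "no_final_linear": False,      # keep a dedicated linear logits layer
}
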
예제 #44
0
def derivative(func, x0, dx=1.0, n=1, args=(), order=3):
    """
    Find the n-th derivative of a function at a point.

    Given a function, use a central difference formula with spacing `dx` to
    compute the `n`-th derivative at `x0`.

    Parameters
    ----------
    func : function
        Input function.
    x0 : float
        The point at which `n`-th derivative is found.
    dx : float, optional
        Spacing. Default is 1.0.
    n : int, optional
        Order of the derivative. Default is 1.
    args : tuple, optional
        Arguments to pass to `func`.
    order : int, optional
        Number of points to use, must be odd.

    Notes
    -----
    Decreasing the step size too far can result in round-off error.

    Examples
    --------
    >>> def f(x):
    ...     return x**3 + x**2
    ...
    >>> derivative(f, 1.0, dx=1e-6)
    4.9999999999217337

    """
    if order < n + 1:
        raise ValueError(
            "'order' (the number of points used to compute the derivative), "
            "must be at least the derivative order 'n' + 1.")
    if order % 2 == 0:
        raise ValueError(
            "'order' (the number of points used to compute the derivative) "
            "must be odd.")
    # pre-computed for n=1 and 2 and low-order for speed.
    if n == 1:
        if order == 3:
            weights = array([-1, 0, 1]) / 2.0
        elif order == 5:
            weights = array([1, -8, 0, 8, -1]) / 12.0
        elif order == 7:
            weights = array([-1, 9, -45, 0, 45, -9, 1]) / 60.0
        elif order == 9:
            weights = array([3, -32, 168, -672, 0, 672, -168, 32, -3]) / 840.0
        else:
            weights = central_diff_weights(order, 1)
    elif n == 2:
        if order == 3:
            weights = array([1, -2.0, 1])
        elif order == 5:
            weights = array([-1, 16, -30, 16, -1]) / 12.0
        elif order == 7:
            weights = array([2, -27, 270, -490, 270, -27, 2]) / 180.0
        elif order == 9:
            weights = array(
                [-9, 128, -1008, 8064, -14350, 8064, -1008, 128, -9]) / 5040.0
        else:
            weights = central_diff_weights(order, 2)
    else:
        weights = central_diff_weights(order, n)
    val = 0.0
    ho = order >> 1
    for k in range(order):
        val += weights[k] * func(x0 + (k - ho) * dx, *args)
    return val / product((dx, ) * n, axis=0)
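
The docstring example only exercises ``n=1``; a quick check of the second-derivative path (for the same test function, f''(x) = 6x + 2, so f''(1) = 8):

# Second-derivative sanity check for derivative(); f(x) = x**3 + x**2,
# so f''(1) = 6*1 + 2 = 8 exactly.
def f(x):
    return x**3 + x**2

print(derivative(f, 1.0, dx=1e-3, n=2, order=5))  # ~8.0 up to round-off
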
예제 #45
0
def xeb_fidelity(
    circuit: Circuit,
    bitstrings: Sequence[int],
    qubit_order: QubitOrderOrList = QubitOrder.DEFAULT,
    amplitudes: Optional[Mapping[int, complex]] = None,
    estimator: Callable[[int, Sequence[float]],
                        float] = linear_xeb_fidelity_from_probabilities,
) -> float:
    """Estimates XEB fidelity from one circuit using user-supplied estimator.

    Fidelity quantifies the similarity of two quantum states. Here, we estimate
    the fidelity between the theoretically predicted output state of circuit and
    the state produced in its experimental realization. Note that we don't know
    the latter state. Nevertheless, we can estimate the fidelity between the two
    states from the knowledge of the bitstrings observed in the experiment.

    In order to make the estimate more robust one should average the estimates
    over many random circuits. The API supports per-circuit fidelity estimation
    to enable users to examine the properties of estimate distribution over
    many circuits.

    See https://arxiv.org/abs/1608.00263 for more details.

    Args:
        circuit: Random quantum circuit which has been executed on quantum
            processor under test.
        bitstrings: Results of terminal all-qubit measurements performed after
            each circuit execution as integer array where each integer is
            formed from measured qubit values according to `qubit_order` from
            most to least significant qubit, i.e. in the order consistent with
            `cirq.final_wavefunction`.
        qubit_order: Qubit order used to construct bitstrings enumerating
            qubits starting with the most significant qubit.
        amplitudes: Optional mapping from bitstring to output amplitude.
            If provided, simulation is skipped. Useful for large circuits
            when an offline simulation has already been performed.
        estimator: Fidelity estimator to use, see above. Defaults to the
            linear XEB fidelity estimator.
    Returns:
        Estimate of fidelity associated with an experimental realization of
        circuit which yielded measurements in bitstrings.
    Raises:
        ValueError: Circuit is inconsistent with qubit order or one of the
            bitstrings is inconsistent with the number of qubits.
    """
    dim = np.product(circuit.qid_shape())

    if isinstance(bitstrings, tuple):
        bitstrings = list(bitstrings)

    for bitstring in bitstrings:
        if not 0 <= bitstring < dim:
            raise ValueError(
                f'Bitstring {bitstring} could not have been observed '
                f'on {len(circuit.qid_shape())} qubits.')

    if amplitudes is None:
        output_state = final_wavefunction(circuit, qubit_order=qubit_order)
        output_probabilities = np.abs(output_state)**2
        bitstring_probabilities = output_probabilities[bitstrings]
    else:
        bitstring_probabilities = np.abs(
            [amplitudes[bitstring] for bitstring in bitstrings])**2
    return estimator(dim, bitstring_probabilities)
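
For reference, a sketch of a linear XEB estimator matching the ``(dim, probabilities) -> float`` signature used above; the formula ``dim * mean(p) - 1`` is the standard linear cross-entropy fidelity estimate (the name below is mine, not necessarily cirq's implementation):

# Sketch of a linear XEB estimator with the signature expected by
# xeb_fidelity above: dim * <p> - 1, where <p> is the mean probability
# the ideal circuit assigns to the observed bitstrings.
import numpy as np

def linear_xeb_estimate(dim, probabilities):
    return dim * np.mean(probabilities) - 1.0
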
예제 #46
0
def _main(args):
    config_path = os.path.expanduser(args.config_path)
    weights_path = os.path.expanduser(args.weights_path)
    assert config_path.endswith('.cfg'), '{} is not a .cfg file'.format(
        config_path)
    assert weights_path.endswith(
        '.weights'), '{} is not a .weights file'.format(weights_path)

    output_path = os.path.expanduser(args.output_path)
    assert output_path.endswith(
        '.h5'), 'output path {} is not a .h5 file'.format(output_path)
    output_root = os.path.splitext(output_path)[0]

    # Load weights and config.
    print('Loading weights.')
    weights_file = open(weights_path, 'rb')
    major, minor, revision = np.ndarray(shape=(3, ),
                                        dtype='int32',
                                        buffer=weights_file.read(12))
    if (major * 10 + minor) >= 2 and major < 1000 and minor < 1000:
        seen = np.ndarray(shape=(1, ),
                          dtype='int64',
                          buffer=weights_file.read(8))
    else:
        seen = np.ndarray(shape=(1, ),
                          dtype='int32',
                          buffer=weights_file.read(4))
    print('Weights Header: ', major, minor, revision, seen)

    print('Parsing Darknet config.')
    unique_config_file = unique_config_sections(config_path)
    cfg_parser = configparser.ConfigParser()
    cfg_parser.read_file(unique_config_file)

    print('Creating Keras model.')
    input_layer = Input(shape=(None, None, 3))
    prev_layer = input_layer
    all_layers = []

    weight_decay = float(cfg_parser['net_0']['decay']
                         ) if 'net_0' in cfg_parser.sections() else 5e-4
    count = 0
    out_index = []
    for section in cfg_parser.sections():
        print('Parsing section {}'.format(section))
        if section.startswith('convolutional'):
            filters = int(cfg_parser[section]['filters'])
            size = int(cfg_parser[section]['size'])
            stride = int(cfg_parser[section]['stride'])
            pad = int(cfg_parser[section]['pad'])
            activation = cfg_parser[section]['activation']
            batch_normalize = 'batch_normalize' in cfg_parser[section]

            padding = 'same' if pad == 1 and stride == 1 else 'valid'

            # Setting weights.
            # Darknet serializes convolutional weights as:
            # [bias/beta, [gamma, mean, variance], conv_weights]
            prev_layer_shape = K.int_shape(prev_layer)

            weights_shape = (size, size, prev_layer_shape[-1], filters)
            darknet_w_shape = (filters, weights_shape[2], size, size)
            weights_size = np.product(weights_shape)

            print('conv2d', 'bn' if batch_normalize else '  ', activation,
                  weights_shape)

            conv_bias = np.ndarray(shape=(filters, ),
                                   dtype='float32',
                                   buffer=weights_file.read(filters * 4))
            count += filters

            if batch_normalize:
                bn_weights = np.ndarray(shape=(3, filters),
                                        dtype='float32',
                                        buffer=weights_file.read(filters * 12))
                count += 3 * filters

                bn_weight_list = [
                    bn_weights[0],  # scale gamma
                    conv_bias,  # shift beta
                    bn_weights[1],  # running mean
                    bn_weights[2]  # running var
                ]

            conv_weights = np.ndarray(shape=darknet_w_shape,
                                      dtype='float32',
                                      buffer=weights_file.read(weights_size *
                                                               4))
            count += weights_size

            # DarkNet conv_weights are serialized Caffe-style:
            # (out_dim, in_dim, height, width)
            # We would like to set these to Tensorflow order:
            # (height, width, in_dim, out_dim)
            conv_weights = np.transpose(conv_weights, [2, 3, 1, 0])
            conv_weights = [conv_weights] if batch_normalize else [
                conv_weights, conv_bias
            ]

            # Handle activation.
            act_fn = None
            if activation == 'leaky':
                pass  # Add advanced activation later.
            elif activation != 'linear':
                raise ValueError(
                    'Unknown activation function `{}` in section {}'.format(
                        activation, section))

            # Create Conv2D layer
            if stride > 1:
                # Darknet uses left and top padding instead of 'same' mode
                prev_layer = ZeroPadding2D(((1, 0), (1, 0)))(prev_layer)
            conv_layer = (Conv2D(filters, (size, size),
                                 strides=(stride, stride),
                                 kernel_regularizer=l2(weight_decay),
                                 use_bias=not batch_normalize,
                                 weights=conv_weights,
                                 activation=act_fn,
                                 padding=padding))(prev_layer)

            if batch_normalize:
                conv_layer = (BatchNormalization(
                    weights=bn_weight_list))(conv_layer)
            prev_layer = conv_layer

            if activation == 'linear':
                all_layers.append(prev_layer)
            elif activation == 'leaky':
                act_layer = LeakyReLU(alpha=0.1)(prev_layer)
                prev_layer = act_layer
                all_layers.append(act_layer)

        elif section.startswith('route'):
            ids = [int(i) for i in cfg_parser[section]['layers'].split(',')]
            layers = [all_layers[i] for i in ids]
            if len(layers) > 1:
                print('Concatenating route layers:', layers)
                concatenate_layer = Concatenate()(layers)
                all_layers.append(concatenate_layer)
                prev_layer = concatenate_layer
            else:
                skip_layer = layers[0]  # only one layer to route
                all_layers.append(skip_layer)
                prev_layer = skip_layer

        elif section.startswith('shortcut'):
            index = int(cfg_parser[section]['from'])
            activation = cfg_parser[section]['activation']
            assert activation == 'linear', 'Only linear activation supported.'
            all_layers.append(Add()([all_layers[index], prev_layer]))
            prev_layer = all_layers[-1]

        elif section.startswith('upsample'):
            stride = int(cfg_parser[section]['stride'])
            assert stride == 2, 'Only stride=2 supported.'
            all_layers.append(UpSampling2D(stride)(prev_layer))
            prev_layer = all_layers[-1]

        elif section.startswith('yolo'):
            out_index.append(len(all_layers) - 1)
            all_layers.append(None)
            prev_layer = all_layers[-1]

        elif section.startswith('net'):
            pass

        else:
            raise ValueError(
                'Unsupported section header type: {}'.format(section))

    # Create and save model.
    model = Model(inputs=input_layer,
                  outputs=[all_layers[i] for i in out_index])
    print(model.summary())
    model.save('{}'.format(output_path))
    print('Saved Keras model to {}'.format(output_path))
    # Check to see if all weights have been read.
    remaining_weights = len(weights_file.read()) / 4
    weights_file.close()
    print('Read {} of {} from Darknet weights.'.format(
        count, count + remaining_weights))
    if remaining_weights > 0:
        print('Warning: {} unused weights'.format(remaining_weights))

    if args.plot_model:
        plot(model, to_file='{}.png'.format(output_root), show_shapes=True)
        print('Saved model plot to {}.png'.format(output_root))
예제 #47
0
    def _mbb_area(self):
        """
        Area of minimum bounding box
        """

        return np.product(self.mbb[[2, 3]] - self.mbb[[0, 1]])
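
A tiny worked example of the arithmetic above, assuming ``self.mbb`` stores ``[min_x, min_y, max_x, max_y]`` (the layout implied by the indexing):

# Worked example of the area computation above; assumes the
# [min_x, min_y, max_x, max_y] layout inferred from mbb[[2, 3]] - mbb[[0, 1]].
import numpy as np

mbb = np.array([1.0, 2.0, 4.0, 7.0])
print(np.product(mbb[[2, 3]] - mbb[[0, 1]]))  # (4-1) * (7-2) = 15.0
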
예제 #48
0
def prepare_networks(
    encoder,
    decoder,
    n_components,
    dims,
    n_data,
    parametric_embedding,
    parametric_reconstruction,
    init_embedding=None,
):
    """
    Generates Keras networks for the encoder and decoder if they have not
    already been predefined.

    Parameters
    ----------
    encoder : tf.keras.Sequential
        The encoder Keras network
    decoder : tf.keras.Sequential
        the decoder Keras network
    n_components : int
        the dimensionality of the latent space
    dims : tuple of shape (dim1, dim2, dim3...)
        dimensionality of data
    n_data : int
        number of elements in the training dataset
    parametric_embedding : bool
        Whether the embedder is parametric or non-parametric
    parametric_reconstruction : bool
        Whether the decoder is parametric or non-parametric
    init_embedding : array (optional, default None)
        The initial embedding, for nonparametric embeddings

    Returns
    -------
    encoder: tf.keras.Sequential
        encoder keras network
    decoder: tf.keras.Sequential
        decoder keras network
    """

    if parametric_embedding:
        if encoder is None:
            encoder = tf.keras.Sequential([
                tf.keras.layers.InputLayer(input_shape=dims),
                tf.keras.layers.Flatten(),
                tf.keras.layers.Dense(units=100, activation="relu"),
                tf.keras.layers.Dense(units=100, activation="relu"),
                tf.keras.layers.Dense(units=100, activation="relu"),
                tf.keras.layers.Dense(units=n_components, name="z"),
            ])
    else:
        embedding_layer = tf.keras.layers.Embedding(n_data,
                                                    n_components,
                                                    input_length=1)
        embedding_layer.build(input_shape=(1, ))
        embedding_layer.set_weights([init_embedding])
        encoder = tf.keras.Sequential([embedding_layer])

    if decoder is None:
        if parametric_reconstruction:
            decoder = tf.keras.Sequential([
                tf.keras.layers.InputLayer(input_shape=n_components),
                tf.keras.layers.Dense(units=100, activation="relu"),
                tf.keras.layers.Dense(units=100, activation="relu"),
                tf.keras.layers.Dense(units=100, activation="relu"),
                tf.keras.layers.Dense(units=np.product(dims),
                                      name="recon",
                                      activation=None),
                tf.keras.layers.Reshape(dims),
            ])

    return encoder, decoder
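
A usage sketch for the helper above (the shapes are illustrative; assumes TensorFlow 2.x):

# Illustrative call to prepare_networks with made-up shapes; builds the
# default parametric encoder/decoder MLPs when none are supplied.
encoder, decoder = prepare_networks(
    encoder=None,
    decoder=None,
    n_components=2,
    dims=(28, 28, 1),
    n_data=60000,
    parametric_embedding=True,
    parametric_reconstruction=True,
)
print(encoder.output_shape)  # (None, 2)
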
예제 #49
0
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450

Find the thirteen adjacent digits in the 1000-digit number that have the greatest product. What is the value of this product?
"""


from numpy import product

# '\n' replaced with ''
INPUT = """7316717653133062491922511967442657474235534919493496983520312774506326239578318016984801869478851843858615607891129494954595017379583319528532088055111254069874715852386305071569329096329522744304355766896648950445244523161731856403098711121722383113622298934233803081353362766142828064444866452387493035890729629049156044077239071381051585930796086670172427121883998797908792274921901699720888093776657273330010533678812202354218097512545405947522435258490771167055601360483958644670632441572215539753697817977846174064955149290862569321978468622482839722413756570560574902614079729686524145351004748216637048440319989000889524345065854122758866688116427171479924442928230863465674813919123162824586178664583591245665294765456828489128831426076900422421902267105562632111110937054421750694165896040807198403850962455444362981230987879927244284909188845801561660979191338754992005240636899125607176060588611646710940507754100225698315520005593572972571636269561882670428252483600823257530420752963450"""
DIGIT_COUNT = 13
# Could optimize by splitting on '0' and remove the n-closest digits, since all those would give 0

max_product = 0
for i in range(DIGIT_COUNT, len(INPUT) + 1):
    digits_list = [int(d) for d in INPUT[i - DIGIT_COUNT:i]]  # the DIGIT_COUNT digits ending at index i - 1
    prod = product(digits_list)
    if prod > max_product:
        max_product = prod
        print(prod, digits_list)
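
The optimization hinted at in the comment near the top (split the digit string on '0', since any window containing a zero multiplies to zero) can be sketched as:

# Sketch of the split-on-zero optimization: only zero-free runs of digits
# can contain the maximal window.
best = 0
for run in INPUT.split('0'):
    for j in range(len(run) - DIGIT_COUNT + 1):
        best = max(best, product([int(d) for d in run[j:j + DIGIT_COUNT]]))
print(best)
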
예제 #50
0
    def ln_likelihood(self, params, nparams_g, mlims_g, mlims_r, T_g, T_r, t0,
                      p_d_g, p_d_r, P_f_g, P_f_r, P_f_bar_g, P_f_bar_r,
                      P_tot_gbar, P_tot_rbar, mask_g, mask_r, P_A, P_T,
                      plims_f_bar, plims_T, mlow_g, mhigh_g, mlow_r, mhigh_r):
        """
        Returns the single event log-likelihood for a model draw.

        Parameters
        ----------
            params : list
                Set of model parameters for a given sample. Assume g-band
                parameters are assigned first in the array.
            nparams_g : int
                Number of parameters for g_band evolution.
            mlims_g : array
                Array of observed g-band limiting magnitudes (corrected for
                extinction). Shape = (nfields, nobs)
            mlims_r : array
                Array of observed r-band limiting magnitudes (corrected for
                extinction). Shape = (nfields, nobs)
            T_g : array
                Array of observation times in g-band (shape same as mlims_g).
            T_r : array
                Array of observation times in r-band (shape same as mlims_r).
            t0 : float
                Time of the event.
            p_d_g : array
                Array of probability density functions of the distance for
                each field in the g-band. Shape = (nfields,)
            p_d_r : array
                Array of probability density functions of the distance for
                each field in the r-band. Shape = (nfields,)
            P_f_g : array
                Array of total enclosed probabilities for the event to be in a
                field in the g-band. Shape = (nfields,)
            P_f_r : array
                Array of total enclosed probabilities for the event to be in a
                field in the r-band. Shape = (nfields,)
            P_f_bar_g : array
                Array of the complementary number of observations in the
                g-band in all fields except the current one.
                Shape = (nfields,)
            P_f_bar_r : array
                Array of the complementary number of observations in the
                r-band in all fields except the current one.
                Shape = (nfields,)
            P_tot_gbar : float
                Total probability of observations in the g-band being
                unassociated with the kilonova.
            P_tot_rbar : float
                Total probability of observations in the r-band being
                unassociated with the kilonova.
            mask_g : array
                Array of booleans indicating which observations in the g-band
                have corresponding r-band observations, in the same field.
                Shape = (nfields,)
            mask_r : array
                Array of booleans indicating which observations in the r-band
                have corresponding g-band observations, in the same field.
                Shape = (nfields,)
            P_A : float
                Probability of the event being astrophysical.
            P_T : float
                Probability of the event being terrestrial.
            plims_f_bar : float
                The likelihood of observations under the hypothesis that the
                event is outside the searched area.
            plims_T : float
                The likelihood of observations under the hypothesis that the
                event is terrestrial.
            mlow_g : array
                Array containing the field-specific lower limit of the
                limiting magnitude distribution for the g-band.
                Shape = (nfields,)
            mhigh_g : array
                Array containing the field-specific upper limit of the
                limiting magnitude distribution for the g-band.
                Shape = (nfields,)
            mlow_r : array
                Array containing the field-specific lower limit of the
                limiting magnitude distribution for the r-band.
                Shape = (nfields,)
            mhigh_r : array
                Array containing the field-specific upper limit of the
                limiting magnitude distribution for the r-band.
                Shape = (nfields,)

        Returns
        -------
            ln_likelihood : float
                The log-likelihood for the given model draw.
        """
        g_params = params[:nparams_g]
        r_params = params[nparams_g:]
        M_g = np.array([self.lc_model_g(*g_params, t_0=t0, t=t) for t in T_g])
        M_r = np.array([self.lc_model_r(*r_params, t_0=t0, t=t) for t in T_r])
        dlims_g = np.array(list(map(self.dlim, mlims_g, M_g)))
        dlims_r = np.array(list(map(self.dlim, mlims_r, M_r)))
        plims_f_t_g = np.array(
            list(
                map(np.vectorize(self.create_mlim_pdf), M_g, dlims_g, p_d_g,
                    mlow_g, mhigh_g)))
        plims_f_t_r = np.array(
            list(
                map(np.vectorize(self.create_mlim_pdf), M_r, dlims_r, p_d_r,
                    mlow_r, mhigh_r)))
        plims_f_g = np.array([np.product(p) for p in plims_f_t_g])
        plims_f_r = np.array([np.product(p) for p in plims_f_t_r])
        plims_r_g = (plims_f_g * P_f_g * P_f_bar_g)[mask_g] \
            * (plims_f_r * P_f_bar_r)[mask_r]
        plims_g = (plims_f_g * P_f_g * P_f_bar_g)[~mask_g] * P_tot_rbar
        plims_r = (plims_f_r * P_f_r * P_f_bar_r)[~mask_r] * P_tot_gbar
        return np.log((np.sum(plims_r_g) + np.sum(plims_g) + np.sum(plims_r) +
                       plims_f_bar) * P_A + plims_T * P_T)
예제 #51
0
    def from_file(fid, mask_infeasible=True, return_comments=False):
        """
        Read frequency spectrum from file.

        fid: string with file name to read from or an open file object.
        mask_infeasible: If True, mask the infeasible entries in the triallelic spectrum.
        return_comments: If true, the return value is (fs, comments), where
                         comments is a list of strings containing the comments
                         from the file (without #'s).

        See to_file method for details on the file format.
        """
        newfile = False
        # Try to read from fid. If we can't, assume it's something that we can
        # use to open a file.
        if not hasattr(fid, 'read'):
            newfile = True
            fid = open(fid, 'r')

        line = fid.readline()
        # Strip out the comments
        comments = []
        while line.startswith('#'):
            comments.append(line[1:].strip())
            line = fid.readline()

        # Read the shape of the data
        shape, folded_major, folded_ancestral, extrap_x, extrap_t = line.split()
        shape = [int(shape) + 1, int(shape) + 1]

        data = numpy.fromstring(fid.readline().strip(),
                                count=numpy.product(shape),
                                sep=' ')
        # fromfile returns a 1-d array. Reshape it to the proper form.
        data = data.reshape(*shape)

        maskline = fid.readline().strip()
        mask = numpy.fromstring(maskline, count=numpy.product(shape), sep=' ')
        mask = mask.reshape(*shape)

        if folded_major == 'folded_major':
            folded_major = True
        else:
            folded_major = False
        if folded_ancestral == 'folded_ancestral':
            folded_ancestral = True
        else:
            folded_ancestral = False
        if extrap_x == 'None':
            extrap_x = None
        else:
            extrap_x = float(extrap_x)
        if extrap_t == 'None':
            extrap_t = None
        else:
            extrap_t = float(extrap_t)

        # If we opened a new file, clean it up.
        if newfile:
            fid.close()

        fs = TriSpectrum(data,
                         mask,
                         mask_infeasible,
                         data_folded_ancestral=folded_ancestral,
                         data_folded_major=folded_major)
        fs.extrap_x = extrap_x
        fs.extrap_t = extrap_t
        if not return_comments:
            return fs
        else:
            return fs, comments
예제 #52
0
                                            int(x.split(':')[1].split(' or ')[1].strip().split('-')[1])+1))
                                           ) for x in input_data.split('\n\n')[0].split('\n')}
    nearby_tickets_input = {i:[int(y) for y in x.split(',')] for i, x in enumerate(input_data.split('\n\n')[2].split('\n')[1:])}
    your_ticket_input = [int(x) for x in input_data.split('\n\n')[1].split('\n')[1].split(',')]
    return valid_ranges_input, nearby_tickets_input, your_ticket_input

valid_ranges, nearby_tickets, your_ticket = format_data(data)

# part 1
invalid_items = [val for val in list(chain(*list(nearby_tickets.values()))) if all([True if val not in ticket_range else False for ticket_range in list(chain(*list(valid_ranges.values())))])]
print(sum(invalid_items)) # 20058

# part 2
valid_tickets = {k:v for k, v in nearby_tickets.items() if not set(v).intersection(set(invalid_items))}
valid_ticket_values = np.array(list(valid_tickets.values()))

# find all the rows that fit the conditions for each property
col_mapping_dict = defaultdict(str)
for k, v in valid_ranges.items():
    col_mapping_dict[k] = [i for i in range(len(valid_ticket_values[0])) if all(elem in list(chain(*v)) for elem in valid_ticket_values[:,i])]

# find the number of occurrences for each column
occurances_dict = {k: v for k, v in sorted(Counter(list(chain(*list(col_mapping_dict.values())))).items(), key=lambda item: item[1])}

seat_positions = {}
for col, col_count in occurances_dict.items():
    seat_positions.update({col:[k for k, v in col_mapping_dict.items() if col in v][0]})
    col_mapping_dict = {k:v for k,v in col_mapping_dict.items() if col not in v}
seat_positions = {k: v for k, v in sorted(seat_positions.items(), key=lambda item: item[0])}
print(np.product([your_ticket[k] for k,v in seat_positions.items() if 'departure' in v])) # 366871907221
예제 #53
0
    def readData(self, mem, cIxToTarg=None, cIxToRaw=None, selRange=None, selRangeSz=None, outSize=None):

        if outSize is None:
            # Guard: selRange may be None when outSize is not supplied.
            selRange = [-1, -1]
            outSize = self.dataSize[0:2] + [len(mem)]
            selRangeSz = outSize
            cIxToTarg = range(0, selRangeSz[2])
            cIxToRaw = cIxToTarg
        else:
            if selRange[0] == list(range(0, int(self.dataSize[0]))):
                selRange[0] = -1
            if selRange[1] == list(range(0, int(self.dataSize[1]))):
                selRange[1] = -1
        out = np.zeros(outSize, dtype=complex)
        out = out.reshape(selRangeSz[0], selRangeSz[1], -1)

        if mem.size == 0:
            out = out.reshape(outSize)
            return out

        cIxToTarg = self.cast2MinimalUint(cIxToTarg)

        szScanHeader = self.freadInfo['szScanHeader']
        readSize = self.freadInfo['sz']
        readShape = self.freadInfo['shape']
        readCut = self.freadInfo['cut']
        keepOS = list(range(int(self.NCol/4))) + list(range(int(self.NCol*3/4), int(self.NCol)))
        bRemoveOS = self.arg['removeOS']
        bIsReflected = self.IsReflected[cIxToRaw]
        bRegrid = self.flagRampSampRegrid and len(self.rampSampTrj)
        slicedata = self.slicePos[cIxToRaw, :].T

        # SRY store information about raw data correction
        bDoRawDataCorrect = self.arg['doRawDataCorrect']
        bIsRawDataCorrect = self.IsRawDataCorrect[cIxToRaw]
        isBrokenRead = False
        if bDoRawDataCorrect:
            rawDataCorrect = self.arg['rawDataCorrectionFactors']

        # MiV??: Raw data are read line-by-line in portions of 2xNColxNCha float32 points (2 for complex).
        # Computing and sorting(!) on these small portions is quite expensive, esp. when
        # it employs non-sequential memory paths. Examples are non-linear k-space acquisition
        # or reflected lines.
        # This can be sped up if slightly larger blocks of raw data are collected, first.
        # Whenever a block is full, we do all those operations and save it in the final "out" array.
        # What's a good block size? Depends on data size and machine (probably L2/L3/L4 cache sizes).
        # So...? Start with a small block, measure the time-per-line and double block size until
        # a minimum is found. Seems sufficiently robust to end up in a close-to-optimal size for every
        # machine and data.

        blockSz = 2 # size of blocks; must be 2^n; will be increased
        doLockblockSz = False # whether blockSZ should be left untouched
        tprev = float('inf') # previous time-per-line
        blockCtr = 0
        blockInit = np.full((int(readShape[0]), int(readShape[1]), blockSz), -np.inf)
        blockInit = blockInit.astype(complex)
        block = blockInit

        if bRegrid:
            v1 = list(range(selRangeSz[1]))
            v2 = list(range(blockSz))
            rsTrj = [self.rampSampTrj, v1, v2]
            trgTrj = np.linspace(start=min(self.rampSampTrj), stop=max(self.rampSampTrj), num=self.NCol)
            trgTrj = [trgTrj, v1, v2]

        # counter for proper scaling of averages/segments
        count_ave = np.zeros((1, 1, out.shape[2]))
        kMax = len(mem) # max loop index

        fid = self.fileopen()

        for k in range(kMax):
            # skip scan header
            fid.seek(int(mem[k] + szScanHeader), 0)

            raw = np.fromfile(fid, dtype=np.float32, count=int(np.product(readSize)))
            raw = raw.reshape(readSize, order='F').T

            # MiV??: With incomplete files fread() returns less than readSize points. The subsequent reshape will therefore error out.
            # We could check if len(raw) == np.prod(readSize), but people recommend exception handling for performance
            # reasons. Do it.
            try:
                raw_tmp = np.empty(raw.shape[:-1], dtype=complex)
                raw_tmp.real = raw[:,0]
                raw_tmp.imag = raw[:, 1]
                raw = raw_tmp.reshape(readShape, order='F')
            except Exception:
                offset_bytes = mem[k] + szScanHeader
                print('An unexpected read error occurred at this byte offset: {0}. Will ignore this line and stop reading.'.format(offset_bytes))

                # Reject this data fragment. To do so, init with the values of blockInit
                raw = np.full(shape=readShape, fill_value=-np.inf)
                isBrokenRead = True  # remember it and bail out later

            block[:, :, blockCtr] = raw # fast serial storage in a cache array
            blockCtr = blockCtr + 1

            # Do expensive computations and reorderings on the gathered block.
            # Unfortunately, a lot of code is necessary, but that is executed much less
            # frequently, so it's worthwhile for speed.
            # (was a MATLAB comment) TODO: Do *everything* block-by-block

            if (blockCtr == blockSz) or (k == kMax - 1) or (isBrokenRead and blockCtr > 1):
                # remove MDH data from block:
                block = block[readCut,:,:]

                if bRegrid:
                    # correct for readout shifts
                    # the nco frequency is always scaled to the max.
                    # gradient amp and does account for ramp-sampling
                    ro_shift = self.calcOffcenterShiftRO(slicedata[:, k])
                    # TODO: come back here

                ix = list(range(1 + k - blockCtr, k + 1))

                if blockCtr != blockSz:
                    block = block[:, :, 0:blockCtr]

                if bRemoveOS: # remove oversampling in read
                    block = np.fft.ifft(block, axis=0)
                    block = np.fft.fft(block[keepOS, :, :], axis=0)

                if bDoRawDataCorrect and bIsRawDataCorrect[k]:
                    # SRY apply raw data correction if necessary
                    block = np.multiply(block, rawDataCorrect)

                isRefl = bIsReflected[ix]
                block[:, :, isRefl] = block[list(range(block.shape[0]-1, -1, -1)), :, :][:, :, isRefl]

                if (selRange[0] != -1) or (selRange[1] != -1):
                    block = block[selRange[0], selRange[1], :]

                I = np.argsort(cIxToTarg[ix])
                sortIdx = np.sort(cIxToTarg[ix])
                block = block[:, :, I] # reorder according to sorted target indices

                # Mark duplicate indices with 1; we'll have to treat them special for proper averaging
                # Bonus: The very first storage can be made much faster, because it's in-place.
                isDupe = np.array([False] + (np.diff(sortIdx)==0).tolist())

                idx1 = sortIdx[~isDupe] # acquired once in this block
                idxN = sortIdx[isDupe] # acquired multiple times

                count_ave[:, :, idx1] = count_ave[:, :, idx1] + 1

                if idxN.size == 0:
                    # no duplicates
                    if np.all(count_ave[:, :, idx1] == 1): # first acquisition of this line
                        out[:, :, idx1] = block

                    else:
                        out[:, :, idx1] = out[:, :, idx1] + block

                else:
                    out[:, :, idx1] = out[:, :, idx1] + block[:, :, ~isDupe]

                    block = block[:, :, isDupe]
                    for n in range(len(idxN)):
                        out[:, :, idxN[n]] = out[:, :, idxN[n]] + block[:, :, n]
                        count_ave[:, :, idxN[n]] = count_ave[:, :, idxN[n]] + 1

                # At the first few iterations, evaluate the spent time-per-line and decide
                # what to do with the block size.
                if not doLockblockSz: # TODO: if speed problem -> refer to this portion of MATLAB code
                    # regression; reset size and lock it
                    blockSz = max(int(blockSz/2), 1)
                    blockInit = blockInit[:,:, 0:blockSz]
                    doLockblockSz = True

                    if bRegrid:
                        rsTrj[2] = list(range(blockSz))
                        trgTrj[2] = trgTrj[2]

                blockCtr = 0
                block = blockInit # reset to garbage

            if isBrokenRead:
                self.isBrokenFile = True
                break

        fid.close()

        # proper scaling (we don't want to sum our data but average it)
        # For large "out" bsxfun(@rdivide,out,count_ave) is incredibly faster than
        # bsxfun(@times,out,count_ave)!
        # @rdivide is also running in parallel, while @times is not. :-/

        if np.any(count_ave.reshape(-1, 1) > 1):
            count_ave = np.maximum(1, count_ave)
            out = np.divide(out, count_ave)

        out = out.reshape(outSize)
        out = out.squeeze()
        return out
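
The block-size heuristic described in the long comment inside the loop (start small, measure the time per line, double until a minimum is found) is only partially ported above; a standalone sketch of the intended strategy, based on that comment:

# Standalone sketch of the adaptive block-size heuristic described in the
# comments above: grow the block while the measured time-per-line improves,
# then back off one step and lock the size.
import time

def tune_block_size(read_block, start=2, max_size=4096):
    block_size, t_prev = start, float('inf')
    while block_size <= max_size:
        t0 = time.perf_counter()
        read_block(block_size)  # caller-supplied: read one block of lines
        t_per_line = (time.perf_counter() - t0) / block_size
        if t_per_line >= t_prev:  # past the minimum: back off and lock
            return max(block_size // 2, 1)
        t_prev = t_per_line
        block_size *= 2
    return max_size
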
예제 #54
0
    def plot_slice(self, cut_mapping, n=None):
        """Plot a 1D or 2D interpolated slice of a N-dimensional function.

        Parameters
        ----------
        cut_mapping : dict (int → float)
            maps each fixed dimension to the value at which it is cut;
            the remaining dimensions are interpolated. E.g.
            ``cut_mapping = {0: 1}`` fixes dimension 0 ('x') at the value 1.
        n : int
            the number of boxes in the interpolation grid along each axis
        """
        hv = ensure_holoviews()
        plot_dim = self.ndim - len(cut_mapping)
        if plot_dim == 1:
            if not self.data:
                return hv.Scatter([]) * hv.Path([])
            elif self.vdim > 1:
                raise NotImplementedError(
                    "multidimensional output not yet supported by `plot_slice`"
                )
            n = n or 201
            values = [
                cut_mapping.get(i, np.linspace(*self._bbox[i], n))
                for i in range(self.ndim)
            ]
            ind = next(i for i in range(self.ndim) if i not in cut_mapping)
            x = values[ind]
            y = self._ip()(*values)
            p = hv.Path((x, y))

            # Plot with 5% margins such that the boundary points are visible
            margin = 0.05 / self._transform[ind, ind]
            plot_bounds = (x.min() - margin, x.max() + margin)
            return p.redim(x=dict(range=plot_bounds))

        elif plot_dim == 2:
            if self.vdim > 1:
                raise NotImplementedError(
                    "holoviews currently does not support 3D surface plots in bokeh."
                )
            if n is None:
                # Calculate how many grid points are needed.
                # factor from A=√3/4 * a² (equilateral triangle)
                scale_factor = np.product(np.diag(self._transform))
                a_sq = np.sqrt(np.min(self.tri.volumes()) * scale_factor)
                n = max(10, int(0.658 / a_sq))

            xs = ys = np.linspace(0, 1, n)
            xys = [xs[:, None], ys[None, :]]
            values = [
                cut_mapping[i] if i in cut_mapping else xys.pop(0) *
                (b[1] - b[0]) + b[0] for i, b in enumerate(self._bbox)
            ]

            lbrt = [
                b for i, b in enumerate(self._bbox) if i not in cut_mapping
            ]
            lbrt = np.reshape(lbrt, (2, 2)).T.flatten().tolist()

            if len(self.data) >= 4:
                z = self._ip()(*values).squeeze()
                im = hv.Image(np.rot90(z), bounds=lbrt)
            else:
                im = hv.Image([], bounds=lbrt)

            return im.opts(style=dict(cmap="viridis"))
        else:
            raise ValueError("Only 1 or 2-dimensional plots can be generated.")
Example #55
    def _get_final_layer(self, in_shape):
        linear = nn.Linear(int(np.product(in_shape)), int(np.product(self.out_shape)))
        return nn.Sequential(nn.Flatten(), linear)
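A hedged usage sketch of this flatten-then-linear head (shapes are made up; out_shape stands in for the attribute used above):

import numpy as np
import torch
import torch.nn as nn

in_shape, out_shape = (16, 4, 4), (10,)
head = nn.Sequential(
    nn.Flatten(),
    nn.Linear(int(np.product(in_shape)), int(np.product(out_shape))),
)
x = torch.randn(8, *in_shape)   # batch of 8 feature maps
print(head(x).shape)            # torch.Size([8, 10])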
Example #56
def load_image_sequence(ImageSequence, all_images, frames=None, monochrome=False,
                        dtype=None, use_magick=True):
    
    # Attempt setup of parallel file I/O.
    if ImageSequence.IO_threads > 1:
        try:
            from joblib import Parallel, delayed
        except ImportError:
            print("Error, joblib is not installed. Multithreaded file I/O will be disabled.")
            ImageSequence.IO_threads=1

    # Attempt to import PythonMagick if requested
    if use_magick:
        try:
            from PythonMagick import Image
            imageHandler=__magick_load_wrapper__
        except ImportError:
            print("PythonMagick library is not installed.")
            print("Falling back to Pillow (fewer file formats supported)")
            use_magick = False
    
    # Attempt to import Pillow if requested
    if not use_magick:
        try:
            from PIL import Image
            imageHandler=__pil_load_wrapper__
        except ImportError:
            raise ImportError("Pillow library is not installed. Try 'pip install pillow'")


    # Reduce range of frames?
    if frames is not None:
        if len(all_images)>frames[1]:
            all_images=all_images[frames[0]:frames[1]]

    # Use first image to set dtype and size.
    # Read with Pillow?
    if not use_magick:
        try:
            I0 = Image.open(all_images[0])
            ImageSequence.mode = I0.mode
            #print('\t',I0)  # Debugging, check PIL mode
            I0_dtype = np.array(I0).dtype
            if dtype is None: ImageSequence.dtype = I0_dtype
            else: ImageSequence.dtype=dtype
            print("\tPIL thinks the bit depth is %s" % I0_dtype)
            bits_per_pixel = np.dtype(I0_dtype).itemsize*8
            ImageSequence.width = I0.width
            ImageSequence.height = I0.height
        except IOError as e:
            if os.path.isfile(all_images[0]) and not use_magick:
                # Format unrecognized.
                print("\tThe image format was not recognized by PIL! Trying ImageMagick")
                use_magick=True
                try:
                    from PythonMagick import Image
                    imageHandler=__magick_load_wrapper__
                except ImportError:
                    print("PythonMagick library is not installed. Cannot load image sequence.")
                    return
            else:
                # Possible filesystem error
                raise IOError("File %s could not be opened." % all_images[0])

    # Read with PythonMagick?
    # Separate 'if' block allows a PIL failure above to fall through and try this one.
    if use_magick:
        try:
            I0 = Image(all_images[0])
            ImageSequence.width = I0.size().width()
            ImageSequence.height = I0.size().height()
            ImageSequence.mode = str(I0.colorSpace())
            # Source bit depth
            bits_per_pixel = I0.depth()
            if bits_per_pixel==8: I0_dtype=np.uint8
            elif bits_per_pixel==12: I0_dtype='uint12'
            elif bits_per_pixel==16: I0_dtype=np.uint16
            elif bits_per_pixel==32: I0_dtype=np.uint32
            elif bits_per_pixel==64: I0_dtype=np.uint64
            else: raise ValueError
            print("\tPythonMagick thinks the bit depth is %s" % I0_dtype)
            # Determine minimum acceptable destination bit depth
            # (unless overridden by user kwargs)
            if dtype is None:
                if type(I0_dtype) is type: ImageSequence.dtype=I0_dtype
                elif I0_dtype == 'uint12': ImageSequence.dtype=np.uint16
                else: raise ValueError
            else:
                ImageSequence.dtype=dtype
        except IOError as e:
            # Possible filesystem error
            raise IOError("File %s could not be opened." % all_images[0])
        except ValueError:
            # bad bit depth / not supported
            raise ValueError("Bit depth %i for source image not currently supported!" % bits)

    if not monochrome and 'RGB' not in ImageSequence.mode:
        # Force mono flag if there is no colour data
        monochrome=True

    if monochrome and (dtype is None):
        # If mono flag set and dtype is autodetected,
        # allow more space for colour information in mono channel
        # so there's no overflowing when we do summation.
        ImageSequence.increase_dtype()

    # Number of parallel workers.
    n_jobs = int(ImageSequence.IO_threads)
    if n_jobs > len(all_images): n_jobs = len(all_images)
    if n_jobs <= 1: n_jobs = 1
    
    # Chunk size for parallel I/O.
    # To give each worker exactly one task we would set the chunk size to:
    b=len(all_images)/n_jobs
    # However, tasks that large can each need a very large amount of RAM and
    # return a very large array to the parent, which can raise
    # "IOError: bad message length" (seen on macOS 10.13.6 when a child
    # returns more than ~300 MB). So reduce the chunk size if it is too large.
    if b>10*n_jobs: b=int(b/10)
    # Ensure b>=1!
    if b<1: b=1
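    # Worked example: 1000 images on 4 workers gives b = 250.0; since
    # 250 > 10*4, b is cut to int(250/10) = 25, i.e. 40 smaller tasks
    # instead of 4 huge ones.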
    
    print("\tReading files into memory...")
    t0=time.time()
    if n_jobs > 1:
        # Read image sequence in parallel
        if ImageSequence.Joblib_Verbosity >= 1:
            print("%i tasks on %i processors" % (len(all_images)/b, n_jobs))
        L = Parallel(n_jobs=n_jobs, verbose=ImageSequence.Joblib_Verbosity)(
                delayed(imageHandler)(all_images[a:a+int(b)], ImageSequence.width,
                                      ImageSequence.height, ImageSequence.dtype,
                                      I0_dtype, monochrome)
                for a in range(0, len(all_images), int(b)))
    else:
        # Plain list. might have to rearrange this if it consumes too much RAM.
        L = [imageHandler(all_images[a:a+int(b)],ImageSequence.width,ImageSequence.height,ImageSequence.dtype,\
                 I0_dtype,monochrome) for a in range(0,len(all_images),int(b))]
    
    # Repack list of results into a single numpy array.
    if len(L[0].shape) == 3:
        # monochrome arrays
        ImageSequence.arr = np.dstack(L)
        ImageSequence.arr=ImageSequence.arr.swapaxes(2,0).swapaxes(1,2)
    else:
        # colour arrays
        ImageSequence.arr = np.concatenate(L,axis=3)
        ImageSequence.arr = np.rollaxis(np.rollaxis(ImageSequence.arr,3,0),3,1)

    ImageSequence.src_bpp = bits_per_pixel
    read_nbytes = bits_per_pixel * np.product(ImageSequence.arr.shape) / 8
    print('Read %.1f MiB in %.1f sec' % (read_nbytes/1048576,time.time()-t0))

    return
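A hedged sketch of the chunked parallel-read pattern above, with a made-up load_chunk standing in for the __magick_load_wrapper__/__pil_load_wrapper__ handlers:

import numpy as np
from joblib import Parallel, delayed

def load_chunk(paths):
    # stand-in: pretend every file decodes to a 4x4 mono frame
    return np.zeros((4, 4, len(paths)), dtype=np.uint16)

paths = ['img_%04d.tif' % i for i in range(100)]
b = 10  # chunk size
L = Parallel(n_jobs=4)(delayed(load_chunk)(paths[a:a+b])
                       for a in range(0, len(paths), b))
arr = np.dstack(L)                        # repack, as the function does
arr = arr.swapaxes(2, 0).swapaxes(1, 2)   # -> (frames, height, width)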
Example #57
    def paint(self):
        self.setupGLState()

        self.parseMeshData()

        if self.opts['drawFaces']:
            with self.shader():
                verts = self.vertexes
                norms = self.normals
                color = self.colors
                faces = self.faces
                if verts is None:
                    return
                glEnableClientState(GL_VERTEX_ARRAY)
                try:
                    glVertexPointerf(verts)

                    if self.colors is None:
                        color = self.opts['color']
                        if isinstance(color, QtGui.QColor):
                            glColor4f(*color.getRgbF())
                        else:
                            glColor4f(*color)
                    else:
                        glEnableClientState(GL_COLOR_ARRAY)
                        glColorPointerf(color)

                    if norms is not None:
                        glEnableClientState(GL_NORMAL_ARRAY)
                        glNormalPointerf(norms)

                    if faces is None:
                        glDrawArrays(GL_TRIANGLES, 0,
                                     np.product(verts.shape[:-1]))
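                        # np.product(verts.shape[:-1]) is the vertex count,
                        # e.g. verts.shape == (n_faces, 3, 3) -> n_faces * 3.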
                    else:
                        faces = faces.astype(np.uint32).flatten()
                        glDrawElements(GL_TRIANGLES, faces.shape[0],
                                       GL_UNSIGNED_INT, faces)
                finally:
                    glDisableClientState(GL_NORMAL_ARRAY)
                    glDisableClientState(GL_VERTEX_ARRAY)
                    glDisableClientState(GL_COLOR_ARRAY)

        if self.opts['drawEdges']:
            verts = self.edgeVerts
            edges = self.edges
            glEnableClientState(GL_VERTEX_ARRAY)
            try:
                glVertexPointerf(verts)

                if self.edgeColors is None:
                    color = self.opts['edgeColor']
                    if isinstance(color, QtGui.QColor):
                        glColor4f(*color.getRgbF())
                    else:
                        glColor4f(*color)
                else:
                    glEnableClientState(GL_COLOR_ARRAY)
                    glColorPointerf(self.edgeColors)  # per-vertex edge colours, not the leftover face colours
                edges = edges.flatten()
                glDrawElements(GL_LINES, edges.shape[0], GL_UNSIGNED_INT,
                               edges)
            finally:
                glDisableClientState(GL_VERTEX_ARRAY)
                glDisableClientState(GL_COLOR_ARRAY)
Example #58
    def __call__(self, *args, **kwargs):
        shape = kwargs.get('shape', None)
        bSqueeze = kwargs.get('squeeze', False)

        selRange, selRangeSz, outSize = self.calcRange(bSqueeze, shape=shape)

        # calculate page table (virtual to physical addresses)
        # this is now done every time, i.e. result is no longer saved in
        # a property - slower but safer (and easier to keep track of updates)
        ixToRaw, ixToTarget = self.calcIndices()
        tmp = np.reshape(list(range(int(np.product(self.fullSize[2:])))), np.array(self.fullSize[2:], dtype=int), order='F')
        tmp = tmp.flatten(order='F')
        tmp = tmp[:selRangeSz[2:].prod()]
        tmp = tmp.reshape(selRangeSz[2:])
        # tmp = tmp[tuple(selRange[2:])] # Doesn't always work, so instead replaced with above 3 steps
        # tmp = tmp.squeeze()
        ixToRaw = ixToRaw[tmp]
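        # ixToRaw acts as a page table from virtual (full-size) to physical
        # (as-stored) positions. Illustrative example: for 8 virtual lines of
        # which only two were acquired it might read [0, 0, 3, 0, 0, 7, 0, 0];
        # every entry still 0 is dropped as "not acquired" just below.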

        # delete all entries that point to zero (the "NULL"-pointer)
        notAcquired = (ixToRaw == 0)
        ixToRaw = ixToRaw[~notAcquired]
        ixToRaw = ixToRaw.astype(int)

        # calculate ixToTarg for possibly smaller, shifted + segmented
        # target matrix:
        cIx = np.ones((14, len(ixToRaw)))
        if ~self.flagAverageDim[2]:
            cIx[0, :] = self.Lin[ixToRaw] - self.skipLin
        if ~self.flagAverageDim[3]:
            cIx[1, :] = self.Par[ixToRaw] - self.skipPar
        if ~self.flagAverageDim[4]:
            cIx[2, :] = self.Sli[ixToRaw]
        if ~self.flagAverageDim[5]:
            cIx[3, :] = self.Ave[ixToRaw]
        if ~self.flagAverageDim[6]:
            cIx[4, :] = self.Phs[ixToRaw]
        if ~self.flagAverageDim[7]:
            cIx[5, :] = self.Eco[ixToRaw]
        if ~self.flagAverageDim[8]:
            cIx[6, :] = self.Rep[ixToRaw]
        if ~self.flagAverageDim[9]:
            cIx[7, :] = self.Set[ixToRaw]
        if ~self.flagAverageDim[10]:
            cIx[8, :] = self.Seg[ixToRaw]
        if ~self.flagAverageDim[11]:
            cIx[9, :] = self.Ida[ixToRaw]
        if ~self.flagAverageDim[12]:
            cIx[10, :] = self.Idb[ixToRaw]
        if ~self.flagAverageDim[13]:
            cIx[11, :] = self.Idc[ixToRaw]
        if ~self.flagAverageDim[14]:
            cIx[12, :] = self.Idd[ixToRaw]
        if ~self.flagAverageDim[15]:
            cIx[13, :] = self.Ide[ixToRaw]

        # make sure that indices fit inside selection range
        for k in range(2, len(selRange)):
            tmp = cIx[k - 2,:]
            for L in range(0, len(selRange[k])):
                cIx[k - 2, tmp==selRange[k][L]] = L

        sz = selRangeSz[2:]
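        # sub2ind_double mirrors MATLAB's sub2ind: it collapses the 14
        # per-dimension subscripts into one linear index into a target
        # array of shape sz.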
        ixToTarg = self.sub2ind_double(sz, cIx[0, :], cIx[1, :], cIx[2, :],
                                       cIx[3, :], cIx[4, :], cIx[5, :], cIx[6, :],
                                       cIx[7, :], cIx[8, :], cIx[9, :], cIx[10, :],
                                       cIx[11, :], cIx[12, :], cIx[13, :])

        mem = self.memPos[ixToRaw]
        # sort mem for quicker access, sort cIxToTarg/Raw accordingly
        ix = np.argsort(mem)
        mem = np.sort(mem)
        ixToTarg = ixToTarg[ix]
        ixToRaw = ixToRaw[ix]

        varargout = self.readData(mem,ixToTarg,ixToRaw,selRange,selRangeSz,outSize)
        return varargout
Example #59
    def state_nbits(self):
        return np.product(self.zero_state()[0].shape) + np.product(
            self.zero_state()[1].shape)
Example #60
def RGB_colourspace_volume_MonteCarlo(
        colourspace,
        samples=10e6,
        limits=np.array([[0, 100], [-150, 150], [-150, 150]]),
        illuminant_Lab=ILLUMINANTS.get(
            'CIE 1931 2 Degree Standard Observer').get('D50'),
        chromatic_adaptation_method='CAT02',
        random_generator=random_triplet_generator,
        random_state=None,
        processes=None):
    """
    Performs given *RGB* colourspace volume computation using *Monte Carlo*
    method and multiprocessing.

    Parameters
    ----------
    colourspace : RGB_Colourspace
        *RGB* colourspace to compute the volume of.
    samples : numeric, optional
        Samples count.
    limits : array_like, optional
        *Lab* colourspace volume.
    illuminant_Lab : array_like, optional
        *Lab* colourspace *illuminant* chromaticity coordinates.
    chromatic_adaptation_method : unicode, optional
        **{'CAT02', 'XYZ Scaling', 'Von Kries', 'Bradford', 'Sharp',
        'Fairchild', 'CMCCAT97', 'CMCCAT2000', 'CAT02_BRILL_CAT', 'Bianco',
        'Bianco PC'}**,
        *Chromatic adaptation* method.
    random_generator : generator, optional
        Random triplet generator providing the random samples within the *Lab*
        colourspace volume.
    random_state : RandomState, optional
        Mersenne Twister pseudo-random number generator to use in the random
        number generator.
    processes : integer, optional
        Processes count, default to :func:`multiprocessing.cpu_count`
        definition.

    Returns
    -------
    float
        *RGB* colourspace volume.

    Notes
    -----
    The doctest assumes that :func:`np.random.RandomState` will return the
    same sequence no matter which *OS* or *Python* version is used.
    There is however no formal promise about the *prng* sequence
    reproducibility of either *Python* or *Numpy* implementations: Laurent.
    (2012). Reproducibility of python pseudo-random numbers across systems and
    versions? Retrieved January 20, 2015, from http://stackoverflow.com/\
questions/8786084/reproducibility-of-python-pseudo-random-numbers-\
across-systems-and-versions

    Examples
    --------
    >>> from colour import sRGB_COLOURSPACE as sRGB
    >>> prng = np.random.RandomState(2)
    >>> processes = 1
    >>> RGB_colourspace_volume_MonteCarlo(  # doctest: +ELLIPSIS
    ...     sRGB, 10e3, random_state=prng, processes=processes)
    859...
    """

    cpu_count = processes if processes else multiprocessing.cpu_count()
    pool = multiprocessing.Pool(processes=cpu_count)

    process_samples = int(np.round(samples / cpu_count))

    arguments = [
        colourspace, process_samples, limits, illuminant_Lab,
        chromatic_adaptation_method, random_generator, random_state
    ]

    results = pool.map(_wrapper_RGB_colourspace_volume_MonteCarlo,
                       [arguments for _ in range(cpu_count)])

    Lab_volume = np.product([np.sum(np.abs(x)) for x in limits])
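    # Monte Carlo estimate: each worker returns how many of its samples fell
    # inside the RGB gamut, so volume ~= Lab bounding volume * inside / total.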

    return Lab_volume * np.sum(results) / (process_samples * cpu_count)