Code Example #1
File: geods.py Project: TheCapsLock/YunoSeeMe
def compute_offset(transform, ds_x, ds_y):
    """
    Compute the image offset based on the projected coordinates and the transformation.

    The transformation performed is the inverse of the transformation described by the transform object.

    The resulting offsets are floored to int.

    Results are valid as long as the transformation is linear (transform[2] and transform[4] are 0).

    :param transform: the transformation obtained from Dataset::GetGeoTransform.
    :param ds_x: the projected x-coordinate
    :param ds_y: the projected y-coordinate
    :return: the couple of offsets (x, y)
    """
    # TODO is this exception really useful?
    if transform is None:
        raise Exception("Can only handle 'Affine GeoTransforms'")

    # TODO transform[2] and transform[4] should be checked to equal 0 (otherwise raise an error)
    # http://www.gdal.org/classGDALDataset.html#af9593cc241e7d140f5f3c4798a43a668
    origin_x = transform[0]
    origin_y = transform[3]
    pixel_width = transform[1]
    pixel_height = transform[5]

    # do the inverse geo transform, flooring to the int
    # TODO better approximation than flooring to the int ?
    offset_x = np.floor_divide(ds_x - origin_x, pixel_width).astype(int)
    offset_y = np.floor_divide(ds_y - origin_y, pixel_height).astype(int)

    return offset_x, offset_y
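A minimal usage sketch (the geotransform values are illustrative, in the GDAL ordering the docstring refers to; assumes compute_offset and numpy are in scope):

import numpy as np

# north-up raster: origin (100.0, 500.0), 10 m pixels, rows growing southward
transform = (100.0, 10.0, 0.0, 500.0, 0.0, -10.0)
off_x, off_y = compute_offset(transform, 235.0, 463.0)
print(off_x, off_y)  # 13 3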
Code Example #2
File: _colors.py Project: wright-group/WrightTools
def get_color_cycle(n, cmap="rainbow", rotations=3):
    """Get a list of RGBA colors following a colormap.

    Useful for plotting many elements while keeping each element's color unique.

    Parameters
    ----------
    n : integer
        The number of colors to return.
    cmap : string (optional)
        The colormap to use in the cycle. Default is rainbow.
    rotations : integer (optional)
        The number of times to repeat the colormap over the cycle. Default is 3.

    Returns
    -------
    list
        List of RGBA lists.
    """
    cmap = colormaps[cmap]
    if np.mod(n, rotations) == 0:
        per = np.floor_divide(n, rotations)
    else:
        per = np.floor_divide(n, rotations) + 1
    vals = list(np.linspace(0, 1, per))
    vals = vals * rotations
    vals = vals[:n]
    out = cmap(vals)
    return out
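A short usage sketch, assuming get_color_cycle and matplotlib's colormaps registry (matplotlib >= 3.5, as used in the snippet above) are in scope:

import numpy as np
import matplotlib.pyplot as plt

colors = get_color_cycle(9, cmap="viridis", rotations=3)
x = np.linspace(0, 2 * np.pi, 100)
for i, color in enumerate(colors):
    plt.plot(x, np.sin(x + 0.3 * i), color=color)  # one unique color per curve
plt.show()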
Code Example #3
File: test_umath.py Project: jarrodmillman/numpy
 def test_floor_division_complex(self):
     # check that implementation is correct
     msg = "Complex floor division implementation check"
     x = np.array([0.9 + 1j, -0.1 + 1j, 0.9 + 0.5 * 1j, 0.9 + 2.0 * 1j], dtype=np.complex128)
     y = np.array([0.0, -1.0, 0.0, 0.0], dtype=np.complex128)
     assert_equal(np.floor_divide(x ** 2, x), y, err_msg=msg)
     # check overflow, underflow
     msg = "Complex floor division overflow/underflow check"
     x = np.array([1.0e110, 1.0e-110], dtype=np.complex128)
     y = np.floor_divide(x ** 2, x)
     assert_equal(y, [1.0e110, 0], err_msg=msg)
Code Example #4
File: dtype.py Project: benlongo/scikit-image
 def _scale(a, n, m, copy=True):
     # Scale unsigned/positive integers from n to m bits
     # Numbers can be represented exactly only if m is a multiple of n
     # Output array is of same kind as input.
     kind = a.dtype.kind
     if n > m and a.max() < 2 ** m:
         mnew = int(np.ceil(m / 2) * 2)
         if mnew > m:
             dtype = "int%s" % mnew
         else:
             dtype = "uint%s" % mnew
         n = int(np.ceil(n / 2) * 2)
         msg = ("Downcasting %s to %s without scaling because max "
                "value %s fits in %s" % (a.dtype, dtype, a.max(), dtype))
         warn(msg)
         return a.astype(_dtype2(kind, m))
     elif n == m:
         return a.copy() if copy else a
     elif n > m:
         # downscale with precision loss
         prec_loss()
         if copy:
             b = np.empty(a.shape, _dtype2(kind, m))
             np.floor_divide(a, 2**(n - m), out=b, dtype=a.dtype,
                             casting='unsafe')
             return b
         else:
             a //= 2**(n - m)
             return a
     elif m % n == 0:
         # exact upscale to a multiple of n bits
         if copy:
             b = np.empty(a.shape, _dtype2(kind, m))
             np.multiply(a, (2**m - 1) // (2**n - 1), out=b, dtype=b.dtype)
             return b
         else:
             a = np.array(a, _dtype2(kind, m, a.dtype.itemsize), copy=False)
             a *= (2**m - 1) // (2**n - 1)
             return a
     else:
         # upscale to a multiple of n bits,
         # then downscale with precision loss
         prec_loss()
         o = (m // n + 1) * n
         if copy:
             b = np.empty(a.shape, _dtype2(kind, o))
             np.multiply(a, (2**o - 1) // (2**n - 1), out=b, dtype=b.dtype)
             b //= 2**(o - m)
             return b
         else:
             a = np.array(a, _dtype2(kind, o, a.dtype.itemsize), copy=False)
             a *= (2**o - 1) // (2**n - 1)
             a //= 2**(o - m)
             return a
Code Example #5
File: test_lib.py Project: FilipeMaia/afnumpy
def test_floor_divide():
    a = afnumpy.random.random((2,3))
    b = numpy.array(a)
    fassert(afnumpy.floor_divide(a,a), numpy.floor_divide(b,b))
    a = afnumpy.array(2)
    b = numpy.array(a)
    ao = afnumpy.array(0)
    bo = numpy.array(0)
    fassert(afnumpy.floor_divide(a,a), numpy.floor_divide(b,b))
    fassert(afnumpy.floor_divide(a,a, out=ao), numpy.floor_divide(b,b, out = bo))
    fassert(ao, bo)
Code Example #6
def main():
    """
    universal function (ufunc):
    a function that applies an operation to each element of an array.
    The argument is an ndarray and the return value is an ndarray.
    """
    arr = np.arange(10)

    print_headline('arg is ndarray')
    print('integer')
    print(np.sqrt(arr))
    print(np.exp(arr))
    print(np.square(arr))
    print(np.log10(arr))

    print('')
    print('float')
    arr = np.array([-2, -1.6, -1.4, 0, 1.4, 1.6, 2])
    print(np.sign(arr))
    print(np.ceil(arr))
    print(np.floor(arr))
    print(np.rint(arr)) # round to nearest integer
    print(np.sin(arr)) # also cos, tan, arcsin, ...
    print(np.logical_not([arr >= 1])) # also logical_and, logical_or, logical_xor

    print('')
    print('NaN, inf')
    nan_inf = np.array([0, 1, np.nan, np.inf, -np.inf])
    print(nan_inf)
    print(np.isnan(nan_inf))
    print(np.isinf(nan_inf))

    print_headline('args are ndarray')
    x = np.arange(8)
    y = np.arange(8)[::-1]
    print(x, y)
    print(np.add(x, y))
    print(np.subtract(x, y))
    print(np.multiply(x, y))
    print(np.divide(x, y))
    print(np.floor_divide(x, y))
    print(np.power(x, y))
    print(np.mod(x, y))
    print(np.maximum(x, y))
    print(np.minimum(x, y))
    print(np.greater_equal(x, y))

    print_headline('returns are ndarray')
    arr = np.random.randn(10)
    # split into fractional and integral parts
    modf = np.modf(arr)
    print(arr)
    print(modf[0])
    print(modf[1])
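Most ufuncs also accept an out= array; a self-contained sketch that writes the floor-division result in place:

import numpy as np

x = np.arange(8)
y = np.arange(1, 9)
out = np.empty_like(x)
np.floor_divide(x, y, out=out)  # result written into out, no new array allocated
print(out)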
Code Example #7
File: main.py Project: eric-wieser/codejam
def solve(groups, P):
    groups = np.array(groups)

    ms = np.sum(groups % P == np.arange(P)[:,None], axis=1)

    if P == 2:
        U = np.array([
            [2]
        ])
    elif P == 3:
        U = np.array([
            #1, 2
            [3, 0],
            [0, 3],
            [1, 1]
        ])
    elif P == 4:
        U = np.array([
            #1, 2, 3
            [4, 0, 0],
            [0, 2, 0],
            [0, 0, 4],
            [2, 1, 0],
            [1, 0, 1],
            [0, 1, 2]
        ])
    else:
        raise ValueError

    m = ms[1:]

    mtot = np.sum(m)

    k_max = np.ones(U.shape, int) * mtot
    np.floor_divide(m, U, out=k_max, where=U!=0)
    k_max = k_max.min(axis=1)

    ks = np.indices(k_max + 1).reshape(U.shape[0], -1)

    n_groups = U.T @ ks  #[size,ki]

    invalid = np.any(n_groups > m[:,None], axis=0, keepdims=True)
    happy = ks.sum(axis=0, keepdims=True)
    happy[invalid] = 0

    best_i = np.argmax(happy.ravel())
    best_happy = happy[:,best_i].squeeze()

    n = ms[0] + best_happy

    if n_groups[:,best_i].sum() != mtot:
        n += 1

    return n
Code Example #8
File: submitv1.py Project: kanhua/LSHTC
def simscore(trainMtx,testMtx,sparseMode=True,split=2,split2=2):
      
      if sparseMode==True:    
            scorevec=np.zeros((testMtx.shape[0],trainMtx.shape[0]));
            maxveclength=np.min([testMtx.shape[1],trainMtx.shape[1]]);
          #setup an array for storing the norm of each row
            rownorm=np.zeros((trainMtx.shape[0],));        
            for i in range(trainMtx.shape[0]):
                w=trainMtx[i,:];
                rownorm[i]=np.sqrt(w.dot(w.T))[0,0];
            
            for i in range(testMtx.shape[0]):
                w=testMtx[i,:];
                wnorm=np.sqrt(w.dot(w.T))[0,0];
                print("calculating", i)
                for j in range(trainMtx.shape[0]):
                    v=trainMtx[j,:];
                    vnorm=np.sqrt(v.dot(v.T))[0,0]
                    ip=w[0,0:maxveclength].multiply(v[0,0:maxveclength])/(wnorm*vnorm);
                    scorevec[i,j]=ip[0,0];
           
      else:
          #setup an array for storing the results       
          scorevec=np.zeros((testMtx.shape[0],trainMtx.shape[0]));  
             
          if split==0:
                prd=sparsemtxproduct(trainMtx, testMtx);
                scorevec=np.transpose(np.asarray(prd));
          else:
          
                #calculate the split
                bitesize=np.floor_divide(trainMtx.shape[0],split);
                remem=np.mod(trainMtx.shape[0],split);
                
                bitesize2=np.floor_divide(testMtx.shape[0],split2);
                remem2=np.mod(testMtx.shape[0],split2);
                
                #setup an array for storing the norm of each row
                
                
                for i in range(split+1):
                      for j in range(split2+1):
                        
                        startidx,endidx=getindex(i,split,bitesize,remem);
                        startidx2,endidx2=getindex(j,split2,bitesize2,remem2);
                              
                        prd=mtxproduct(trainMtx[startidx:endidx,:], testMtx[startidx2:endidx2,:])
                        scorevec[startidx2:endidx2,startidx:endidx]=np.transpose(np.asarray(prd));
                        del prd
                         
                                    
      return scorevec;
Code Example #9
def permute_sign_flip(n, samples=10000, seed=0):
    """Iterate over indices for ``samples`` permutations of the data

    Parameters
    ----------
    n : int
        Number of cases.
    samples : int
        Number of samples to yield. If < 0, all possible permutations are
        performed.
    seed : None | int
        Seed the random state of the randomization module (:mod:`random`) to
        make replication possible. None to skip seeding (default 0).

    Returns
    -------
    iterator over sign : array
        Iterate over sign flip permutations (``sign`` is the same object but
        its content modified in every iteration).

    Notes
    -----
    Sign flip of each element is encoded in successive bits. These bits are
    recoded as integer.
    """
    n = int(n)
    if seed is not None:
        random.seed(seed)

    # determine possible number of permutations
    n_perm = 2 ** n
    if n > 62:
        raise NotImplementedError("Too many cases for sign permutation "
                                  "without repetition")
    if samples < 0:
        # do all permutations
        sample_sequences = range(1, n_perm)
    else:
        # random resampling
        sample_sequences = random.sample(range(1, n_perm), samples)

    sign = np.empty(n, np.int8)
    mult = 2 ** np.arange(n, dtype=np.int64)
    buffer_ = np.empty(n, dtype=np.int64)
    choice = np.array([1, -1], np.int8)
    for i in sample_sequences:
        np.floor_divide(i, mult, buffer_, dtype=np.int64)
        buffer_ %= 2
        yield np.choose(buffer_, choice, sign)
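A small usage sketch (assumes the module-level imports, numpy and random, from the source file; the exact signs depend on the seed):

for sign in permute_sign_flip(4, samples=3, seed=0):
    print(sign.copy())  # copy because the same buffer is rewritten each iteration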
Code Example #10
File: wrapper_test.py Project: mthrok/luchador
    def test_floordiv_numbers(self):
        """Tensor // number is correct elementwise"""
        constant, shape = 10., (3, 5)
        with nn.variable_scope(self.get_scope()):
            tensor1 = fixture.create_ones_tensor(shape, dtype='float32')
            tensor2 = tensor1 // constant
            tensor3 = constant // tensor1

        session = nn.Session()

        val1, val2, val3 = session.run(
            outputs=[tensor1, tensor2, tensor3],
        )
        np.testing.assert_equal(np.floor_divide(val1, constant), val2)
        np.testing.assert_equal(np.floor_divide(constant, val1), val3)
Code Example #11
File: grid_1D_ms.py Project: fdoperezi/becca
 def step(self, action): 
     """
     Advance the world by one time step
     """
     self.action = action.ravel()
     self.action[np.nonzero(self.action)] = 1.
     self.timestep += 1 
     energy = self.action[0] + self.action[1]
     self.world_state += self.action[0] - self.action[1]
     # Occasionally add a perturbation to the action to knock it 
     # into a different state 
     if np.random.random_sample() < self.JUMP_FRACTION:
         self.world_state = self.num_sensors * np.random.random_sample()
     # Ensure that the world state falls between 0 and 9
     self.world_state -= self.num_sensors * np.floor_divide(
             self.world_state, self.num_sensors)
     self.simple_state = int(np.floor(self.world_state))
     # TODO do this more elegantly
     if self.simple_state == 9:
         self.simple_state = 0
     # Assign sensors as zeros or ones. 
     # Represent the presence or absence of the current position in the bin.
     sensors = np.zeros(self.num_sensors)
     sensors[self.simple_state] = 1
     # Assign reward based on the current state 
     reward = sensors[8] * -1.
     reward += sensors[3] 
     # Punish actions just a little 
     reward -= energy * self.ENERGY_COST
      reward = np.maximum(reward, -1.)  # clamp at -1; np.max's second argument is the axis, not a bound
     return sensors, reward
Code Example #12
File: matrix.py Project: Yevs/FuturealmsTest
def get_min_coords(face):
    """
    returns coordinates of the minimum element in face (2-dimensional array)
    """

    min_index = np.argmin(face)  # index into the flattened, row-major array
    # row = index // n_columns, column = index % n_columns
    return np.floor_divide(min_index, face.shape[1]), min_index % face.shape[1]
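A quick check against numpy's own row-major unraveling:

import numpy as np

face = np.array([[5, 2, 9],
                 [7, 1, 8]])
print(get_min_coords(face))                           # row 1, column 1
print(np.unravel_index(np.argmin(face), face.shape))  # same coordinates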
Code Example #13
File: cov.py Project: hpc4cmb/toast
    def test_distpix_init(self):
        # make a simple pointing matrix
        pointing = OpPointingHpix(nside=self.map_nside, nest=True, mode="IQU",
            hwprpm=self.hwprpm)
        pointing.exec(self.data)

        # get locally hit pixels
        lc = OpLocalPixels()
        localpix = lc.exec(self.data)

        # find the locally hit submaps.
        localsm = np.unique(np.floor_divide(localpix, self.subnpix))

        # construct a distributed map to store the covariance and hits

        invnpp = DistPixels(comm=self.data.comm.comm_world, size=self.sim_npix,
            nnz=6, dtype=np.float64, submap=self.subnpix, local=localsm)

        invnpp2 = DistPixels(comm=self.data.comm.comm_world,
            size=self.sim_npix, nnz=6, dtype=np.float64, submap=self.subnpix,
            localpix=localpix)

        nt.assert_equal( invnpp.local, invnpp2.local )

        return
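The pixel-to-submap grouping above is just an integer division; a self-contained sketch with illustrative numbers:

import numpy as np

subnpix = 256  # pixels per submap (illustrative)
localpix = np.array([10, 300, 511, 512, 1000])
print(np.unique(np.floor_divide(localpix, subnpix)))  # [0 1 2 3]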
Code Example #14
File: rounds.py Project: ut-ras/venus
def solve_rounds(pixels, max_pixels_per_can=100):
	output = []

	pixels = np.array(pixels)
	## "Straining" pixels by value
	picture_mod   = pixels.shape[0]

	pixels = pixels.reshape(pixels.size, 1)
	unique = np.unique(pixels)

	for unique_value in unique:

		print("finding short path for label " + str(unique_value))

		filtered_idx = np.where(pixels == unique_value)

		x_coords = np.remainder(filtered_idx[0], picture_mod)
		y_coords = np.floor_divide(filtered_idx[0], picture_mod)
		coords   = np.transpose([x_coords, y_coords])

		## Ordering pixels

		ordered = np.array(find_short_path(coords))
		order_s = ordered.shape[0]

		num_divisions = int(order_s/max_pixels_per_can) + 1

		if num_divisions > 0:
			split_points  = [i*max_pixels_per_can for i in range(num_divisions)]
			output.append([unique_value] + np.array_split(ordered, split_points)[1:])
		else:
			output.append([unique_value, ordered])

	return output
Code Example #15
File: tile_coding.py Project: rldotai/flib
    def apply(self, array):
        """
        Map the input array to its tile coding representation.

        Essentially, this proceeds by first getting the integer coordinates of
        the input array (subject to scaling), then by offsetting the 
        coordinates according to the displacement vector for each tiling.
        Then, the displaced coordinates are hashed using `hfunc`, and the 
        resulting hashed values are summed modulo `n_tiles` to produce the 
        indices of the active tiles to be used as features.

        Args:
            array (np.ndarray): The array to be tiled.
                Must be of length `n_input`, or else an exception is raised.

        Returns:
            ret (np.ndarray): An array of length `n_output`, whose entries 
                correspond to the indices of the active tiles.
        """
        if len(array) != self.n_input:
            raise ValueError("Incompatible array with length", len(array))
        x = np.floor_divide(array, self.scale).astype(int)  # np.int is deprecated; use builtin int
        v = x - ((x - self.dmat) % self.n_output)
        a = np.apply_along_axis(self.hfunc, axis=0, arr=v)
        ret = np.sum(a, axis=1) % self.n_tiles
        return ret 
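The first step of apply, integer tile coordinates from scaled inputs, in isolation (scale and point values are illustrative, not from the project):

import numpy as np

scale = np.array([0.5, 0.25])
point = np.array([1.3, 0.9])
print(np.floor_divide(point, scale).astype(int))  # [2 3]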
Code Example #16
File: timeticker.py Project: aerler/pygeode
  def get_tick_prior(self, val):
  # {{{
    ''' get_tick_prior(val) - returns the date of the first tick prior to the
    date represented by val. If a tick lies on val, it returns the date of the
    previous tick.'''

    def unpack(dt): return dt.get('year', 1), dt.get('month', 1), dt.get('day', 1), dt.get('hour', 0), dt.get('minute', 0), dt.get('second', 0)
    def pack(yr, mn, dy, hr, mi, sc): return {'year':yr, 'month':mn, 'day':dy, 'hour':hr, 'minute':mi, 'second':sc}

    dt = self._taxis.val_as_date(val, allfields=True)
    yr, mn, dy, hr, mi, sc = unpack(dt)

    if val > self._taxis.date_as_val(pack(yr, mn, dy, hr, mi, sc)): 
      yr, mn, dy, hr, mi, sc = unpack(tu.wrapdate(self._taxis, pack(yr, mn, dy, hr, mi, sc+1), allfields=True))

    # Find first hour on given multiple prior to the given hour
    from numpy import floor_divide
    sc = floor_divide(sc - 1, self.mult) * self.mult

    # If we've wrapped, decrement the year
    d = tu.wrapdate(self._taxis, pack(yr, mn, dy, hr, mi, sc), allfields=True)
    d1 = tu.wrapdate(self._taxis, pack(yr, mn, dy, hr, mi+1, 0), allfields=True)

    if tu.date_diff(self._taxis, d, d1, 'seconds') < self.mult / 2:
      return d1
    else:
      return d
Code Example #17
File: shuffle.py Project: floriango/dask
def shuffle_group(df, col, stage, k, npartitions):
    """ Splits dataframe into groups

    The group is determined by their final partition, and which stage we are in
    in the shuffle
    """
    if col == '_partitions':
        ind = df[col]
    else:
        ind = hash_pandas_object(df[col], index=False)

    c = ind._values
    typ = np.min_scalar_type(npartitions * 2)

    npartitions, k, stage = [np.array(x, dtype=np.min_scalar_type(x))[()]
                             for x in [npartitions, k, stage]]

    c = np.mod(c, npartitions).astype(typ, copy=False)
    c = np.floor_divide(c, k ** stage, out=c)
    c = np.mod(c, k, out=c)

    indexer, locations = groupsort_indexer(c.astype(np.int64), k)
    df2 = df.take(indexer)
    locations = locations.cumsum()
    parts = [df2.iloc[a:b] for a, b in zip(locations[:-1], locations[1:])]

    return dict(zip(range(k), parts))
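The two modular steps above read each hashed value as a base-k number, one digit per shuffle stage; a standalone sketch:

import numpy as np

h = np.arange(16, dtype=np.uint8)  # stand-in for the hashed values
k, npartitions = 4, 16
c = np.mod(h, npartitions)
for stage in (0, 1):
    print(stage, np.mod(np.floor_divide(c, k ** stage), k))  # group at this stage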
Code Example #18
File: grid_1D_ms.py Project: eimirae/becca
 def step(self, action): 
     """ Take one time step through the world """
     self.action = action.ravel()
     self.timestep += 1 
     energy = self.action[0] + self.action[1]
     self.world_state += self.action[0] - self.action[1]
     # Occasionally add a perturbation to the action to knock it 
     # into a different state 
     if np.random.random_sample() < self.JUMP_FRACTION:
         self.world_state = self.num_sensors * np.random.random_sample()
     # Ensure that the world state falls between 0 and 9
     self.world_state -= self.num_sensors * np.floor_divide(
             self.world_state, self.num_sensors)
     self.simple_state = int(np.floor(self.world_state))
     # Assign sensors as zeros or ones. 
     # Represent the presence or absence of the current position in the bin.
     sensors = np.zeros(self.num_sensors)
     sensors[self.simple_state] = 1
     # Assign reward based on the current state 
     reward = sensors[8] * (-self.REWARD_MAGNITUDE)
     reward += sensors[3] * (self.REWARD_MAGNITUDE)
     # Punish actions just a little 
     reward -= energy * self.ENERGY_COST
      reward = np.maximum(reward, -1.)  # clamp at -1; np.max's second argument is the axis, not a bound
     return sensors, reward
Code Example #19
File: timeticker.py Project: aerler/pygeode
  def get_tick_prior(self, val):
  # {{{
    ''' get_tick_prior(val) - returns the date of the first tick prior to the
    date represented by val. If a tick lies on val, it returns the date of the
    previous tick.'''

    def unpack(dt): return dt.get('year', 1), dt.get('month', 1), dt.get('day', 1)

    dt = self._taxis.val_as_date(val, allfields=True)
    yr, mn, dy = unpack(dt)

    if val > self._taxis.date_as_val({'year':yr, 'month':mn, 'day':dy}): 
      yr, mn, dy = unpack(tu.wrapdate(self._taxis, {'year':yr, 'month':mn, 'day':dy+1}, allfields=True))

    # Find first day on given multiple prior to the given day
    from numpy import floor_divide
    dy = floor_divide(dy - 2, self.mult) * self.mult + 1

    # If we've wrapped, decrement the year
    d = tu.wrapdate(self._taxis, {'year':yr, 'month':mn, 'day':dy}, allfields=True)
    d1 = tu.wrapdate(self._taxis, {'year':yr, 'month':mn + 1, 'day':1}, allfields=True)

    if tu.date_diff(self._taxis, d, d1, 'days') < self.mult / 2:
      return d1
    else:
      return d
Code Example #20
File: grid_1D_ms.py Project: brohrer/becca_test
    def step(self, action):
        """
        Advance the world by one time step.

        Parameters
        ----------
        action : array of floats
            The set of action commands to execute.

        Returns
        -------
        reward : float
            The amount of reward or punishment given by the world.
        sensors : array of floats
            The values of each of the sensors.
        """
        self.action = action
        self.action = np.round(self.action)
        self.timestep += 1
        self.energy = self.action[0] + self.action[1]
        self.world_state += self.action[0] - self.action[1]
        # Occasionally add a perturbation to the action to knock it
        # into a different state.
        if np.random.random_sample() < self.jump_fraction:
            self.world_state = self.num_positions * np.random.random_sample()
        # Ensure that the world state falls between 0 and 9
        self.world_state -= self.num_positions * np.floor_divide(
            self.world_state, self.num_positions)
        self.simple_state = int(np.floor(self.world_state))
        if self.simple_state == 9:
            self.simple_state = 0
        sensors = self.sense()
        reward = self.assign_reward()
        return sensors, reward
Code Example #21
File: grid_1D_noise.py Project: alito/becca
 def step(self, action): 
     """ Take one time step through the world """
     self.action = action.copy().ravel()
     self.timestep += 1 
     step_size = self.action[0] - self.action[1]
     # An approximation of metabolic energy
     energy = self.action[0] + self.action[1]
     self.world_state = self.world_state + step_size
     # At random intervals, jump to a random position in the world
     if np.random.random_sample() < self.JUMP_FRACTION:
         self.world_state = (self.num_real_sensors * 
                             np.random.random_sample())
     # Ensure that the world state falls between 0 and num_real_sensors 
     self.world_state -= (self.num_real_sensors * 
                          np.floor_divide(self.world_state, 
                                          self.num_real_sensors))
     self.simple_state = int(np.floor(self.world_state))
     # Assign sensors as zeros or ones. 
     # Represent the presence or absence of the current position in the bin.
     real_sensors = np.zeros(self.num_real_sensors)
     real_sensors[self.simple_state] = 1
     # Generate a set of noise sensors
     noise_sensors = np.round(np.random.random_sample(
             self.num_noise_sensors))
     sensors = np.hstack((real_sensors, noise_sensors))
     reward = -self.REWARD_MAGNITUDE
     if self.simple_state == 1:
         reward = self.REWARD_MAGNITUDE
     reward -= energy * self.ENERGY_COST        
     return sensors, reward
Code Example #22
File: correlations.py Project: EQ4/resonator
 def g2(intensity, compute_range = None):
      '''
      computes g2 from the intensity array
      @var intensity is the array of photon counts per unit time, such as [1, 0, 3, 4, ...]
      '''
     intensity = np.array(intensity)
     N = intensity.size
     max_correlation_length = np.floor_divide(N , 2)
     correlate_window = intensity[ : max_correlation_length]
     g2 = []
     kk = []
      if compute_range is None:
         range_k = range(0, max_correlation_length + 1)
     else:
         range_k = range(0, compute_range)
     for k in range_k:
          print(k)
         #for each correlation length, offset the array and compute the product
         moving_window = intensity[k : max_correlation_length + k]
         product = np.mean(moving_window * correlate_window)
         normalized = product / (np.mean(correlate_window) * np.mean(moving_window))
         g2.append(normalized)
         kk.append(k)
     #special case for k = 0:
     g2[0] = np.mean(correlate_window * (correlate_window - 1)) / (np.mean(correlate_window)**2)
     return np.array(kk), np.array(g2)
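A usage sketch: for Poissonian (coherent) counts, g2 should come out close to 1 at every lag:

import numpy as np

counts = np.random.poisson(5.0, size=10000)
lags, g2_vals = g2(counts, compute_range=10)
print(g2_vals)  # values near 1.0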
Code Example #23
File: __init__.py Project: pbiczo/vimconfig
def angle_difference(a, b, pi=np.pi):
    """Find the unwrapped difference in angle between `a` and `b`."""
    diff = np.subtract(a, b)
    div = np.floor_divide(pi + np.abs(diff), 2 * pi) * 2 * pi
    diff[diff > pi] -= div[diff > pi]
    diff[diff < -pi] += div[diff < -pi]
    return diff
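For example, angles just either side of ±π wrap to small signed differences instead of values near ±2π:

import numpy as np

a = np.array([3.0, -3.0, 0.1])
b = np.array([-3.0, 3.0, 0.2])
print(angle_difference(a, b))  # approximately [-0.283  0.283 -0.1]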
Code Example #24
    def test_floor_divide_remainder_and_divmod(self):
        inch = u.Unit(0.0254 * u.m)
        dividend = np.array([1., 2., 3.]) * u.m
        divisor = np.array([3., 4., 5.]) * inch
        quotient = dividend // divisor
        remainder = dividend % divisor
        assert_allclose(quotient.value, [13., 19., 23.])
        assert quotient.unit == u.dimensionless_unscaled
        assert_allclose(remainder.value, [0.0094, 0.0696, 0.079])
        assert remainder.unit == dividend.unit
        quotient2 = np.floor_divide(dividend, divisor)
        remainder2 = np.remainder(dividend, divisor)
        assert np.all(quotient2 == quotient)
        assert np.all(remainder2 == remainder)
        quotient3, remainder3 = divmod(dividend, divisor)
        assert np.all(quotient3 == quotient)
        assert np.all(remainder3 == remainder)

        with pytest.raises(TypeError):
            divmod(dividend, u.km)

        with pytest.raises(TypeError):
            dividend // u.km

        with pytest.raises(TypeError):
            dividend % u.km

        if hasattr(np, 'divmod'):  # not NUMPY_LT_1_13
            quotient4, remainder4 = np.divmod(dividend, divisor)
            assert np.all(quotient4 == quotient)
            assert np.all(remainder4 == remainder)
            with pytest.raises(TypeError):
                np.divmod(dividend, u.km)
Code Example #25
def clean_data(game_data):

    #This takes the section variable in the form <LL###>
    #and breaks it into two sub-variables:
    #the letters carry information about the level,
    #the digits carry information about the section
    sh = game_data.section.str.extract(r'(?P<letter>[A-Z]*)(?P<digit>\d*)')


    #Merge the extracted data back into the dataframe
    game_data = game_data.join(sh)


    #Some rows did not have section info; they are ignored in the analysis
    game_data = game_data[game_data.digit != ""]
    #Convert the section digits to an integer and the price to a float
    game_data['digit'] = game_data['digit'].apply(int)
    game_data['price'] = game_data['price'].apply(float)


    #The section variable carries two pieces of information.
    #The first digit of the section variable is the deck (1, 2, 3)
    game_data['deck'] = np.floor_divide(game_data['digit'], 100)

    #The last two digits are the block, describing the relative position in the stadium
    game_data['block'] = game_data['digit'] - game_data['deck']*100

    return game_data
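The deck/block split is plain integer arithmetic; with illustrative section digits:

import numpy as np

digits = np.array([101, 245, 318])
deck = np.floor_divide(digits, 100)  # leading digit: [1 2 3]
block = digits - deck * 100          # trailing two digits: [ 1 45 18]
print(deck, block)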
Code Example #26
File: toast_satellite_sim.py Project: hpc4cmb/toast
def get_submaps(args, comm, data):
    """ Get a list of locally hit pixels and submaps on every process.

    """
    autotimer = timing.auto_timer()
    if comm.comm_world.rank == 0:
        print('Scanning local pixels', flush=args.flush)
    start = MPI.Wtime()

    # Prepare for using distpixels objects
    nside = args.nside
    subnside = 16
    if subnside > nside:
        subnside = nside
    subnpix = 12 * subnside * subnside

    # get locally hit pixels
    lc = tm.OpLocalPixels()
    localpix = lc.exec(data)
    if localpix is None:
        raise RuntimeError(
            'Process {} has no hit pixels. Perhaps there are fewer '
            'detectors than processes in the group?'.format(
                comm.comm_world.rank))

    # find the locally hit submaps.
    localsm = np.unique(np.floor_divide(localpix, subnpix))

    comm.comm_world.barrier()
    stop = MPI.Wtime()
    elapsed = stop - start
    if comm.comm_world.rank == 0:
        print('Local submaps identified in {:.3f} s'.format(elapsed),
              flush=args.flush)
    return localpix, localsm, subnpix
Code Example #27
File: timeseries.py Project: mattja/nsim
 def __setitem__(self, index, value):
     #TODO update logic to match __getitem__
     ts = self.ts
     dt = (ts.tspan[-1] - ts.tspan[0]) / (len(ts) - 1)
     if isinstance(index, numbers.Number):
         newix = ts.tspan.searchsorted(index)
         return ts.__setitem__(newix, value)
     elif isinstance(index, _SliceType):
         if index.step is None:
              start, stop = ts.tspan.searchsorted([index.start, index.stop])
             return ts.__setitem__(slice(start, stop, None), value)
         else:
              n = np.floor_divide(index.stop - index.start, index.step)
             times = np.linspace(index.start, index.stop, n, endpoint=False)
             indices = ts.tspan.searchsorted(times)
             if indices[-1] == len(ts.tspan):
                 indices = indices[:-1]
             return ts.__setitem__(indices, value)
     elif isinstance(index, _EllipsisType) or index is None:
         return ts.__setitem__(index, value)
      elif isinstance(index, np.ndarray) and index.ndim == 1:
         indices = ts.tspan.searchsorted(index)
         if indices[-1] == len(ts.tspan):
             indices = indices[:-1]
         return ts.__setitem__(indices, value)
     elif isinstance(index, _TupleType):
         timeix = index[0]
         ts = ts.t[timeix]
         otherix = index[1:]
         return ts.__setitem__(otherix, value)
     else:
         raise TypeError("Time slicing can't handle that type of index yet")
Code Example #28
    def test_half_ufuncs(self):
        """Test the various ufuncs"""

        a = np.array([0, 1, 2, 4, 2], dtype=float16)
        b = np.array([-2, 5, 1, 4, 3], dtype=float16)
        c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16)

        assert_equal(np.add(a, b), [-2, 6, 3, 8, 5])
        assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1])
        assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6])
        assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625])

        assert_equal(np.equal(a, b), [False, False, False, True, False])
        assert_equal(np.not_equal(a, b), [True, True, True, False, True])
        assert_equal(np.less(a, b), [False, True, False, False, True])
        assert_equal(np.less_equal(a, b), [False, True, False, True, True])
        assert_equal(np.greater(a, b), [True, False, True, False, False])
        assert_equal(np.greater_equal(a, b), [True, False, True, True, False])
        assert_equal(np.logical_and(a, b), [False, True, True, True, True])
        assert_equal(np.logical_or(a, b), [True, True, True, True, True])
        assert_equal(np.logical_xor(a, b), [True, False, False, False, False])
        assert_equal(np.logical_not(a), [True, False, False, False, False])

        assert_equal(np.isnan(c), [False, False, False, True, False])
        assert_equal(np.isinf(c), [False, False, True, False, False])
        assert_equal(np.isfinite(c), [True, True, False, False, True])
        assert_equal(np.signbit(b), [True, False, False, False, False])

        assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3])

        assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3])
        x = np.maximum(b, c)
        assert_(np.isnan(x[3]))
        x[3] = 0
        assert_equal(x, [0, 5, 1, 0, 6])
        assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2])
        x = np.minimum(b, c)
        assert_(np.isnan(x[3]))
        x[3] = 0
        assert_equal(x, [-2, -1, -np.inf, 0, 3])
        assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3])
        assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6])
        assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2])
        assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3])

        assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0])
        assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2])
        assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2]))
        assert_equal(np.square(b), [4, 25, 1, 16, 9])
        assert_equal(np.reciprocal(b), [-0.5, 0.199951171875, 1, 0.25, 0.333251953125])
        assert_equal(np.ones_like(b), [1, 1, 1, 1, 1])
        assert_equal(np.conjugate(b), b)
        assert_equal(np.absolute(b), [2, 5, 1, 4, 3])
        assert_equal(np.negative(b), [2, -5, -1, -4, -3])
        assert_equal(np.positive(b), b)
        assert_equal(np.sign(b), [-1, 1, 1, 1, 1])
        assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b))
        assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2]))
        assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12])
Code Example #29
File: band.py Project: FPaquin/cp_matrices
    def findIndForIntpl(self,cp):
        '''find the indices of interpolation points'''
        #find base point first
        #subBlock is the sub of Block the base points lie in
        p = self.interpDegree
        if p%2 == 1:
            offset = p // 2
        else:
            offset = p // 2 - 1
        subBlock = np.floor_divide(cp+2-self.dx/2,self.dx)
        bp = (subBlock-(offset-1/2))*self.dx - 2
        subInBlock = np.mod(subBlock,self.m)
        subBlock = np.floor_divide(subBlock,self.m)
#        corner = self.BlockSub2CornerCarWithoutBand(subBlock)

        subInBlock -= offset

        offsetBlock = np.floor_divide(subInBlock,self.m)
        subInBlock = np.mod(subInBlock,self.m)
        subBlock += offsetBlock




        p = self.interpDegree + 1
        d = self.Dim
        x = np.arange(p**d)
        x = self.Ind2Sub(x, (p,)*d)
#        x = np.tile(x,(cp.shape[0],1))
        #time consuming or memory consuming? choose one
        subInBlock = np.repeat(subInBlock,p**d,axis=0)
        subBlock = np.repeat(subBlock,p**d,axis=0)
#        offset = np.zeros(subBlock.shape)
#        x += subInBlock
        subInBlock += np.tile(x,(cp.shape[0],1))
        subBlock += np.floor_divide(subInBlock,self.m)

#        ind = np.where( x > self.m )
#        offset[ind] = 1
        subInBlock = np.mod(subInBlock,self.m)

        indBlock = self.BlockSub2IndWithoutBand(subBlock)
        indBlock = self.ni2pi.app2petsc(indBlock)
        ind = self.Sub2Ind(subInBlock, (self.m,)*d)
        ind += indBlock*self.m**d
        return bp,ind.reshape((-1,p**d))
Code Example #30
File: tif_icon.py Project: rstasney/540Nexy
def divideBy16(array):
    
    '''
        Function: Divides a decimal numpy array by 16, producing elements in the hex digit range (0-F)
                  Returns: numpy array
    '''
    new = np.floor_divide(array, 16)  # floor division avoids a floating-point result
    return new
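For example, this extracts the high hex digit of 8-bit pixel values:

import numpy as np

pixels = np.array([0, 15, 16, 130, 255])
print(divideBy16(pixels))  # [ 0  0  1  8 15]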
Code Example #31
# start = time.perf_counter()
# np.sin(x,x)
# print("numpy.sin:", time.perf_counter() - start)
#
# x = [i * 0.001 for i in range(1000000)]
# start = time.perf_counter()
# for i, t in enumerate(x):
#     x[i] = np.sin(t)
# print("numpy.sin loop:", time.perf_counter() - start)

# y = x1 + x2:	add(x1, x2 [, y])
# y = x1 - x2:	subtract(x1, x2 [, y])
# y = x1 * x2:	multiply(x1, x2 [, y])
# y = x1 / x2:	divide(x1, x2 [, y]); if both arrays hold integers, integer division is used
# y = x1 / x2:	true_divide(x1, x2 [, y]); always returns the exact quotient
# y = x1 // x2:	floor_divide(x1, x2 [, y]); always floors the return value
# y = -x:	negative(x [, y])
# y = x1**x2:	power(x1, x2 [, y])
# y = x1 % x2:	remainder(x1, x2 [, y]), mod(x1, x2 [, y])
a = np.arange(0, 4)
print(a)
b = np.arange(1, 5)
print(b)
print("a+b: ", np.add(a, b))
print("a-b: ", np.subtract(a, b))
print("a*b: ", np.multiply(a, b))
print("a/b: ", np.divide(a, b))
print("a/b: ", np.true_divide(a, b))
print("a//b: ", np.floor_divide(a, b))
print("a**b: ", np.power(a, b))
print("a%b: ", np.remainder(a, b))
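The optional third argument written [, y] in the table above is the output array; a short standalone sketch:

import numpy as np

a = np.arange(0, 4)
b = np.arange(1, 5)
y = np.empty_like(a)
np.floor_divide(a, b, y)  # same as y[...] = a // b
print("a//b (in place): ", y)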
Code Example #32
def answer(a):
    # Use zeros_like to create an array `result` with the same shape as a, filled with zeros
    result = np.zeros_like(a)
    result.flat = 42
    return result


# Create a universal function with frompyfunc: the first 1 is the number of input
# arguments, the following 1 is the number of outputs.
# Strictly speaking, a universal function is not a real function but an object that represents one.
ufunc = np.frompyfunc(answer, 1, 1)
print('The answer', ufunc(np.arange(4)))
print('The answer', ufunc(np.arange(4).reshape(2, 2)))
print()

a = np.arange(9)
print('reduce', np.add.reduce(a))
print('accumulate', np.add.accumulate(a))
print('outer', np.add.outer(np.arange(3), a))
print()

# Elementwise array division
a = np.array([2, 6, 5])
b = np.array([1, 2, 3])
print('divide', np.divide(a, b), np.divide(b, a))
print('true_divide', np.true_divide(a, b), np.true_divide(b, a))
print('floor divide', np.floor_divide(a, b), np.floor_divide(b, a))

c = 3.14 * b
print('floor divide', np.floor_divide(c, b), np.floor_divide(b, c))
print(a / b)
print(a // b)
Code Example #33
    def wideCircle(self, orig_seq, modif_seq):
        """
        Similar to wideTurn.
        The first and last points of the sequence are the same,
        so it is possible to extend the end of the sequence
        with its beginning when searching for triangles.

        It is necessary to find the direction of the curve from three known points (a triangle).
        If the triangle is not wide enough, there is a huge risk of finding
        an incorrect orientation due to insufficient accuracy.
        So, when consecutive points are too close, the method
        uses following and preceding points to form a wider triangle around
        the current point.
        dmin_tri is the minimum distance between two consecutive points
        of an acceptable triangle.
        """
        dmin_tri = 0.5
        iextra_base = np.floor_divide(len(orig_seq), 3)  # Nb of extra points
        ibeg = 0  # Index of first point of the triangle
        iend = 0  # Index of the third point of the triangle
        for i, step in enumerate(orig_seq):
            if i == 0 or i == len(orig_seq) - 1:
                # First and last point of the sequence are the same,
                # so it is necessary to skip one of these two points
                # when creating a triangle containing the first or the last point
                iextra = iextra_base + 1
            else:
                iextra = iextra_base
            # i is the index of the second point of the triangle
            # pos_after is the array of positions of the original sequence
            # after the current point
            pos_after = np.resize(np.roll(orig_seq, -i - 1, 0), (iextra, 2))
            # Vector of distances between the current point and each following point
            dist_from_point = ((step - pos_after)**2).sum(1)
            if np.amax(dist_from_point) < dmin_tri * dmin_tri:
                continue
            iend = np.argmax(dist_from_point >= dmin_tri * dmin_tri)
            # pos_before is the array of positions of the original sequence
            # before the current point
            pos_before = np.resize(np.roll(orig_seq, -i, 0)[::-1], (iextra, 2))
            # This time, vector of distances between the current point and each preceding point
            dist_from_point = ((step - pos_before)**2).sum(1)
            if np.amax(dist_from_point) < dmin_tri * dmin_tri:
                continue
            ibeg = np.argmax(dist_from_point >= dmin_tri * dmin_tri)
            # See https://github.com/electrocbd/post_stretch for explanations
            # relpos is the relative position of the projection of the second point
            # of the triangle on the segment from the first to the third point
            # 0 means the position of the first point, 1 means the position of the third,
            # intermediate values are positions between
            length_base = ((pos_after[iend] - pos_before[ibeg])**2).sum(0)
            relpos = ((step - pos_before[ibeg]) *
                      (pos_after[iend] - pos_before[ibeg])).sum(0)
            if np.fabs(relpos) < 1000.0 * np.fabs(length_base):
                relpos /= length_base
            else:
                relpos = 0.5  # To avoid division by zero or precision loss
            projection = (pos_before[ibeg] + relpos *
                          (pos_after[iend] - pos_before[ibeg]))
            dist_from_proj = np.sqrt(((projection - step)**2).sum(0))
            if dist_from_proj > 0.0003:  # Move central point only if points are not aligned
                modif_seq[i] = (step - (self.wc_stretch / dist_from_proj) *
                                (projection - step))

        return
Code Example #34
erf = utils.copy_docstring(tf.math.erf,
                           lambda x, name=None: scipy_special.erf(x))

erfc = utils.copy_docstring(tf.math.erfc,
                            lambda x, name=None: scipy_special.erfc(x))

erfinv = utils.copy_docstring(tf.math.erfinv,
                              lambda x, name=None: scipy_special.erfinv(x))

exp = utils.copy_docstring(tf.math.exp, lambda x, name=None: np.exp(x))

expm1 = utils.copy_docstring(tf.math.expm1, lambda x, name=None: np.expm1(x))

floor = utils.copy_docstring(tf.math.floor, lambda x, name=None: np.floor(x))

floordiv = utils.copy_docstring(tf.math.floordiv,
                                lambda x, y, name=None: np.floor_divide(x, y))

greater = utils.copy_docstring(tf.math.greater,
                               lambda x, y, name=None: np.greater(x, y))

greater_equal = utils.copy_docstring(
    tf.math.greater_equal, lambda x, y, name=None: np.greater_equal(x, y))

igamma = utils.copy_docstring(
    tf.math.igamma, lambda a, x, name=None: scipy_special.gammainc(a, x))

igammac = utils.copy_docstring(
    tf.math.igammac, lambda a, x, name=None: scipy_special.gammaincc(a, x))

imag = utils.copy_docstring(tf.math.imag,
                            lambda input, name=None: np.imag(input))
Code Example #35
File: specex.py Project: dmargala/desispec
def main(args, comm=None):

    log = get_logger()

    #- only import when running, to avoid requiring specex install for import
    from specex.specex import run_specex

    imgfile = args.input_image
    outfile = args.output_psf

    nproc = 1
    rank = 0
    if comm is not None:
        nproc = comm.size
        rank = comm.rank

    hdr = None
    if rank == 0:
        hdr = fits.getheader(imgfile)
    if comm is not None:
        hdr = comm.bcast(hdr, root=0)

    #- Locate line list in $SPECEXDATA or specex/data
    if 'SPECEXDATA' in os.environ:
        specexdata = os.environ['SPECEXDATA']
    else:
        from pkg_resources import resource_filename
        specexdata = resource_filename('specex', 'data')

    lamp_lines_file = os.path.join(specexdata, 'specex_linelist_desi.txt')

    if args.input_psf is not None:
        inpsffile = args.input_psf
    else:
        from desispec.calibfinder import findcalibfile
        inpsffile = findcalibfile([
            hdr,
        ], 'PSF')

    optarray = []
    if args.extra is not None:
        optarray = args.extra.split()

    specmin = int(args.specmin)
    nspec = int(args.nspec)
    bundlesize = int(args.bundlesize)

    specmax = specmin + nspec

    # Now we divide our spectra into bundles

    checkbundles = set()
    checkbundles.update(
        np.floor_divide(np.arange(specmin, specmax),
                        bundlesize * np.ones(nspec)).astype(int))
    bundles = sorted(checkbundles)
    nbundle = len(bundles)

    bspecmin = {}
    bnspec = {}
    for b in bundles:
        if specmin > b * bundlesize:
            bspecmin[b] = specmin
        else:
            bspecmin[b] = b * bundlesize
        if (b + 1) * bundlesize > specmax:
            bnspec[b] = specmax - bspecmin[b]
        else:
            bnspec[b] = (b + 1) * bundlesize - bspecmin[b]

    # Now we assign bundles to processes

    mynbundle = int(nbundle / nproc)
    leftover = nbundle % nproc
    if rank < leftover:
        mynbundle += 1
        myfirstbundle = bundles[0] + rank * mynbundle
    else:
        myfirstbundle = bundles[0] + ((mynbundle + 1) * leftover) + \
            (mynbundle * (rank - leftover))

    if rank == 0:
        # Print parameters
        log.info("specex:  using {} processes".format(nproc))
        log.info("specex:  input image = {}".format(imgfile))
        log.info("specex:  input PSF = {}".format(inpsffile))
        log.info("specex:  output = {}".format(outfile))
        log.info("specex:  bundlesize = {}".format(bundlesize))
        log.info("specex:  specmin = {}".format(specmin))
        log.info("specex:  specmax = {}".format(specmax))
        if args.broken_fibers:
            log.info("specex:  broken fibers = {}".format(args.broken_fibers))

    # get the root output file

    outpat = re.compile(r'(.*)\.fits')
    outmat = outpat.match(outfile)
    if outmat is None:
        raise RuntimeError("specex output file should have .fits extension")
    outroot = outmat.group(1)

    outdir = os.path.dirname(outroot)
    if rank == 0:
        if outdir != "":
            if not os.path.isdir(outdir):
                os.makedirs(outdir)

    cam = hdr["camera"].lower().strip()
    band = cam[0]

    failcount = 0

    for b in range(myfirstbundle, myfirstbundle + mynbundle):
        outbundle = "{}_{:02d}".format(outroot, b)
        outbundlefits = "{}.fits".format(outbundle)
        com = ['desi_psf_fit']
        com.extend(['-a', imgfile])
        com.extend(['--in-psf', inpsffile])
        com.extend(['--out-psf', outbundlefits])
        com.extend(['--lamp-lines', lamp_lines_file])
        com.extend(['--first-bundle', "{}".format(b)])
        com.extend(['--last-bundle', "{}".format(b)])
        com.extend(['--first-fiber', "{}".format(bspecmin[b])])
        com.extend(['--last-fiber', "{}".format(bspecmin[b] + bnspec[b] - 1)])
        if band == "z":
            com.extend(['--legendre-deg-wave', "{}".format(3)])
            com.extend(['--fit-continuum'])
        else:
            com.extend(['--legendre-deg-wave', "{}".format(1)])
        if args.broken_fibers:
            com.extend(['--broken-fibers', "{}".format(args.broken_fibers)])
        if args.debug:
            com.extend(['--debug'])

        com.extend(optarray)

        log.debug("proc {} calling {}".format(rank, " ".join(com)))

        retval = run_specex(com)

        if retval != 0:
            comstr = " ".join(com)
            log.error("desi_psf_fit on process {} failed with return "
                      "value {} running {}".format(rank, retval, comstr))
            failcount += 1

    if comm is not None:
        from mpi4py import MPI
        failcount = comm.allreduce(failcount, op=MPI.SUM)

    if failcount > 0:
        # all processes throw
        raise RuntimeError("some bundles failed desi_psf_fit")

    if rank == 0:
        outfits = "{}.fits".format(outroot)

        inputs = ["{}_{:02d}.fits".format(outroot, x) for x in bundles]

        if args.disable_merge:
            log.info("don't merge")
        else:
            #- Empirically it appears that files written by one rank sometimes
            #- aren't fully buffer-flushed and closed before getting here,
            #- despite the MPI allreduce barrier.  Pause to let I/O catch up.
            log.info('5 sec pause before merging')
            sys.stdout.flush()
            time.sleep(5.)

            merge_psf(inputs, outfits)

            log.info('done merging')

            if failcount == 0:
                # only remove the per-bundle files if the merge was good
                for f in inputs:
                    if os.path.isfile(f):
                        os.remove(f)

    if comm is not None:
        failcount = comm.bcast(failcount, root=0)

    if failcount > 0:
        # all processes throw
        raise RuntimeError("merging of per-bundle files failed")

    return
Code Example #36
File: avg_pixel.py Project: ulrikah/imani
#!/usr/bin/env python3
import imageio
import numpy as np
from pathlib import Path

faces = []

DATA_PATH = 'assets/faces/'  # make sure all images have the same size (w x h)
NEW_FACE_URI = 'out.png'
SUFFIXES = ['.png', '.jpg']

try:
    paths = [
        path for path in Path(DATA_PATH).iterdir() if path.suffix in SUFFIXES
    ]
    n = len(paths)
    print(f"{n} faces are used to generate this output")

except FileNotFoundError:
    print(f"Could not find the directory {DATA_PATH}")
    raise SystemExit(1)  # paths would be undefined below, so stop here

for posix_path in paths:
    path = str(posix_path)
    faces.append(imageio.imread(path))
    # print(f"Added {path} to output")

faces_sum = np.sum(faces, axis=0, dtype=np.int32)
# floor_divide returns a new array; the result must be assigned, not discarded
face_avg = np.floor_divide(faces_sum, n)
imageio.imwrite(NEW_FACE_URI, face_avg.astype(np.uint8))
Code Example #37
File: post_process-WGD.py Project: cpempire/pWGD
import pickle
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from datetime import datetime, timedelta
# # plot data, moving average, optimized simulation, and prediction

data = pickle.load(open("data/data_nSamples_128_isProjection_True_WGD.p", 'rb'))
d_average = data["d_average"]
plt.figure()
step = 20
for i in range(np.floor_divide(len(d_average), step)):
    label = r"$\ell = $" + str(i*step)
    plt.plot(np.log10(np.sort(d_average[i*step])[::-1]), '.-', label=label)
plt.xlabel("r", fontsize=16)
plt.ylabel(r"$\log_{10}(|\lambda_r|)$", fontsize=16)
plt.legend()
plt.tick_params(axis='both', which='major', labelsize=16)
plt.tick_params(axis='both', which='minor', labelsize=16)
plt.savefig("figure/covid19_eigenvalues.pdf")
# plt.show()
plt.close()

from model import *

time_delta = timedelta(days=1)
stop_date = datetime(2020, 6, 6)
start_date = stop_date - timedelta(len(misfit.t_total))
dates = mdates.drange(start_date, stop_date, time_delta)

Code Example #38
def generate_pos_neg_label_crop_centers(
    spatial_size: Union[Sequence[int], int],
    num_samples: int,
    pos_ratio: float,
    label_spatial_shape: Sequence[int],
    fg_indices: np.ndarray,
    bg_indices: np.ndarray,
    rand_state: np.random.RandomState = np.random,
) -> List[List[np.ndarray]]:
    """
    Generate valid sample locations based on the label with option for specifying foreground ratio
    Valid: samples sitting entirely within image, expected input shape: [C, H, W, D] or [C, H, W]

    Args:
        spatial_size: spatial size of the ROIs to be sampled.
        num_samples: total sample centers to be generated.
        pos_ratio: ratio of total locations generated that have center being foreground.
        label_spatial_shape: spatial shape of the original label data to unravel selected centers.
        fg_indices: pre-computed foreground indices in 1 dimension.
        bg_indices: pre-computed background indices in 1 dimension.
        rand_state: numpy randomState object to align with other modules.

    Raises:
        ValueError: When the proposed roi is larger than the image.
        ValueError: When the foreground and background indices lengths are 0.

    """
    spatial_size = fall_back_tuple(spatial_size, default=label_spatial_shape)
    if not (np.subtract(label_spatial_shape, spatial_size) >= 0).all():
        raise ValueError("The proposed roi is larger than the image.")

    # Select subregion to assure valid roi
    valid_start = np.floor_divide(spatial_size, 2)
    # add 1 for random
    valid_end = np.subtract(label_spatial_shape + np.array(1),
                            spatial_size / np.array(2)).astype(np.uint16)
    # int generation to have full range on upper side, but subtract unfloored size/2 to prevent rounded range
    # from being too high
    for i in range(
            len(valid_start)
    ):  # need this because np.random.randint does not work with same start and end
        if valid_start[i] == valid_end[i]:
            valid_end[i] += 1

    def _correct_centers(center_ori: List[np.ndarray], valid_start: np.ndarray,
                         valid_end: np.ndarray) -> List[np.ndarray]:
        for i, c in enumerate(center_ori):
            center_i = c
            if c < valid_start[i]:
                center_i = valid_start[i]
            if c >= valid_end[i]:
                center_i = valid_end[i] - 1
            center_ori[i] = center_i
        return center_ori

    centers = []

    if not len(fg_indices) or not len(bg_indices):
        if not len(fg_indices) and not len(bg_indices):
            raise ValueError("No sampling location available.")
        warnings.warn(
            f"N foreground {len(fg_indices)}, N background {len(bg_indices)}, "
            "unable to generate class balanced samples.")
        pos_ratio = 0 if not len(fg_indices) else 1

    for _ in range(num_samples):
        indices_to_use = fg_indices if rand_state.rand(
        ) < pos_ratio else bg_indices
        random_int = rand_state.randint(len(indices_to_use))
        center = np.unravel_index(indices_to_use[random_int],
                                  label_spatial_shape)
        # shift center to range of valid centers
        center_ori = list(center)
        centers.append(_correct_centers(center_ori, valid_start, valid_end))

    return centers
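A standalone sketch of the valid-center window computed above (shapes are illustrative):

import numpy as np

label_spatial_shape = np.array([96, 96, 48])
spatial_size = np.array([64, 64, 32])
valid_start = np.floor_divide(spatial_size, 2)
valid_end = np.subtract(label_spatial_shape + np.array(1),
                        spatial_size / np.array(2)).astype(np.uint16)
print(valid_start, valid_end)  # centers are clipped to [valid_start, valid_end)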
Code Example #39
File: wms.py Project: dbasrai/watchmyset-online
def predict(url):
    chunk_path = '/home/watch_my_set/chunks'
    prediction_path = '/home/watch_my_set/bucket/predictions'
    model_path = '/home/watch_my_set/bucket/model'
    yt_url = f'https://youtu.be/{url}'
    output_path = f'/home/watch_my_set/youtube/{url}'
    ydl_opts = {
        'outtmpl':
        os.path.join(output_path, '%(title)s-%(id)s.%(ext)s'),
        'format':
        'bestaudio/best',
        'postprocessors': [{
            'key': 'FFmpegExtractAudio',
            'preferredcodec': 'wav',
            'preferredquality': '192'
        }],
        'postprocessor_args': ['-ar', '16000'],
        'prefer_ffmpeg':
        True,
        'keepvideo':
        True
    }
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        ydl.download([yt_url])

    for filename in os.listdir(output_path):
        if filename.endswith(".wav"):
            title, chunkname_list = gen_chunks(f'{output_path}/{filename}',
                                               chunk_path)

    shutil.rmtree(output_path)

    X_pred_list = []
    _min, _max = float('inf'), -float('inf')

    model_name = 'WMS_model_5.5k.model'
    model = load_model(f'{model_path}/{model_name}')

    for i in tqdm(chunkname_list):
        signal, rate = librosa.load(f'{chunk_path}/{i}', sr=16000)
        mel = mfcc(signal[:rate], rate, numcep=13, nfilt=26, nfft=512)
        X_pred_list.append(mel)
        _min = min(np.amin(X_pred_list), _min)
        _max = max(np.amax(X_pred_list), _max)

    X_pred = np.array(X_pred_list)
    X_pred = (X_pred - _min) / (_max - _min)
    X_pred = X_pred.reshape(X_pred.shape[0], X_pred.shape[1], X_pred.shape[2])

    y_pred = model.predict(X_pred)

    y_guess = np.argmax(y_pred, axis=1)

    np.savetxt(f"{prediction_path}/{model_name}_{title}_csv_predict.csv",
               y_guess,
               delimiter=",")

    delete_path(chunk_path)

    numLaughter = np.count_nonzero(y_guess)
    laughPercent = round((numLaughter / y_guess.size) * 100, 1)
    laughTimes = []
    laughTimesList = []
    numLaughs = 0
    check = 0

    for i in tqdm(range(y_guess.size)):
        if (y_guess[i] == 1 and check == 0):
            numLaughs = numLaughs + 1
            check = 1
            laughTimes.append(i)
        else:
            check = y_guess[i]

    for i in tqdm(range(len(laughTimes))):
        seconds = np.mod(laughTimes[i], 60)
        minutes = np.floor_divide(laughTimes[i], 60)
        laughTimesList.append('{0} minutes '.format(minutes) +
                              'and {0} seconds'.format(seconds))

    laughsPerMin = numLaughs / (y_guess.size / 60)
    laughsPerMin = round(laughsPerMin, 1)

    print('\n{0} stats'.format(title))
    print('Laugh Percentage = {0}%'.format(laughPercent))
    print('Num Laughs = {0}'.format(numLaughs))
    print("\n".join(laughTimesList))

    plt.rcParams["figure.figsize"] = (3, 1)
    x_axis = []
    y_axis = []
    for i in range(y_guess.size - 1):
        minutes = str(np.floor_divide(i, 60))
        seconds = str(np.mod(i, 60))
        if minutes == '0':
            x_axis.append('' + seconds)
        else:
            x_axis.append('' + minutes + ':' + seconds)
        if y_guess[i] == 0:
            y_axis.append('silence')
        else:
            y_axis.append('laughter')

    fig, ax = plt.subplots()
    ax.step(x_axis, y_axis)

    plt.xlim([0, y_guess.shape[0]])
    plt.ylim([-.05, 1.05])
    plt.yticks(fontsize=16)
    plt.xticks(fontsize=8)
    plt.xticks(np.arange(0, y_guess.shape[0], 10))
    plt.fill_between(x_axis, y_axis, step="pre", alpha=0.2)
    plt.savefig(f'/home/watch_my_set/bucket/plots/{url}.png', dpi=300)
    return laughPercent, numLaughs, laughsPerMin, laughTimesList, laughTimes
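
The minutes-and-seconds formatting above is a plain integer divmod on a second count (each prediction index is treated as one second). A standalone sketch:

import numpy as np

laugh_times = [42, 61, 125]  # event offsets in seconds
for t in laugh_times:
    minutes = np.floor_divide(t, 60)
    seconds = np.mod(t, 60)
    print('{0} minutes and {1} seconds'.format(minutes, seconds))
# 0 minutes and 42 seconds
# 1 minutes and 1 seconds
# 2 minutes and 5 seconds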
コード例 #40
def haga_selection(original_pop, ref, cap, delta):
    lambda_pop = original_pop[:]
    popsize = original_pop.shape[0]

    nobj = lambda_pop.shape[1]

    original_pop = lambda_pop[:]

    grid_max = np.amax(lambda_pop, axis=0)
    grid_min = np.amin(lambda_pop, axis=0)
    extremes = grid_min[:]

    grid_range = abs(grid_max - grid_min)
    grid_pad = grid_range * .1

    grid_max = grid_max + grid_pad
    grid_min = grid_min - grid_pad

    grid_range = abs(grid_max - grid_min)
    grid_step = grid_range / delta

    # remove (preserve) extreme solutions
    extreme_idx = np.array([]).astype(int)

    worst_idx = np.array([]).astype(int)

    for y in range(lambda_pop.shape[1]):
        min_idx = np.where(lambda_pop[:, y] == extremes[y])
        extreme_idx = np.append(extreme_idx, min_idx[0][0])

    extreme_idx = np.unique(extreme_idx)
    if (len(extreme_idx) >= cap):
        for r in range(len(extreme_idx) - cap):
            rejected_pop = original_pop[extreme_idx]
            rejected = worst_chv(rejected_pop, ref, nobj, (len(extreme_idx)))
            extreme_idx = np.delete(extreme_idx, (rejected), axis=0)

        best_idx = extreme_idx
        l1 = np.array(range(len(original_pop)))
        worst_idx = [x for x in l1 if x not in best_idx]
    else:

        grid_locations = np.zeros(lambda_pop.shape).astype(int)
        for y in range(lambda_pop.shape[0]):
            grid_locations[y, :] = np.floor_divide(
                (lambda_pop[y, :] - grid_min), grid_step) + 1

        # grid_density
        b = np.ascontiguousarray(grid_locations).view(
            np.dtype(
                (np.void,
                 grid_locations.dtype.itemsize * grid_locations.shape[1])))
        unique_a, grid_density = np.unique(b, return_counts=True)
        unique_a = unique_a.view(grid_locations.dtype).reshape(
            -1, grid_locations.shape[1])

        # target grid density
        ideal_grid_pop_size = cap / len(grid_density)

        sel_grid = 0
        while (sum(grid_density) > cap):
            if (grid_density[sel_grid] == max(grid_density)):
                grid_density[sel_grid] = grid_density[sel_grid] - 1
            sel_grid = (sel_grid + 1) % len(grid_density)

        init_idx = np.array([]).astype(int)
        rejected = []
        for s in range(len(grid_density)):
            grid_sel = np.where(np.all(grid_locations == unique_a[s],
                                       axis=1))[0]
            grid_pop = lambda_pop[grid_sel]
            for r in range(len(grid_sel) - grid_density[s]):
                grid_rejected = worst_chv(grid_pop, ref, nobj, (len(grid_pop)))
                rejected.append(grid_sel[grid_rejected])
                grid_sel = np.delete(grid_sel, (grid_rejected), axis=0)
                grid_pop = np.delete(grid_pop, (grid_rejected), axis=0)

        for s in range(len(rejected)):
            reject = np.where(
                np.all(original_pop == lambda_pop[rejected[s]], axis=1))
            worst_idx = np.append(worst_idx, reject)

    return worst_idx
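
The grid assignment at the heart of the else-branch is one vectorized floor division per objective. A toy sketch with integer-friendly values; the 10% pad is omitted and delta is an assumed value:

import numpy as np

pop = np.array([[0., 8.], [4., 4.], [8., 0.]])  # toy 2-objective population
delta = 4  # assumed number of grid divisions per objective

grid_min = pop.min(axis=0)
grid_step = (pop.max(axis=0) - grid_min) / delta  # [2., 2.]
grid_locations = np.floor_divide(pop - grid_min, grid_step).astype(int) + 1
print(grid_locations)  # rows: [1 5], [3 3], [5 1]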
コード例 #41
datatype = np.array(img)  # get data type of lab01.jpg
print('Data Type is', datatype.dtype)

color = ('b', 'g', 'r')  # show histogram of img (RGB)
for i, col in enumerate(color):
    histogram = cv2.calcHist([img], [i], None, [256], [0, 256])
    plt.plot(histogram, color=col)
    plt.xlim([0, 256])
plt.show()

Qlevel = 4
info = np.iinfo(gray.dtype)
Qstep = (info.max - info.min) / 2**Qlevel

img4bit = np.floor_divide(gray, Qstep) * Qstep
img4bit = np.uint8(img4bit)

datatype = np.array(img4bit)

cv2.imshow('img', img4bit)
cv2.waitKey(0)
cv2.destroyAllWindows()

img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
plt.figure(1)
plt.subplot(121, title='1')
plt.imshow(img)
plt.colorbar(orientation='horizontal')

img = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
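
The quantization above floors each 8-bit pixel to a multiple of Qstep, leaving 2**Qlevel distinct levels. The same step without the OpenCV I/O:

import numpy as np

gray = np.array([[0, 37, 128, 255]], dtype=np.uint8)  # toy grayscale image
Qlevel = 4
info = np.iinfo(gray.dtype)
Qstep = (info.max - info.min) / 2**Qlevel  # 255 / 16 = 15.9375

img4bit = np.uint8(np.floor_divide(gray, Qstep) * Qstep)
print(img4bit)  # [[  0  31 127 255]]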
コード例 #42
def readMRCHeader(MRCfilename,
                  endian='le',
                  fileConvention='ccpem',
                  pixelunits=u'\\AA'):
    '''
    Reads in the first 1024 bytes from an MRC file and parses it into a Python dictionary, yielding 
    header information.
    '''
    if endian == 'le':
        endchar = '<'
    else:
        endchar = '>'

    header = {}
    with open(MRCfilename, 'rb') as f:
        # diagStr = ''
        # Get dimensions, in format [nz, ny, nx] (stored as [nx,ny,nz] in the file)
        header['dimensions'] = np.flipud(
            np.fromfile(f, dtype=endchar + 'i4', count=3))

        header['MRCtype'] = int(
            np.fromfile(f, dtype=endchar + 'i4', count=1)[0])
        # Hack to fix lack of standard endian indication in the file header
        if header['MRCtype'] > 16000000:
            # Endianess found to be backward
            header['MRCtype'] = int(
                np.asarray(header['MRCtype']).byteswap())
            header['dimensions'] = header['dimensions'].byteswap()
            if endchar == '<':
                endchar = '>'
            else:
                endchar = '<'

        # Extract compressor from dtype > MRC_COMP_RATIO
        header['compressor'] = COMPRESSOR_ENUM[np.floor_divide(
            header['MRCtype'], MRC_COMP_RATIO)]
        header['MRCtype'] = np.mod(header['MRCtype'], MRC_COMP_RATIO)
        logger.info('compressor: %s, MRCtype: %s' %
                    (str(header['compressor']), str(header['MRCtype'])))

        fileConvention = fileConvention.lower()
        # if fileConvention == 'ccpem':
        #     diagStr += ('ioMRC.readMRCHeader: MRCtype: %s, compressor: %s, dimensions %s' %
        #         (CCPEM_ENUM[header['MRCtype']],header['compressor'], header['dimensions'] ) )
        # elif fileConvention == 'eman2':
        #     diagStr += ( 'ioMRC.readMRCHeader: MRCtype: %s, compressor: %s, dimensions %s' %
        #         (EMAN2_ENUM[header['MRCtype']],header['compressor'], header['dimensions'] ) )

        if fileConvention == 'eman2':
            try:
                header['dtype'] = EMAN2_ENUM[header['MRCtype']]
            except KeyError:
                raise ValueError('Error: unrecognized EMAN2-MRC data type = ' +
                                 str(header['MRCtype']))

        elif fileConvention == 'ccpem':  # Default is CCPEM
            try:
                header['dtype'] = CCPEM_ENUM[header['MRCtype']]
            except KeyError:
                raise ValueError('Error: unrecognized CCPEM-MRC data type = ' +
                                 str(header['MRCtype']))
        else:
            raise ValueError(
                'Error: unrecognized MRC file convention: {}'.format(
                    fileConvention))

        # Apply endian-ness to NumPy dtype
        header['dtype'] = endchar + header['dtype']
        # Read in pixelsize
        f.seek(40)
        cellsize = np.fromfile(f, dtype=endchar + 'f4', count=3)
        header['pixelsize'] = np.flipud(cellsize) / header['dimensions']
        # MRC is Angstroms by convention

        header['pixelunits'] = pixelunits

        # '\AA' will eventually be deprecated, please cease using it.
        if header['pixelunits'] == u'\\AA' or header['pixelunits'] == u'\AA':
            pass
        elif header['pixelunits'] == u'\mum':
            header['pixelsize'] *= 1E-5
        elif header['pixelunits'] == u'pm':
            header['pixelsize'] *= 100.0
        else:  # Default to nm
            header['pixelsize'] *= 0.1

        # Read in [X,Y,Z] array ordering
        # Currently I don't use this
        # f.seek(64)
        # axesTranpose = np.fromfile( f, dtype=endchar + 'i4', count=3 ) - 1

        # Read in statistics
        f.seek(76)
        (header['minImage'], header['maxImage'],
         header['meanImage']) = np.fromfile(f, dtype=endchar + 'f4', count=3)

        f.seek(92)
        header['extendedBytes'] = int(
            np.fromfile(f, dtype=endchar + 'i4', count=1))
        if header['extendedBytes'] > 0:
            # diagStr += ', extended header %d' % header['extendedBytes']

            f.seek(104)
            header['metaId'] = f.read(4)
            if header['metaId'] == b'json':
                f.seek(DEFAULT_HEADER_LEN)
                header.update(
                    json.loads(
                        f.read(header['extendedBytes']).decode('utf-8')))

        # Read in kV, C3, and gain
        f.seek(132)
        header['voltage'] = np.fromfile(f, dtype=endchar + 'f4', count=1)
        header['C3'] = np.fromfile(f, dtype=endchar + 'f4', count=1)
        header['gain'] = np.fromfile(f, dtype=endchar + 'f4', count=1)

        #diagStr += ', voltage: %.1f, C3: %.2f, gain: %.2f' % (header['voltage'], header['C3'], header['gain'])

        # Read in size of packed data
        f.seek(144)
        # Have to convert to Python int to avoid index warning.
        header['packedBytes'] = struct.unpack('q', f.read(8))[0]
        # header['packedBytes'] = int( np.fromfile( f, dtype=endchar + 'i8', count=1) )
        # if header['packedBytes'] > 0:
        #     diagStr += ', packedBytes: %d' % header['packedBytes']

        # How many bytes in an MRC
        return header
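
The compressor extraction above relies on the MRC type field packing two values as base_type + compressor_index * MRC_COMP_RATIO. A sketch of the round trip, with an assumed ratio of 1000 (the real constant is defined elsewhere in the module):

import numpy as np

MRC_COMP_RATIO = 1000  # assumed value, for illustration only

packed = 4 + 2 * MRC_COMP_RATIO  # dtype enum 4, compressor enum 2
compressor_idx = np.floor_divide(packed, MRC_COMP_RATIO)
base_type = np.mod(packed, MRC_COMP_RATIO)
print(compressor_idx, base_type)  # 2 4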
コード例 #43
    def test_half_ufuncs(self):
        """Test the various ufuncs"""

        a = np.array([0, 1, 2, 4, 2], dtype=float16)
        b = np.array([-2, 5, 1, 4, 3], dtype=float16)
        c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16)

        assert_equal(np.add(a, b), [-2, 6, 3, 8, 5])
        assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1])
        assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6])
        assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625])

        assert_equal(np.equal(a, b), [False, False, False, True, False])
        assert_equal(np.not_equal(a, b), [True, True, True, False, True])
        assert_equal(np.less(a, b), [False, True, False, False, True])
        assert_equal(np.less_equal(a, b), [False, True, False, True, True])
        assert_equal(np.greater(a, b), [True, False, True, False, False])
        assert_equal(np.greater_equal(a, b), [True, False, True, True, False])
        assert_equal(np.logical_and(a, b), [False, True, True, True, True])
        assert_equal(np.logical_or(a, b), [True, True, True, True, True])
        assert_equal(np.logical_xor(a, b), [True, False, False, False, False])
        assert_equal(np.logical_not(a), [True, False, False, False, False])

        assert_equal(np.isnan(c), [False, False, False, True, False])
        assert_equal(np.isinf(c), [False, False, True, False, False])
        assert_equal(np.isfinite(c), [True, True, False, False, True])
        assert_equal(np.signbit(b), [True, False, False, False, False])

        assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3])

        assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3])
        x = np.maximum(b, c)
        assert_(np.isnan(x[3]))
        x[3] = 0
        assert_equal(x, [0, 5, 1, 0, 6])
        assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2])
        x = np.minimum(b, c)
        assert_(np.isnan(x[3]))
        x[3] = 0
        assert_equal(x, [-2, -1, -np.inf, 0, 3])
        assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3])
        assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6])
        assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2])
        assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3])

        assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0])
        assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2])
        assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2]))
        assert_equal(np.square(b), [4, 25, 1, 16, 9])
        assert_equal(np.reciprocal(b),
                     [-0.5, 0.199951171875, 1, 0.25, 0.333251953125])
        assert_equal(np.ones_like(b), [1, 1, 1, 1, 1])
        assert_equal(np.conjugate(b), b)
        assert_equal(np.absolute(b), [2, 5, 1, 4, 3])
        assert_equal(np.negative(b), [2, -5, -1, -4, -3])
        assert_equal(np.positive(b), b)
        assert_equal(np.sign(b), [-1, 1, 1, 1, 1])
        assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b))
        assert_equal(np.frexp(b),
                     ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2]))
        assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12])
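
One invariant behind the floor_divide/remainder/divmod assertions above is the divmod identity a == b*(a//b) + (a mod b), which also holds in float16 for these exactly representable values:

import numpy as np

a = np.array([0, 1, 2, 4, 2], dtype=np.float16)
b = np.array([-2, 5, 1, 4, 3], dtype=np.float16)

q, r = np.divmod(a, b)         # q == np.floor_divide(a, b), r == np.remainder(a, b)
assert np.all(a == b * q + r)  # the divmod identity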
コード例 #44
    def __init__(self, params, techs, dt):
        """ Generates the objective function, finds and creates constraints.

          Args:
            params (Dict): input parameters
            techs (Dict): technology objects after initialization, as saved in a dictionary
            dt (float): optimization timestep (hours)

        TODO: edge cases to consider -- if the event occurs at the beginning or end of an opt window  --HN
        """

        # generate the generic service object
        ValueStream.__init__(self, techs['Storage'], 'Resource Adequacy', dt)

        # add RA specific attributes
        self.days = params['days']  # number of peak events
        self.length = params['length']  # discharge duration
        self.idmode = params['idmode'].lower()  # peak selection mode
        self.dispmode = params['dispmode']  # dispatch mode
        self.capacity_rate = params['value']  # monthly RA capacity rate (length = 12)
        self.active = params['active'] == 1  # active RA timesteps (length = 8760/dt) must be boolean, not int
        self.system_load = params['system_load']  # system load profile (length = 8760/dt)
        self.dt = params['dt']  # dt for the system load profile

        # FIND THE TIME-STEPS PEAKS THAT THE RA EVENTS WILL OCCUR AROUND
        self.peak_intervals = []
        for year in self.system_load.index.year.unique():
            year_o_system_load = self.system_load.loc[self.system_load.index.year == year]
            if self.idmode == 'peak by year':
                # 1) sort system load from largest to smallest
                max_int = year_o_system_load.sort_values(ascending=False)
                # 2) keep only the first (and therefore largest) instant load per day, using an array of booleans that are True
                # for every item that has already occurred before in the index
                max_int_date = pd.Series(max_int.index.date, index=max_int.index)
                max_days = max_int.loc[~max_int_date.duplicated(keep='first')]

                # 3) select peak time-steps
                # find ra_events number of events in year where system_load is at peak:
                # select only the first DAYS number of timestamps
                self.peak_intervals += list(max_days.index[:self.days].values)

            elif self.idmode == 'peak by month':
                # 1) sort system load from largest to smallest
                max_int = year_o_system_load.sort_values(ascending=False)
                # 2) keep only the first (and therefore largest) instant load per day, using an array of booleans that are True
                # for every item that has already occurred before in the index
                max_int_date = pd.Series(max_int.index.date, index=max_int.index)
                max_days = max_int.loc[~max_int_date.duplicated(keep='first')]

                # 3) select peak time-steps
                # find number of events in month where system_load is at peak:
                # select only the first DAYS number of timestamps, per month
                self.peak_intervals += list(max_days.groupby(by=max_days.index.month).head(self.days).index.values)

            elif self.idmode == 'peak by month with active hours':
                # 1) sort system load, during ACTIVE time-steps from largest to smallest
                max_int = year_o_system_load.loc[self.active].sort_values(ascending=False)
                # 2) keep only the first (and therefore largest) instant load per day, using an array of booleans that are True
                # for every item that has already occurred before in the index
                max_int_date = pd.Series(max_int.index.date, index=max_int.index)
                max_days = max_int.loc[~max_int_date.duplicated(keep='first')]

                # 3) select peak time-steps
                # find number of events in month where system_load is at peak during active hours:
                # select only first DAYS number of timestamps, per month
                self.peak_intervals += list(max_days.groupby(by=max_days.index.month).head(self.days).index.values)

        # DETERMINE RA EVENT INTERVALS
        event_interval = pd.Series(np.zeros(len(self.system_load)), index=self.system_load.index)
        event_start = pd.Series(np.zeros(len(self.system_load)), index=self.system_load.index)  # used to set energy constraints
        # odd intervals straddle peak & even intervals have extra interval after peak
        steps = self.length/self.dt
        if steps % 2:  # true when steps is odd --> the event straddles the peak
            presteps = np.floor_divide(steps, 2)
        else:  # steps is even
            presteps = (steps/2) - 1
        poststeps = presteps + 1

        for peak in self.peak_intervals:
            # TODO: check that this works for sub-hourly system load profiles
            first_int = peak - pd.Timedelta(presteps*self.dt, unit='h')
            last_int = peak + pd.Timedelta(poststeps * self.dt, unit='h')

            # handle edge RA event intervals
            if first_int < event_interval.index[0]:  # RA event starts before the first time-step in the system load
                first_int = event_interval.index[0]
            if last_int > event_interval.index[-1]:  # RA event ends after the last time-step in the system load
                last_int = event_interval.index[-1]

            event_range = pd.date_range(start=first_int, end=last_int,
                                        periods=int(steps))
            event_interval.loc[event_range] = 1
            event_start.loc[first_int] = 1
        self.event_intervals = self.system_load[event_interval == 1].index
        self.event_start_times = self.system_load[event_start == 1].index

        # DETERMINE QUALIFYING COMMITMENT & ENERGY
        p_max = techs['Storage'].dis_max_rated
        energy_max = techs['Storage'].ene_max_rated
        ulsoc = techs['Storage'].ulsoc
        llsoc = techs['Storage'].llsoc

        self.qualifying_commitment = np.minimum(p_max, (energy_max*ulsoc)/self.length)
        total_time_intervals = len(self.event_intervals)

        if self.dispmode:
            # create dispatch power constraint
            # charge power should be 0, while discharge should be the qualifying commitment for the times that correspond to the RA event

            self.charge_max_constraint = pd.Series(np.zeros(total_time_intervals), index=self.event_intervals,
                                                   name='RA Charge Max (kW)')
            self.discharge_min_constraint = pd.Series(np.repeat(self.qualifying_commitment, total_time_intervals), index=self.event_intervals,
                                                      name='RA Discharge Min (kW)')
            self.constraints = {'dis_min': Const.Constraint('dis_min', self.name, self.discharge_min_constraint),
                                'ch_max': Const.Constraint('ch_max', self.name, self.charge_max_constraint)}
        else:
            # create energy reservation constraint
            # TODO: double check to see if this needs to stack...
            # in the event of a black out -- you will not be providing resource adequacy, so no?
            qualifying_energy = self.qualifying_commitment * self.length

            # we constrain the energy to be at least the qualifying energy value at the beginning of the RA event to make sure that we
            # have enough energy to meet our promise during the entirety of the event.
            self.energy_min_constraint = pd.Series(np.repeat(qualifying_energy, len(self.event_start_times)), index=self.event_start_times,
                                                   name='RA Energy Min (kWh)')
            self.constraints = {'ene_min': Const.Constraint('ene_min', self.name, self.energy_min_constraint)}
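
The presteps/poststeps split above centers an RA event of `length` hours around a peak time-step. A quick check of both parities, assuming dt = 1 hour:

import numpy as np

def split_steps(length, dt=1.0):
    steps = length / dt
    if steps % 2:  # odd number of steps
        presteps = np.floor_divide(steps, 2)
    else:          # even number of steps
        presteps = steps / 2 - 1
    return presteps, presteps + 1  # intervals before / after the peak

print(split_steps(5))  # odd: 2 intervals before, 3 after the peak
print(split_steps(4))  # even: 1 before, 2 after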
コード例 #45
def randomized_benchmarking_seq(nseeds=1,
                                length_vector=None,
                                rb_pattern=None,
                                length_multiplier=1,
                                seed_offset=0,
                                align_cliffs=False,
                                interleaved_gates=None,
                                is_purity=False,
                                group_gates=None):
    """Get a generic randomized benchmarking sequence

    Args:
        nseeds: number of seeds
        length_vector: 'm' length vector of sequence lengths. Must be in
            ascending order. RB sequences of increasing length grow on top of
            the previous sequences.
        rb_pattern: A list of the form [[i,j],[k],...] which will make
            simultaneous RB sequences where
            Qi,Qj are a 2Q RB sequence and Qk is a 1Q sequence, etc.
            E.g. [[0,3],[2],[1]] would create RB sequences that are
            2Q for Q0/Q3, 1Q for Q1+Q2
            The number of qubits is the sum of the entries.
            For 'regular' RB the qubit_pattern is just [[0]] or [[0,1]].
        length_multiplier: if this is an array it scales each rb_sequence by
            the multiplier
        seed_offset: What to start the seeds at (e.g. if we
            want to add more seeds later)
        align_cliffs: If true adds a barrier across all qubits in rb_pattern
            after each set of elements, not necessarily Cliffords
            (note: aligns after each increment of elements including the
            length multiplier so if the multiplier is [1,3] it will barrier
            after 1 element for the first pattern and 3 for the second).
        interleaved_gates: A list of gates of elements that
            will be interleaved (for interleaved randomized benchmarking)
            The length of the list would equal the length of the rb_pattern.
        is_purity: True only for purity rb (default is False)
        group_gates: On which group (or gate set) we perform RB
            (default is the Clifford group)
            '0' or None or 'Clifford': Clifford group
            '1' or 'CNOT-Dihedral' or 'Non-Clifford': CNOT-Dihedral group


    Returns:
        A tuple of different fields depending on inputs. The different fields
        are:

         * ``circuits``: list of lists of circuits for the rb sequences
            (separate list for each seed)
         * ``xdata``: the sequences lengths (with multiplier if applicable)
         * ``circuits_interleaved`` `(only if interleaved_gates is not None)`:
           list of lists of circuits for the interleaved rb sequences
           (separate list for each seed)
         * ``circuits_purity`` `(only if is_purity=True)`:
           list of lists of lists of circuits for purity rb
           (separate list for each seed and each of the 3^n circuits)
         * ``npurity`` `(only if is_purity=True)`:
            the number of purity rb circuits (per seed)
            which equals 3^n, where n is the dimension

    """
    # Set modules (default is Clifford)
    if group_gates is None or group_gates in ('0', 'Clifford', 'clifford'):
        Gutils = clutils()
        Ggroup = Clifford
        rb_circ_type = 'rb'
        group_gates_type = 0
    elif group_gates in ('1', 'Non-Clifford', 'NonClifford',
                         'CNOTDihedral', 'CNOT-Dihedral'):
        Gutils = dutils()
        Ggroup = CNOTDihedral
        rb_circ_type = 'rb_cnotdihedral'
        group_gates_type = 1
    else:
        raise ValueError("Unknown group or set of gates.")

    if rb_pattern is None:
        rb_pattern = [[0]]
    if length_vector is None:
        length_vector = [1, 10, 20]

    qlist_flat, n_q_max, max_dim = check_pattern(rb_pattern, is_purity)
    length_multiplier = handle_length_multiplier(length_multiplier,
                                                 len(rb_pattern), is_purity)
    # number of purity rb circuits per seed
    npurity = 3**max_dim

    xdata = calc_xdata(length_vector, length_multiplier)

    pattern_sizes = [len(pat) for pat in rb_pattern]
    max_nrb = np.max(pattern_sizes)

    # load group tables
    group_tables = [[] for _ in range(max_nrb)]
    for rb_num in range(max_nrb):
        group_tables[rb_num] = Gutils.load_tables(rb_num + 1)

    # initialization: rb sequences
    circuits = [[] for e in range(nseeds)]
    # initialization: interleaved rb sequences
    circuits_interleaved = [[] for e in range(nseeds)]
    # initialization: non-clifford cnot-dihedral
    # rb sequences
    circuits_cnotdihedral = [[] for e in range(nseeds)]
    # initialization: non-clifford cnot-dihedral
    # interleaved rb sequences
    circuits_cnotdihedral_interleaved = [[] for e in range(nseeds)]
    # initialization: purity rb sequences
    circuits_purity = [[[] for d in range(npurity)] for e in range(nseeds)]

    # go through for each seed
    for seed in range(nseeds):
        qr = qiskit.QuantumRegister(n_q_max + 1, 'qr')
        cr = qiskit.ClassicalRegister(len(qlist_flat), 'cr')
        general_circ = qiskit.QuantumCircuit(qr, cr)
        interleaved_circ = qiskit.QuantumCircuit(qr, cr)

        # make sequences for each of the separate sequences in
        # rb_pattern
        Elmnts = []
        for rb_q_num in pattern_sizes:
            Elmnts.append(Ggroup(rb_q_num))
        # Sequences for interleaved rb sequences
        Elmnts_interleaved = []
        for rb_q_num in pattern_sizes:
            Elmnts_interleaved.append(Ggroup(rb_q_num))

        # go through and add elements to RB sequences
        length_index = 0
        for elmnts_index in range(length_vector[-1]):
            for (rb_pattern_index, rb_q_num) in enumerate(pattern_sizes):

                for _ in range(length_multiplier[rb_pattern_index]):
                    new_elmnt_gatelist = Gutils.random_gates(rb_q_num)
                    Elmnts[rb_pattern_index] = Gutils.compose_gates(
                        Elmnts[rb_pattern_index], new_elmnt_gatelist)
                    general_circ += replace_q_indices(
                        get_quantum_circuit(Gutils.gatelist(), rb_q_num),
                        rb_pattern[rb_pattern_index], qr)

                    # add a barrier
                    general_circ.barrier(
                        *[qr[x] for x in rb_pattern[rb_pattern_index]])

                    # interleaved rb sequences
                    if interleaved_gates is not None:
                        Elmnts_interleaved[rb_pattern_index] = \
                            Gutils.compose_gates(
                                Elmnts_interleaved[rb_pattern_index],
                                new_elmnt_gatelist)
                        interleaved_circ += replace_q_indices(
                            get_quantum_circuit(Gutils.gatelist(), rb_q_num),
                            rb_pattern[rb_pattern_index], qr)
                        Elmnts_interleaved[rb_pattern_index] = \
                            Gutils.compose_gates(
                                Elmnts_interleaved[rb_pattern_index],
                                interleaved_gates[rb_pattern_index])
                        # add a barrier - interleaved rb
                        interleaved_circ.barrier(
                            *[qr[x] for x in rb_pattern[rb_pattern_index]])
                        interleaved_circ += replace_q_indices(
                            get_quantum_circuit(Gutils.gatelist(), rb_q_num),
                            rb_pattern[rb_pattern_index], qr)
                        # add a barrier - interleaved rb
                        interleaved_circ.barrier(
                            *[qr[x] for x in rb_pattern[rb_pattern_index]])

            if align_cliffs:
                # if align at a barrier across all patterns
                general_circ.barrier(*[qr[x] for x in qlist_flat])
                # align for interleaved rb
                if interleaved_gates is not None:
                    interleaved_circ.barrier(*[qr[x] for x in qlist_flat])

            # if the number of elements matches one of the sequence lengths
            # then calculate the inverse and produce the circuit
            if (elmnts_index + 1) == length_vector[length_index]:
                # circ for rb:
                circ = qiskit.QuantumCircuit(qr, cr)
                circ += general_circ
                # circ_interleaved for interleaved rb:
                circ_interleaved = qiskit.QuantumCircuit(qr, cr)
                circ_interleaved += interleaved_circ

                for (rb_pattern_index, rb_q_num) in enumerate(pattern_sizes):
                    inv_key = Gutils.find_key(Elmnts[rb_pattern_index],
                                              rb_q_num)
                    inv_circuit = Gutils.find_inverse_gates(
                        rb_q_num, group_tables[rb_q_num - 1][inv_key])
                    circ += replace_q_indices(
                        get_quantum_circuit(inv_circuit, rb_q_num),
                        rb_pattern[rb_pattern_index], qr)
                    # calculate the inverse and produce the circuit
                    # for interleaved rb
                    if interleaved_gates is not None:
                        inv_key = Gutils.find_key(
                            Elmnts_interleaved[rb_pattern_index], rb_q_num)
                        inv_circuit = Gutils.find_inverse_gates(
                            rb_q_num, group_tables[rb_q_num - 1][inv_key])
                        circ_interleaved += replace_q_indices(
                            get_quantum_circuit(inv_circuit, rb_q_num),
                            rb_pattern[rb_pattern_index], qr)

                # Circuits for purity rb
                if is_purity:
                    circ_purity = [[] for d in range(npurity)]
                    for d in range(npurity):
                        circ_purity[d] = qiskit.QuantumCircuit(qr, cr)
                        circ_purity[d] += circ
                        circ_purity[d].name = rb_circ_type + '_purity_'
                        ind_d = d
                        purity_qubit_num = 0
                        while True:
                            # Per each qubit:
                            # do nothing or rx(pi/2) or ry(pi/2)
                            purity_qubit_rot = np.mod(ind_d, 3)
                            ind_d = np.floor_divide(ind_d, 3)
                            if purity_qubit_rot == 0:  # do nothing
                                circ_purity[d].name += 'Z'
                            if purity_qubit_rot == 1:  # add rx(pi/2)
                                for pat in rb_pattern:
                                    circ_purity[d].rx(
                                        np.pi / 2, qr[pat[purity_qubit_num]])
                                circ_purity[d].name += 'X'
                            if purity_qubit_rot == 2:  # add ry(pi/2)
                                for pat in rb_pattern:
                                    circ_purity[d].ry(
                                        np.pi / 2, qr[pat[purity_qubit_num]])
                                circ_purity[d].name += 'Y'
                            purity_qubit_num = purity_qubit_num + 1
                            if ind_d == 0:
                                break
                        # padding the circuit name with Z's so that
                        # all circuits will have names of the same length
                        for _ in range(max_dim - purity_qubit_num):
                            circ_purity[d].name += 'Z'
                        # add measurement for purity rb
                        for qind, qb in enumerate(qlist_flat):
                            circ_purity[d].measure(qr[qb], cr[qind])
                        circ_purity[d].name += '_length_%d_seed_%d' \
                                               % (length_index,
                                                  seed + seed_offset)

                # add measurement for Non-Clifford cnot-dihedral rb
                # measure both the ground state |0...0> (circ)
                # and the |+...+> state (cnot-dihedral_circ)
                cnotdihedral_circ = qiskit.QuantumCircuit(qr, cr)
                cnotdihedral_interleaved_circ = qiskit.QuantumCircuit(qr, cr)
                if group_gates_type == 1:
                    for _, qb in enumerate(qlist_flat):
                        cnotdihedral_circ.h(qr[qb])
                        cnotdihedral_circ.barrier(qr[qb])
                        cnotdihedral_interleaved_circ.h(qr[qb])
                        cnotdihedral_interleaved_circ.barrier(qr[qb])
                    cnotdihedral_circ += circ
                    cnotdihedral_interleaved_circ += circ_interleaved
                    for _, qb in enumerate(qlist_flat):
                        cnotdihedral_circ.barrier(qr[qb])
                        cnotdihedral_circ.h(qr[qb])
                        cnotdihedral_interleaved_circ.barrier(qr[qb])
                        cnotdihedral_interleaved_circ.h(qr[qb])
                    for qind, qb in enumerate(qlist_flat):
                        cnotdihedral_circ.measure(qr[qb], cr[qind])
                        cnotdihedral_interleaved_circ.measure(qr[qb], cr[qind])

                # add measurement for standard rb
                # qubits measure to the c registers as
                # they appear in the pattern
                for qind, qb in enumerate(qlist_flat):
                    circ.measure(qr[qb], cr[qind])
                    # add measurement for interleaved rb
                    circ_interleaved.measure(qr[qb], cr[qind])

                circ.name = \
                    rb_circ_type + '_length_%d_seed_%d' % \
                    (length_index, seed + seed_offset)
                circ_interleaved.name = \
                    rb_circ_type + '_interleaved_length_%d_seed_%d' % \
                    (length_index, seed + seed_offset)

                if group_gates_type == 1:
                    circ.name = rb_circ_type + '_Z_length_%d_seed_%d' % \
                                (length_index, seed + seed_offset)
                    circ_interleaved.name = \
                        rb_circ_type + '_interleaved_Z_length_%d_seed_%d' % \
                        (length_index, seed + seed_offset)
                    cnotdihedral_circ.name = \
                        rb_circ_type + '_X_length_%d_seed_%d' % \
                        (length_index, seed + seed_offset)
                    cnotdihedral_interleaved_circ.name = \
                        rb_circ_type + '_interleaved_X_length_%d_seed_%d' % \
                        (length_index, seed + seed_offset)

                circuits[seed].append(circ)
                circuits_interleaved[seed].append(circ_interleaved)
                circuits_cnotdihedral[seed].append(cnotdihedral_circ)
                circuits_cnotdihedral_interleaved[seed].append(
                    cnotdihedral_interleaved_circ)

                if is_purity:
                    for d in range(npurity):
                        circuits_purity[seed][d].append(circ_purity[d])
                length_index += 1

    # output of purity rb
    if is_purity:
        return circuits_purity, xdata, npurity
    # output of non-clifford cnot-dihedral interleaved rb
    if interleaved_gates is not None and group_gates_type == 1:
        return circuits, xdata, circuits_cnotdihedral, circuits_interleaved, \
               circuits_cnotdihedral_interleaved
    # output of interleaved rb
    if interleaved_gates is not None:
        return circuits, xdata, circuits_interleaved
    # output of Non-Clifford cnot-dihedral rb
    if group_gates_type == 1:
        return circuits, xdata, circuits_cnotdihedral
    # output of standard (simultaneous) rb
    return circuits, xdata
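
The purity loop above decodes the circuit index d into base-3 digits, one Z/X/Y rotation per qubit, least-significant digit first. A standalone sketch of that decoding (the helper name is hypothetical):

import numpy as np

def purity_label(d, nqubits):
    # decode index d into a rotation label, padding with 'Z' like the source
    label = ''
    for _ in range(nqubits):
        label += 'ZXY'[np.mod(d, 3)]
        d = np.floor_divide(d, 3)
    return label

print([purity_label(d, 2) for d in range(9)])
# ['ZZ', 'XZ', 'YZ', 'ZX', 'XX', 'YX', 'ZY', 'XY', 'YY']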
コード例 #46
def main():

    #    env = gym.make("CartPoleRob-v0")
    #    env = gym.make("CartPole-v0")
    #    env = gym.make("CartPole-v1")
    #    env = gym.make("Acrobot-v1")
    #    env = gym.make("MountainCarRob-v0")
    #    env = gym.make("FrozenLake-v0")
    #    env = gym.make("FrozenLake8x8-v0")
    #    env = gym.make("FrozenLake8x8rob-v0")
    #    env = gym.make("FrozenLake16x16rob-v0")
    env = gym.make("TestRob3-v0")

    # same as getDeictic except this one just calculates for the observation
    # input: n x n x channels
    # output: dn x dn x channels
    def getDeicticObs(obses_t, windowLen):
        deicticObses_t = []
        for i in range(np.shape(obses_t)[0] - windowLen + 1):
            for j in range(np.shape(obses_t)[1] - windowLen + 1):
                deicticObses_t.append(obses_t[i:i + windowLen,
                                              j:j + windowLen, :])
        return np.array(deicticObses_t)

    # get set of deictic alternatives
    # input: batch x n x n x channels
    # output: (batch x deictic) x dn x dn x channels
    def getDeictic(obses_t, actions, obses_tp1, weights, windowLen):
        deicticObses_t = []
        deicticActions = []
        deicticObses_tp1 = []
        deicticWeights = []
        for i in range(np.shape(obses_t)[0]):
            for j in range(np.shape(obses_t)[1] - windowLen + 1):
                for k in range(np.shape(obses_t)[2] - windowLen + 1):
                    deicticObses_t.append(obses_t[i, j:j + windowLen,
                                                  k:k + windowLen, :])
                    deicticActions.append(actions[i])
                    deicticObses_tp1.append(obses_tp1[i, j:j + windowLen,
                                                      k:k + windowLen, :])
                    deicticWeights.append(weights[i])

        return np.array(deicticObses_t), np.array(deicticActions), np.array(
            deicticObses_tp1), np.array(deicticWeights)

    # conv model parameters: (num_outputs, kernel_size, stride)
    model = models.cnn_to_mlp(
        #        convs=[(32, 8, 4), (64, 4, 2), (64, 3, 1)], # used in pong
        #        hiddens=[256],  # used in pong
        #        convs=[(8,4,1)], # used for non-deictic TestRob3-v0
        #        convs=[(8,3,1)], # used for deictic TestRob3-v0
        convs=[(16, 3, 1)],  # used for deictic TestRob3-v0
        #        convs=[(4,3,1)], # used for deictic TestRob3-v0
        #        convs=[(16,3,1)], # used for deictic TestRob3-v0
        #        convs=[(8,2,1)], # used for deictic TestRob3-v0
        hiddens=[16],
        dueling=True)

    #    model = models.mlp([6])

    # parameters
    q_func = model
    lr = 1e-3
    #    lr=1e-4
    #    max_timesteps=100000
    #    max_timesteps=50000
    max_timesteps = 20000
    buffer_size = 50000
    #    exploration_fraction=0.1
    exploration_fraction = 0.2
    exploration_final_eps = 0.02
    #    exploration_final_eps=0.005
    #    exploration_final_eps=0.1
    print_freq = 10
    checkpoint_freq = 10000
    learning_starts = 1000
    gamma = .98
    target_network_update_freq = 500
    prioritized_replay = False
    #    prioritized_replay=True
    prioritized_replay_alpha = 0.6
    prioritized_replay_beta0 = 0.4
    prioritized_replay_beta_iters = None
    prioritized_replay_eps = 1e-6
    num_cpu = 16

    #    batch_size=32
    #    train_freq=1
    #    batch_size=64
    #    train_freq=2
    #    batch_size=128
    #    train_freq=4
    #    batch_size=256
    #    train_freq=4
    batch_size = 512
    train_freq = 8

    # deicticShape must be square.
    # These two parameters need to be consistent w/ each other.
    #    deicticShape = (2,2,1)
    #    num_deictic_patches=36
    deicticShape = (3, 3, 1)
    num_deictic_patches = 36

    #    deicticShape = (4,4,1)
    #    num_deictic_patches=25
    #    deicticShape = (5,5,1)
    #    num_deictic_patches=16
    #    deicticShape = (6,6,1)
    #    num_deictic_patches=9
    #    deicticShape = (7,7,1)
    #    num_deictic_patches=4
    #    deicticShape = (8,8,1)
    #    num_deictic_patches=1

    def make_obs_ph(name):
        #        return U.BatchInput(env.observation_space.shape, name=name)
        return U.BatchInput(deicticShape, name=name)

    matchShape = (batch_size * 25, )

    def make_match_ph(name):
        return U.BatchInput(matchShape, name=name)

    sess = U.make_session(num_cpu)
    sess.__enter__()

    #    act, train, update_target, debug = build_graph.build_train(
    #    getq, train, trainWOUpdate, update_target, debug = build_graph.build_train_deictic(
    #    getq, train, trainWOUpdate, debug = build_graph.build_train_deictic(
    #    getq, train, trainWOUpdate, update_target, debug = build_graph.build_train_deictic(
    getq, train, trainWOUpdate, update_target, debug = build_graph.build_train_deictic_min(
        make_obs_ph=make_obs_ph,
        make_match_ph=make_match_ph,
        q_func=q_func,
        num_actions=env.action_space.n,
        batch_size=batch_size,
        num_deictic_patches=num_deictic_patches,
        optimizer=tf.train.AdamOptimizer(learning_rate=lr),
        gamma=gamma,
        grad_norm_clipping=10,
        double_q=False)

    act_params = {
        'make_obs_ph': make_obs_ph,
        'q_func': q_func,
        'num_actions': env.action_space.n,
    }

    # Create the replay buffer
    if prioritized_replay:
        replay_buffer = PrioritizedReplayBuffer(buffer_size,
                                                alpha=prioritized_replay_alpha)
        if prioritized_replay_beta_iters is None:
            prioritized_replay_beta_iters = max_timesteps
        beta_schedule = LinearSchedule(prioritized_replay_beta_iters,
                                       initial_p=prioritized_replay_beta0,
                                       final_p=1.0)
    else:
        replay_buffer = ReplayBuffer(buffer_size)
        beta_schedule = None

    # Create the schedule for exploration starting from 1.
    exploration = LinearSchedule(schedule_timesteps=int(exploration_fraction *
                                                        max_timesteps),
                                 initial_p=1.0,
                                 final_p=exploration_final_eps)

    # Initialize the parameters and copy them to the target network.
    U.initialize()
    update_target()

    episode_rewards = [0.0]
    saved_mean_reward = None
    obs = env.reset()

    #    with tempfile.TemporaryDirectory() as td:
    model_saved = False
    #        model_file = os.path.join(td, "model")
    for t in range(max_timesteps):

        # get action to take
        #        action = act(np.array(obs)[None], update_eps=exploration.value(t))[0]
        #        qvalues = getq(np.array(obs)[None])
        #        action = np.argmax(qvalues)
        #        if np.random.rand() < exploration.value(t):
        #            action = np.random.randint(env.action_space.n)

        deicticObs = getDeicticObs(obs, deicticShape[0])
        qvalues = getq(np.array(deicticObs))
        action = np.argmax(np.max(qvalues, 0))
        selPatch = np.argmax(np.max(qvalues, 1))
        if np.random.rand() < exploration.value(t):
            action = np.random.randint(env.action_space.n)

        # temporarily take uniformly random actions all the time:
        # action = np.random.randint(env.action_space.n)
        # env.render()

        new_obs, rew, done, _ = env.step(action)

        # display state, action, nextstate
        if t > 20000:
            toDisplay = np.reshape(new_obs, (8, 8))
            # mark the upper-left corner of the selected deictic patch
            row = np.int32(np.floor_divide(selPatch, np.sqrt(num_deictic_patches)))
            col = np.int32(np.remainder(selPatch, np.sqrt(num_deictic_patches)))
            toDisplay[row, col] = 50
            print(
                "Current/next state. 50 denotes the upper left corner of the deictic patch."
            )
            print(str(toDisplay))


#        env.render()

# Store transition in the replay buffer.
        replay_buffer.add(obs, action, rew, new_obs, float(done))
        obs = new_obs

        episode_rewards[-1] += rew
        if done:
            obs = env.reset()
            episode_rewards.append(0.0)
            if t > 20000:
                print("q-values:")
                print(str(qvalues))
                print("*** Episode over! ***\n\n")

        if t > learning_starts and t % train_freq == 0:

            # Get batch
            if prioritized_replay:
                experience = replay_buffer.sample(batch_size,
                                                  beta=beta_schedule.value(t))
                (obses_t, actions, rewards, obses_tp1, dones, weights,
                 batch_idxes) = experience
            else:
                obses_t, actions, rewards, obses_tp1, dones = replay_buffer.sample(
                    batch_size)
                weights, batch_idxes = np.ones_like(rewards), None

            # Convert batch to deictic format
            obses_t_deic, actions_deic, obses_tp1_deic, weights_deic = getDeictic(
                obses_t, actions, obses_tp1, weights, deicticShape[0])

            obses_t_deic_fingerprints = [
                np.reshape(obses_t_deic[i],
                           [deicticShape[0] * deicticShape[1]])
                for i in range(np.shape(obses_t_deic)[0])
            ]
            _, _, fingerprintMatch = np.unique(obses_t_deic_fingerprints,
                                               axis=0,
                                               return_index=True,
                                               return_inverse=True)
            #            matchTemplates = [fingerprintMatch == i for i in range(np.max(fingerprintMatch)+1)]

            #            td_errors = train(obses_t, actions, rewards, obses_tp1, dones, weights)
            #            td_errors = train(obses_t_deic, actions_deic, rewards, obses_tp1_deic, dones, weights_deic)
            #            debug1, debug2, debug3 = trainWOUpdate(obses_t_deic, actions_deic, rewards, obses_tp1_deic, dones, weights_deic)
            #            debug1, debug2, debug3, debug4 = trainWOUpdate(obses_t_deic, actions_deic, rewards, obses_tp1_deic, fingerprintMatch, dones, weights_deic)
            #            td_errors = train(obses_t_deic, actions_deic, rewards, obses_tp1_deic, fingerprintMatch, dones, weights_deic)
            #            td_errors2, min_values_of_groups2, match_onehot2 = train(obses_t_deic, actions_deic, rewards, obses_tp1_deic, fingerprintMatch, dones, weights_deic)

            td_errors, min_values_of_groups, match_onehot = train(
                obses_t_deic, actions_deic, rewards, obses_tp1_deic,
                fingerprintMatch, dones, weights_deic)

            if prioritized_replay:
                new_priorities = np.abs(td_errors) + prioritized_replay_eps
                replay_buffer.update_priorities(batch_idxes, new_priorities)

        if t > learning_starts and t % target_network_update_freq == 0:

            # Update target network periodically.
            update_target()

        mean_100ep_reward = round(np.mean(episode_rewards[-101:-1]), 1)
        num_episodes = len(episode_rewards)

        if done and print_freq is not None and len(
                episode_rewards) % print_freq == 0:
            print("steps: " + str(t) + ", episodes: " + str(num_episodes) +
                  ", mean 100 episode reward: " + str(mean_100ep_reward) +
                  ", % time spent exploring: " +
                  str(int(100 * exploration.value(t))))

            if t > learning_starts and t % train_freq == 0:
                group_counts = np.sum(match_onehot, 1)
                print(str(min_values_of_groups[min_values_of_groups < 1000]))
                #                print(str(min_values_of_groups2[min_values_of_groups2 < 1000]))
                print(str(group_counts[group_counts > 0]))

                # display one of most valuable deictic patches
                min_values_of_groups_trunc = min_values_of_groups[
                    min_values_of_groups < 1000]
                most_valuable_patches_idx = np.argmax(
                    min_values_of_groups_trunc)
                most_valuable_patches = obses_t_deic[fingerprintMatch ==
                                                     most_valuable_patches_idx]
                print(
                    str(np.reshape(most_valuable_patches[0],
                                   deicticShape[0:2])))
                print(
                    "value of most valuable patch: " +
                    str(min_values_of_groups_trunc[most_valuable_patches_idx]))
                print("sum group counts: " + str(np.sum(group_counts)))

    num2avg = 20
    rListAvg = np.convolve(episode_rewards, np.ones(num2avg)) / num2avg
    plt.plot(rListAvg)
    #    plt.plot(episode_rewards)
    plt.show()

    sess
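
The display block above recovers a 2-D patch position from a flat argmax index. The same divide/remainder pattern in isolation, assuming the 6x6 patch grid used here (num_deictic_patches = 36):

import numpy as np

num_deictic_patches = 36
side = np.int32(np.sqrt(num_deictic_patches))  # 6

sel_patch = 23  # flat index of the selected deictic patch
row = np.int32(np.floor_divide(sel_patch, side))
col = np.int32(np.remainder(sel_patch, side))
print(row, col)  # 3 5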
コード例 #47
import numpy
if __name__ == '__main__':
    n, m = map(int, input().split())
    arr1 = numpy.array([input().split() for i in range(n)], dtype=int)
    arr2 = numpy.array([input().split() for i in range(n)], dtype=int)

    print(numpy.add(arr1, arr2))
    print(numpy.subtract(arr1, arr2))
    print(numpy.multiply(arr1, arr2))
    print(numpy.floor_divide(arr1, arr2))
    print(numpy.mod(arr1, arr2))
    print(numpy.power(arr1, arr2))
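
For concreteness, the same element-wise operations on a fixed 1x4 pair of arrays:

import numpy

arr1 = numpy.array([[1, 2, 3, 4]])
arr2 = numpy.array([[5, 6, 7, 8]])

print(numpy.add(arr1, arr2))           # [[ 6  8 10 12]]
print(numpy.subtract(arr1, arr2))      # [[-4 -4 -4 -4]]
print(numpy.multiply(arr1, arr2))      # [[ 5 12 21 32]]
print(numpy.floor_divide(arr1, arr2))  # [[0 0 0 0]]
print(numpy.mod(arr1, arr2))           # [[1 2 3 4]]
print(numpy.power(arr1, arr2))         # [[    1    64  2187 65536]]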
コード例 #48
ファイル: jagged.py プロジェクト: douglasdavis/uproot
    def fromroot(self, data, offsets, local_entrystart, local_entrystop):
        contents = self.asdtype.fromroot(data, None, None, None)
        numpy.floor_divide(offsets, self.asdtype.fromdtype.itemsize, offsets)
        starts = offsets[local_entrystart:local_entrystop]
        stops = offsets[local_entrystart + 1:local_entrystop + 1]
        return JaggedArray(contents, starts, stops)
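
The in-place floor_divide above turns ROOT byte offsets into element offsets by dividing by the item size; passing offsets as the third (output) argument avoids allocating a new array. A toy version, assuming 8-byte float64 items:

import numpy

offsets = numpy.array([0, 24, 24, 56], dtype=numpy.int64)  # byte offsets
itemsize = 8  # assumed float64 item size

numpy.floor_divide(offsets, itemsize, offsets)  # in-place, as in the snippet
print(offsets)  # [0 3 3 7]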
コード例 #49
    def __init__(self,
                 bands=[],
                 wave={},
                 flux={},
                 ivar={},
                 mask=None,
                 resolution_data=None,
                 fibermap=None,
                 meta=None,
                 extra=None,
                 single=False):

        self._bands = bands
        self._single = single
        self._ftype = np.float64

        if single:
            self._ftype = np.float32

        self.meta = None

        if meta is None:
            self.meta = {}

        else:
            self.meta = meta.copy()

        nspec = None

        # check consistency of input dimensions
        for b in self._bands:
            if wave[b].ndim != 1:
                raise RuntimeError(
                    "wavelength array for band {} should have dim == 1".format(
                        b))
            if flux[b].ndim != 2:
                raise RuntimeError(
                    "flux array for band {} should have dim == 2".format(b))
            if flux[b].shape[1] != wave[b].shape[0]:
                raise RuntimeError(
                    "flux array wavelength dimension for band {} does not match wavelength grid"
                    .format(b))
            if nspec is None:
                nspec = flux[b].shape[0]
            if fibermap is not None:
                if fibermap.dtype != spectra_dtype():
                    print(fibermap.dtype)
                    print(spectra_dtype())
                    raise RuntimeError(
                        "fibermap data type does not match desispec.spectra.spectra_columns"
                    )
                if len(fibermap) != flux[b].shape[0]:
                    raise RuntimeError(
                        "flux array number of spectra for band {} does not match fibermap"
                        .format(b))
            if ivar[b].shape != flux[b].shape:
                raise RuntimeError(
                    "ivar array dimensions do not match flux for band {}".
                    format(b))
            if mask is not None:
                if mask[b].shape != flux[b].shape:
                    raise RuntimeError(
                        "mask array dimensions do not match flux for band {}".
                        format(b))
                if mask[b].dtype not in (int, np.int64, np.int32, np.uint64,
                                         np.uint32):
                    raise RuntimeError("bad mask type {}".format(mask.dtype))
            if resolution_data is not None:
                if resolution_data[b].ndim != 3:
                    raise RuntimeError(
                        "resolution array for band {} should have dim == 3".
                        format(b))
                if resolution_data[b].shape[0] != flux[b].shape[0]:
                    raise RuntimeError(
                        "resolution array spectrum dimension for band {} does not match flux"
                        .format(b))
                if resolution_data[b].shape[2] != wave[b].shape[0]:
                    raise RuntimeError(
                        "resolution array wavelength dimension for band {} does not match grid"
                        .format(b))
            if extra is not None:
                for ex in extra[b].items():
                    if ex[1].shape != flux[b].shape:
                        raise RuntimeError(
                            "extra arrays must have the same shape as the flux array"
                        )

        # copy data
        if fibermap is not None:
            self.fibermap = fibermap.copy()

        else:
            # create bogus fibermap table.
            fmap = np.zeros(shape=(nspec, ), dtype=spectra_columns())
            if nspec > 0:
                fake = np.arange(nspec, dtype=np.int32)
                fiber = np.mod(fake, 5000).astype(np.int32)
                expid = np.floor_divide(fake, 5000).astype(np.int32)
                fmap[:]["EXPID"] = expid
                fmap[:]["FIBER"] = fiber
            self.fibermap = encode_table(fmap)  #- unicode -> bytes

        self.wave = {}
        self.flux = {}
        self.ivar = {}

        if mask is None:
            self.mask = None

        else:
            self.mask = {}

        if resolution_data is None:
            self.resolution_data = None
            self.R = None

        else:
            self.resolution_data = {}
            self.R = {}

        if extra is None:
            self.extra = None

        else:
            self.extra = {}

        for b in self._bands:
            self.wave[b] = np.copy(wave[b].astype(self._ftype))
            self.flux[b] = np.copy(flux[b].astype(self._ftype))
            self.ivar[b] = np.copy(ivar[b].astype(self._ftype))

            if mask is not None:
                self.mask[b] = np.copy(mask[b])

            if resolution_data is not None:
                self.resolution_data[b] = resolution_data[b].astype(
                    self._ftype)
                self.R[b] = np.array(
                    [Resolution(r) for r in resolution_data[b]])

            if extra is not None:
                self.extra[b] = {}

                for ex in extra[b].items():
                    self.extra[b][ex[0]] = np.copy(ex[1].astype(self._ftype))
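
The bogus-fibermap branch above derives an exposure ID and a fiber number from each spectrum's index, 5000 fibers per exposure. In isolation:

import numpy as np

nspec = 12000
fake = np.arange(nspec, dtype=np.int32)
fiber = np.mod(fake, 5000).astype(np.int32)           # cycles 0..4999
expid = np.floor_divide(fake, 5000).astype(np.int32)  # bumps every 5000 spectra
print(expid[[0, 4999, 5000, 11999]])  # [0 0 1 2]
print(fiber[[0, 4999, 5000, 11999]])  # [   0 4999    0 1999]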
コード例 #50
del df['Time']      # delete time from data frame to match model input
del df['Density']   # delete density from data frame to match model input
del df['Depth(u)']
del df['Temp']
del df['Sal.']

# add column with latitude in
lat = 78
df['lat'] = pd.Series([lat for x in range(len(df.index))])

# rearrange columns so they match the needed model input
#df = df[df.columns[[3, 1, 0, 5, 4, 2]]]
df = df.reindex(columns= ['z', 't', 's', 'lat', 'd'])

# create means over one meter depth
bins = np.arange(0, np.floor_divide(bottom, 1) + 2, 1)  # create bin edges in 1 m steps
df['binned depth'] = pd.cut(df['z'], bins)              # sort depths into bins
df = df.groupby('binned depth').mean()                  # mean over each 1 m bin
df = df.interpolate()                                   # interpolate to get rid of NaNs
df["z"] = np.arange(0, np.floor_divide(bottom, 1) + 1, 1)  # reset depth to 1 m steps after interpolation

df = df.groupby('binned depth').mean().reset_index()    # convert group back to a data frame
del df['binned depth']                                  # drop the binned-depth column

###########################################################################
df = df.iloc[:-1,:]

length = df['t'].shape[0]
N=4
ker_len=10
ker = (1.0/ker_len)*np.ones(ker_len)
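A minimal sketch of the 1 m binning pattern used above, with synthetic data standing in for the profile (the column names and `bottom` mirror the snippet; the values are made up):

import numpy as np
import pandas as pd

bottom = 5.7
df = pd.DataFrame({"z": np.linspace(0.05, bottom, 50),
                   "t": np.random.default_rng(0).normal(4.0, 0.1, 50)})

bins = np.arange(0, np.floor_divide(bottom, 1) + 2, 1)  # 1 m bin edges
df["binned depth"] = pd.cut(df["z"], bins)
means = df.groupby("binned depth", observed=False).mean()
print(means["t"])  # one mean temperature per 1 m depth bin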
Code Example #51
    def train(self, epochs, batch_size):
        print("Start Training the PGLearner.")

        r_n, a_n = [], []
        self.iS.clickPlay()
        time.sleep(3.5)
        self.agent.updateInput()
        rd_s = np.empty((0, 1))
        x_n = np.empty((0, 4, 20, 20))

        for episode in range(epochs):
            start_time = time.time()
            eps = 1  # exploration fixed at 1; the annealing schedule 0.5 + 0.5*(episode/(0.9*epochs)) is left disabled

            old_score = 0.0
            old_cleared = 0
            old_height = 0
            n_blocks_old = 0
            r_sum = 0
            while True:
                x, action = self.agent.oneAction(eps)
                self.agent.updateInput()
                score = self.agent.pP.getScore()
                height = self.agent.pP.getHighestLine()
                cleared = self.agent.pP.getLinesCleared()
                n_blocks = self.agent.pP.getNBlocksInPlayField()

                if score == -1:
                    s = 0
                else:
                    s = ((score - old_score) / 100)

                if cleared == -1:
                    c = 0
                else:
                    c = (cleared - old_cleared)

                h = (height - old_height)
                n = (n_blocks - n_blocks_old)

                if (-h != c and h < 0) or height < 4:
                    h = 0

                conc = 0.0

                if n > 0:
                    conc = float(n_blocks) / (height * 9)
                    conc = 2 * (conc - 0.33)

                c = 2.5 * c
                s = 0.5 * s
                reward = c + conc + s

                x_n = np.append(x_n, x, 0)
                a_n.append(action)
                r_n.append(reward)

                r_sum += reward

                if self.agent.pP.isMenuOpen():
                    t = time.time() - start_time
                    r_n[-1] = -0.5
                    r_sum -= c
                    r_sum -= 0.5
                    print(
                        "Game {} of {} took {:.3f}s and reached a score of {}".
                        format(episode + 1, epochs, t, r_sum))
                    self.log(t, r_sum, old_score, old_cleared)

                    rd_tmp = self.discount_rewards(np.vstack(r_n))
                    rd_s = np.concatenate((rd_s, rd_tmp))

                    r_n = []

                    if np.mod(episode + 1, 1) == 0:  # np.mod(x, 1) is always 0, so the update runs after every episode
                        x_s = x_n
                        a_s = np.vstack(a_n)

                        a_n = []

                        a_s = a_s.reshape(a_s.shape[0], )
                        rd_s = rd_s.reshape(rd_s.shape[0], )

                        shuf = np.arange(x_s.shape[0])
                        np.random.shuffle(shuf)

                        for k in range(
                                np.floor_divide(x_s.shape[0], batch_size)):
                            it1 = k * batch_size
                            it2 = (k + 1) * batch_size
                            self.train_fn(x_s[shuf[it1:it2], :, :, :],
                                          a_s[it1:it2], rd_s[it1:it2])

                        rd_s = np.empty((0, 1))
                        x_n = np.empty((0, 4, 20, 20))

                    self.agent.resetInput()
                    time.sleep(0.5)

                    if self.agent.pP.tryAgain():
                        self.iS.clickTryAgain()
                        time.sleep(3.5)
                        self.agent.updateInput()
                        break
                    else:
                        self.iS.enterInitials()
                        time.sleep(1.0)
                        self.iS.clickTryAgain()
                        time.sleep(3.5)
                        self.agent.updateInput()
                        break

                old_score = score
                old_cleared = cleared
                old_height = height

        print("Saving Model to File...")
        self.agent.saveParams('trained_model1.npz')
        print("End Training Program!")
Code Example #52
cv2.imwrite('data/dst/lena_diff_abs.png', im_diff_abs)
# True

im_diff_abs_norm = im_diff_abs / im_diff_abs.max() * 255

print(im_diff_abs_norm.max())
# 255.0

print(im_diff_abs_norm.min())
# 0.0

cv2.imwrite('data/dst/lena_diff_abs_norm.png', im_diff_abs_norm)
# True

im_diff_center = np.floor_divide(im_diff, 2) + 128

print(im_diff_center.max())
# 199

print(im_diff_center.min())
# 77

cv2.imwrite('data/dst/lena_diff_center.png', im_diff_center)
# True

im_diff_center_norm = im_diff / np.abs(im_diff).max() * 127.5 + 127.5

print(im_diff_center_norm.max())
# 255.0
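A tiny stand-in for the centered difference image above: when im_diff is a signed difference of two uint8 images (values in [-255, 255]), halving with np.floor_divide and shifting by 128 is guaranteed to land in the displayable [0, 255] range. The arrays below are random placeholders, not the lena images.

import numpy as np

rng = np.random.default_rng(0)
a = rng.integers(0, 256, (2, 3), dtype=np.uint8)
b = rng.integers(0, 256, (2, 3), dtype=np.uint8)
im_diff = a.astype(int) - b.astype(int)            # signed difference in [-255, 255]
im_diff_center = np.floor_divide(im_diff, 2) + 128
print(im_diff_center.min(), im_diff_center.max())  # always within [0, 255]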
Code Example #53
 def _tile_params_to_volume_dims(self, params_to_reshape):
     target_shape = [6] + self.im_shape
     data_shape = params_to_reshape.shape
     tiles = np.floor_divide(target_shape, data_shape, dtype=int)
     return np.tile(params_to_reshape, reps=tiles)
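A sketch of the tiling above with made-up shapes: np.floor_divide computes how many copies of the parameter block fit along each axis, and np.tile repeats it accordingly (target_shape stands in for [6] + self.im_shape):

import numpy as np

target_shape = [6, 4, 4, 4]
params = np.zeros((6, 2, 4, 1))
tiles = np.floor_divide(target_shape, params.shape)
print(tiles)                              # [1 2 1 4]
print(np.tile(params, reps=tiles).shape)  # (6, 4, 4, 4)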
Code Example #54
 def to_grid_coords(self, x, y):
     pt = np.asarray([x, y]) - [self.grid.xoffset, self.grid.yoffset]
     pt = np.floor_divide(pt,
                          [self.grid.xsize, self.grid.ysize]).astype(int)
     return pt
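A minimal sketch of the world-to-grid mapping above with a made-up grid (0.5-unit cells, origin offset at (10, 20)); np.floor_divide keeps the mapping consistent for points left of or below the origin, which plain integer casting would not:

import numpy as np

xoffset, yoffset, xsize, ysize = 10.0, 20.0, 0.5, 0.5
pt = np.asarray([11.3, 19.2]) - [xoffset, yoffset]
cell = np.floor_divide(pt, [xsize, ysize]).astype(int)
print(cell)  # [ 2 -2]: floor(1.3/0.5) = 2, floor(-0.8/0.5) = -2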
Code Example #55
File: trainer.py Project: noel0714/MIM-Pytorch
def test(model, test_input_handle, configs, save_name, hidden_state,
         cell_state, hidden_state_diff, cell_state_diff, st_memory,
         conv_lstm_c, MIMB_oc_w, MIMB_ct_w, MIMN_oc_w, MIMN_ct_w):
    test_input_handle.begin(do_shuffle=False)
    res_path = configs.gen_frm_dir
    if not os.path.isdir(res_path):
        os.mkdir(res_path)
    avg_mse = 0
    batch_id = 0
    img_mse, ssim, psnr, fmae, sharp = [], [], [], [], []

    for i in range(configs.total_length - configs.input_length):
        img_mse.append(0)
        ssim.append(0)
        psnr.append(0)
        fmae.append(0)
        sharp.append(0)

    if configs.img_height > 0:
        height = configs.img_height
    else:
        height = configs.img_width

    real_input_flag = np.zeros(
        (configs.batch_size, configs.total_length - configs.input_length - 1,
         configs.patch_size**2 * configs.img_channel,
         configs.img_width // configs.patch_size,
         height // configs.patch_size))

    with torch.no_grad():
        while not test_input_handle.no_batch_left():
            batch_id = batch_id + 1
            if save_name != 'test_result':
                if batch_id > 100: break

            test_ims = test_input_handle.get_batch()
            test_ims = test_ims[:, :configs.total_length]

            if len(test_ims.shape) > 3:
                test_dat = preprocess.reshape_patch(test_ims,
                                                    configs.patch_size)
            else:
                test_dat = test_ims

            # test_dat = np.split(test_dat, configs.n_gpu)
            # the debug flag needs changing back here: it is currently modified to return only im_gen; check what it originally returned
            test_dat_tensor = torch.tensor(test_dat,
                                           device=configs.device,
                                           requires_grad=False)
            img_gen = model.forward(test_dat_tensor, real_input_flag,
                                    hidden_state, cell_state,
                                    hidden_state_diff, cell_state_diff,
                                    st_memory, conv_lstm_c, MIMB_oc_w,
                                    MIMB_ct_w, MIMN_oc_w, MIMN_ct_w)
            img_gen = img_gen.clone().detach().to('cpu').numpy()

            # concat outputs of different gpus along batch
            # img_gen = np.concatenate(img_gen)
            if len(img_gen.shape) > 3:
                img_gen = preprocess.reshape_patch_back(
                    img_gen, configs.patch_size)

            # MSE per frame
            for i in range(configs.total_length - configs.input_length):
                x = test_ims[:, i + configs.input_length, :, :, :]
                x = x[:configs.batch_size * configs.n_gpu]
                x = x - np.where(x > 10000,
                                 np.floor_divide(x, 10000) * 10000,
                                 np.zeros_like(x))
                gx = img_gen[:, i, :, :, :]
                fmae[i] += metrics.batch_mae_frame_float(gx, x)
                gx = np.maximum(gx, 0)
                gx = np.minimum(gx, 1)
                mse = np.square(x - gx).sum()
                img_mse[i] += mse
                avg_mse += mse
                real_frm = np.uint8(x * 255)
                pred_frm = np.uint8(gx * 255)
                psnr[i] += metrics.batch_psnr(pred_frm, real_frm)

                for b in range(configs.batch_size):
                    sharp[i] += np.max(
                        cv2.convertScaleAbs(cv2.Laplacian(pred_frm[b], 3)))
                    gx_trans = np.transpose(gx[b], (1, 2, 0))
                    x_trans = np.transpose(x[b], (1, 2, 0))
                    score = structural_similarity(gx_trans,
                                                  x_trans,
                                                  multichannel=True)
                    ssim[i] += score

            # save prediction examples
            if batch_id <= configs.num_save_samples:
                path = os.path.join(res_path, str(save_name))
                if not os.path.isdir(path):
                    os.mkdir(path)

                # if len(debug) != 0:
                #     np.save(os.path.join(path, "f.npy"), debug)

                for i in range(configs.total_length):
                    name = 'gt' + str(i + 1) + '.png'
                    file_name = os.path.join(path, name)
                    img_gt = np.uint8(test_ims[0, i, :, :, :] * 255)
                    if configs.img_channel == 2:
                        img_gt = img_gt[:, :, :1]
                    img_gt = np.transpose(img_gt, (1, 2, 0))
                    cv2.imwrite(file_name, img_gt)

                for i in range(configs.total_length - 1):
                    name = 'pd' + str(i) + '.png'
                    file_name = os.path.join(path, name)
                    img_pd = img_gen[0, i, :, :, :]
                    if configs.img_channel == 2:
                        img_pd = img_pd[:, :, :1]
                    img_pd = np.maximum(img_pd, 0)
                    img_pd = np.minimum(img_pd, 1)
                    img_pd = np.uint8(img_pd * 255)
                    img_pd = np.transpose(img_pd, (1, 2, 0))
                    cv2.imwrite(file_name, img_pd)
            test_input_handle.next()

    print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
          'test...' + str(save_name))

    avg_mse = avg_mse / (batch_id * configs.batch_size * configs.n_gpu)
    print('mse per seq: ' + str(avg_mse))

    for i in range(configs.total_length - configs.input_length):
        print(img_mse[i] / (batch_id * configs.batch_size * configs.n_gpu))

    psnr = np.asarray(psnr, dtype=np.float32) / batch_id
    ssim = np.asarray(ssim, dtype=np.float32) / (configs.batch_size * batch_id)
    fmae = np.asarray(fmae, dtype=np.float32) / batch_id
    sharp = np.asarray(sharp,
                       dtype=np.float32) / (configs.batch_size * batch_id)

    print('psnr per frame: ' + str(np.mean(psnr)))
    print('ssim per frame: ' + str(np.mean(ssim)))
    print('fmae per frame: ' + str(np.mean(fmae)))
    print('sharpness per frame: ' + str(np.mean(sharp)))
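The per-frame clean-up above strips multiples of 10000 from oversized ground-truth pixel values; for values over the threshold it behaves like x % 10000. A standalone check with made-up numbers:

import numpy as np

x = np.array([0.5, 123.0, 10001.0, 25000.0])
cleaned = x - np.where(x > 10000,
                       np.floor_divide(x, 10000) * 10000,
                       np.zeros_like(x))
print(cleaned)  # 10001 -> 1, 25000 -> 5000; small values unchanged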
Code Example #56
File: dtype.py Project: cminmins/Pixel_processing
    def _scale(a, n, m, copy=True):
        """Scale an array of unsigned/positive integers from `n` to `m` bits.

        Numbers can be represented exactly only if `m` is a multiple of `n`.

        Parameters
        ----------
        a : ndarray
            Input image array.
        n : int
            Number of bits currently used to encode the values in `a`.
        m : int
            Desired number of bits to encode the values in `out`.
        copy : bool, optional
            If True, allocates and returns new array. Otherwise, modifies
            `a` in place.

        Returns
        -------
        out : array
            Output image array. Has the same kind as `a`.
        """
        kind = a.dtype.kind
        if n > m and a.max() < 2 ** m:
            mnew = int(np.ceil(m / 2) * 2)
            if mnew > m:
                dtype = "int{}".format(mnew)
            else:
                dtype = "uint{}".format(mnew)
            n = int(np.ceil(n / 2) * 2)
            warn("Downcasting {} to {} without scaling because max "
                 "value {} fits in {}".format(a.dtype, dtype, a.max(), dtype))
            return a.astype(_dtype_bits(kind, m))
        elif n == m:
            return a.copy() if copy else a
        elif n > m:
            # downscale with precision loss
            prec_loss()
            if copy:
                b = np.empty(a.shape, _dtype_bits(kind, m))
                np.floor_divide(a, 2**(n - m), out=b, dtype=a.dtype,
                                casting='unsafe')
                return b
            else:
                a //= 2**(n - m)
                return a
        elif m % n == 0:
            # exact upscale to a multiple of `n` bits
            if copy:
                b = np.empty(a.shape, _dtype_bits(kind, m))
                np.multiply(a, (2**m - 1) // (2**n - 1), out=b, dtype=b.dtype)
                return b
            else:
                a = a.astype(_dtype_bits(kind, m, a.dtype.itemsize), copy=False)
                a *= (2**m - 1) // (2**n - 1)
                return a
        else:
            # upscale to a multiple of `n` bits,
            # then downscale with precision loss
            prec_loss()
            o = (m // n + 1) * n
            if copy:
                b = np.empty(a.shape, _dtype_bits(kind, o))
                np.multiply(a, (2**o - 1) // (2**n - 1), out=b, dtype=b.dtype)
                b //= 2**(o - m)
                return b
            else:
                a = a.astype(_dtype_bits(kind, o, a.dtype.itemsize), copy=False)
                a *= (2**o - 1) // (2**n - 1)
                a //= 2**(o - m)
                return a
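A standalone illustration of the precision-losing downscale branch above: dropping from n = 8 to m = 4 bits is an in-dtype integer division by 2**(n - m), which simply discards the low bits.

import numpy as np

a = np.array([0, 15, 16, 255], dtype=np.uint8)
b = np.empty(a.shape, np.uint8)
np.floor_divide(a, 2 ** (8 - 4), out=b, dtype=a.dtype, casting='unsafe')
print(b)  # [ 0  0  1 15]: the low 4 bits are gone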
Code Example #57
def getXY(n):
    y = np.floor_divide((n - 1), GRID) + 1
    x = n - (GRID * y) + GRID
    return x, y
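A round-trip check of getXY with an assumed grid width (GRID is not shown in the snippet, so the value below is hypothetical): the 1-based cell number n maps to a 1-based (column, row) pair and back.

import numpy as np

GRID = 8  # assumed grid width, not from the original snippet

def getXY(n):
    y = np.floor_divide((n - 1), GRID) + 1
    x = n - (GRID * y) + GRID
    return x, y

for n in (1, 8, 9, 64):
    x, y = getXY(n)
    assert (y - 1) * GRID + x == n  # inverse mapping recovers n
x, y = getXY(9)
print(x, y)  # 1 2: first column of the second row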
Code Example #58
File: poloidal_grid.py Project: boutproject/zoidberg
    def findIndex(self, R, Z, tol=1e-10, show=False):
        """Finds the (x, z) index corresponding to the given (R, Z) coordinate

        Parameters
        ----------
        R, Z : array_like
            Locations. Can be scalar or array, must be the same shape
        tol : float, optional
            Maximum tolerance on the square distance

        Returns
        -------
        x, z : (ndarray, ndarray)
            Index as a float, same shape as R, Z

        """

        # Make sure inputs are floating-point NumPy arrays
        R = np.asarray(R, dtype=float)
        Z = np.asarray(Z, dtype=float)

        # Check that they have the same shape
        assert R.shape == Z.shape

        input_shape = R.shape  # So output has same shape as input

        # Get distance and index into flattened data
        # Note ind can be an integer, or an array of ints
        # with the same number of elements as the input (R,Z) arrays
        n = R.size
        position = np.concatenate((R.reshape((n, 1)), Z.reshape((n, 1))),
                                  axis=1)

        R = R.reshape((n, ))
        Z = Z.reshape((n, ))

        dists, ind = self.tree.query(position)

        # Calculate (x,y) index
        nx, nz = self.R.shape
        xind = np.floor_divide(ind, nz)
        zind = ind - xind * nz

        # Convert indices to float
        xind = np.asarray(xind, dtype=float)
        zind = np.asarray(zind, dtype=float)

        # Create a mask for the positions
        mask = np.ones(xind.shape)
        mask[np.logical_or(
            (xind < 0.5),
            (xind > (nx - 1.5)))] = 0.0  # Set to zero if near the boundary

        if show and plotting_available:
            plt.plot(self.R, self.Z, ".")
            plt.plot(R, Z, "x")

        while True:
            # Use Newton iteration to find the index
            # dR, dZ are the distance away from the desired point
            Rpos, Zpos = self.getCoordinate(xind, zind)
            if show and plotting_available:
                plt.plot(Rpos, Zpos, "o")
            dR = Rpos - R
            dZ = Zpos - Z

            # Check if close enough
            # Note: only check the points which are not in the boundary
            if np.amax(mask * (dR**2 + dZ**2)) < tol:
                break

            # Calculate derivatives
            dRdx, dZdx = self.getCoordinate(xind, zind, dx=1)
            dRdz, dZdz = self.getCoordinate(xind, zind, dz=1)

            # Invert 2x2 matrix to get change in coordinates
            #
            # (x) -=  ( dR/dx   dR/dz )^-1  (dR)
            # (z)     ( dZ/dx   dZ/dz )     (dZ)
            #
            # (x) -=  ( dZ/dz  -dR/dz ) (dR)
            # (z)     (-dZ/dx   dR/dx ) (dZ) / (dR/dx*dZ/dz - dR/dz*dZ/dx)
            determinant = dRdx * dZdz - dRdz * dZdx

            xind -= mask * ((dZdz * dR - dRdz * dZ) / determinant)
            zind -= mask * ((dRdx * dZ - dZdx * dR) / determinant)

            # Re-check for boundary
            in_boundary = xind < 0.5
            mask[in_boundary] = 0.0  # Set to zero if near the boundary
            xind[in_boundary] = 0.0
            out_boundary = xind > (nx - 1.5)
            mask[out_boundary] = 0.0  # Set to zero if near the boundary
            xind[out_boundary] = nx - 1

        if show and plotting_available:
            plt.show()

        # Set xind to -1 if in the inner boundary, nx if in outer boundary
        in_boundary = xind < 0.5
        xind[in_boundary] = -1
        out_boundary = xind > (nx - 1.5)
        xind[out_boundary] = nx

        return xind.reshape(input_shape), zind.reshape(input_shape)
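The flattened-index decomposition at the heart of findIndex, checked in isolation: a KD-tree over points from a C-ordered (nx, nz) grid returns flat indices, which np.floor_divide splits back into row and column. The shapes and indices below are made up for illustration.

import numpy as np

nx, nz = 4, 5
ind = np.array([0, 7, 19])       # flat indices into the (nx, nz) grid
xind = np.floor_divide(ind, nz)  # row index
zind = ind - xind * nz           # column index
assert np.array_equal(np.ravel_multi_index((xind, zind), (nx, nz)), ind)
print(xind, zind)  # [0 1 3] [0 2 4]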
Code Example #59
folder_switch = {
    0: 'crater',
    1: 'not crater',
    2: 'not sure',
}

base_filename = "hs-45-45_lola20sp"

full_size = [30400, 30400]
p_size = [3800, 3800]
cut_size = [32, 32]
stride = np.divide(cut_size, 2)

for n in (26,):  # iterate over the selected piece indices
    v_pieces = np.floor_divide(full_size[0], p_size[0])
    h_pieces = np.floor_divide(full_size[1], p_size[1])
    y_ind = np.floor_divide(n, v_pieces)
    x_ind = np.mod(n, v_pieces)
    y_pos = [0] * 2
    x_pos = [0] * 2
    y_pos[0] = np.multiply(p_size[0], y_ind)
    x_pos[0] = np.multiply(p_size[1], x_ind)
    y_pos[1] = y_pos[0] + p_size[0]
    x_pos[1] = x_pos[0] + p_size[1]

for m in range(len(folder_switch)):
    folder_name = folder_switch[m]
    if not os.path.isdir(base_folder + folder_name):
        os.mkdir(base_folder + folder_name)
    for filename in os.listdir(base_folder + folder_name):
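The patch bookkeeping above, checked in isolation: a 30400-pixel side splits into 8 pieces of 3800, and piece n maps to a (row, column) pair via np.floor_divide and np.mod.

import numpy as np

full_size, p_size = 30400, 3800
pieces = np.floor_divide(full_size, p_size)  # 8 pieces per side
n = 26
y_ind, x_ind = np.floor_divide(n, pieces), np.mod(n, pieces)
print(pieces, y_ind, x_ind)  # 8 3 2: piece 26 sits in row 3, column 2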
Code Example #60
    def build_mini(self, creature):
        if not hasattr(self, 'grid_size'):
            # check if settings loaded manually, otherwise load default settings
            self.load_settings()

        if not isinstance(creature, Creature):
            return 'Object is not a Creature.'

        if creature.img_url == '':
            return 'No image url found.'

        # Size-based settings in mm
        # after the change to how the font is handled, some settings here are obsolete
        # I will keep them in for now
        min_height_mm = 40
        if creature.size in ['M', 'S', 'T']:
            m_width = self.grid_size
            max_height_mm = 40
            n_height = 8
            font_size = 1.15  # opencv "height"
            font_height = 50  # PIL drawing max height for n_height = 8
            font_width = 1
            enum_size = 2.2
            enum_width = 3
        elif creature.size == 'L':
            m_width = self.grid_size * 2
            max_height_mm = 50
            n_height = 10
            font_size = 2
            font_height = 70
            font_width = 2
            enum_size = 5 * self.grid_size / 24
            enum_width = 8 * self.grid_size / 24
        elif creature.size == 'H':
            m_width = self.grid_size * 3
            max_height_mm = 60 if self.paper_format != 'letter' else 51
            n_height = 12
            font_size = 2.5
            font_height = 80
            font_width = 2
            enum_size = 8
            enum_width = 16
        elif creature.size == 'G':
            m_width = self.grid_size * 4
            max_height_mm = 80 if self.paper_format != 'letter' else 73
            n_height = 14
            font_size = 3
            font_height = 100
            font_width = 3
            enum_size = 14
            enum_width = 32
        else:
            return 'Invalid creature size.'
        ## end of settings

        # mm to px
        width = m_width * self.dpmm
        name_height = n_height * self.dpmm
        base_height = m_width * self.dpmm
        max_height = max_height_mm * self.dpmm
        if self.fixed_height:
            min_height = max_height
        else:
            min_height = min_height_mm * self.dpmm

        text = creature.name

        # scale for grid size
        enum_size = int(np.ceil(enum_size * self.grid_size / 24))
        enum_width = int(np.ceil(enum_width * self.grid_size / 24))
        min_height = int(np.ceil(min_height * self.grid_size / 24))

        ## OPENCV versions (with an attempt to use utf-8 but I couldn't get it to work) of the nameplate.
        # It is now done with PIL to have UTF-8 support.
        # name plate
        # if creature.show_name:
        #     n_img = np.zeros((name_height, width, 3), np.uint8) + 255
        #     x_margin = 0
        #     y_margin = 0
        #     # find optimal font size
        #     while x_margin < 2 or y_margin < 10:
        #         font_size = round(font_size - 0.05, 2)
        #         textsize = cv.getTextSize(text, self.font, font_size, font_width)[0]
        #         x_margin = n_img.shape[1] - textsize[0]
        #         y_margin = n_img.shape[0] - textsize[1]
        #     #        print(font_size, x_margin, y_margin)
        #     # write text
        #     textX = np.floor_divide(x_margin, 2)
        #     textY = np.floor_divide(n_img.shape[0] + textsize[1], 2)
        #
        #     cv.putText(n_img, text, (textX, textY), self.font, font_size, (0, 0, 0), font_width, cv.LINE_AA)
        #     cv.rectangle(n_img, (0, 0), (n_img.shape[1] - 1, n_img.shape[0] - 1), (0, 0, 0), thickness=1)
        #     # img = cv.circle(img, (100, 400), 20, (255,0,0), 3)
        # if creature.show_name:
        #     n_img = np.zeros((name_height, width, 3), np.uint8) + 255
        #     ft = cv.freetype.createFreeType2()
        #     ft.loadFontData(fontFileName='DejaVuSans.ttf', id=0)
        #     x_margin = 0
        #     y_margin = 0
        #     # find optimal font size
        #     while x_margin < 2 or y_margin < 10:
        #         font_size = round(font_size - 0.05, 2)
        #         textsize = ft.getTextSize(text, font_size, font_width)[0]
        #         x_margin = n_img.shape[1] - textsize[0]
        #         y_margin = n_img.shape[0] - textsize[1]
        #     #        print(font_size, x_margin, y_margin)
        #     # write text
        #     textX = np.floor_divide(x_margin, 2)
        #     textY = np.floor_divide(n_img.shape[0] + textsize[1], 2)
        #
        #     ft.putText(n_img, text, (textX, textY), font_size, (0, 0, 0), font_width, cv.LINE_AA)
        #     cv.rectangle(n_img, (0, 0), (n_img.shape[1] - 1, n_img.shape[0] - 1), (0, 0, 0), thickness=1)
        #     # img = cv.circle(img, (100, 400), 20, (255,0,0), 3)

        ## nameplate
        show_name = ""

        if self.force_name == "force_name":
            show_name = True
        elif self.force_name == "force_blank":
            show_name = False
        else:
            show_name = creature.show_name

        if show_name:
            # PIL fix for utf-8 characters
            n_img_pil = Image.new("RGB", (width, name_height), (255, 255, 255))
            x_margin = 0
            y_margin = 0
            # find optimal font size
            while x_margin < 2 or y_margin < 10:
                #print(font_height)
                unicode_font = ImageFont.truetype("DejaVuSans.ttf",
                                                  font_height)
                font_height = round(font_height - 2, 2)
                textsize = unicode_font.getsize(text)
                im_w, im_h = n_img_pil.size
                x_margin = im_w - textsize[0]
                y_margin = im_h - textsize[1]
            # write text
            textX = x_margin // 2
            textY = y_margin // 2
            draw = ImageDraw.Draw(n_img_pil)
            draw.text((textX, textY), text, font=unicode_font, fill=(0, 0, 0))
            n_img = np.array(n_img_pil)
            cv.rectangle(n_img, (0, 0),
                         (n_img.shape[1] - 1, n_img.shape[0] - 1), (0, 0, 0),
                         thickness=1)
        else:
            n_img = np.zeros((1, width, 3), np.uint8)

        ## miniature image
        try:
            req = Request(creature.img_url, headers=self.header)
            with urlopen(req) as resp:
                arr = np.asarray(bytearray(resp.read()), dtype=np.uint8)
                m_img = cv.imdecode(arr, -1)  # Load it "as it is"
        except Exception:
            return 'Image could not be found or loaded.'
        # fix grayscale images

        try:
            if len(m_img.shape) == 2:
                m_img = cv.cvtColor(m_img, cv.COLOR_GRAY2RGB)
        except Exception:
            return 'Image could not be found or loaded.'

        # replace alpha channel with white for pngs (with fix for grayscale images)
        if m_img.shape[2] == 4:
            alpha_channel = m_img[:, :, 3]
            mask = (alpha_channel == 0)
            mask = np.dstack((mask, mask, mask))
            color = m_img[:, :, :3]
            color[mask] = 255
            m_img = color

        # find optimal size of image
        # leave 1 pixel on each side for black border
        if m_img.shape[1] > width - 2:
            f = (width - 2) / m_img.shape[1]
            m_img = cv.resize(m_img, (0, 0), fx=f, fy=f)
            white_vert = np.zeros((m_img.shape[0], 1, 3), np.uint8) + 255
            m_img = np.concatenate((white_vert, m_img, white_vert), axis=1)

        if m_img.shape[0] > max_height - 2:
            f = (max_height - 2) / m_img.shape[0]
            m_img = cv.resize(m_img, (0, 0), fx=f, fy=f)
            white_horiz = np.zeros((1, m_img.shape[1], 3), np.uint8) + 255
            m_img = np.concatenate((white_horiz, m_img, white_horiz), axis=0)

        if m_img.shape[1] < width:
            diff = width - m_img.shape[1]
            left = np.floor_divide(diff, 2)
            right = left
            if diff % 2 == 1: right += 1
            m_img = np.concatenate(
                (np.zeros((m_img.shape[0], left, 3), np.uint8) + 255, m_img,
                 np.zeros((m_img.shape[0], right, 3), np.uint8) + 255),
                axis=1)

        if m_img.shape[0] < min_height:
            diff = min_height - m_img.shape[0]
            top = np.floor_divide(diff, 2)
            bottom = top
            if diff % 2 == 1: bottom += 1
            if creature.position == Creature.WALKING:
                m_img = np.concatenate((np.zeros(
                    (diff, m_img.shape[1], 3), np.uint8) + 255, m_img),
                                       axis=0)
            elif creature.position == Creature.HOVERING:
                m_img = np.concatenate(
                    (np.zeros((top, m_img.shape[1], 3), np.uint8) + 255, m_img,
                     np.zeros((bottom, m_img.shape[1], 3), np.uint8) + 255),
                    axis=0)
            elif creature.position == Creature.FLYING:
                m_img = np.concatenate(
                    (m_img, np.zeros(
                        (diff, m_img.shape[1], 3), np.uint8) + 255),
                    axis=0)
            else:
                return 'Position setting is invalid. Choose Walking, Hovering or Flying.'

        #draw border
        cv.rectangle(m_img, (0, 0), (m_img.shape[1] - 1, m_img.shape[0] - 1),
                     (0, 0, 0),
                     thickness=1)

        ## flipped miniature image
        m_img_flipped = np.flip(m_img, 0)
        if self.darken:
            # change Intensity (V-Value) in HSV color space
            hsv = cv.cvtColor(m_img_flipped, cv.COLOR_BGR2HSV)
            h, s, v = cv.split(hsv)
            # darkening factor between 0 and 1
            factor = max(min((1 - self.darken / 100), 1), 0)
            v[v < 255] = v[v < 255] * (factor)
            final_hsv = cv.merge((h, s, v))
            m_img_flipped = cv.cvtColor(final_hsv, cv.COLOR_HSV2BGR)

        ## base
        bgr_color = tuple(int(creature.color[i:i + 2], 16) for i in (4, 2, 0))
        demi_base = base_height // 2
        if creature.size == 'G': feet_mod = 1
        else: feet_mod = 2
        base_height = int(np.floor(demi_base * feet_mod))
        b_img = np.zeros((base_height, width, 3), np.uint8) + 255
        # fill base
        if self.base_shape == 'square':
            cv.rectangle(b_img, (0, 0), (b_img.shape[1] - 1, demi_base - 1),
                         bgr_color,
                         thickness=-1)
            cv.rectangle(b_img, (0, 0),
                         (b_img.shape[1] - 1, b_img.shape[0] - 1), (0, 0, 0),
                         thickness=1)
        elif self.base_shape == 'circle':
            cv.ellipse(b_img, (width // 2, 0), (width // 2, width // 2), 0, 0,
                       180, bgr_color, -1)
            cv.ellipse(b_img, (width // 2, 0), (width // 2, width // 2), 0, 0,
                       180, (0, 0, 0), 2)
            if feet_mod >= 2:
                cv.ellipse(b_img, (width // 2, base_height),
                           (width // 2, width // 2), 0, 180, 360, (0, 0, 0), 2)
                cv.line(b_img, (0, base_height), (width, base_height),
                        (0, 0, 0), 3)
        elif self.base_shape == 'hexagon':
            half = width // 2
            hexagon_bottom = np.array([(0, 0), (width // 4, half),
                                       (width // 4 * 3, half), (width, 0)],
                                      np.int32)
            hexagon_top = np.array([(0, width), (width // 4, half),
                                    (width // 4 * 3, half), (width, width)],
                                   np.int32)
            cv.fillConvexPoly(b_img, hexagon_bottom, bgr_color, 1)
            if feet_mod >= 2:
                cv.polylines(b_img, [hexagon_top], True, (0, 0, 0), 2)
        else:
            return 'Invalid base shape. Choose square, hexagon or circle.'

        # enumerate
        if self.enumerate and creature.name in self.creature_counter:
            #print(creature.name, self.creature_counter[creature.name])
            text = str(self.creature_counter[creature.name])
            textsize = cv.getTextSize(text, self.font, enum_size,
                                      enum_width)[0]
            x_margin = b_img.shape[1] - textsize[0]
            y_margin = b_img.shape[0] - textsize[1]

            textX = np.floor_divide(x_margin, 2)
            textY = np.floor_divide(demi_base + textsize[1], 2)
            cv.putText(b_img, text, (textX, textY), self.font, enum_size,
                       (255, 255, 255), enum_width, cv.LINE_AA)

            self.creature_counter[creature.name] -= 1

        ## construct full miniature
        img = np.concatenate((m_img, n_img, b_img), axis=0)
        # m_img_flipped = np.flip(m_img, 0)

        nb_flipped = np.rot90(np.concatenate((n_img, b_img), axis=0), 2)
        img = np.concatenate((nb_flipped, m_img_flipped, img), axis=0)

        ## Save image (not needed; only for debug/dev)
        # RGB_img = cv.cvtColor(img, cv.COLOR_BGR2RGB)
        # im_pil = Image.fromarray(RGB_img)
        # im_pil.save(self.save_dir + creature.name + ".png", dpi=(25.4 * self.dpmm, 25.4 * self.dpmm))

        return img
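The centering logic used repeatedly above reduces to one small pattern: split a padding difference into two sides with np.floor_divide and give the odd pixel to one side. A minimal sketch:

import numpy as np

diff = 7                         # total padding to distribute
left = np.floor_divide(diff, 2)
right = left + (diff % 2)        # the odd pixel goes to the right
print(left, right)               # 3 4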