Example #1
def get_indtime_from_timehjd(fnames, time_hjd, fitsreader):
    
    # if not list, make list
    if not isinstance(time_hjd, (tuple, list, np.ndarray)):
        time_hjd = [time_hjd]


    if fitsreader=='astropy' or fitsreader=='pyfits':            
        with pyfits.open(fnames['nights'], mode='denywrite') as hdulist:
            time_hjd_all = np.int64( hdulist['HJD'].data[0]/3600./24. )
            del hdulist['IMAGELIST'].data
        
    elif fitsreader=='fitsio' or fitsreader=='cfitsio': 
        with fitsio.FITS(fnames['nights'], vstorage='object') as hdulist:
            time_hjd_all = np.int64( hdulist['HJD'][0,:]/3600./24. )[0]
            
    else: sys.exit('"fitsreader" can only be "astropy"/"pyfits" or "fitsio"/"cfitsio".')  

    ind_time = np.in1d(time_hjd_all, time_hjd).nonzero()[0] 
     

    #::: check if all dates were found in fits file        
    for hjd in time_hjd:
        if hjd not in time_hjd_all[ind_time]:
            warning = 'Date-HJD ' + str(hjd) + ' not found in fits file.'
            sys.exit(warning)
   
    #::: clean up      
    del time_hjd_all   
   
   
    return ind_time
Example #2
def get_Memcpy2D_d2d(src, dst, src_pitch, dst_pitch, dim_args, itemsize,
                     **kwargs):
    ''' Wrapper for the pycuda.driver.Memcpy2D() function (same args).
    Returns a callable object which copies the arrays on invocation of ().
    dim_args: list, [height, width] !not width_in_bytes
    kwargs: gets ignored, exists to provide a uniform interface with the 3D version
    '''
    height, width = dim_args
    width_in_bytes = width * itemsize
    src_ptr = getattr(src, 'gpudata', 0) # set to NULL if no valid ptr
    dst_ptr = getattr(dst, 'gpudata', 0) # set to NULL if no valid ptr
    cpy = drv.Memcpy2D()
    cpy.set_src_device(src_ptr)
    cpy.set_dst_device(dst_ptr)
    cpy.height = np.int64(height)
    cpy.width_in_bytes = np.int64(width_in_bytes)
    cpy.src_pitch = src_pitch
    cpy.dst_pitch = dst_pitch
    class _copy():
        ''' Proxy class for the Memcpy2D object:
        Wraps the call to pass aligned=True, which seems to be necessary
        in the 2D version (compared to 3D, where it doesn't work with this arg).
        Adds the set_src_device and set_dst_device proxy methods to be able
        to set the src/dst.
        '''
        def __init__(self, memcpy2d):
            self.cpy = memcpy2d
        def set_src_device(self, src_ptr):
            self.cpy.set_src_device(src_ptr)
        def set_dst_device(self, dst_ptr):
            self.cpy.set_dst_device(dst_ptr)
        def __call__(self):
            self.cpy(aligned=True)
    return _copy(cpy)
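
A minimal usage sketch for the wrapper above, assuming pycuda is installed and a CUDA context is available; the array shape and names are illustrative only, not part of the original project.

import numpy as np
import pycuda.autoinit  # noqa: F401  (creates a CUDA context)
import pycuda.gpuarray as gpuarray

src = gpuarray.to_gpu(np.arange(64 * 64, dtype=np.float32).reshape(64, 64))
dst = gpuarray.zeros((64, 64), dtype=np.float32)
copy = get_Memcpy2D_d2d(src, dst, src.strides[0], dst.strides[0],
                        [64, 64], src.dtype.itemsize)
copy()  # performs the device-to-device copy of the whole 64x64 block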
Example #3
  def testInt64(self):
    save_path = os.path.join(self.get_temp_dir(), "int64")

    with self.test_session() as sess:
      # Build a graph with 1 node, and save and restore for them.
      v = tf.Variable(np.int64(15), name="v")
      save = tf.train.Saver({"v": v}, restore_sequentially=True)
      tf.initialize_all_variables().run()

      # Save the initialized values in the file at "save_path"
      val = save.save(sess, save_path)
      self.assertTrue(isinstance(val, six.string_types))
      self.assertEqual(save_path, val)

      with self.test_session() as sess:
        v = tf.Variable(np.int64(-1), name="v")
        save = tf.train.Saver({"v": v})

        with self.assertRaisesWithPredicateMatch(
            tf.OpError, lambda e: "uninitialized value v" in e.message):
          sess.run(v)

        # Restore the saved values in the parameter nodes.
        save.restore(sess, save_path)
        # Check that the parameter nodes have been restored.
        self.assertEqual(np.int64(15), v.eval())
Example #4
	def step(self):
		'''
		For each timestep, a sparse linear system (Ax = C) needs to be solved to update the ice surface elevation.
		'''

		### update diffusivity for each timestep
		self.diffusion_update()
		D_sum = self.D_IC_jc + self.D_IP_jc + self.D_ic_JC + self.D_ic_JP

		row = np.int64([[self.ic_jc],[self.ic_jc],[self.ic_jc],[self.ic_jc],[self.ic_jc]]).flatten()
		col = np.int64([[self.im_jc],[self.ip_jc],[self.ic_jm],[self.ic_jp],[self.ic_jc]]).flatten()
		val = np.array([[-self.OMEGA * self.D_IC_jc],[-self.OMEGA * self.D_IP_jc],[-self.OMEGA * self.D_ic_JC],[-self.OMEGA * self.D_ic_JP],[1/self.dt + self.OMEGA * D_sum]]).flatten()
		C = (1 - self.OMEGA) * ((self.D_IC_jc * self.S[self.im_jc]) + self.D_IP_jc * self.S[self.ip_jc] + self.D_ic_JC * self.S[self.ic_jm] + self.D_ic_JP * \
			self.S[self.ic_jp]) + (1/self.dt - (1 - self.OMEGA) * D_sum) * self.S[self.ic_jc] + self.b_dot
		C = C.flatten()

		### construct a sparse matrix A
		A = csr_matrix( (val,(row,col)), shape=(self.N, self.N))
		# print 'solving'
		S_out = linalg.spsolve(A,C)
		# print 'solved'

		### ice thickness couldn't be negative, ice surface elevation should not be less than bed elevation
		S_out[S_out < self.B] = self.B[S_out < self.B]

		t_n = self.t + self.dt
		return S_out, t_n
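
The docstring above describes solving a sparse linear system Ax = C at each timestep; the following standalone sketch (toy values, not taken from the ice-sheet model) shows the same csr_matrix / spsolve pattern on a tiny 3x3 system.

import numpy as np
from scipy.sparse import csr_matrix
from scipy.sparse import linalg

row = np.int64([0, 0, 1, 1, 2])
col = np.int64([0, 1, 1, 2, 2])
val = np.array([4.0, -1.0, 3.0, -1.0, 2.0])
A = csr_matrix((val, (row, col)), shape=(3, 3))   # assemble sparse A from triplets
C = np.array([1.0, 2.0, 3.0])
x = linalg.spsolve(A, C)                          # solve A x = C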
Example #5
    def calculateComplexDerefOpAddress(complexDerefOp, registerMap):

        match = re.match("((?:\\-?0x[0-9a-f]+)?)\\(%([a-z0-9]+),%([a-z0-9]+),([0-9]+)\\)", complexDerefOp)
        if match is not None:
            offset = 0L
            if len(match.group(1)) > 0:
                offset = long(match.group(1), 16)

            regA = RegisterHelper.getRegisterValue(match.group(2), registerMap)
            regB = RegisterHelper.getRegisterValue(match.group(3), registerMap)

            mult = long(match.group(4), 16)

            # If either of the two register values is missing, return None
            if regA is None or regB is None:
                if regA is None:
                    return (None, "Missing value for register %s" % match.group(2))
                else:
                    return (None, "Missing value for register %s" % match.group(3))

            if RegisterHelper.getBitWidth(registerMap) == 32:
                val = int32(uint32(regA)) + int32(uint32(offset)) + (int32(uint32(regB)) * int32(uint32(mult)))
            else:
                # Assume 64 bit width
                val = int64(uint64(regA)) + int64(uint64(offset)) + (int64(uint64(regB)) * int64(uint64(mult)))
            return (long(val), None)

        return (None, "Unknown failure.")
Example #6
    def test_maybe_convert_scalar(self):

        # pass thru
        result = maybe_convert_scalar('x')
        assert result == 'x'
        result = maybe_convert_scalar(np.array([1]))
        assert result == np.array([1])

        # leave scalar dtype
        result = maybe_convert_scalar(np.int64(1))
        assert result == np.int64(1)
        result = maybe_convert_scalar(np.int32(1))
        assert result == np.int32(1)
        result = maybe_convert_scalar(np.float32(1))
        assert result == np.float32(1)
        result = maybe_convert_scalar(np.float64(1))
        assert result == np.float64(1)

        # coerce
        result = maybe_convert_scalar(1)
        assert result == np.int64(1)
        result = maybe_convert_scalar(1.0)
        assert result == np.float64(1)
        result = maybe_convert_scalar(Timestamp('20130101'))
        assert result == Timestamp('20130101').value
        result = maybe_convert_scalar(datetime(2013, 1, 1))
        assert result == Timestamp('20130101').value
        result = maybe_convert_scalar(Timedelta('1 day 1 min'))
        assert result == Timedelta('1 day 1 min').value
Example #7
    def test_asset_comparisons(self):

        s_23 = Asset(23)
        s_24 = Asset(24)

        self.assertEqual(s_23, s_23)
        self.assertEqual(s_23, 23)
        self.assertEqual(23, s_23)
        self.assertEqual(int32(23), s_23)
        self.assertEqual(int64(23), s_23)
        self.assertEqual(s_23, int32(23))
        self.assertEqual(s_23, int64(23))
        # Check all int types (includes long on py2):
        for int_type in integer_types:
            self.assertEqual(int_type(23), s_23)
            self.assertEqual(s_23, int_type(23))

        self.assertNotEqual(s_23, s_24)
        self.assertNotEqual(s_23, 24)
        self.assertNotEqual(s_23, "23")
        self.assertNotEqual(s_23, 23.5)
        self.assertNotEqual(s_23, [])
        self.assertNotEqual(s_23, None)
        # Compare to a value that doesn't fit into a platform int:
        self.assertNotEqual(s_23, sys.maxsize + 1)

        self.assertLess(s_23, s_24)
        self.assertLess(s_23, 24)
        self.assertGreater(24, s_23)
        self.assertGreater(s_24, s_23)
Example #8
def testNumpyTypeCoercion():
    t = emzed.utils.toTable("a", [np.int32(1)])
    t.info()
    assert t.getColTypes() == [int], t.getColTypes()
    t = emzed.utils.toTable("a", [None, np.int32(1)])
    t.info()
    assert t.getColTypes() == [int], t.getColTypes()

    t.addColumn("b", np.int32(1))
    assert t.getColTypes() == [int, int], t.getColTypes()
    t.replaceColumn("b", [None, np.int32(1)])
    assert t.getColTypes() == [int, int], t.getColTypes()

    t.replaceColumn("b", np.int64(1))
    assert t.getColTypes() == [int, int], t.getColTypes()
    t.replaceColumn("b", [None, np.int64(1)])
    assert t.getColTypes() == [int, int], t.getColTypes()

    t.replaceColumn("b", np.float32(1.0))
    assert t.getColTypes() == [int, float], t.getColTypes()
    t.replaceColumn("b", [None, np.float32(1.0)])
    assert t.getColTypes() == [int, float], t.getColTypes()

    t.replaceColumn("b", np.float64(2.0))
    assert t.getColTypes() == [int, float], t.getColTypes()
    t.replaceColumn("b", [None, np.float64(2.0)])
    assert t.getColTypes() == [int, float], t.getColTypes()
Example #9
    def test_cf_timedelta(self):
        examples = [
            ('1D', 'days', np.int64(1)),
            (['1D', '2D', '3D'], 'days', np.array([1, 2, 3], 'int64')),
            ('1h', 'hours', np.int64(1)),
            ('1ms', 'milliseconds', np.int64(1)),
            ('1us', 'microseconds', np.int64(1)),
            (['NaT', '0s', '1s'], None, [np.nan, 0, 1]),
            (['30m', '60m'], 'hours', [0.5, 1.0]),
        ]
        if pd.__version__ >= '0.16':
            # not quite sure why, but these examples don't work on older pandas
            examples.extend([(np.timedelta64('NaT', 'ns'), 'days', np.nan),
                             (['NaT', 'NaT'], 'days', [np.nan, np.nan])])

        for timedeltas, units, numbers in examples:
            timedeltas = pd.to_timedelta(timedeltas, box=False)
            numbers = np.array(numbers)

            expected = numbers
            actual, _ = conventions.encode_cf_timedelta(timedeltas, units)
            self.assertArrayEqual(expected, actual)
            self.assertEqual(expected.dtype, actual.dtype)

            if units is not None:
                expected = timedeltas
                actual = conventions.decode_cf_timedelta(numbers, units)
                self.assertArrayEqual(expected, actual)
                self.assertEqual(expected.dtype, actual.dtype)

        expected = np.timedelta64('NaT', 'ns')
        actual = conventions.decode_cf_timedelta(np.array(np.nan), 'days')
        self.assertArrayEqual(expected, actual)
Example #10
    def testIntMax(self):
        num = np.int(np.iinfo(np.int).max)
        self.assertEqual(np.int(ujson.decode(ujson.encode(num))), num)

        num = np.int8(np.iinfo(np.int8).max)
        self.assertEqual(np.int8(ujson.decode(ujson.encode(num))), num)

        num = np.int16(np.iinfo(np.int16).max)
        self.assertEqual(np.int16(ujson.decode(ujson.encode(num))), num)

        num = np.int32(np.iinfo(np.int32).max)
        self.assertEqual(np.int32(ujson.decode(ujson.encode(num))), num)

        num = np.uint8(np.iinfo(np.uint8).max)
        self.assertEqual(np.uint8(ujson.decode(ujson.encode(num))), num)

        num = np.uint16(np.iinfo(np.uint16).max)
        self.assertEqual(np.uint16(ujson.decode(ujson.encode(num))), num)

        num = np.uint32(np.iinfo(np.uint32).max)
        self.assertEqual(np.uint32(ujson.decode(ujson.encode(num))), num)

        if platform.architecture()[0] != '32bit':
            num = np.int64(np.iinfo(np.int64).max)
            self.assertEqual(np.int64(ujson.decode(ujson.encode(num))), num)

            # uint64 max will always overflow as it's encoded to signed
            num = np.uint64(np.iinfo(np.int64).max)
            self.assertEqual(np.uint64(ujson.decode(ujson.encode(num))), num)
Example #11
def KMeansConfig(dataTable, k, eps = 0.00001, srcDims=100000000000,gpuMemSize = 512, settings = {}):
    """
    Creates all the memory/data settings to run GPU accelerated KMeans.
    """
    settings = dataConfig(dataTable,settings)
    settings["sourceDims"] = min(settings["sourceDims"],srcDims)
    
    #XXX: determine memory and thread sizes from device
    settings["memSize"] = gpuMemSize*1024*1024
    settings["maxThreads"] = 1024
    
    #set up chunk sizes
    memoryPerElement = 4*(settings["sourceDims"]*2+2) + 20*4 #this is an estimated memory used per element
    settings["chunkSize"] = min(int(math.ceil(float(settings["memSize"])/memoryPerElement)),settings["dataLength"])
    settings["lastChunkSize"] = ((settings["dataLength"]-1) % settings["chunkSize"]) + 1
    
    #create kernel gridsize tuples
    settings["block"] = (settings["maxThreads"],1,1)
    settings["grid"] = (max(int(math.ceil(float(settings["chunkSize"])/settings["maxThreads"])),1),1,1)
    
    #precalculate all constant kernel params
    settings["dimensions"] = numpy.int64(settings["sourceDims"])
    settings["k"] = numpy.int64(k)
    settings["eps"] = numpy.float32(eps)
    settings["dataSize"] = numpy.int64(settings["dataLength"])
    settings["chunkSize"] = numpy.int64(settings["chunkSize"])
    settings["maxThreads"] = numpy.int64(settings["maxThreads"])
    
    return settings
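
To make the chunking arithmetic concrete, here is a worked example with hypothetical numbers (512 MB of GPU memory, 3 source dimensions, 1,000,000 data points); these values are illustrative only.

import math

memSize = 512 * 1024 * 1024                       # 536,870,912 bytes
memoryPerElement = 4 * (3 * 2 + 2) + 20 * 4       # 112 bytes per element (estimate)
chunkSize = min(int(math.ceil(float(memSize) / memoryPerElement)), 1000000)   # 1,000,000
lastChunkSize = ((1000000 - 1) % chunkSize) + 1   # 1,000,000 (everything fits in one chunk)
grid = (max(int(math.ceil(float(chunkSize) / 1024)), 1), 1, 1)                # (977, 1, 1)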
Example #12
 def test_tostring(self):
     import numpy as np
     assert np.int64(123).tostring() == np.array(123, dtype='i8').tostring()
     assert np.int64(123).tostring('C') == np.array(123, dtype='i8').tostring()
     assert np.float64(1.5).tostring() == np.array(1.5, dtype=float).tostring()
     exc = raises(TypeError, 'np.int64(123).tostring("Z")')
     assert exc.value[0] == 'order not understood'
Example #13
    def testInt(self):
        num = np.int(2562010)
        self.assertEqual(np.int(ujson.decode(ujson.encode(num))), num)

        num = np.int8(127)
        self.assertEqual(np.int8(ujson.decode(ujson.encode(num))), num)

        num = np.int16(2562010)
        self.assertEqual(np.int16(ujson.decode(ujson.encode(num))), num)

        num = np.int32(2562010)
        self.assertEqual(np.int32(ujson.decode(ujson.encode(num))), num)

        num = np.int64(2562010)
        self.assertEqual(np.int64(ujson.decode(ujson.encode(num))), num)

        num = np.uint8(255)
        self.assertEqual(np.uint8(ujson.decode(ujson.encode(num))), num)

        num = np.uint16(2562010)
        self.assertEqual(np.uint16(ujson.decode(ujson.encode(num))), num)

        num = np.uint32(2562010)
        self.assertEqual(np.uint32(ujson.decode(ujson.encode(num))), num)

        num = np.uint64(2562010)
        self.assertEqual(np.uint64(ujson.decode(ujson.encode(num))), num)
Example #14
    def test_maybe_convert_scalar(self):

        # pass thru
        result = com._maybe_convert_scalar('x')
        self.assertEqual(result, 'x')
        result = com._maybe_convert_scalar(np.array([1]))
        self.assertEqual(result, np.array([1]))

        # leave scalar dtype
        result = com._maybe_convert_scalar(np.int64(1))
        self.assertEqual(result, np.int64(1))
        result = com._maybe_convert_scalar(np.int32(1))
        self.assertEqual(result, np.int32(1))
        result = com._maybe_convert_scalar(np.float32(1))
        self.assertEqual(result, np.float32(1))
        result = com._maybe_convert_scalar(np.float64(1))
        self.assertEqual(result, np.float64(1))

        # coerce
        result = com._maybe_convert_scalar(1)
        self.assertEqual(result, np.int64(1))
        result = com._maybe_convert_scalar(1.0)
        self.assertEqual(result, np.float64(1))
        result = com._maybe_convert_scalar(pd.Timestamp('20130101'))
        self.assertEqual(result, pd.Timestamp('20130101').value)
        result = com._maybe_convert_scalar(datetime(2013, 1, 1))
        self.assertEqual(result, pd.Timestamp('20130101').value)
        result = com._maybe_convert_scalar(pd.Timedelta('1 day 1 min'))
        self.assertEqual(result, pd.Timedelta('1 day 1 min').value)
Example #15
def test_gh_5430():
    # At least one of these raises an error unless gh-5430 is
    # fixed. In py2k an int is implemented using a C long, so
    # which one fails depends on your system. In py3k there is only
    # one arbitrary precision integer type, so both should fail.
    sigma = np.int32(1)
    out = sndi._ni_support._normalize_sequence(sigma, 1)
    assert_equal(out, [sigma])
    sigma = np.int64(1)
    out = sndi._ni_support._normalize_sequence(sigma, 1)
    assert_equal(out, [sigma])
    # This worked before; make sure it still works
    sigma = 1
    out = sndi._ni_support._normalize_sequence(sigma, 1)
    assert_equal(out, [sigma])
    # This worked before; make sure it still works
    sigma = [1, 1]
    out = sndi._ni_support._normalize_sequence(sigma, 2)
    assert_equal(out, sigma)
    # Also include the OP's original example to make sure we fixed the issue
    x = np.random.normal(size=(256, 256))
    perlin = np.zeros_like(x)
    for i in 2**np.arange(6):
        perlin += sndi.filters.gaussian_filter(x, i, mode="wrap") * i**2
    # This also fixes gh-4106; show that the OP's example now runs.
    x = np.int64(21)
    sndi._ni_support._normalize_sequence(x, 0)
Example #16
def get_stock_his_day_Data(code, startDay, endDay):  # generator for the stock data, fetched year by year
	df = ts.get_stock_basics()
	tmDate = df.ix[code]['timeToMarket']

	if '-' in startDay:
		_d = startDay.split('-')
		startDay = _d[0]+_d[1]+_d[2]

	if '-' in endDay:
		_d = endDay.split('-')
		endDay = _d[0]+_d[1]+_d[2]

	if not isinstance(startDay, np.int64):
		startDay = np.int64(startDay)
	if not isinstance(endDay, np.int64):
		endDay = np.int64(endDay)

	if startDay < tmDate:
		startDay = tmDate

	today = np.int64( str(datetime.date.today()).replace('-','') )

	if endDay > today:
		endDay = today
 
	#search by year, for the reliability
	nyears = endDay/10000 - startDay/10000 + 1
	sstartDay, sendDay = str(startDay), str(endDay)
	for nyear in xrange(startDay/10000,endDay/10000+1):
		tmpStart = sstartDay[0:4]+'-'+sstartDay[4:6]+'-'+sstartDay[6:8] if nyear==startDay/10000 else str(nyear)+'-01-01'
		tmpEnd = sendDay[0:4]+'-'+sendDay[4:6]+'-'+sendDay[6:8] if nyear==(endDay/10000) else str(nyear)+'-12-31'
		logging.debug("get code:%s history data from %s to %s" %(code, tmpStart, tmpEnd))
		tmpdata = ts.get_h_data(code, start=tmpStart, end=tmpEnd)
		yield(tmpdata)
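
A minimal usage sketch, assuming tushare (imported as ts) is available and the stock code below exists; it simply iterates over the per-year DataFrames the generator yields.

for yearly_df in get_stock_his_day_Data('600848', '2015-01-01', '2016-06-30'):
    if yearly_df is not None:
        print(yearly_df.head())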
Example #17
 def test_numpy_scalar_conversion_values(self):
     self.assertEqual(nd.as_py(nd.array(np.bool_(True))), True)
     self.assertEqual(nd.as_py(nd.array(np.bool_(False))), False)
     self.assertEqual(nd.as_py(nd.array(np.int8(100))), 100)
     self.assertEqual(nd.as_py(nd.array(np.int8(-100))), -100)
     self.assertEqual(nd.as_py(nd.array(np.int16(20000))), 20000)
     self.assertEqual(nd.as_py(nd.array(np.int16(-20000))), -20000)
     self.assertEqual(nd.as_py(nd.array(np.int32(1000000000))), 1000000000)
     self.assertEqual(nd.as_py(nd.array(np.int64(-1000000000000))),
                      -1000000000000)
     self.assertEqual(nd.as_py(nd.array(np.int64(1000000000000))),
                      1000000000000)
     self.assertEqual(nd.as_py(nd.array(np.int32(-1000000000))),
                      -1000000000)
     self.assertEqual(nd.as_py(nd.array(np.uint8(200))), 200)
     self.assertEqual(nd.as_py(nd.array(np.uint16(50000))), 50000)
     self.assertEqual(nd.as_py(nd.array(np.uint32(3000000000))), 3000000000)
     self.assertEqual(nd.as_py(nd.array(np.uint64(10000000000000000000))),
                      10000000000000000000)
     self.assertEqual(nd.as_py(nd.array(np.float32(2.5))), 2.5)
     self.assertEqual(nd.as_py(nd.array(np.float64(2.5))), 2.5)
     self.assertEqual(nd.as_py(nd.array(np.complex64(2.5-1j))), 2.5-1j)
     self.assertEqual(nd.as_py(nd.array(np.complex128(2.5-1j))), 2.5-1j)
     if np.__version__ >= '1.7':
         self.assertEqual(nd.as_py(nd.array(np.datetime64('2000-12-13'))),
                          date(2000, 12, 13))
Example #18
 def _wrapSPE(self):
     if 0 and sys.version < '3.0':
         self.File.seek(42)
         xdim = numpy.int64(numpy.fromfile(self.File, numpy.int16, 1)[0])
         self.File.seek(656)
         ydim = numpy.int64(numpy.fromfile(self.File, numpy.int16, 1))
         self.File.seek(4100)
         self.__data = numpy.fromfile(self.File, numpy.uint16, int(xdim * ydim))
     else:
         import struct
         self.File.seek(0)
         a = self.File.read()
         xdim = numpy.int64(struct.unpack('<h', a[42:44])[0])
         ydim = numpy.int64(struct.unpack('<h', a[656:658])[0])
         fmt = '<%dH' % int(xdim * ydim)
         self.__data = numpy.array(struct.unpack(fmt, a[4100:int(4100 + int(2 * xdim * ydim))])).astype(numpy.uint16)
     self.__data.shape = ydim, xdim
     Index = 0
     self.Images.append(Image())
     self.NumImages = 1
     self.Images[Index].Dim1 = ydim
     self.Images[Index].Dim2 = xdim
     self.Images[Index].NumDim = 2
     self.Images[Index].DataType = 'UnsignedShort'
     self.Images[Index].ByteOrder = 'LowByteFirst'
     if self.SysByteOrder.upper() != self.Images[Index].ByteOrder.upper():
         self.__data = self.__data.byteswap()
     self.Images[Index].StaticHeader['Dim_1'] = self.Images[Index].Dim1
     self.Images[Index].StaticHeader['Dim_2'] = self.Images[Index].Dim2
     self.Images[Index].StaticHeader['Offset_1'] = 0
     self.Images[Index].StaticHeader['Offset_2'] = 0
     self.Images[Index].StaticHeader['DataType'] = self.Images[Index].DataType
Example #19
 def __init__(self, id_number):
     self.shape = (6, 6)
     tmp_pat = np.int64(id_number) & id_mask
     self.pattern = np.int64(0)
     for i in range(4):
         self.pattern |= tmp_pat << pos_mask[i]
     self.pattern ^= full_mask
Example #20
def reduce_dataset(dataset, reducer):
  """Returns the result of reducing the `dataset` using `reducer`.

  Args:
    dataset: A @{tf.data.Dataset} object.
    reducer: A @{tf.contrib.data.Reducer} object representing the reduce logic.

  Returns:
    A nested structure of @{tf.Tensor} objects, corresponding to the result
    of reducing `dataset` using `reducer`.

  Raises:
    TypeError: if `dataset` is not a `tf.data.Dataset` object.
  """
  if not isinstance(dataset, dataset_ops.Dataset):
    raise TypeError("`dataset` must be a `tf.data.Dataset` object.")

  # The sentinel dataset is used in case the reduced dataset is empty.
  sentinel_dataset = dataset_ops.Dataset.from_tensors(
      reducer.finalize_func(reducer.init_func(np.int64(0))))
  reduced_dataset = dataset.apply(
      grouping.group_by_reducer(lambda x: np.int64(0), reducer))

  return get_single_element(
      reduced_dataset.concatenate(sentinel_dataset).take(1))
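
A minimal usage sketch, assuming the TF 1.x APIs referenced above (tf.data.Dataset and tf.contrib.data.Reducer) are available; the dataset and reducer below are illustrative only.

import numpy as np
import tensorflow as tf

dataset = tf.data.Dataset.range(10)
sum_reducer = tf.contrib.data.Reducer(
    init_func=lambda _: np.int64(0),
    reduce_func=lambda state, value: state + value,
    finalize_func=lambda state: state)
total = reduce_dataset(dataset, sum_reducer)   # a tf.Tensor that evaluates to 45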
Example #21
def get_matlab_complex_fi(var_name, data, suffix='_int'):
    """
    Reconstruct a complex array from saved matlab fixed-point data
    """
    real_int = np.int64(data['%s_real%s' % (var_name, suffix)].ravel())
    imag_int = np.int64(data['%s_imag%s' % (var_name, suffix)].ravel())
    return real_int + imag_int * 1j
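
A hypothetical usage sketch: it assumes a .mat file that stores the real and imaginary parts of a fixed-point capture named "rx" as integer arrays "rx_real_int" and "rx_imag_int", matching the naming convention the function expects.

from scipy.io import loadmat

data = loadmat('fixed_point_capture.mat')   # hypothetical file name
rx = get_matlab_complex_fi('rx', data)      # complex array: real_int + 1j * imag_int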
Example #22
def compress(n_file_input, factor, n_neurons, n_delays, do_media, n_file_output):
    delay = np.empty((n_neurons,n_neurons,n_delays), dtype=int)
    for i in range(0, n_neurons):
        delay[i] = np.loadtxt(n_file_input+str(i)+".txt", dtype=int)

    d1 = np.zeros((n_neurons, n_neurons, int(np.floor(n_delays/factor)) + 1))
    for i in range(0, np.int64(np.floor(n_delays/factor))):   #check +1
        d1[:,:,i] = np.sum(delay[:,:,i*factor:(i+1)*factor], 2)
    
    if(n_delays%factor != 0):
        i = i+1
        d1[:,:,i] = np.sum(delay[:,:,i*factor:], 2)
        
    if(do_media):
        for i in range(0,n_neurons):
            for j in range(0,n_neurons):
                d1[i,j,1:-1] -= np.int64(np.mean(d1[i,j,1:200]))
        
        for i in range(0,n_neurons):
            for j in range(0,n_neurons):
                for h in range (0,d1.shape[2]):
                    if (d1[i,j,h] < 0):
                        d1[i,j,h] = 0        
    for i in range(0,n_neurons):
        np.savetxt(n_file_output+str(i)+".txt" ,d1[i,:,:], fmt = '%01d')
    
                
    
    mySave_3D(d1[:,:,0:20], n_file_output + "tot", n_neurons, n_delays)
Example #23
File: ttl.py Project: cjbe/artiq
    def __init__(self, dmgr, channel, core_device="core"):
        self.core = dmgr.get(core_device)
        self.channel = channel

        # in RTIO cycles
        self.previous_timestamp = numpy.int64(0)
        self.acc_width = numpy.int64(24)
Example #24
    def histogram_on_speed(self, probmap):
        cx = probmap.shape[1] // 2
        cy = probmap.shape[0] // 2



        binsize = 1
        binnum = np.int64(self.nbr_radius / binsize) + 1
        hist = np.zeros((binnum,1))


        if probmap[cy,cx] == probmap.max():
            return hist # no optical flow found and this observation will be discarded

        for y in range(probmap.shape[0]):
            for x in range(probmap.shape[1]):
                if probmap[y,x] < 0.0001:
                    continue 
                dx = np.abs(x - cx)
                dy = np.abs(y - cy)
                d = np.maximum( dx, dy )
                d = np.int64(d/binsize)
                if d >= binnum:
                    d = binnum - 1
                hist[d,0] += probmap[y,x]
        hist = hist /(0.001 + np.sum(hist) )
        return hist
Example #25
    def antnum_list_to_baselines(self, antnums=[]):
        '''
        antnums will be a list of either tuples of strings, or strings
        this implementation allows the user to input both 0_1 and 1_0
        and it will return the expected baseline (0_1) in both cases
        '''
        antnums_in_data = set(self.ant_1_array)
        baselines = set()
        
        for i in antnums:
            if isinstance(i, tuple):
                ant1, ant2 = np.int64(i)
                
                if ant1 not in antnums_in_data:
                    raise ValueError('No antenna {} found in data.'.format(ant1))
                if ant2 not in antnums_in_data:
                    raise ValueError('No antenna {} found in data.'.format(ant2))
                
                baselines.add(self.antnums_to_baseline(min(ant1, ant2), max(ant1, ant2)))
            
            else:
                ant = np.int64(i)

                if ant not in antnums_in_data:
                    raise ValueError('No antenna {} found in data.'.format(ant))
                
                for j in antnums_in_data:
                    baselines.add(self.antnums_to_baseline(min(ant, j), max(ant, j)))

        return baselines
Example #26
File: spi.py Project: cjbe/artiq
 def __init__(self, dmgr, channel, core_device="core"):
     self.core = dmgr.get(core_device)
     self.ref_period_mu = seconds_to_mu(self.core.coarse_ref_period, self.core)
     self.channel = channel
     self.write_period_mu = numpy.int64(0)
     self.read_period_mu = numpy.int64(0)
     self.xfer_period_mu = numpy.int64(0)
Example #27
    def test_trunc(self):
        xsg_test_data = loadmat('rounding_behavior_Fix_8_7_to_Fix_5_4.mat')

        input_fractWidth = int(xsg_test_data['input_fract_len'][0,0])
        input_bit_width = int(xsg_test_data['input_bit_width'][0,0])
        input_intWidth = input_bit_width - input_fractWidth - 1
        input_dtype = (input_intWidth, input_fractWidth)

        output_fractWidth = int(xsg_test_data['output_fract_len'][0,0])
        output_bit_width = int(xsg_test_data['output_bit_width'][0,0])
        output_intWidth = output_bit_width - output_fractWidth - 1
        output_dtype = (output_intWidth, output_fractWidth)

        input_int = np.int64(xsg_test_data['input_int'].ravel())
        input_float = input_int * (2 ** -input_fractWidth);

        trunc_int = np.int64(xsg_test_data['trunc_int'].ravel())
        trunc_float = trunc_int * (2 ** -output_fractWidth)

        N = len(input_int)
        for k in range(N):
            input = FixedInt(input_float[k], dtype=input_dtype)
            input_trunc = fixed_point.rounding.round_trunc(input, output_dtype)

            self.assertTrue(input_trunc.fValue == trunc_float[k])
Example #28
def backward_propagation_with_regularization(X,Y,cache,lambd):
    '''
    Implements the backward propagation for our model with L2 regularization added.
    :param X: input dataset, of shape (number of input nodes, number of examples)
    :param Y: labels, of shape (number of output nodes, number of examples)
    :param cache: the cache output from forward_propagation()
    :param lambd: regularization hyperparameter, a real number
    :return:
    gradients - a dictionary with the gradients for each parameter, activation and pre-activation variable
    '''
    m = X.shape[1]
    (Z1,A1,W1,b1,Z2,A2,W2,b2,Z3,A3,W3,b3)=cache
    dZ3 = A3-Y

    dW3 = (1/m)*np.dot(dZ3,A2.T)+((lambd*W3)/m)
    db3 = (1/m)*np.sum(dZ3,axis = 1,keepdims=True)

    dA2 = np.dot(W3.T,dZ3)
    dZ2 = np.multiply(dA2,np.int64(A2>0))
    dW2 = (1/m)*np.dot(dZ2,A1.T)+((lambd*W2)/m)
    db2 = (1/m)*np.sum(dZ2,axis = 1,keepdims=True)

    dA1 = np.dot(W2.T,dZ2)
    dZ1 = np.multiply(dA1,np.int64(A1>0))
    dW1 = (1/m)*np.dot(dZ1,X.T)+((lambd*W1)/m)
    db1 = (1/m)*np.sum(dZ1,axis =1,keepdims = True)

    gradients = {
        'dZ3':dZ3,'dW3':dW3,'db3':db3,'dA2':dA2,
        'dZ2':dZ2,'dW2':dW2,'db2':db2,'dA1':dA1,
        'dZ1':dZ1,'dW1':dW1,'db1':db1
    }

    return gradients
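
For context, a minimal sketch (not part of the original code) of the L2-regularized cost whose gradient produces the (lambd * W) / m terms above: the usual cross-entropy cost plus (lambd / (2*m)) times the sum of squared weights.

import numpy as np

def compute_cost_with_regularization(A3, Y, W1, W2, W3, lambd):
    m = Y.shape[1]
    cross_entropy = -np.sum(np.multiply(Y, np.log(A3)) +
                            np.multiply(1 - Y, np.log(1 - A3))) / m
    l2 = (lambd / (2 * m)) * (np.sum(np.square(W1)) +
                              np.sum(np.square(W2)) +
                              np.sum(np.square(W3)))
    return cross_entropy + l2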
Example #29
  def testSaveRestoreNumpyState(self):
    directory = self.get_temp_dir()
    prefix = os.path.join(directory, "ckpt")
    save_state = _NumpyState()
    saver = util.Checkpoint(numpy=save_state)
    save_state.a = numpy.ones([2, 2])
    save_state.b = numpy.ones([2, 2])
    save_state.b = numpy.zeros([2, 2])
    save_state.c = numpy.int64(3)
    self.assertAllEqual(numpy.ones([2, 2]), save_state.a)
    self.assertAllEqual(numpy.zeros([2, 2]), save_state.b)
    self.assertEqual(3, save_state.c)
    first_save_path = saver.save(prefix)
    save_state.a[1, 1] = 2.
    save_state.c = numpy.int64(4)
    second_save_path = saver.save(prefix)

    load_state = _NumpyState()
    loader = util.Checkpoint(numpy=load_state)
    loader.restore(first_save_path).initialize_or_restore()
    self.assertAllEqual(numpy.ones([2, 2]), load_state.a)
    self.assertAllEqual(numpy.zeros([2, 2]), load_state.b)
    self.assertEqual(3, load_state.c)
    load_state.a[0, 0] = 42.
    self.assertAllEqual([[42., 1.], [1., 1.]], load_state.a)
    loader.restore(first_save_path).run_restore_ops()
    self.assertAllEqual(numpy.ones([2, 2]), load_state.a)
    loader.restore(second_save_path).run_restore_ops()
    self.assertAllEqual([[1., 1.], [1., 2.]], load_state.a)
    self.assertAllEqual(numpy.zeros([2, 2]), load_state.b)
    self.assertEqual(4, load_state.c)
Example #30
    def test_valid(self):
        prop = bcpp.Int()

        assert prop.is_valid(None)

        assert prop.is_valid(0)
        assert prop.is_valid(1)

        assert prop.is_valid(np.int8(0))
        assert prop.is_valid(np.int8(1))
        assert prop.is_valid(np.int16(0))
        assert prop.is_valid(np.int16(1))
        assert prop.is_valid(np.int32(0))
        assert prop.is_valid(np.int32(1))
        assert prop.is_valid(np.int64(0))
        assert prop.is_valid(np.int64(1))
        assert prop.is_valid(np.uint8(0))
        assert prop.is_valid(np.uint8(1))
        assert prop.is_valid(np.uint16(0))
        assert prop.is_valid(np.uint16(1))
        assert prop.is_valid(np.uint32(0))
        assert prop.is_valid(np.uint32(1))
        assert prop.is_valid(np.uint64(0))
        assert prop.is_valid(np.uint64(1))

        # TODO (bev) should fail
        assert prop.is_valid(False)
        assert prop.is_valid(True)
Example #31
async def delete_hist_features_handler(item: Item_image_id):
    delete_descriptor_by_id(item.image_id)
    index.remove_ids(np.int64([item.image_id]))
    return Response(status_code=status.HTTP_200_OK)
Example #32
def unionfind_cluster_editing(filename, output_path, missing_weight, n, x):

    """
    This is a cluster editing algorithm based on a semi-streaming approach, using union-find to analyze graph structures.
    The input file should contain edges in the format
    <np.int64: node 1> <np.int64: node 2> <np.float64: edge weight>\n
    Parameter missing_weight sets the weight for edges that are not contained in the file (for unweighted data: -1).
    Parameter n gives the number of objects (nodes).
    Parameter x is the number of generated solutions (which form the basis of the merged solution). It mainly influences running time; with limited memory it should not be chosen too high. 300-1k solutions are recommended, the more the better.
    """
    merge_filter = 0.1
    repair_filter = 0.9
    union_threshold = 0.05
    big_border = 0.3

    graph_file = open(filename, mode="r")


### Preprocessing ###
# Compute the degree of each node (one scan over all edges)
    node_dgr = np.zeros(n, dtype=np.int64)

    for line in graph_file:
        # Skip comment lines
        if line[0] == "#":
            continue
        splitted = line.split()
        nodes = np.array(splitted[:-1], dtype=np.int64)
        weight = np.float64(splitted[2])

        # If the edge 'exists' according to the threshold:
        if weight > 0:
            node_dgr[nodes[0]] += 1
            node_dgr[nodes[1]] += 1

# Sequentially for all solutions (all union-find structures at once, or at least as many as fit):
# Size of one solution: array with n entries of 64 bits each
### Generate Solutions ###
    parents = np.full((x,n), np.arange(n, dtype=np.int64))
    sizes = np.ones((x,n), dtype=np.int64)
    # Read in the model parameters:
    parameters_b = load_model_flexible_v2('params_below_100.csv')
    parameters_a = load_model_flexible_v2('params_above_100.csv')
    #cluster_count = np.full(x, n, dtype=np.int64)
    # Set all parameters for the models:
    cluster_model = np.full(x,17)
    def generate_solutions(first, c_opt):
        if first:
            k = int(x/37)
            j = 0
            c = 0

            for i in range(0,x):
                cluster_model[i] = c
                j += 1
                if j == k and c < 36:
                    c += 1
                    j = 0
        if not first:
            # Overwrite solutions with a non-optimal parameter so new ones can be generated afterwards
            for i in range(0,x):
                if cluster_model[i] != c_opt:
                    parents[i] = np.arange(n, dtype=np.int64)
                    sizes[i] = np.ones(n, dtype = np.int64)

    # 2nd scan over all edges: sample each edge into the union-find structures
        graph_file = open(filename, mode="r")

        for line in graph_file:
            # Skip comment lines
            if line[0] == "#":
                continue
            splitted = line.split()
            nodes = np.array(splitted[:-1], dtype=np.int64)
            weight = np.float64(splitted[2])

            guess_n = (node_dgr[nodes[0]] + node_dgr[nodes[1]]) / 2

            decision_values = rand.rand(x)
            for i in range(0, x):
                if not first:
                    if cluster_model[i] == c_opt:
                        # In the second pass, do not change the solutions that are already good!
                        continue
            # Determine the sampling rate
                sampling_rate = model_flexible_v2(parameters_b, parameters_a, guess_n, cluster_model[i])
                # If the edge was sampled...
                if decision_values[i] < sampling_rate:
                    # ...insert the edge into the union-find structure
                    union(nodes[0], nodes[1], parents[i], sizes[i])

    generate_solutions(True, 0)



### Solution Assessment ###
# Post-process all solutions: flat structure (= nodes in the same cluster have the same entry in the array)
# and compute the number of required edges per cluster (n_c * (n_c-1) / 2) for each union-find structure

    def calculate_costs(solutions_parents, x, merged):
        if merged:
            inner_sizes = merged_sizes
        else:
            inner_sizes = sizes
        solution_costs = np.zeros(x, dtype=np.float64)
        vertex_costs = np.zeros((x,n), dtype=np.float64)
        c_edge_counter = np.zeros((x,n), dtype=np.int64)

        for i in range(x):
            for j in range(n):
                root = flattening_find(j,solutions_parents[i])
                n_c = inner_sizes[i, root]
                c_edge_counter[i, j] = n_c - 1

        # 3rd scan over all edges: cost computation for all solutions (total costs and per-cluster costs)
        graph_file = open(filename, mode="r")

        for line in graph_file:
            # Skip comment lines
            if line[0] == "#":
                continue
            splitted = line.split()
            nodes = np.array(splitted[:-1], dtype=np.int64)
            weight = np.float64(splitted[2])

            for i in range(0,x):
                if not merged:
                    root1 = find(nodes[0],solutions_parents[i])
                    root2 = find(nodes[1],solutions_parents[i])
                else:
                    root1 = solutions_parents[i, nodes[0]]
                    root2 = solutions_parents[i, nodes[1]]
                # Edge between two clusters
                if root1 != root2:
                    # with positive weight (superfluous edge)
                    if weight > 0:
                        vertex_costs[i, nodes[0]] += weight / 2
                        vertex_costs[i, nodes[1]] += weight / 2
                        solution_costs[i] += weight
                # Edge within a cluster
                else:
                    # with negative weight (missing edge)
                    if weight < 0:
                        vertex_costs[i, nodes[0]] -= weight / 2
                        vertex_costs[i, nodes[1]] -= weight / 2
                        solution_costs[i] -= weight
                    c_edge_counter[i, nodes[0]] -= 1
                    c_edge_counter[i, nodes[1]] -= 1
                    #print("missing edges for now: ", c_edge_counter[i][root1])

        for i in range(0,x):
            # iterate over the clusters (representatives, keys):
            for j in range(n):
                missing_edges = c_edge_counter[i, j]
                if missing_edges > 0:
                    # Add the cost of completely missing edges to the solution
                    vertex_costs[i, j] += missing_edges * (-missing_weight) * 0.5
                    solution_costs[i] += missing_edges * (-missing_weight) * 0.5 # Two nodes within a cluster miss the same edge, hence *0.5 when summing over nodes
        return (vertex_costs, solution_costs)
    costs = calculate_costs(parents, x, False)
    vertex_costs = costs[0]
    solution_costs = costs[1]

### Solution Merge ###

# Use the cost assessments to sensibly merge/repair solutions

    mean_costs_c = np.zeros(37, dtype=np.float64)
    c_count = np.zeros(37, dtype= np.int64)
    # Sum the costs of solutions with the same parameter
    for i in range(x):
        c = cluster_model[i]
        mean_costs_c[c] = mean_costs_c[c] + solution_costs[i]
        c_count[c] += 1
    # Divide by the number of solutions with that parameter
    for i in range(37):
        mean_costs_c[i] = mean_costs_c[i]/c_count[i]
    # c_opt is the parameter whose solutions have the lowest average cost
    c_opt = np.argsort(mean_costs_c)[0]
    print_result(output_path, "c_opt_v4.txt", c_opt)
    generate_solutions(False, c_opt)
    costs = calculate_costs(parents, x, False)
    vertex_costs = costs[0]
    solution_costs = costs[1]
    # Optimization: filter the "best" solutions to create a more solid basis for the merge.
    top_percent = range(np.int64(x*merge_filter))
    mid_percent = range(np.int64(x*repair_filter))
    cost_sorted_i = np.argsort(solution_costs)
    good_costs_i = cost_sorted_i[top_percent]
    mid_costs_i = cost_sorted_i[mid_percent]
    # Artifact from when n_merges > 1; otherwise incompatible with calculate_costs.
    merged_solutions = np.full((1,n), np.arange(n, dtype=np.int64))
    final_solutions = np.full((1,n), np.arange(n, dtype=np.int64))
    merged_sizes = np.full((1,n), np.zeros(n, dtype=np.int64))
    merged = merged_solution_scan(solution_costs[good_costs_i], vertex_costs[good_costs_i], parents[good_costs_i], sizes[good_costs_i], missing_weight, n, filename, output_path, union_threshold)
    merged_save = np.copy(merged[0])
    merged_solutions[0] = merged[0]
    merged_sizes[0] = merged[1]
    merged_c = calculate_costs(merged_solutions, 1, True)
    merged_costs = merged_c[1]
    merged_vc = merged_c[0]
    #merged_to_file(merged_solutions, merged_costs, filename, missing_weight, n, len(good_costs_i), 1)
    print_result(output_path, "merged_cost_v4.txt", merged_costs[0])
    print_zhk(output_path, merged_solutions[0], merged_sizes[0])
    # Flatten the solution in case a tree structure occurs
    for j in range(0,n):
        flattening_find(j, merged_solutions[0])
    #rep = repair_merged(merged_solutions[i], merged_sizes[i], solution_costs, vertex_costs, parents, sizes, n, node_dgr)
    rep = repair_merged_v4_scan(merged_solutions[0], merged_sizes[0], solution_costs[mid_costs_i], vertex_costs[mid_costs_i], parents[mid_costs_i], sizes[mid_costs_i], n, node_dgr, big_border, filename)
    merged_solutions[0] = rep[0]
    merged_sizes[0] = rep[1]
    # To be safe, flatten once more before computing the solution costs:
    for j in range(0,n):
        flattening_find(j, merged_solutions[0])
    rep_c = calculate_costs(merged_solutions, 1, True)
    merged_costs = rep_c[1]
    rep_vc = rep_c[0]
    print_result(output_path, "rep_v4.txt", merged_costs[0])
    mr_3 = undo_merge_repair(merged_save, rep[0], merged_vc[0], rep_vc[0])
    final_solutions[0] = mr_3[0]
    merged_sizes[0] = mr_3[1]
    final_costs = calculate_costs(final_solutions, 1, True)
    print_result(output_path, "final_v4.txt", final_costs[1][0])
    # Since merge-repair is based on fewer solutions, report only those:
    x2 = len(mid_costs_i)
    #merged_to_file(merged_solutions, merged_costs, filename, missing_weight, n, x2, 1)
    merged_to_file(final_solutions, final_costs[1], filename, missing_weight, n, x2, 1, output_path)
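
A minimal invocation sketch under assumed conventions: the helpers used above (union, find, flattening_find, load_model_flexible_v2, model_flexible_v2, merged_solution_scan, repair_merged_v4_scan, undo_merge_repair, print_result, print_zhk, merged_to_file) come from the same project and are not defined here; "graph.txt" is a hypothetical edge list with one "<node1> <node2> <weight>" line per edge.

unionfind_cluster_editing("graph.txt", "results/", missing_weight=-1.0, n=1000, x=300)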
Example #33
 def test_int(self):
     self.assertRoundtrip(numpy.int32(42))
     self.assertRoundtrip(numpy.int64(42))
Example #34
 def numpy_things(self):
     return (numpy.int32(10), numpy.int64(20), numpy.array([42,]))
Example #35
 def testIntFromNPInt64(self):
     import numpy as np
     self.assertEqual(self.Test.callInt(np.int64(123)), 123)
     self.assertEqual(self.Test.callInt(np.uint64(123)), 123)
Example #36
        print "running time (in millisecond): " + str(
            (datetime.now() - timeStart).microseconds / 1000)
        print vc

        print "---------------------------------------------------"
        print "Testing function sum  numpy array int32 "
        timeStart = datetime.now()
        vc = xx.run("sum", np.array([100000, 200000, 300000]))
        print "running time (in millisecond): " + str(
            (datetime.now() - timeStart).microseconds / 1000)
        print vc

        print "---------------------------------------------------"
        print "Testing function sum  numpy array int64 "
        timeStart = datetime.now()
        vc = xx.run("sum", np.int64([1e15, 2e15, 3e15]))
        print "running time (in millisecond): " + str(
            (datetime.now() - timeStart).microseconds / 1000)
        print vc

        print "---------------------------------------------------"
        print "Testing function sum  numpy array float64 "
        timeStart = datetime.now()
        vc = xx.run("sum", np.array([100000.0, 200000.0, 300000.0]))
        print "running time (in millisecond): " + str(
            (datetime.now() - timeStart).microseconds / 1000)
        print vc

        print "---------------------------------------------------"
        print "Testing function sum  numpy array bool "
        timeStart = datetime.now()
Example #37
    def __init__(self,
                 max_tokens,
                 num_oov_indices,
                 mask_token,
                 oov_token,
                 vocabulary=None,
                 invert=False,
                 output_mode=INT,
                 sparse=False,
                 pad_to_max_tokens=False,
                 **kwargs):
        # If max_tokens is set, the value must be greater than 1 - otherwise we
        # are creating a 0-element vocab, which doesn't make sense.
        if max_tokens is not None and max_tokens <= 1:
            raise ValueError("If set, `max_tokens` must be greater than 1. "
                             "You passed {}".format(max_tokens))

        if num_oov_indices < 0:
            raise ValueError(
                "`num_oov_indices` must be greater than or equal to 0. "
                "You passed {}".format(num_oov_indices))

        # Support deprecated names for output_modes.
        if output_mode == "binary":
            output_mode = MULTI_HOT
        if output_mode == "tf-idf":
            output_mode = TF_IDF
        # 'output_mode' must be one of (INT, MULTI_HOT, COUNT, TF_IDF)
        layer_utils.validate_string_arg(output_mode,
                                        allowable_strings=(INT, MULTI_HOT,
                                                           COUNT, TF_IDF),
                                        layer_name=self.__class__.__name__,
                                        arg_name="output_mode")

        if invert and output_mode != INT:
            raise ValueError(
                "`output_mode` must be {} when `invert` is true. You "
                "passed {}".format(INT, output_mode))

        self.invert = invert
        self.max_tokens = max_tokens
        self.num_oov_indices = num_oov_indices
        self.oov_token = oov_token
        self.output_mode = output_mode
        self.sparse = sparse
        self.pad_to_max_tokens = pad_to_max_tokens
        self._called = False

        # A note on vocab_size: we need to always keep a non-Tensor representation
        # of vocab_size around to use in graph building. Because we might be
        # in a tf.function, we can't rely on evaluating the actual tables to
        # find the value either.
        self._vocab_size = None
        # We need to keep track our current vocab size outside of our layer weights
        # to support a static output shape when `output_mode != INT`. The bincount
        # ops do not set shape on their outputs, which means we have to set it
        # ourselves. We persist the current vocab size as a hidden part of the
        # config when serializing our model.
        if "vocabulary_size" in kwargs:
            self._vocab_size = kwargs["vocabulary_size"]
            del kwargs["vocabulary_size"]

        restore_from_static_table = kwargs.pop("has_static_table", False)

        # Make sure the mask token is truly of the dtype we want. We can ignore
        # strings here, because they have only one dtype.
        if mask_token is not None:
            dtype = kwargs["dtype"]
            if dtype == dtypes.int32:
                mask_token = np.int32(mask_token)
            elif dtype == dtypes.int64:
                mask_token = np.int64(mask_token)
        self.mask_token = mask_token

        if max_tokens is not None:
            available_vocab_size = max_tokens - self._token_start_index()
        else:
            available_vocab_size = None

        super(IndexLookup, self).__init__(combiner=_IndexLookupCombiner(
            vocab_size=available_vocab_size,
            mask_value=mask_token,
            oov_value=oov_token,
            compute_idf=(output_mode == TF_IDF)),
                                          **kwargs)

        # We need to save the key dtype so that we know if we're expecting int64
        # keys. If we are, we will cast int32 inputs to int64 as well.
        if invert:
            self._key_dtype = dtypes.int64
            self._value_dtype = self.dtype
            self._mask_key = 0
            self._mask_value = mask_token
            key_index = lookup_ops.TextFileIndex.LINE_NUMBER
            value_index = lookup_ops.TextFileIndex.WHOLE_LINE
            default_value = self.oov_token
            oov_indices = None
        else:
            self._key_dtype = self.dtype
            self._value_dtype = dtypes.int64
            self._mask_key = mask_token
            key_index = lookup_ops.TextFileIndex.WHOLE_LINE
            value_index = lookup_ops.TextFileIndex.LINE_NUMBER
            # Masks should map to 0 for int output and be dropped otherwise. Max ints
            # will be dropped from the bincount op.
            self._mask_value = 0 if self.output_mode == INT else dtypes.int64.max
            oov_start = self._oov_start_index()
            token_start = self._token_start_index()
            if self.num_oov_indices == 0:
                # If there are no OOV indices, we map OOV tokens to -1 for int output
                # and drop them from bagged output. Max ints will be dropped from the
                # bincount op.
                default_value = -1 if self.output_mode == INT else dtypes.int64.max
                oov_indices = None
            elif self.num_oov_indices == 1:
                # If there is only one OOV index, we can set that index as the default
                # value of the index_lookup table.
                default_value = oov_start
                oov_indices = None
            else:
                # If we have multiple OOV values, we need to do a further hashing step;
                # to make this easier, we set the OOV value to -1. (This lets us do a
                # vectorized add and cast to boolean to determine locations where we
                # need to do extra hashing.)
                default_value = -1
                oov_indices = list(range(oov_start, token_start))

        self._static_vocabulary_path = None
        has_vocab_path = (vocabulary is not None
                          and isinstance(vocabulary, str))
        if has_vocab_path or restore_from_static_table:
            self._has_static_table = True
            if vocabulary is None:
                # If we're restoring a layer that was saved with a static table
                # initializer, we create a fake initializer object to let the code
                # progress. The savedmodel restoration code will handle restoring
                # the actual data.
                initializer = _NullInitializer(self._key_dtype,
                                               self._value_dtype)
            else:
                if not gfile.Exists(vocabulary):
                    raise ValueError("Vocabulary file %s does not exist." %
                                     (vocabulary, ))
                self._static_vocabulary_path = vocabulary
                num_tokens = table_utils.num_tokens_in_file(vocabulary)
                self._vocab_size = self._token_start_index() + num_tokens

                initializer = lookup_ops.TextFileInitializer(
                    filename=vocabulary,
                    key_dtype=self._key_dtype,
                    key_index=key_index,
                    value_dtype=self._value_dtype,
                    value_index=value_index,
                    value_index_offset=self._token_start_index())

            self._table = lookup_ops.StaticHashTable(
                initializer, default_value=default_value)
            self._table_handler = table_utils.TableHandler(
                table=self._table,
                mask_token=self._mask_key,
                mask_value=self._mask_value,
                oov_tokens=oov_indices)

            tracked_table = self._add_trackable(self._table, trainable=False)

        else:
            self._has_static_table = False
            self._table = lookup_ops.MutableHashTable(
                key_dtype=self._key_dtype,
                value_dtype=self._value_dtype,
                default_value=default_value,
                name=(self._name + "_index_table"))
            self._table_handler = table_utils.TableHandler(
                table=self._table, oov_tokens=oov_indices)
            if vocabulary is not None:
                self.set_vocabulary(vocabulary)
            tracked_table = self._add_trackable(self._table, trainable=False)

        if self.output_mode == TF_IDF:
            # The TF-IDF weight may have a (None,) tensorshape. This creates
            # a 1D variable with arbitrary shape, which we can assign any weight to
            # so long as it has 1 dimension. In order to properly initialize this
            # weight in Keras, we need to provide a custom callable initializer which
            # does not depend on the shape of the weight (as all other initializers
            # do) since the weight is not known. Hence the lambda shape, dtype: [0].
            if not self.pad_to_max_tokens or max_tokens is None:
                initializer = lambda shape, dtype: [0]
            else:
                initializer = init_ops.zeros_initializer

            # We are adding these here instead of in build() since they do not depend
            # on the input shape at all.
            idf_shape = (max_tokens, ) if self.pad_to_max_tokens else (None, )
            self.tf_idf_weights = self._add_state_variable(
                name="idf",
                shape=tensor_shape.TensorShape(idf_shape),
                dtype=backend.floatx(),
                initializer=initializer)

        # This is a workaround for summary() on this layer. Because the table is
        # not mutable during training, the effective number of parameters (and so
        # the weight shape) is 0; we add this as an attr so that the parameter
        # counting code in the Model object doesn't throw an attribute error.
        tracked_table.shape = tensor_shape.TensorShape((0, ))
Example #38
def unique_id():
    ''' unique ID based on the current time, in microseconds '''
    from time import time
    return np.int64(time()*1e6)
Example #39
def compute_reho(in_file, mask_file, cluster_size):
    """
    Computes the ReHo Map, by computing tied ranks of the timepoints,
    followed by computing Kendall's coefficient concordance(KCC) of a
    timeseries with its neighbours

    Parameters
    ----------

    in_file : nifti file
        4D EPI File 

    mask_file : nifti file
        Mask of the EPI File(Only Compute ReHo of voxels in the mask)

    cluster_size : integer
        for a brain voxel the number of neighbouring brain voxels to use for
        KCC.


    Returns
    -------

    out_file : nifti file
        ReHo map of the input EPI image

    """

    out_file = None

    res_fname = (in_file)
    res_mask_fname = (mask_file)
    CUTNUMBER = 10

    if not (cluster_size == 27 or cluster_size == 19 or cluster_size == 7):
        cluster_size = 27

    nvoxel = cluster_size

    res_img = nb.load(res_fname)
    res_mask_img = nb.load(res_mask_fname)

    res_data = res_img.get_data()
    res_mask_data = res_mask_img.get_data()

    print(res_data.shape)
    (n_x, n_y, n_z, n_t) = res_data.shape

    # "flatten" each volume of the timeseries into one big array instead of
    # x,y,z - produces (timepoints, N voxels) shaped data array
    res_data = np.reshape(res_data, (n_x * n_y * n_z, n_t), order='F').T

    # create a blank array of zeroes of size n_voxels, one for each time point
    Ranks_res_data = np.tile((np.zeros((1, (res_data.shape)[1]))),
                             [(res_data.shape)[0], 1])

    # divide the number of total voxels by the cutnumber (set to 10)
    # ex. end up with a number in the thousands if there are tens of thousands
    # of voxels
    segment_length = np.ceil(float((res_data.shape)[1]) / float(CUTNUMBER))

    for icut in range(0, CUTNUMBER):

        segment = None

        # create a Numpy array of evenly spaced values from the segment
        # starting point up until the segment_length integer
        if not (icut == (CUTNUMBER - 1)):
            segment = np.array(
                np.arange(icut * segment_length, (icut + 1) * segment_length))
        else:
            segment = np.array(
                np.arange(icut * segment_length, (res_data.shape[1])))

        segment = np.int64(segment[np.newaxis])

        # res_data_piece is a chunk of the original timeseries in_file, but
        # aligned with the current segment index spacing
        res_data_piece = res_data[:, segment[0]]
        nvoxels_piece = res_data_piece.shape[1]

        # run a merge sort across the time axis, re-ordering the flattened
        # volume voxel arrays
        res_data_sorted = np.sort(res_data_piece, 0, kind='mergesort')
        sort_index = np.argsort(res_data_piece, axis=0, kind='mergesort')

        # difference between consecutive sorted timepoints for each voxel
        db = np.diff(res_data_sorted, 1, 0)

        # flag zero differences (ties between adjacent sorted values) as True
        db = db == 0

        # return an n_voxel (n voxels within the current segment) sized array
        # of values, each value being the sum total of TRUE values in "db"
        sumdb = np.sum(db, 0)

        temp_array = np.array(np.arange(0, n_t))
        temp_array = temp_array[:, np.newaxis]

        sorted_ranks = np.tile(temp_array, [1, nvoxels_piece])

        if np.any(sumdb[:]):

            tie_adjust_index = np.flatnonzero(sumdb)

            for i in range(0, len(tie_adjust_index)):

                ranks = sorted_ranks[:, tie_adjust_index[i]]

                ties = db[:, tie_adjust_index[i]]

                tieloc = np.append(np.flatnonzero(ties), n_t + 2)
                maxties = len(tieloc)
                tiecount = 0

                while (tiecount < maxties - 1):
                    tiestart = tieloc[tiecount]
                    ntied = 2
                    while (tieloc[tiecount + 1] == (tieloc[tiecount] + 1)):
                        tiecount += 1
                        ntied += 1

                    ranks[tiestart:tiestart + ntied] = np.ceil(
                        np.float32(np.sum(ranks[tiestart:tiestart + ntied])) /
                        np.float32(ntied))
                    tiecount += 1

                sorted_ranks[:, tie_adjust_index[i]] = ranks

        del db, sumdb
        sort_index_base = np.tile(
            np.multiply(np.arange(0, nvoxels_piece), n_t), [n_t, 1])
        sort_index += sort_index_base
        del sort_index_base

        ranks_piece = np.zeros((n_t, nvoxels_piece))

        ranks_piece = ranks_piece.flatten(order='F')
        sort_index = sort_index.flatten(order='F')
        sorted_ranks = sorted_ranks.flatten(order='F')

        ranks_piece[sort_index] = np.array(sorted_ranks)

        ranks_piece = np.reshape(ranks_piece, (n_t, nvoxels_piece), order='F')

        del sort_index, sorted_ranks

        Ranks_res_data[:, segment[0]] = ranks_piece

        sys.stdout.write('.')

    Ranks_res_data = np.reshape(Ranks_res_data, (n_t, n_x, n_y, n_z),
                                order='F')

    K = np.zeros((n_x, n_y, n_z))

    mask_cluster = np.ones((3, 3, 3))

    if nvoxel == 19:
        mask_cluster[0, 0, 0] = 0
        mask_cluster[0, 2, 0] = 0
        mask_cluster[2, 0, 0] = 0
        mask_cluster[2, 2, 0] = 0
        mask_cluster[0, 0, 2] = 0
        mask_cluster[0, 2, 2] = 0
        mask_cluster[2, 0, 2] = 0
        mask_cluster[2, 2, 2] = 0

    elif nvoxel == 7:

        mask_cluster[0, 0, 0] = 0
        mask_cluster[0, 1, 0] = 0
        mask_cluster[0, 2, 0] = 0
        mask_cluster[0, 0, 1] = 0
        mask_cluster[0, 2, 1] = 0
        mask_cluster[0, 0, 2] = 0
        mask_cluster[0, 1, 2] = 0
        mask_cluster[0, 2, 2] = 0
        mask_cluster[1, 0, 0] = 0
        mask_cluster[1, 2, 0] = 0
        mask_cluster[1, 0, 2] = 0
        mask_cluster[1, 2, 2] = 0
        mask_cluster[2, 0, 0] = 0
        mask_cluster[2, 1, 0] = 0
        mask_cluster[2, 2, 0] = 0
        mask_cluster[2, 0, 1] = 0
        mask_cluster[2, 2, 1] = 0
        mask_cluster[2, 0, 2] = 0
        mask_cluster[2, 1, 2] = 0
        mask_cluster[2, 2, 2] = 0

    for i in range(1, n_x - 1):
        for j in range(1, n_y - 1):
            for k in range(1, n_z - 1):

                block = Ranks_res_data[:, i - 1:i + 2, j - 1:j + 2,
                                       k - 1:k + 2]
                mask_block = res_mask_data[i - 1:i + 2, j - 1:j + 2,
                                           k - 1:k + 2]

                if not (int(mask_block[1, 1, 1]) == 0):

                    if nvoxel == 19 or nvoxel == 7:
                        mask_block = np.multiply(mask_block, mask_cluster)

                    R_block = np.reshape(block, (block.shape[0], 27),
                                         order='F')
                    mask_R_block = R_block[:,
                                           np.argwhere(
                                               np.reshape(mask_block, (1, 27),
                                                          order='F') > 0)[:,
                                                                          1]]

                    K[i, j, k] = f_kendall(mask_R_block)

    img = nb.Nifti1Image(K,
                         header=res_img.get_header(),
                         affine=res_img.get_affine())
    reho_file = os.path.join(os.getcwd(), 'ReHo.nii.gz')
    img.to_filename(reho_file)
    out_file = reho_file

    return out_file
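# compute_reho() above relies on a helper f_kendall() that is not included in
# this snippet. The sketch below is one common formulation of Kendall's
# coefficient of concordance (ignoring the tie-correction term), assuming the
# input is the (timepoints x voxels) matrix of ranks built above; it is an
# illustration, not necessarily the exact helper the original package ships.
import numpy as np

def f_kendall(timeseries_matrix):
    n, m = timeseries_matrix.shape               # n timepoints (items), m voxels (raters)
    row_sums = np.sum(timeseries_matrix, axis=1) # sum of ranks per timepoint
    s = np.sum((row_sums - np.mean(row_sums)) ** 2)
    return 12.0 * s / (m ** 2 * (n ** 3 - n))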
Example #40
0
async def calculate_phash_features_handler(image: bytes = File(...),
                                           image_id: str = Form(...)):
    features = get_phash(image)
    add_descriptor(int(image_id), adapt_array(features))
    index.add_with_ids(np.array([features]), np.int64([image_id]))
    return Response(status_code=status.HTTP_200_OK)
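# The handler above assumes a module-level faiss `index` that supports
# add_with_ids(); get_phash() and add_descriptor() are also not shown. A
# minimal, hypothetical setup for such an index (a flat L2 index wrapped so it
# accepts explicit int64 ids) might look like this:
import numpy as np
import faiss

d = 64                                               # assumed pHash feature dimensionality
index = faiss.IndexIDMap(faiss.IndexFlatL2(d))

features = np.random.rand(1, d).astype('float32')    # stand-in for get_phash(image)
index.add_with_ids(features, np.int64([123]))
distances, ids = index.search(features, 1)           # nearest neighbour of the stored vector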
Example #41
0
def relu_derivative(X):
    ''' derivative (subgradient) of ReLU: 1 where X > 0, else 0 '''
    return np.int64(X > 0)
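# Hypothetical use of the 0/1 mask above in a manual backward pass: the ReLU
# derivative simply gates the upstream gradient element-wise.
import numpy as np

Z = np.array([-1.5, 0.0, 2.0])
upstream_grad = np.array([0.3, 0.3, 0.3])
grad_Z = upstream_grad * np.int64(Z > 0)      # -> [0.0, 0.0, 0.3]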
Example #42
0
    np.timedelta64("NaT"),
] + [np.datetime64("NaT", unit) for unit in m8_units] +
           [np.timedelta64("NaT", unit) for unit in m8_units])

inf_vals = [
    float("inf"),
    float("-inf"),
    complex("inf"),
    complex("-inf"),
    np.inf,
    np.NINF,
]

int_na_vals = [
    # Values that match iNaT, which we treat as null in specific cases
    np.int64(NaT.value),
    int(NaT.value),
]

sometimes_na_vals = [Decimal("NaN")]

never_na_vals = [
    # float/complex values that when viewed as int64 match iNaT
    -0.0,
    np.float64("-0.0"),
    -0j,
    np.complex64(-0j),
]


class TestLibMissing:
Example #43
0
def _apply_spans_concat(spans, src_index, src_values, dest_index, dest_values,
                        max_index_i, max_value_i, s_start, separator, delimiter):
    if s_start == 0:
        index_i = np.uint32(1)
        index_v = np.int64(0)
        dest_index[0] = spans[0]
    else:
        index_i = np.uint32(0)
        index_v = np.int64(0)

    s_end = len(spans)-1
    for s in range(s_start, s_end):
        cur = spans[s]
        next = spans[s+1]
        cur_src_i = src_index[cur]
        next_src_i = src_index[next]

        dest_index[index_i] = next_src_i
        index_i += 1

        if next_src_i - cur_src_i > 0:
            if next - cur == 1:
                # only one entry to be copied, so commas not required
                next_index_v = next_src_i - cur_src_i + np.int64(index_v)
                dest_values[index_v:next_index_v] = src_values[cur_src_i:next_src_i]
                index_v = next_index_v
            else:
                # check to see how many non-zero-length entries there are; >1 means we must
                # separate them by commas
                non_empties = 0
                for e in range(cur, next):
                    if src_index[e] < src_index[e+1]:
                        non_empties += 1
                if non_empties == 0:
                    raise NotImplementedError()
                elif non_empties == 1:
                    # only one non-empty entry to be copied, so commas not required
                    next_index_v = next_src_i - cur_src_i + np.int64(index_v)
                    dest_values[index_v:next_index_v] = src_values[cur_src_i:next_src_i]
                    index_v = next_index_v
                else:
                    # the outer conditional already determines that we have a non-empty entry
                    # so there must be multiple non-empty entries and commas are required
                    for e in range(cur, next):
                        src_start = src_index[e]
                        src_end = src_index[e+1]
                        comma = False
                        quotes = False
                        for i_c in range(src_start, src_end):
                            if src_values[i_c] == separator:
                                comma = True
                            elif src_values[i_c] == delimiter:
                                quotes = True

                        d_index = np.int64(0)
                        if comma or quotes:
                            # write relative to index_v so this entry lands after
                            # the data already copied into dest_values
                            dest_values[index_v + d_index] = delimiter
                            d_index += 1
                            for i_c in range(src_start, src_end):
                                if src_values[i_c] == delimiter:
                                    # escape embedded delimiters by doubling them
                                    dest_values[index_v + d_index] = src_values[i_c]
                                    d_index += 1
                                dest_values[index_v + d_index] = src_values[i_c]
                                d_index += 1
                            dest_values[index_v + d_index] = delimiter
                            d_index += 1
                        else:
                            s_len = np.int64(src_end - src_start)
                            dest_values[index_v:index_v + s_len] = src_values[src_start:src_end]
                            d_index += s_len
                        index_v += np.int64(d_index)

        # if either the index or values are past the threshold, write them
        if index_i >= max_index_i or index_v >= max_value_i:
            break
    return s+1, index_i, index_v
Example #44
0
      loc[li] = 0;

      float x = 0;
      for (int j = 0; j < 100; ++j)
      {
        #pragma unroll
        for (int k = 0; k < 10; ++k)
          x += loc[ARGUMENT * li];
      }
      loc[li] = x;
    }

    """.replace("myarg", str(arg))).build()

from time import time

ntrips = 10

queue.finish()
t1 = time()

for i in xrange(ntrips):
    prg.fill_vec(queue, (n,), (128,), a.data, np.int64(n))
queue.finish()
t2 = time()
print "arg %d elapsed: %g s" % (arg, (t2-t1)/ntrips)

# vim: filetype=pyopencl

Example #45
0
def _apply_spans_count(spans, dest_array):
    for i in range(len(spans)-1):
        dest_array[i] = np.int64(spans[i+1] - spans[i])
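# A small illustration of the spans convention shared by the _apply_spans_*
# helpers in these snippets: spans[i]:spans[i+1] delimits group i, so the
# count of group i is simply the difference of consecutive span boundaries.
import numpy as np

spans = np.array([0, 3, 5, 9], dtype=np.int64)        # three groups of size 3, 2 and 4
counts = np.zeros(len(spans) - 1, dtype=np.int64)
_apply_spans_count(spans, counts)                     # counts -> [3, 2, 4]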
Example #46
0
def grace_spatial_error(base_dir, PROC, DREL, DSET, LMAX, RAD,
    START=None,
    END=None,
    MISSING=None,
    LMIN=None,
    MMAX=None,
    LOVE_NUMBERS=0,
    REFERENCE=None,
    DESTRIPE=False,
    UNITS=None,
    DDEG=None,
    INTERVAL=None,
    BOUNDS=None,
    ATM=False,
    POLE_TIDE=False,
    DEG1=None,
    DEG1_FILE=None,
    MODEL_DEG1=False,
    SLR_C20=None,
    SLR_21=None,
    SLR_22=None,
    SLR_C30=None,
    SLR_C50=None,
    DATAFORM=None,
    MEAN_FILE=None,
    MEANFORM=None,
    OUTPUT_DIRECTORY=None,
    FILE_PREFIX=None,
    VERBOSE=False,
    MODE=0o775):

    #-- recursively create output directory if not currently existing
    if not os.access(OUTPUT_DIRECTORY, os.F_OK):
        os.makedirs(OUTPUT_DIRECTORY, mode=MODE, exist_ok=True)

    #-- list object of output files for file logs (full path)
    output_files = []

    #-- file information
    suffix = dict(ascii='txt', netCDF4='nc', HDF5='H5')

    #-- read arrays of kl, hl, and ll Love Numbers
    hl,kl,ll = load_love_numbers(LMAX, LOVE_NUMBERS=LOVE_NUMBERS,
        REFERENCE=REFERENCE)

    #-- Calculating the Gaussian smoothing for radius RAD
    if (RAD != 0):
        wt = 2.0*np.pi*gauss_weights(RAD,LMAX)
        gw_str = '_r{0:0.0f}km'.format(RAD)
    else:
        #-- no smoothing: set all weights to 1
        wt = np.ones((LMAX+1))
        gw_str = ''

    #-- flag for spherical harmonic order
    MMAX = np.copy(LMAX) if not MMAX else MMAX
    order_str = 'M{0:d}'.format(MMAX) if (MMAX != LMAX) else ''
    #-- atmospheric ECMWF "jump" flag (if ATM)
    atm_str = '_wATM' if ATM else ''

    #-- reading GRACE months for input date range
    #-- replacing low-degree harmonics with SLR values if specified
    #-- include degree 1 (geocenter) harmonics if specified
    #-- correcting for Pole-Tide and Atmospheric Jumps if specified
    Ylms = grace_input_months(base_dir, PROC, DREL, DSET, LMAX,
        START, END, MISSING, SLR_C20, DEG1, MMAX=MMAX,
        SLR_21=SLR_21, SLR_22=SLR_22, SLR_C30=SLR_C30, SLR_C50=SLR_C50,
        DEG1_FILE=DEG1_FILE, MODEL_DEG1=MODEL_DEG1, ATM=ATM,
        POLE_TIDE=POLE_TIDE)
    #-- convert to harmonics object and remove mean if specified
    GRACE_Ylms = harmonics().from_dict(Ylms)
    #-- full path to directory for specific GRACE/GRACE-FO product
    GRACE_Ylms.directory = Ylms['directory']
    #-- use a mean file for the static field to remove
    if MEAN_FILE:
        #-- read data form for input mean file (ascii, netCDF4, HDF5, gfc)
        mean_Ylms = harmonics().from_file(MEAN_FILE,format=MEANFORM,date=False)
        #-- remove the input mean
        GRACE_Ylms.subtract(mean_Ylms)
    else:
        GRACE_Ylms.mean(apply=True)
    #-- date information of GRACE/GRACE-FO coefficients
    nfiles = len(GRACE_Ylms.time)

    #-- default file prefix
    if not FILE_PREFIX:
        FILE_PREFIX = '{0}_{1}_{2}{3}_'.format(PROC,DREL,DSET,Ylms['title'])

    #-- filter GRACE/GRACE-FO coefficients
    if DESTRIPE:
        #-- destriping GRACE/GRACE-FO coefficients
        ds_str = '_FL'
        GRACE_Ylms = GRACE_Ylms.destripe()
    else:
        #-- using standard GRACE/GRACE-FO harmonics
        ds_str = ''

    #-- calculating GRACE error (Wahr et al 2006)
    #-- output GRACE error file (for both LMAX==MMAX and LMAX != MMAX cases)
    args = (PROC,DREL,DSET,LMAX,order_str,ds_str,atm_str,GRACE_Ylms.month[0],
        GRACE_Ylms.month[-1],suffix[DATAFORM])
    delta_format = '{0}_{1}_{2}_DELTA_CLM_L{3:d}{4}{5}{6}_{7:03d}-{8:03d}.{9}'
    DELTA_FILE = os.path.join(GRACE_Ylms.directory,delta_format.format(*args))
    #-- full path of the GRACE directory
    #-- if file was previously calculated, will read file
    #-- else will calculate the GRACE error
    if not os.access(DELTA_FILE, os.F_OK):
        #-- add output delta file to list object
        output_files.append(DELTA_FILE)

        #-- Delta coefficients of GRACE time series (Error components)
        delta_Ylms = harmonics(lmax=LMAX,mmax=MMAX)
        delta_Ylms.clm = np.zeros((LMAX+1,MMAX+1))
        delta_Ylms.slm = np.zeros((LMAX+1,MMAX+1))
        #-- Smoothing Half-Width (CNES is a 10-day solution)
        #-- 365/10/2 = 18.25 (next highest is 19)
        #-- All other solutions are monthly solutions (HFWTH for annual = 6)
        if ((PROC == 'CNES') and (DREL in ('RL01','RL02'))):
            HFWTH = 19
        else:
            HFWTH = 6
        #-- Equal to the noise of the smoothed time-series
        #-- for each spherical harmonic order
        for m in range(0,MMAX+1):#-- MMAX+1 to include MMAX
            #-- for each spherical harmonic degree
            for l in range(m,LMAX+1):#-- LMAX+1 to include LMAX
                #-- Delta coefficients of GRACE time series
                for cs,csharm in enumerate(['clm','slm']):
                    #-- Constrained GRACE Error (Noise of smoothed time-series)
                    #-- With Annual and Semi-Annual Terms
                    val1 = getattr(GRACE_Ylms, csharm)
                    smth = tssmooth(GRACE_Ylms.time, val1[l,m,:], HFWTH=HFWTH)
                    #-- number of smoothed points
                    nsmth = len(smth['data'])
                    #-- GRACE delta Ylms
                    #-- variance of data-(smoothed+annual+semi)
                    val2 = getattr(delta_Ylms, csharm)
                    val2[l,m] = np.sqrt(np.sum(smth['noise']**2)/nsmth)

        #-- save GRACE DELTA to file
        delta_Ylms.time = np.copy(nsmth)
        delta_Ylms.month = np.copy(nsmth)
        delta_Ylms.to_file(DELTA_FILE,format=DATAFORM)
        #-- set the permissions mode of the output harmonics file
        os.chmod(DELTA_FILE, MODE)
        #-- append delta harmonics file to output files list
        output_files.append(DELTA_FILE)
    else:
        #-- read GRACE DELTA spherical harmonics datafile
        delta_Ylms = harmonics().from_file(DELTA_FILE,format=DATAFORM)
        #-- truncate grace delta clm and slm to d/o LMAX/MMAX
        delta_Ylms = delta_Ylms.truncate(lmax=LMAX, mmax=MMAX)
        nsmth = np.int64(delta_Ylms.time)

    #-- Output spatial data object
    delta = spatial()
    #-- Output Degree Spacing
    dlon,dlat = (DDEG[0],DDEG[0]) if (len(DDEG) == 1) else (DDEG[0],DDEG[1])
    #-- Output Degree Interval
    if (INTERVAL == 1):
        #-- (-180:180,90:-90)
        nlon = np.int64((360.0/dlon)+1.0)
        nlat = np.int64((180.0/dlat)+1.0)
        delta.lon = -180 + dlon*np.arange(0,nlon)
        delta.lat = 90.0 - dlat*np.arange(0,nlat)
    elif (INTERVAL == 2):
        #-- (Degree spacing)/2
        delta.lon = np.arange(-180+dlon/2.0,180+dlon/2.0,dlon)
        delta.lat = np.arange(90.0-dlat/2.0,-90.0-dlat/2.0,-dlat)
        nlon = len(delta.lon)
        nlat = len(delta.lat)
    elif (INTERVAL == 3):
        #-- non-global grid set with BOUNDS parameter
        minlon,maxlon,minlat,maxlat = BOUNDS.copy()
        delta.lon = np.arange(minlon+dlon/2.0,maxlon+dlon/2.0,dlon)
        delta.lat = np.arange(maxlat-dlat/2.0,minlat-dlat/2.0,-dlat)
        nlon = len(delta.lon)
        nlat = len(delta.lat)

    #-- Earth Parameters
    #-- output spatial units
    unit_list = ['cmwe', 'mmGH', 'mmCU', u'\u03BCGal', 'mbar']
    unit_name = ['Equivalent Water Thickness', 'Geoid Height',
        'Elastic Crustal Uplift', 'Gravitational Undulation',
        'Equivalent Surface Pressure']
    #-- dfactor is the degree dependent coefficients
    #-- for specific spherical harmonic output units
    if (UNITS == 1):
        #-- 1: cmwe, centimeters water equivalent
        dfactor = units(lmax=LMAX).harmonic(hl,kl,ll).cmwe
    elif (UNITS == 2):
        #-- 2: mmGH, millimeters geoid height
        dfactor = units(lmax=LMAX).harmonic(hl,kl,ll).mmGH
    elif (UNITS == 3):
        #-- 3: mmCU, millimeters elastic crustal deformation
        dfactor = units(lmax=LMAX).harmonic(hl,kl,ll).mmCU
    elif (UNITS == 4):
        #-- 4: micGal, microGal gravity perturbations
        dfactor = units(lmax=LMAX).harmonic(hl,kl,ll).microGal
    elif (UNITS == 5):
        #-- 5: mbar, millibar equivalent surface pressure
        dfactor = units(lmax=LMAX).harmonic(hl,kl,ll).mbar

    #-- Computing plms for converting to spatial domain
    phi = delta.lon[np.newaxis,:]*np.pi/180.0
    theta = (90.0-delta.lat)*np.pi/180.0
    PLM,dPLM = plm_holmes(LMAX,np.cos(theta))
    #-- square of legendre polynomials truncated to order MMAX
    mm = np.arange(0,MMAX+1)
    PLM2 = PLM[:,mm,:]**2

    #-- Calculating cos(m*phi)^2 and sin(m*phi)^2
    m = delta_Ylms.m[:,np.newaxis]
    ccos = np.cos(np.dot(m,phi))**2
    ssin = np.sin(np.dot(m,phi))**2

    #-- truncate delta harmonics to spherical harmonic range
    Ylms = delta_Ylms.truncate(LMAX,lmin=LMIN,mmax=MMAX)
    #-- convolve delta harmonics with degree dependent factors
    #-- smooth harmonics and convert to output units
    Ylms = Ylms.convolve(dfactor*wt).power(2.0).scale(1.0/nsmth)
    #-- Calculate fourier coefficients
    d_cos = np.zeros((MMAX+1,nlat))#-- [m,th]
    d_sin = np.zeros((MMAX+1,nlat))#-- [m,th]
    #-- Calculating delta spatial values
    for k in range(0,nlat):
        #-- summation over all spherical harmonic degrees
        d_cos[:,k] = np.sum(PLM2[:,:,k]*Ylms.clm, axis=0)
        d_sin[:,k] = np.sum(PLM2[:,:,k]*Ylms.slm, axis=0)

    #-- Multiplying by c/s(phi#m) to get spatial maps (lon,lat)
    delta.data=np.sqrt(np.dot(ccos.T,d_cos) + np.dot(ssin.T,d_sin)).T

    #-- output file format
    file_format = '{0}{1}_L{2:d}{3}{4}{5}_ERR_{6:03d}-{7:03d}.{8}'
    #-- output error file to ascii, netCDF4 or HDF5
    args = (FILE_PREFIX,unit_list[UNITS-1],LMAX,order_str,gw_str,ds_str,
        GRACE_Ylms.month[0],GRACE_Ylms.month[-1],suffix[DATAFORM])
    FILE = os.path.join(OUTPUT_DIRECTORY,file_format.format(*args))
    if (DATAFORM == 'ascii'):
        #-- ascii (.txt)
        delta.to_ascii(FILE, date=False, verbose=VERBOSE)
    elif (DATAFORM == 'netCDF4'):
        #-- netCDF4
        delta.to_netCDF4(FILE, date=False, verbose=VERBOSE,
            units=unit_list[UNITS-1], longname=unit_name[UNITS-1],
            title='GRACE/GRACE-FO Spatial Error')
    elif (DATAFORM == 'HDF5'):
        #-- HDF5
        delta.to_HDF5(FILE, date=False, verbose=VERBOSE,
            units=unit_list[UNITS-1], longname=unit_name[UNITS-1],
            title='GRACE/GRACE-FO Spatial Error')
    #-- set the permissions mode of the output files
    os.chmod(FILE, MODE)
    #-- add file to list
    output_files.append(FILE)

    #-- return the list of output files
    return output_files
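# tssmooth() is not shown in this snippet. The per-coefficient error stored
# above is the RMS of the residual after smoothing the coefficient time
# series; a simplified stand-in (running mean only, without the annual and
# semi-annual fit the real routine performs) could look like this:
import numpy as np

def residual_rms(data, hfwth=6):
    window = 2 * hfwth + 1
    smoothed = np.convolve(data, np.ones(window) / window, mode='same')
    noise = data - smoothed
    return np.sqrt(np.sum(noise ** 2) / len(noise))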
Example #47
0
def check_course(d, course_id):
    try:
        return check_course_core(d.loc[d["Class"] == str(course_id)],
                                 np.int64(course_id))
    except KeyError:
        return [NOT_FOUND, 0]
Example #48
0
def _apply_spans_concat_2(spans, src_index, src_values, dest_index, dest_values,
                          max_index_i, max_value_i, separator, delimiter, sp_start, dest_start_v):


    if sp_start == 0:
        d_index_i = np.int64(1)
        d_index_v = np.int64(0)
    else:
        d_index_i = np.int64(0)
        d_index_v = np.int64(0)

    sp_end = len(spans)-1
    for s in range(sp_start, sp_end):
        sp_cur = spans[s]
        sp_next = spans[s+1]
        cur_src_i = src_index[sp_cur]
        next_src_i = src_index[sp_next]

        non_empties = 0
        if sp_next - sp_cur == 1:
            # at most one entry to be copied so no decoration required
            if next_src_i - cur_src_i > 0:
                non_empties = 1
        elif sp_next - sp_cur > 1:
            for e in range(sp_cur, sp_next):
                e_start = src_index[e]
                e_end = src_index[e+1]
                if e_end - e_start > 0:
                    non_empties += 1

        delta = 0
        if non_empties == 1:
            # single entry
            comma = False
            quotes = False
            for i_c in range(cur_src_i, next_src_i):
                if src_values[i_c] == separator:
                    comma = True
                elif src_values[i_c] == delimiter:
                    quotes = True

            if comma or quotes:
                dest_values[d_index_v + delta] = delimiter
                delta += 1

            for i_c in range(cur_src_i, next_src_i):
                if src_values[i_c] == delimiter:
                    dest_values[d_index_v + delta] = delimiter
                    delta += 1
                dest_values[d_index_v + delta] = src_values[i_c]
                delta += 1

            if comma or quotes:
                dest_values[d_index_v + delta] = delimiter
                delta += 1

        elif non_empties > 1:
            # multiple entries so find out whether there are multiple entries with values
            prev_empty = True
            for e in range(sp_cur, sp_next):
                src_start = src_index[e]
                src_end = src_index[e + 1]
                comma = False
                quotes = False
                cur_empty = src_end == src_start
                for i_c in range(src_start, src_end):
                    if src_values[i_c] == separator:
                        comma = True
                    elif src_values[i_c] == delimiter:
                        quotes = True

                if prev_empty == False and cur_empty == False:
                    if e > sp_cur:
                        dest_values[d_index_v + delta] = separator
                        delta += 1
                # `prev_empty`, once set to False, can't become True again.
                # this line ensures that, once we have encountered our first
                # non-empty entry, any following non-empty entry will get a separator,
                # even if there are empty-entries in-between.
                prev_empty = cur_empty if cur_empty == False else prev_empty

                if comma or quotes:
                    dest_values[d_index_v + delta] = delimiter
                    delta += 1

                for i_c in range(src_start, src_end):
                    if src_values[i_c] == delimiter:
                        dest_values[d_index_v + delta] = delimiter
                        delta += 1
                    dest_values[d_index_v + delta] = src_values[i_c]
                    delta += 1

                if comma or quotes:
                    dest_values[d_index_v + delta] = delimiter
                    delta += 1

        d_index_v += delta
        dest_index[d_index_i] = d_index_v + dest_start_v
        d_index_i += 1

        if d_index_i >= max_index_i or d_index_v >= max_value_i:
            break
    return s + 1, d_index_i, d_index_v
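# A hypothetical illustration of the indexed-string layout both concat helpers
# assume: the characters of all entries are concatenated in src_values, and
# src_index[i] marks where entry i starts (src_index[-1] is the total length).
import numpy as np

entries = ["ab", "", "c,d"]
src_values = np.frombuffer("".join(entries).encode(), dtype=np.uint8)
src_index = np.int64([0, 2, 2, 5])
# entry 1 is empty; entry 2 contains the separator character, so the helpers
# above would wrap it in the delimiter (quote) character when concatenating.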
Example #49
0
def _read_int64(f):
    '''Read a signed 64-bit integer'''
    return np.int64(struct.unpack('>q', f.read(8))[0])
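# A quick round-trip check of the reader above: pack a signed 64-bit integer
# big-endian and read it back from an in-memory stream.
import io
import struct
import numpy as np

buf = io.BytesIO(struct.pack('>q', -5))
assert _read_int64(buf) == np.int64(-5)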
Example #50
0
 def explore(self, sigma):
     self.c_w += np.int64(np.round(np.random.normal(0, np.max(self.c_w) * sigma, self.c_w.shape)))
     self.c_b += np.int64(np.round(np.random.normal(0, np.max(self.c_b) * sigma, self.c_b.shape)))
Example #51
0
def container_values():
    r"""list: Value for container tests"""
    return [np.float32(1),
            b'hello', u'hello',
            {'nested': np.int64(2)},
            [np.complex128(4), np.uint8(0)]]
Example #52
0
 def FIXED_to_numpy_int64(self, py_long):
     return numpy.int64(py_long)
Example #53
0
def get_crystal(raw_data):  # pylint: disable=too-many-locals
    """
    Return the data in format of a CRYSTAL d3 input
    """
    def float_to_fraction(x, error=0.000001):
        """
        1D float np.array to 1D fraction (int) array
        Modified from ref: https://stackoverflow.com/questions/5124743/algorithm-for-simplifying-decimal-to-fractions
        """
        out = np.empty([len(x), 2], dtype=np.int64)
        for i, value in enumerate(x):
            if value < error:
                out[i] = 0, 1
            elif 1 - error < value:
                out[i] = 1, 1
            else:
                # The lower fraction is 0/1
                lower_n = 0
                lower_d = 1
                # The upper fraction is 1/1
                upper_n = 1
                upper_d = 1
                while True:
                    # The middle fraction is (lower_n + upper_n) / (lower_d + upper_d)
                    middle_n = lower_n + upper_n
                    middle_d = lower_d + upper_d
                    # If value + error < middle

                    if middle_d * (value + error) < middle_n:
                        # middle is our new upper
                        upper_n = middle_n
                        upper_d = middle_d
                    # Else If middle < value - error
                    elif middle_n < (value - error) * middle_d:
                        # middle is our new lower
                        lower_n = middle_n
                        lower_d = middle_d
                    # Else middle is our best fraction
                    else:
                        out[i] = middle_n, middle_d
                        break
        return out

    lines = []
    lines.append("BAND")
    lines.append("<...>     !Title")
    kpath = []
    klabel = []
    for s in raw_data["path"]:
        c0 = raw_data["kpoints_rel"][s[0]]
        c1 = raw_data["kpoints_rel"][s[1]]
        klabel.append([s[0], s[1]])
        kpath.append([c0, c1])

    npath = len(kpath)
    kpath = np.float64(kpath)
    kpath_flat = kpath.flatten()
    fraction_kpath = float_to_fraction(kpath_flat)
    # numerator = fraction_kpath[:, 0]
    denominator = fraction_kpath[:, 1]
    shrinking_fac = np.lcm.reduce(denominator)
    kpath_new = shrinking_fac * kpath_flat
    kpath_new = np.int64(kpath_new.round().reshape(npath, 2, -1))
    lines.append(
        "{:d} {:d} 100 <...> <...> 1 0     !<...> - <...>: 1st band - last band"
        .format(npath, shrinking_fac))
    for i, path in enumerate(kpath_new):
        c0 = path[0]
        c1 = path[1]

        lines.append(
            "{:2d} {:2d} {:2d}  {:2d} {:2d} {:2d}    {:2s} -> {:2s}".format(
                c0[0], c0[1], c0[2], c1[0], c1[1], c1[2], klabel[i][0],
                klabel[i][1]))

    return "\n".join(lines)
Example #54
0
import unittest
import json
from fractions import Fraction

import numpy as np

from artiq.protocols import pyon

_pyon_test_object = {
    (1, 2): [(3, 4.2), (2, )],
    Fraction(3, 4): np.linspace(5, 10, 1),
    "set": {"testing", "sets"},
    "a": np.int8(9),
    "b": np.int16(-98),
    "c": np.int32(42),
    "d": np.int64(-5),
    "e": np.uint8(8),
    "f": np.uint16(5),
    "g": np.uint32(4),
    "h": np.uint64(9),
    "x": np.float16(9.0),
    "y": np.float32(9.0),
    "z": np.float64(9.0),
    1j: 1 - 9j,
    "q": np.complex128(1j),
}


class PYON(unittest.TestCase):
    def test_encdec(self):
        for enc in pyon.encode, lambda x: pyon.encode(x, True):
Example #55
0
    estimator=model_cnn,
    param_grid=keras_param_options,
    scoring=my_custom_scorer,
    cv=5,
    n_jobs=-5,
    #verbose = -1
)

#%%
# generate training and testing data for CNN
l1 = []
with open('cnn_hp.txt') as f:
    for l in f:
        l1.append((l.strip()).split("\t"))

nxf, nyf = np.int64(l1[0][0]), np.int64(l1[0][0])
nx, ny = np.int64(l1[1][0]), np.int64(l1[1][0])
n_snapshots = np.int64(l1[2][0])
n_snapshots_train = np.int64(l1[3][0])
n_snapshots_test = np.int64(l1[4][0])
freq = np.int64(l1[5][0])
istencil = np.int64(l1[6][0])  # 1: nine point, 2: single point
ifeatures = np.int64(l1[7][0])  # 1: 6 features, 2: 2 features
ilabel = np.int64(l1[8][0])  # 1: SGS (tau), 2: eddy-viscosity (nu)

#%%
obj = DHIT(nx=nx,
           ny=ny,
           nxf=nxf,
           nyf=nyf,
           freq=freq,
Example #56
0
def ply_test_value_int64(ply_test_value):
    r"""Version of example Ply dictionary using 64bit integers."""
    out = copy.deepcopy(ply_test_value)
    for f in out['faces']:
        f['vertex_index'] = [np.int64(x) for x in f['vertex_index']]
    return out
Example #57
0
def build_fc_model():
  fc_model = tf.keras.Sequential([
      tf.keras.layers.Flatten(),
      # TODO: Define the rest of the model.
  ])
  return fc_model

def build_cnn_model():
    cnn_model = tf.keras.Sequential([
       # TODO: Define the model.
    ])
    return cnn_model

mnist = tf.keras.datasets.mnist
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
train_images = np.expand_dims(train_images, axis=-1)/255.
train_labels = np.int64(train_labels)
test_images = np.expand_dims(test_images, axis=-1)/255.
test_labels = np.int64(test_labels)

plt.figure(figsize=(10,10))
random_inds = np.random.choice(60000,36)
for i in range(36):
    plt.subplot(6,6,i+1)
    plt.xticks([])
    plt.yticks([])
    plt.grid(False)
    image_ind = random_inds[i]
    plt.imshow(np.squeeze(train_images[image_ind]), cmap=plt.cm.binary)
    plt.xlabel(train_labels[image_ind])
    
model = build_fc_model()
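# One possible way to fill in the TODOs above (an assumption, not the original
# lab's reference solution): a small dense network and a small CNN for the
# 28x28x1 MNIST images prepared above.
import tensorflow as tf

def build_fc_model():
    return tf.keras.Sequential([
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dense(10, activation='softmax'),
    ])

def build_cnn_model():
    return tf.keras.Sequential([
        tf.keras.layers.Conv2D(24, 3, activation='relu', input_shape=(28, 28, 1)),
        tf.keras.layers.MaxPool2D(pool_size=(2, 2)),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dense(10, activation='softmax'),
    ])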
Example #58
0
def Ridge_KFold_Sort_Permutation(Subjects_Data, Subjects_Score, Times_IDRange,
                                 Fold_Quantity, Alpha_Range, ResultantFolder,
                                 Parallel_Quantity, Max_Queued, Queue):

    if not os.path.exists(ResultantFolder):
        os.makedirs(ResultantFolder)
    Subjects_Data_Mat = {'Subjects_Data': Subjects_Data}
    Subjects_Data_Mat_Path = ResultantFolder + '/Subjects_Data.mat'
    sio.savemat(Subjects_Data_Mat_Path, Subjects_Data_Mat)
    Finish_File = []
    Times_IDRange_Todo = np.int64(np.array([]))
    for i in np.arange(len(Times_IDRange)):
        ResultantFolder_I = ResultantFolder + '/Time_' + str(Times_IDRange[i])
        if not os.path.exists(ResultantFolder_I):
            os.makedirs(ResultantFolder_I)
        if not os.path.exists(ResultantFolder_I + '/Res_NFold.mat'):
            Times_IDRange_Todo = np.insert(Times_IDRange_Todo,
                                           len(Times_IDRange_Todo),
                                           Times_IDRange[i])
            Configuration_Mat = {'Subjects_Data_Mat_Path': Subjects_Data_Mat_Path, 'Subjects_Score': Subjects_Score, 'Fold_Quantity': Fold_Quantity, \
                'Alpha_Range': Alpha_Range, 'ResultantFolder_I': ResultantFolder_I, 'Parallel_Quantity': Parallel_Quantity}
            sio.savemat(ResultantFolder_I + '/Configuration.mat',
                        Configuration_Mat)
            system_cmd = 'python3 -c ' + '\'import sys;\
                sys.path.append("' + CodesPath + '");\
                from Ridge_CZ_Sort_CategoricalFeatures import Ridge_KFold_Sort_Permutation_Sub;\
                import os;\
                import scipy.io as sio;\
                configuration = sio.loadmat("' + ResultantFolder_I + '/Configuration.mat");\
                Subjects_Data_Mat_Path = configuration["Subjects_Data_Mat_Path"];\
                Subjects_Score = configuration["Subjects_Score"];\
                Fold_Quantity = configuration["Fold_Quantity"];\
                Alpha_Range = configuration["Alpha_Range"];\
                ResultantFolder_I = configuration["ResultantFolder_I"];\
                Parallel_Quantity = configuration["Parallel_Quantity"];\
                Ridge_KFold_Sort_Permutation_Sub(Subjects_Data_Mat_Path[0], Subjects_Score[0], Fold_Quantity[0][0], Alpha_Range[0], ResultantFolder_I[0], Parallel_Quantity[0][0])\' '

            system_cmd = system_cmd + ' > "' + ResultantFolder_I + '/perm_' + str(
                Times_IDRange[i]) + '.log" 2>&1\n'
            Finish_File.append(ResultantFolder_I + '/Res_NFold.mat')
            script = open(ResultantFolder_I + '/script.sh', 'w')
            script.write(system_cmd)
            script.close()

    if len(Times_IDRange_Todo) > Max_Queued:
        Submit_First_Quantity = Max_Queued
    else:
        Submit_First_Quantity = len(Times_IDRange_Todo)
    for i in np.arange(Submit_First_Quantity):
        ResultantFolder_I = ResultantFolder + '/Time_' + str(
            Times_IDRange_Todo[i])
        Option = ' -V -o "' + ResultantFolder_I + '/perm_' + str(
            Times_IDRange_Todo[i]
        ) + '.o" -e "' + ResultantFolder_I + '/perm_' + str(
            Times_IDRange_Todo[i]) + '.e" '
        os.system('qsub -q ' + Queue + ' -N perm_' +
                  str(Times_IDRange_Todo[i]) + Option + ResultantFolder_I +
                  '/script.sh')
    if len(Times_IDRange_Todo) > Max_Queued:
        Finished_Quantity = 0
        while 1:
            for i in np.arange(len(Finish_File)):
                if os.path.exists(Finish_File[i]):
                    Finished_Quantity = Finished_Quantity + 1
                    print(Finish_File[i])
                    del (Finish_File[i])
                    print(
                        time.strftime('%Y-%m-%d-%H-%M-%S',
                                      time.localtime(time.time())))
                    print('Finish quantity = ' + str(Finished_Quantity))
                    time.sleep(8)
                    ResultantFolder_I = ResultantFolder + '/Time_' + str(
                        Times_IDRange_Todo[Max_Queued + Finished_Quantity - 1])
                    Option = ' -V -o "' + ResultantFolder_I + '/perm_' + str(
                        Times_IDRange_Todo[Max_Queued + Finished_Quantity - 1]
                    ) + '.o" -e "' + ResultantFolder_I + '/perm_' + str(
                        Times_IDRange_Todo[Max_Queued + Finished_Quantity -
                                           1]) + '.e" '
                    cmd = 'qsub -q ' + Queue + ' -N perm_' + str(
                        Times_IDRange_Todo[Max_Queued + Finished_Quantity - 1]
                    ) + Option + ResultantFolder_I + '/script.sh'
                    # print(cmd)
                    os.system(cmd)
                    break
            if len(Finish_File) == 0:
                break
            if Max_Queued + Finished_Quantity >= len(Finish_File):
                break
Example #59
0
        out = model(x_train)
        loss = criterion(out, y_train)
        loss.backward()
        optimizer.step()

res = []
for i in range(len(allObj)):
    tmp = np.array(allFeature[i])
    tmp = tmp.reshape([1, 1, len(tmp)])
    x_train = torch.FloatTensor(tmp)
    y_train = torch.FloatTensor(np.array(allObj[i]).reshape(1, 1))
    x_train = Variable(x_train)
    y_train = Variable(y_train)

    out = model(x_train)
    predicted = np.int64(out.cpu().data.numpy()[0, 0] > 0.5)
    res.append(predicted)

#print(sum(allObj))
#print(sum(res))
#print(len(res))
#print(sum(np.array(res)==allObj)/len(allObj))
print(metrics.accuracy_score(allObj, res))
a, b, c, d = metrics.confusion_matrix(allObj, res).ravel()
print(a)
print(b)
print(c)
print(d)
#print(metrics.confusion_matrix(allObj, res).ravel()) # tn, fp, fn, tp
print(metrics.precision_score(allObj, res))
print(metrics.recall_score(allObj, res))
Example #60
0
from .default import _C as cfg
from .default import update_config

import numpy as np 

# size
inp_size = 512 
inp_scales = [512, 256, 128]
out_size = 128
scales = [4, 2, 1]
base_sigma = 2.5
num_pts = 17
offset_area = 2

# point index 
COCO_index = np.int64([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16])
COCO_reorder = np.int64([0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16])

# augmentation 
rotation = 30
min_scale = 0.75
max_scale = 1.05
max_translate = 50

blur_prob = 0.0
blur_size = [7, 11, 15, 21]
blur_type = ['vertical','horizontal','mean']

# training 
max_iter = 50000
max_epoch = 140