Example 1
def conv_int8( value ):
    if( len(value) == 0 ):
        rval = imiss
    else:
        rval = int( value )
    assert numpy.int8( rval ) == numpy.int32( rval ) , " conv_int8: value out of range"
    return numpy.int8( rval )
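Why the assert above works: older NumPy wraps an out-of-range Python int when casting to int8 (200 becomes -56), so the int8 and int32 views disagree exactly when the value does not fit. NumPy >= 2.0 raises OverflowError on such casts instead, so this minimal sketch routes through an array cast:

import numpy

v = 200
print(numpy.array(v).astype(numpy.int8) == numpy.int32(v))      # False -> out of range
print(numpy.array(100).astype(numpy.int8) == numpy.int32(100))  # True  -> in range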
Example 2
def multi_where(vec1, vec2):
    '''Given two vectors, multi_where returns a tuple of indices where those
    two vectors overlap.
    ****THIS FUNCTION HAS NOT BEEN TESTED ON N-DIMENSIONAL ARRAYS*******
    Inputs:
           2 numpy vectors
    Output:
           (xy, yx) where xy is a numpy vector containing the indices of the
           elements in vector 1 that are also in vector 2. yx is a vector
           containing the indices of the elements in vector 2 that are also
           in vector 1.
    Example:
           >>> x = np.array([1,2,3,4,5])
           >>> y = np.array([3,4,5,6,7])
           >>> (xy,yx) = multi_where(x,y)
           >>> xy
           array([2,3,4])
           >>> yx
           array([0,1,2])
    '''

    OneInTwo = np.array([])
    TwoInOne = np.array([])
    for i in range(vec1.shape[0]):
        if np.where(vec2 == vec1[i])[0].shape[0]:
            OneInTwo = np.append(OneInTwo,i)
            TwoInOne = np.append(TwoInOne, np.where(vec2 == vec1[i])[0][0])

    return (np.int8(OneInTwo), np.int8(TwoInOne))
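Note that the np.int8 cast in the return silently wraps any index above 127, so the function is only safe for inputs shorter than 128 elements. Assuming NumPy >= 1.15, np.intersect1d returns the same first-occurrence indices vectorized; a minimal sketch:

import numpy as np

x = np.array([1, 2, 3, 4, 5])
y = np.array([3, 4, 5, 6, 7])
common, xy, yx = np.intersect1d(x, y, return_indices=True)
print(xy)  # [2 3 4]
print(yx)  # [0 1 2]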
Example 3
    def testInt(self):
        num = np.int(2562010)
        self.assertEqual(np.int(ujson.decode(ujson.encode(num))), num)

        num = np.int8(127)
        self.assertEqual(np.int8(ujson.decode(ujson.encode(num))), num)

        num = np.int16(2562010)
        self.assertEqual(np.int16(ujson.decode(ujson.encode(num))), num)

        num = np.int32(2562010)
        self.assertEqual(np.int32(ujson.decode(ujson.encode(num))), num)

        num = np.int64(2562010)
        self.assertEqual(np.int64(ujson.decode(ujson.encode(num))), num)

        num = np.uint8(255)
        self.assertEqual(np.uint8(ujson.decode(ujson.encode(num))), num)

        num = np.uint16(2562010)
        self.assertEqual(np.uint16(ujson.decode(ujson.encode(num))), num)

        num = np.uint32(2562010)
        self.assertEqual(np.uint32(ujson.decode(ujson.encode(num))), num)

        num = np.uint64(2562010)
        self.assertEqual(np.uint64(ujson.decode(ujson.encode(num))), num)
Example 4
    def test_NumPy_arrayview_deletion_sitkImage_1(self):
      # 2D image
      image = sitk.Image(sizeX, sizeY, sitk.sitkInt32)
      for j in range(sizeY):
          for i in range(sizeX):
              image[i, j] = j*sizeX + i

      npview = sitk.GetArrayFromImage(image, arrayview = True, writeable = True)
      image.SetPixel(0,0, newSimpleITKPixelValueInt32)

      del image

      carr = np.array(npview, copy = False)
      rarr = np.reshape(npview, (1, npview.size))
      varr = npview.view(dtype=np.int8)

      self.assertEqual( carr[0,0],newSimpleITKPixelValueInt32)
      self.assertEqual( rarr[0,0],newSimpleITKPixelValueInt32)
      self.assertEqual( varr[0,0],np.int8(newSimpleITKPixelValueInt32))

      npview[0,0] = newNumPyElementValueInt32

      del npview

      self.assertEqual( carr[0,0],newNumPyElementValueInt32)
      self.assertEqual( rarr[0,0],newNumPyElementValueInt32)
      self.assertEqual( varr[0,0],np.int8(newNumPyElementValueInt32))
Example 5
    def testIntMax(self):
        num = np.int(np.iinfo(np.int).max)
        self.assertEqual(np.int(ujson.decode(ujson.encode(num))), num)

        num = np.int8(np.iinfo(np.int8).max)
        self.assertEqual(np.int8(ujson.decode(ujson.encode(num))), num)

        num = np.int16(np.iinfo(np.int16).max)
        self.assertEqual(np.int16(ujson.decode(ujson.encode(num))), num)

        num = np.int32(np.iinfo(np.int32).max)
        self.assertEqual(np.int32(ujson.decode(ujson.encode(num))), num)

        num = np.uint8(np.iinfo(np.uint8).max)
        self.assertEqual(np.uint8(ujson.decode(ujson.encode(num))), num)

        num = np.uint16(np.iinfo(np.uint16).max)
        self.assertEqual(np.uint16(ujson.decode(ujson.encode(num))), num)

        num = np.uint32(np.iinfo(np.uint32).max)
        self.assertEqual(np.uint32(ujson.decode(ujson.encode(num))), num)

        if platform.architecture()[0] != '32bit':
            num = np.int64(np.iinfo(np.int64).max)
            self.assertEqual(np.int64(ujson.decode(ujson.encode(num))), num)

            # uint64 max will always overflow as it's encoded to signed
            num = np.uint64(np.iinfo(np.int64).max)
            self.assertEqual(np.uint64(ujson.decode(ujson.encode(num))), num)
Example 6
def chain2image(chaincode,start_pix):

    """
    Method to compute the pixel contour providing the chain code string
    and the starting pixel location [X,Y].
    Author: Xavier Bonnin (LESIA)
    """

    if not isinstance(chaincode, str):
        print("First input argument must be a string!")
        return None

    if len(start_pix) != 2:
        print("Second input argument must be a 2-element vector!")
        return None

    ardir = np.array([[-1,0],[-1,1],[0,1],[1,1],[1,0],[1,-1],[0,-1],[-1,-1]])
    ccdir = np.array([0,7,6,5,4,3,2,1])

    X = [start_pix[0]]
    Y = [start_pix[1]]
    for c in chaincode:
        if abs(np.int8(c)) > 7:
            print("Wrong chain code format!")
            return None
        wc = np.where(np.int8(c) == np.int8(ccdir))[0][0]
        X.append(X[-1] + int(ardir[wc, 0]))
        Y.append(Y[-1] + int(ardir[wc, 1]))
    return X, Y
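A quick usage sketch with made-up values: each digit selects a step direction from ardir via its position in ccdir, so the code "0246" traces a small closed square:

X, Y = chain2image("0246", [10, 10])
print(list(zip(X, Y)))  # [(10, 10), (9, 10), (9, 9), (10, 9), (10, 10)]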
Example 7
 def test_numpy_scalar_conversion_values(self):
     self.assertEqual(nd.as_py(nd.array(np.bool_(True))), True)
     self.assertEqual(nd.as_py(nd.array(np.bool_(False))), False)
     self.assertEqual(nd.as_py(nd.array(np.int8(100))), 100)
     self.assertEqual(nd.as_py(nd.array(np.int8(-100))), -100)
     self.assertEqual(nd.as_py(nd.array(np.int16(20000))), 20000)
     self.assertEqual(nd.as_py(nd.array(np.int16(-20000))), -20000)
     self.assertEqual(nd.as_py(nd.array(np.int32(1000000000))), 1000000000)
     self.assertEqual(nd.as_py(nd.array(np.int64(-1000000000000))),
                      -1000000000000)
     self.assertEqual(nd.as_py(nd.array(np.int64(1000000000000))),
                      1000000000000)
     self.assertEqual(nd.as_py(nd.array(np.int32(-1000000000))),
                      -1000000000)
     self.assertEqual(nd.as_py(nd.array(np.uint8(200))), 200)
     self.assertEqual(nd.as_py(nd.array(np.uint16(50000))), 50000)
     self.assertEqual(nd.as_py(nd.array(np.uint32(3000000000))), 3000000000)
     self.assertEqual(nd.as_py(nd.array(np.uint64(10000000000000000000))),
                      10000000000000000000)
     self.assertEqual(nd.as_py(nd.array(np.float32(2.5))), 2.5)
     self.assertEqual(nd.as_py(nd.array(np.float64(2.5))), 2.5)
     self.assertEqual(nd.as_py(nd.array(np.complex64(2.5-1j))), 2.5-1j)
     self.assertEqual(nd.as_py(nd.array(np.complex128(2.5-1j))), 2.5-1j)
     if np.__version__ >= '1.7':
         self.assertEqual(nd.as_py(nd.array(np.datetime64('2000-12-13'))),
                          date(2000, 12, 13))
Example 8
def add_vars_to_grp(grp, types, **kwargs):
    v = grp.createVariable(kwargs.get('var1','var1'),numpy.int8)
    v[:] = numpy.int8(8)
    v.foo = 'bar'
    
    v = grp.createVariable(kwargs.get('var2','var2'),numpy.int8, (dim3._name,), fill_value=5)
    v[:] = numpy.int8(8)
    v.foo = 'bar'
    
    v = grp.createVariable(kwargs.get('var3','var3'),numpy.int8, (dim1._name,dim4._name,))
    v[:] = numpy.arange(8,dtype=numpy.int8).reshape(2,4)
    v.foo = 'bar'
    
    v = grp.createVariable(kwargs.get('var4','var4'),'S1', (dim1._name,dim4._name,))
    #v[:] = numpy.ndarray(8,dtype='S1').reshape(2,4)
    v[:] = 'a'
    v.foo = 'bar'
    
    for num,type in enumerate(types):    
        default_name = 'var{}'.format(num+5)
        print(default_name)
        v = grp.createVariable(kwargs.get(default_name,default_name),type, (dim4._name,))
        try:
            v[:] = numpy.iinfo(type).max
            continue
        except ValueError:
            pass
        try:
            for i,c in enumerate('char'):
                v[i] = c 
            continue
        except ValueError:
            pass
       
        v[:] = numpy.pi
Example 9
    def test_valid(self):
        prop = bcpp.Int()

        assert prop.is_valid(None)

        assert prop.is_valid(0)
        assert prop.is_valid(1)

        assert prop.is_valid(np.int8(0))
        assert prop.is_valid(np.int8(1))
        assert prop.is_valid(np.int16(0))
        assert prop.is_valid(np.int16(1))
        assert prop.is_valid(np.int32(0))
        assert prop.is_valid(np.int32(1))
        assert prop.is_valid(np.int64(0))
        assert prop.is_valid(np.int64(1))
        assert prop.is_valid(np.uint8(0))
        assert prop.is_valid(np.uint8(1))
        assert prop.is_valid(np.uint16(0))
        assert prop.is_valid(np.uint16(1))
        assert prop.is_valid(np.uint32(0))
        assert prop.is_valid(np.uint32(1))
        assert prop.is_valid(np.uint64(0))
        assert prop.is_valid(np.uint64(1))

        # TODO (bev) should fail
        assert prop.is_valid(False)
        assert prop.is_valid(True)
Example 10
File: preproc.py Project: kif/pyFAI
    def process(self, image,
                dark=None,
                variance=None,
                dark_variance=None,
                normalization_factor=1.0
                ):
        """Perform the pixel-wise operation of the array

        :param image: numpy array with the input image
        :param dark: numpy array with the dark-current image
        :param variance: numpy array with the variance of input image
        :param dark_variance: numpy array with the variance of dark-current image
        :param normalization_factor: divide the result by this
        :return: array with processed data,
                may be an array of (data,variance,normalization) depending on class initialization
        """
        with self.sem:
            if id(image) != id(self.on_device.get("image")):
                self.send_buffer(image, "image")

            if dark is not None:
                do_dark = numpy.int8(1)
                if id(dark) != id(self.on_device.get("dark")):
                    self.send_buffer(dark, "dark")
            else:
                do_dark = numpy.int8(0)
            if (variance is not None) and self.on_host.get("calc_variance"):
                if id(variance) != id(self.on_device.get("variance")):
                    self.send_buffer(variance, "variance")
            if (dark_variance is not None) and self.on_host.get("calc_variance"):
                if id(dark_variance) != id(self.on_device.get("dark_variance")):
                    self.send_buffer(dark_variance, "dark_variance")

            if self.on_host.get("poissonian"):
                kernel_name = "corrections3Poisson"
            elif self.on_host.get("calc_variance"):
                kernel_name = "corrections3"
            elif self.on_host.get("split_result"):
                kernel_name = "corrections2"
            else:
                kernel_name = "corrections"
            kwargs = self.cl_kernel_args[kernel_name]
            kwargs["do_dark"] = do_dark
            kwargs["normalization_factor"] = numpy.float32(normalization_factor)
            if (kernel_name == "corrections3") and (self.on_device.get("dark_variance") is not None):
                kwargs["do_dark_variance"] = do_dark
            kernel = self.kernels.get_kernel(kernel_name)
            evt = kernel(self.queue, (self.size,), None, *list(kwargs.values()))
            if kernel_name.startswith("corrections3"):
                dest = numpy.empty(self.on_device.get("image").shape + (3,), dtype=numpy.float32)
            elif kernel_name == "corrections2":
                dest = numpy.empty(self.on_device.get("image").shape + (2,), dtype=numpy.float32)
            else:
                dest = numpy.empty(self.on_device.get("image").shape, dtype=numpy.float32)

            copy_result = pyopencl.enqueue_copy(self.queue, dest, self.cl_mem["output"])
            copy_result.wait()
            if self.profile:
                self.events += [EventDescription("preproc", evt), EventDescription("copy result", copy_result)]
        return dest
Example 11
    def test_numpy(self):
        assert chash(np.bool_(True)) == chash(np.bool_(True))

        assert chash(np.int8(1)) == chash(np.int8(1))
        assert chash(np.int16(1))
        assert chash(np.int32(1))
        assert chash(np.int64(1))

        assert chash(np.uint8(1))
        assert chash(np.uint16(1))
        assert chash(np.uint32(1))
        assert chash(np.uint64(1))

        assert chash(np.float32(1)) == chash(np.float32(1))
        assert chash(np.float64(1)) == chash(np.float64(1))
        assert chash(np.float128(1)) == chash(np.float128(1))

        assert chash(np.complex64(1+1j)) == chash(np.complex64(1+1j))
        assert chash(np.complex128(1+1j)) == chash(np.complex128(1+1j))
        assert chash(np.complex256(1+1j)) == chash(np.complex256(1+1j))

        assert chash(np.datetime64('2000-01-01')) == chash(np.datetime64('2000-01-01'))
        assert chash(np.timedelta64(1,'W')) == chash(np.timedelta64(1,'W'))

        self.assertRaises(ValueError, chash, np.object())

        assert chash(np.array([[1, 2], [3, 4]])) == \
            chash(np.array([[1, 2], [3, 4]]))
        assert chash(np.array([[1, 2], [3, 4]])) != \
            chash(np.array([[1, 2], [3, 4]]).T)
        assert chash(np.array([1, 2, 3])) == chash(np.array([1, 2, 3]))
        assert chash(np.array([1, 2, 3], dtype=np.int32)) != \
            chash(np.array([1, 2, 3], dtype=np.int64))
Example 12
def result_dict_to_hdf5(f, rd):
    for name, data in rd.items():
        flag = None
        # beware: isinstance(True/False, int) == True
        if isinstance(data, bool):
            data = np.int8(data)
            flag = "py_bool"
        elif isinstance(data, int):
            data = np.int64(data)
            flag = "py_int"

        if isinstance(data, np.ndarray):
            dataset = f.create_dataset(name, data=data)
        else:
            ty = type(data)
            if ty is str:
                ty_h5 = "S{}".format(len(data))
                data = data.encode()
            else:
                try:
                    ty_h5 = _type_to_hdf5[ty]
                except KeyError:
                    raise TypeError("Type {} is not supported for HDF5 output"
                                    .format(ty)) from None
            dataset = f.create_dataset(name, (), ty_h5)
            dataset[()] = data

        if flag is not None:
            dataset.attrs[flag] = np.int8(1)
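A minimal reader for the flag convention above; the restore helper is hypothetical (not part of the original code) and simply inverts the int8/int64 encoding when the marker attribute is present:

def restore(dataset):
    # hypothetical inverse of the writer above
    value = dataset[()]
    if dataset.attrs.get("py_bool"):
        return bool(value)
    if dataset.attrs.get("py_int"):
        return int(value)
    return value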
Example 13
 def classify(self, inputs):
     # switch to test mode
     self.mode.set_value(np.int8(1))
     rval = self._classify(inputs)
     # switch to train mode
     self.mode.set_value(np.int8(0))
     return rval
Example 14
def get_sites_summary(noaa_dir, stemxy=False):
    """create a pandas data frame with site code, lon and lat for all
    sites with data files in the specified directory

    ARGS:
    noaa_dir (string): full path to a directory containing NOAA OCS observation
        files

    RETURNS:
    pandas DataFrame object with columns site_code, lon, lat
    """
    all_sites_df = get_all_NOAA_airborne_data(noaa_dir)
    if stemxy:
        dom = domain.STEM_Domain()
        all_sites_df.get_stem_xy(dom.get_lon(), dom.get_lat())
    summary_df = all_sites_df.obs.groupby('sample_site_code').mean()
    if stemxy:
        summary_df = summary_df[['sample_longitude', 'sample_latitude',
                                 'x_stem', 'y_stem']]
        # make sure x, y indices are integers
        summary_df['x_stem'] = np.int8(np.round(summary_df['x_stem']))
        summary_df['y_stem'] = np.int8(np.round(summary_df['y_stem']))
    else:
        summary_df = summary_df[['sample_longitude', 'sample_latitude']]
    summary_df = summary_df.reset_index()
    summary_df.rename(columns={k: k.replace('sample_', '')
                               for k in summary_df.columns.values},
                      inplace=True)
    return summary_df
Example 15
    def process(self, target, **kwargs):
        """ Filter the image leaving only the required annulus. """
        # Check target type
        if target.name() != 'Image':
            self.logger.warning("[%s] Input variable is not an image. Skipping.", self.name())
            return {'output': None}
        # Check that inner and outer diameter are defined
        if self._inner is None or self._outer is None:
            self.logger.warning("[%s] One of the required diameter is not set. Skipping.", self.name())
            return {'output': None}

        # If size has changed or mask is not defined, we prepare it
        if self._mask is None or target.value.shape[1:] != self._size:
            self._size = target.value.shape[1:]
            if self._center is None:
                self._center = [int(target.value.shape[2] / 2), int(target.value.shape[1] / 2)]

            # Build the mask
            y, x = np.ogrid[0:self._size[0], 0:self._size[1]]
            x -= self._center[0]
            y -= self._center[1]
            r_in = x ** 2 / self._inner[0] + y ** 2 / self._inner[1]
            r_out = x ** 2 / self._outer[0] + y ** 2 / self._outer[1]
            self._mask = np.int8(r_in > 1) * np.int8(r_out < 1)

        # Filter the image using the defined mask
        self.logger.debug("[%s] Image shape %s, Mask shape %s.", self.name(), target.value.shape, self._mask.shape)
        out = Image()
        out.value = np.copy(target.value) * np.tile(self._mask, (target.value.shape[0], 1, 1))
        return {'output': out}
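The mask line above multiplies two np.int8-cast comparisons, which acts as a logical AND: a pixel survives only when it is outside the inner ellipse (r_in > 1) and inside the outer one (r_out < 1). A tiny sketch with made-up radii:

import numpy as np

r_in = np.array([0.5, 2.0, 2.0])
r_out = np.array([0.2, 0.8, 1.5])
print(np.int8(r_in > 1) * np.int8(r_out < 1))  # [0 1 0] -> only the middle pixel survives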
Example 16
def test_apply_scaling():
    # Null scaling, same array returned
    arr = np.zeros((3,), dtype=np.int16)
    assert_true(apply_read_scaling(arr) is arr)
    assert_true(apply_read_scaling(arr, np.float64(1.0)) is arr)
    assert_true(apply_read_scaling(arr, inter=np.float64(0)) is arr)
    f32, f64 = np.float32, np.float64
    f32_arr = np.zeros((1,), dtype=f32)
    i16_arr = np.zeros((1,), dtype=np.int16)
    # Check float upcast (not the normal numpy scalar rule)
    # This is the normal rule - no upcast from scalar
    assert_equal((f32_arr * f64(1)).dtype, np.float32)
    assert_equal((f32_arr + f64(1)).dtype, np.float32)
    # The function does upcast though
    ret = apply_read_scaling(np.float32(0), np.float64(2))
    assert_equal(ret.dtype, np.float64)
    ret = apply_read_scaling(np.float32(0), inter=np.float64(2))
    assert_equal(ret.dtype, np.float64)
    # Check integer inf upcast
    big = f32(type_info(f32)['max'])
    # Normally this would not upcast
    assert_equal((i16_arr * big).dtype, np.float32)
    # An equivalent case is a little hard to find for the intercept
    nmant_32 = type_info(np.float32)['nmant']
    big_delta = np.float32(2**(floor_log2(big)-nmant_32))
    assert_equal((i16_arr * big_delta + big).dtype, np.float32)
    # Upcasting does occur with this routine
    assert_equal(apply_read_scaling(i16_arr, big).dtype, np.float64)
    assert_equal(apply_read_scaling(i16_arr, big_delta, big).dtype, np.float64)
    assert_equal(apply_read_scaling(np.int8(0), -1.0, 0.0).dtype, np.float32)
    assert_equal(apply_read_scaling(np.int8(0), 1e38, 0.0).dtype, np.float64)
    assert_equal(apply_read_scaling(np.int8(0), -1e38, 0.0).dtype, np.float64)
Example 17
def test_is_int():
    # is int
    assert isinstance(1, int) is True
    assert isinstance(np.int(1), int) is True
    assert isinstance(np.int8(1), int) is False
    assert isinstance(np.int16(1), int) is False

    if PY3:
        assert isinstance(np.int32(1), int) is False
    elif PY2:
        assert isinstance(np.int32(1), int) is True

    assert isinstance(np.int64(1), int) is False

    # is np.int
    assert isinstance(np.int(1), np.int) is True
    assert isinstance(np.int8(1), np.int) is False
    assert isinstance(np.int16(1), np.int) is False

    if PY3:
        assert isinstance(np.int32(1), np.int) is False
    elif PY2:
        assert isinstance(np.int32(1), np.int) is True

    assert isinstance(np.int64(1), np.int) is False
Example 18
 def parse_objects(self, data):
     x = y = dx = dy = count = 0
     addr = None
     objects = []
     data = np.array(data, dtype=np.uint8)
     last = len(data)
     index = 0
     while index < last:
         c = data[index]
         log.debug("index=%d, command=%x" % (index, c))
         self.pick_index += 1
         index += 1
         command = None
         if c < 0xfb:
             if addr is not None:
                 obj = self.get_object(self.pick_index, x, y, c, dx, dy, addr)
                 objects.append(obj)
         elif c >= 0xfc and c <= 0xfe:
             arg1 = data[index]
             arg2 = data[index + 1]
             index += 2
             if c == 0xfc:
                 addr = arg2 * 256 + arg1
             elif c == 0xfd:
                 x = int(arg1)
                 y = int(arg2)
             else:
                 dx = int(np.int8(arg1))  # signed!
                 dy = int(np.int8(arg2))
         elif c == 0xff:
             last = 0  # force the end
     return objects
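The int(np.int8(...)) pattern above reinterprets an unsigned byte as a signed two's-complement delta. A one-line demonstration (cast via astype so it also runs on NumPy >= 2.0):

import numpy as np

print(int(np.uint8(0xFF).astype(np.int8)))  # -1, where a plain int() would give 255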
Example 19
def moveObject(initial_depthMAT):
    ###########################################################################
    ## Checking each pixel and invoking functions to process pixels on the vessels.
    ###########################################################################
    global done,startx,starty,clock,screen,endx,endy
    global im
    global endpoint
    global current_depth
    global depthMAT
    global rough_range
    global modified_depthMAT,mask_depthMAT
    
    if np.amax(im)!=1:
        ret,im = cv2.threshold(im, 250, 1, cv2.THRESH_BINARY)
    else:
        print(np.amax(im))
        im = np.int8(im)  # np.int8(im) alone is a no-op; the cast must be assigned back
    mask_depthMAT=initial_depthMAT
    modified_depthMAT=initial_depthMAT
    depthMAT=getDepth(im,modified_depthMAT)
    rough_range=np.median(depthMAT[depthMAT>0])/LEVEL
    im0=np.copy(im)

    convalue_n=Neigh_Cov(im0)

    cols=im.shape[1]
    rows=im.shape[0]

    for y in range(0,rows,1): 
        for x in range(0,cols,1):
            if im[y][x]==1:
                fillPixel(x,y,convalue_n)

    return convalue_n
Example 20
def xeng_in_unpack(oob, start_index):
    sum_polQ_r = 0
    sum_polQ_i = 0
    sum_polI_r = 0
    sum_polI_i = 0
    rcvd_errs = 0
    flag_errs = 0
    #average the packet contents from the very first entry
    for slice_index in range(c.config['xeng_acc_len']):
        abs_index = start_index + slice_index
        polQ_r = (oob[abs_index]['data'] & ((2**(16)) - (2**(12))))>>(12)
        polQ_i = (oob[abs_index]['data'] & ((2**(12)) - (2**(8))))>>(8)
        polI_r = (oob[abs_index]['data'] & ((2**(8)) - (2**(4))))>>(4)
        polI_i = (oob[abs_index]['data'] & ((2**(4)) - (2**(0))))>>0

        #square each number and then sum it
        sum_polQ_r += (float(((numpy.int8(polQ_r << 4)>> 4)))/(2**binary_point))**2
        sum_polQ_i += (float(((numpy.int8(polQ_i << 4)>> 4)))/(2**binary_point))**2
        sum_polI_r += (float(((numpy.int8(polI_r << 4)>> 4)))/(2**binary_point))**2
        sum_polI_i += (float(((numpy.int8(polI_i << 4)>> 4)))/(2**binary_point))**2

        if not oob[abs_index]['received']: rcvd_errs += 1
        if oob[abs_index]['flag']: flag_errs += 1

    num_accs = c.config['xeng_acc_len']

    level_polQ_r = numpy.sqrt(float(sum_polQ_r)/ num_accs)
    level_polQ_i = numpy.sqrt(float(sum_polQ_i)/ num_accs)
    level_polI_r = numpy.sqrt(float(sum_polI_r)/ num_accs)
    level_polI_i = numpy.sqrt(float(sum_polI_i)/ num_accs)

    rms_polQ = numpy.sqrt(((level_polQ_r)**2)  +  ((level_polQ_i)**2))
    rms_polI = numpy.sqrt(((level_polI_r)**2)  +  ((level_polI_i)**2))

    if level_polQ_r < 1.0/(2**num_bits):
        ave_bits_used_Q_r = 0
    else:
        ave_bits_used_Q_r = numpy.log2(level_polQ_r*(2**binary_point))
    if level_polQ_i < 1.0/(2**num_bits):
        ave_bits_used_Q_i = 0
    else:
        ave_bits_used_Q_i = numpy.log2(level_polQ_i*(2**binary_point))
    if level_polI_r < 1.0/(2**num_bits):
        ave_bits_used_I_r = 0
    else:
        ave_bits_used_I_r = numpy.log2(level_polI_r*(2**binary_point))
    if level_polI_i < 1.0/(2**num_bits):
        ave_bits_used_I_i = 0
    else:
        ave_bits_used_I_i = numpy.log2(level_polI_i*(2**binary_point))

    return {'rms_polQ':rms_polQ,\
            'rms_polI':rms_polI,\
            'rcvd_errs':rcvd_errs,\
            'flag_errs':flag_errs,\
            'ave_bits_used_Q_r':ave_bits_used_Q_r,\
            'ave_bits_used_Q_i':ave_bits_used_Q_i,\
            'ave_bits_used_I_r':ave_bits_used_I_r,\
            'ave_bits_used_I_i':ave_bits_used_I_i}
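The repeated numpy.int8(v << 4) >> 4 idiom sign-extends a 4-bit two's-complement field: the left shift moves bit 3 into the int8 sign position, and the arithmetic right shift replicates it back down. A minimal check (routed through uint8 so it also runs on NumPy >= 2.0, which rejects out-of-range Python ints):

import numpy

for v in (0b0111, 0b1000, 0b1111):  # 7, -8 and -1 in 4-bit two's complement
    print(v, numpy.uint8(v << 4).astype(numpy.int8) >> 4)  # -> 7, -8, -1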
Example 21
def main_boost():
    train_ratio = 0.5
    src = Searcher('nail_image500.db')
    # get histogram
    HSV_hist = np.loadtxt(HSV_fname)  # load the HSV histogram
    
    # normalize HSV hist 
    HSV_max = np.max(HSV_hist, axis = 1)
    HSV_min = np.min(HSV_hist, axis = 1)
    HSV_rang = (HSV_max -HSV_min).reshape((len(HSV_max), 1))
    HSV_hist_norm = [(HSV_hist[i] - HSV_min[i])/HSV_rang[i] for i in range(len(HSV_rang))]
    
    samples = np.array( src.get_hist() , dtype = np.float32)

    # normalize BoW(samples) hist
    smp_max = np.max(samples, axis = 1)
    smp_min = np.min(samples, axis = 1)
    smp_rang = (smp_max - smp_min).reshape((len(smp_max), 1))
    smp_norm = [(samples[i] - smp_min[i])/smp_rang[i] for i in range(len(smp_rang))]

    samples = np.hstack((smp_norm, HSV_hist_norm))
    #samples = np.hstack((samples, HSV_hist))
    
    # get labels
    responses = np.int8(next(csv.reader(open(csvfile, 'r'), delimiter=',')))
    responses[responses == -1] = 0

    # get HSV area flags
    hsv_flag = np.int8(next(csv.reader(open(hsv_flag_fname, 'r'), delimiter=',')))
    
    # get sample indexes
    train_idx = random.sample( range(len(samples)), int(len(samples)*train_ratio) )  
    test_idx = list(range(len(samples))) # initialize test index
    for t in train_idx: test_idx.remove(t) # remove train index. and create test index.
    
    # implement Model
    boost = Boost()
    # create model
    boost.train(samples[train_idx,:], responses[train_idx])
    
    boost.save('test_boost')
    # predict use created model
    print "raw",boost.predict(samples[test_idx])
    print "test_rst",responses[test_idx]
    print "hsv_flag", hsv_flag[test_idx]
    train_rate = np.mean(boost.predict(samples[train_idx]) == responses[train_idx])
    train_res = boost.predict(samples[test_idx])
    test_rate = np.mean(train_res == responses[test_idx])
    
    # テストの結果をhsvによって補正する
    revision = deepcopy(train_res)
    revision[ hsv_flag[test_idx]==1 ] = 1
    res_rate = np.mean(revision == responses[test_idx])
    
    print "revision", revision
    
    print "train",train_rate
    print "test",test_rate
    print "res_rate",res_rate
Example 22
def read_serial_thread():
    # all global variables this function can modify:
    global IR_0, IR_1, IR_2, IR_3, IR_4, IR_Yaw_right, IR_Yaw_left, Yaw, p_part, alpha, Kp, Kd, AUTO_STATE, manual_state, mode, blue_percentage

    while 1:
        no_of_bytes_waiting = serial_port.inWaiting()
        if no_of_bytes_waiting > 19: # the Arduino sends 20 bytes at a time (18 data, 2 control)
            # read the first byte (read 1 byte): (ord: gives the actual value of the byte)
            first_byte = np.uint8(ord(serial_port.read(size = 1))) 
            
            # read all data bytes if first byte was the start byte:
            if first_byte == 100:
                serial_data = []
                # read all data bytes:
                for counter in range(18): # 18 data bytes is sent from the ardu
                    serial_data.append(ord(serial_port.read(size = 1)))
                
                # read the received checksum:
                checksum = np.uint8(ord(serial_port.read(size = 1)))
                
                # calculate checksum for the received data bytes: (note that the use of uint8 and int8 exactly matches what is sent from the Arduino)
                calc_checksum = np.uint8(np.uint8(serial_data[0]) + np.uint8(serial_data[1]) + 
                    np.uint8(serial_data[2]) + np.uint8(serial_data[3]) + np.uint8(serial_data[4]) + 
                    np.int8(serial_data[5]) + np.int8(serial_data[6]) + np.int8(serial_data[7]) + 
                    np.int8(serial_data[8]) + np.int8(serial_data[9]) + np.int8(serial_data[10]) + 
                    np.uint8(serial_data[11]) + np.uint8(serial_data[12]) + np.uint8(serial_data[13]) + 
                    np.uint8(serial_data[14]) + np.uint8(serial_data[15]) + np.uint8(serial_data[16]) + np.uint8(serial_data[17]))

                # update the variables with the read serial data only if the checksums match:
                if calc_checksum == checksum:
                    IR_0 = int(np.uint8(serial_data[0])) # (int() is needed to convert it to something that can be sent to the webpage) 
                    IR_1 = int(np.uint8(serial_data[1]))
                    IR_2 = int(np.uint8(serial_data[2]))
                    IR_3 = int(np.uint8(serial_data[3]))
                    IR_4 = int(np.uint8(serial_data[4]))
                    IR_Yaw_right = int(np.int8(serial_data[5])) # sent as an int8_t: numpy must first reinterpret the byte as signed (-128..127) before int() converts it to a Python int
                    IR_Yaw_left = int(np.int8(serial_data[6]))
                    Yaw = int(np.int8(serial_data[7]))
                    p_part = int(np.int8(serial_data[8]))
                    alpha_low_byte = np.uint8(serial_data[9])
                    alpha_high_byte = np.uint8(serial_data[10]) # both low and high bytes must first be treated as uint8s; try a simple example by hand (try sending -1)
                    alpha = int(np.int16(alpha_low_byte + alpha_high_byte*256)) # (multiplying by 256 corresponds to an 8-bit left shift)
                    Kp = int(np.uint8(serial_data[11]))
                    Kd_low_byte = np.uint8(serial_data[12])
                    Kd_high_byte = np.uint8(serial_data[13])
                    Kd = int(Kd_low_byte + Kd_high_byte*256)
                    AUTO_STATE = auto_states[int(np.uint8(serial_data[14]))] # look up the received integer in the auto_states dict
                    manual_state = manual_states[int(np.uint8(serial_data[15]))]
                    mode = mode_states[int(np.uint8(serial_data[16]))]
                    blue_percentage = int(np.uint8(serial_data[17]))
                else: # if the checksums don't match, something went wrong during transmission: flush the input buffer and start over
                    serial_port.flushInput()
                    print("Something went wrong in the transaction: checksums didn't match!")                      
            else: # if first byte isn't the start byte: we're not in sync: just read the next byte until we get in sync (until we reach the start byte)
                pass
        else: # if not enough bytes for entire transmission, just wait for more data:
            pass

        time.sleep(0.025) # Delay for ~40 Hz loop frequency (faster than the sending frequency)
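The alpha reconstruction above uses the same reinterpretation trick for 16 bits: two unsigned bytes are recombined and the result is read as a signed int16. A minimal check with the bytes the Arduino would send for -1:

import numpy as np

low, high = 0xFF, 0xFF
print(np.uint16(low + high * 256).astype(np.int16))  # -1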
Example 23
 def __call__(self, data):
     ts = data[self.key]
     date = datetime.datetime.utcfromtimestamp(ts)
     yearweek = date.isocalendar()[1] - 1
     info = (numpy.int8(51 if yearweek == 52 else yearweek),
             numpy.int8(date.weekday()),
             numpy.int8(date.hour * 4 + date.minute // 15))
     return info
Example 24
def feng_unpack(f, hdr_index, pkt_len):
    sum_polQ_r = 0
    sum_polQ_i = 0
    sum_polI_r = 0
    sum_polI_i = 0

    #average the packet contents from the very first entry
    for pkt_index in range(0, pkt_len):
        pkt_64bit = snap_data[f]['data'][pkt_index].data

        for offset in range(0,64,16):
            polQ_r = (pkt_64bit & ((2**(offset+16)) - (2**(offset+12))))>>(offset+12)
            polQ_i = (pkt_64bit & ((2**(offset+12)) - (2**(offset+8))))>>(offset+8)
            polI_r = (pkt_64bit & ((2**(offset+8)) - (2**(offset+4))))>>(offset+4)
            polI_i = (pkt_64bit & ((2**(offset+4)) - (2**(offset))))>>offset

            #square each number and then sum it
            sum_polQ_r += (float(((numpy.int8(polQ_r << 4)>> 4)))/(2**binary_point))**2
            sum_polQ_i += (float(((numpy.int8(polQ_i << 4)>> 4)))/(2**binary_point))**2
            sum_polI_r += (float(((numpy.int8(polI_r << 4)>> 4)))/(2**binary_point))**2
            sum_polI_i += (float(((numpy.int8(polI_i << 4)>> 4)))/(2**binary_point))**2

    num_accs = (pkt_len-1)*(64/16)

    level_polQ_r = numpy.sqrt(float(sum_polQ_r)/ num_accs)
    level_polQ_i = numpy.sqrt(float(sum_polQ_i)/ num_accs)
    level_polI_r = numpy.sqrt(float(sum_polI_r)/ num_accs)
    level_polI_i = numpy.sqrt(float(sum_polI_i)/ num_accs)

    rms_polQ = numpy.sqrt(((level_polQ_r)**2)  +  ((level_polQ_i)**2))
    rms_polI = numpy.sqrt(((level_polI_r)**2)  +  ((level_polI_i)**2))

    if level_polQ_r < 1.0/(2**num_bits):
        ave_bits_used_Q_r = 0
    else:
        ave_bits_used_Q_r = numpy.log2(level_polQ_r*(2**num_bits))

    if level_polQ_i < 1.0/(2**num_bits):
        ave_bits_used_Q_i = 0
    else:
        ave_bits_used_Q_i = numpy.log2(level_polQ_i*(2**num_bits))

    if level_polI_r < 1.0/(2**num_bits):
        ave_bits_used_I_r = 0
    else:
        ave_bits_used_I_r = numpy.log2(level_polI_r*(2**num_bits))

    if level_polI_i < 1.0/(2**num_bits):
        ave_bits_used_I_i = 0
    else:
        ave_bits_used_I_i = numpy.log2(level_polI_i*(2**num_bits))

    return {'rms_polQ':rms_polQ,\
            'rms_polI':rms_polI,\
            'ave_bits_used_Q_r':ave_bits_used_Q_r,\
            'ave_bits_used_Q_i':ave_bits_used_Q_i,\
            'ave_bits_used_I_r':ave_bits_used_I_r,\
            'ave_bits_used_I_i':ave_bits_used_I_i}
Example 25
 def test_byteDblArray(self):
     print "Testing double byteArray"
     arr = javaPrimativeArray.make_dbl_array('byte',2,3)
     self.assertIsInstance(arr, javaArray.get_array_type(javaPrimativeArray.get_array_type('byte')))
     self.assertIsInstance(arr.o, javabridge.JB_Object)
     arr[0][0] = np.int8(2)
     arr[1][1] = np.int8(3)
     self.assertIsInstance(arr[0][0], metaByte)
     self.assertEqual(arr[1][1],np.int8(3))
Example 26
def MAP(z, M500):
#Create the two arrays we need (radial distance and temp)
    R500 = ((M500)/((4./3.)*(np.pi)*(500.)*(Rho_Crit(z))))**(1./3.)
    PNORM = (1.65e-3)*((E_FACT(z))**(8./3.))*((((hubble70)*(M500))/(3.0e14))**(2./3. + alpha_p))*((hubble70)**2.)*((8.403)/((hubble70)**(3./2.)))*(1.0e6)
    x = np.arange(0,(100.)*(6.)*(R500)/(100.), 0.01)
    q = np.zeros(len(x))
    for i in range(len(x)):
        y1= x[i]
        r = y1
        upperlim = np.sqrt(((6.)*(R500))**(2.) - (r)**(2.))
        def ARNAUD_PROJ(x1):
            return (1.)/(((((c500/R500)**2.)*((x1**2. + y1**2.)))**(gamma/2.))*((1. + (((c500/R500)**2.)*(x1**2. + y1**2.))**(alpha/2))**(index)))
        if r < (6.)*(R500):
            q[i] = scipy.integrate.romberg(ARNAUD_PROJ,0.001, upperlim, divmax=20)
        elif r >(6.)*(R500):
            break
    c = ((x)*(c500))/(R500)
    f = (y_const)*(q)*(PNORM)*(2.)*(mpc)
    r_over_r500= (c)/((c500))*(R500)
    r_arcmin =(r_over_r500)/(ANG_DIAM_DIST(z))*(180.)/(np.pi)*(60.)
    dT_uK = (f)*(1.0e6)*(2.73)
    r = r_arcmin
    t = dT_uK
#Round the radial distance array up to the nearest odd number
    MaxR = np.int8(np.ceil(np.max(r)))
    if MaxR % 2 == 0:
        MaxR = MaxR + 1
    Dim = np.int8(np.ceil((MaxR)/(np.sqrt(2))))
#Create meshgrid of 2*MaxR by 2*MaxR by quarter units
    vects = np.linspace(0,2*Dim,2*Dim*4+1)
    x,y = np.meshgrid(vects, vects)
#Create empty 2d arrays for radial distance calculation and temp population
#with same dimesions of the meshgrid
    DistR = np.zeros((2*Dim*4+1, 2*Dim*4+1))
    T_at_R = np.zeros((2*Dim*4+1, 2*Dim*4+1))
#Populate DistR with radial distances from center to all other points
    for i in range(2*Dim*4+1):
        for j in range(2*Dim*4+1):
            DistR[i,j] = np.sqrt((x[Dim*4,Dim*4] - x[i,j])**2 +(y[Dim*4,Dim*4] - y[i,j])**2)
#Populate T_at_R with temp values corresponding to radial distances
    R = r
    T = t

    interpol = interp1d(R,T, kind='cubic', bounds_error=False, fill_value=0) 

    for i in range(2*Dim*4+1):
        for j in range(2*Dim*4+1):
            T_at_R[i,j] = interpol(DistR[i,j])

    plt.imshow(T_at_R)
    #for i in range(2*Dim*4+1):
        #for j in range(2*Dim*4+1):
            #if T_at_R[i,j] <=np.min(t):
                #T_at_R[i,j] = 0
    return DistR, T_at_R[27]
Example 27
def xaui_feng_unpack(xeng, xaui_port, bram_dmp, hdr_index, pkt_len, skip_indices):
    '''
    Unpack F-engine data.
    '''
    pkt_64bit = struct.unpack('>Q',bram_dmp['bram_msb'][(4 * hdr_index) : (4 * hdr_index) + 4] + bram_dmp['bram_lsb'][(4 * hdr_index):(4 * hdr_index) + 4])[0]
    pkt_mcnt =(pkt_64bit&((2**64) - (2**16))) >> 16
    pkt_ant  = xeng*c.config['n_xaui_ports_per_xfpga'] * c.config['n_ants_per_xaui'] + xaui_port * c.config['n_ants_per_xaui'] + pkt_64bit & ((2**16)-1)
    pkt_freq = pkt_mcnt % n_chans
    sum_polQ_r = 0
    sum_polQ_i = 0
    sum_polI_r = 0
    sum_polI_i = 0

    # average the packet contents - ignore first entry (header)
    for pkt_index in range(1, pkt_len):
        abs_index = hdr_index + pkt_index

        if skip_indices.count(abs_index)>0:
            print('Skipped %i' % abs_index)
            continue

        pkt_64bit = struct.unpack('>Q',bram_dmp['bram_msb'][(4 * abs_index):(4 * abs_index) + 4] + bram_dmp['bram_lsb'][(4 * abs_index):(4 * abs_index) + 4])[0]

        for offset in range(64, 0, -16):
            polQ_r = (pkt_64bit & ((2**(offset + 16)) - (2**(offset + 12)))) >> (offset + 12)
            polQ_i = (pkt_64bit & ((2**(offset + 12)) - (2**(offset + 8))))  >> (offset + 8)
            polI_r = (pkt_64bit & ((2**(offset + 8))  - (2**(offset + 4))))  >> (offset + 4)
            polI_i = (pkt_64bit & ((2**(offset + 4))  - (2**(offset))))      >>  offset

            # square each number and then sum it
            sum_polQ_r += (float(((numpy.int8(polQ_r << 4)>> 4)))/(2**binary_point))**2
            sum_polQ_i += (float(((numpy.int8(polQ_i << 4)>> 4)))/(2**binary_point))**2
            sum_polI_r += (float(((numpy.int8(polI_r << 4)>> 4)))/(2**binary_point))**2
            sum_polI_i += (float(((numpy.int8(polI_i << 4)>> 4)))/(2**binary_point))**2

        # print 'Processed %i. Sum Qr now %f...'%(abs_index, sum_polQ_r)

    num_accs = (pkt_len - len(skip_indices)) * (64 / 16)

    level_polQ_r = numpy.sqrt(float(sum_polQ_r) / num_accs)
    level_polQ_i = numpy.sqrt(float(sum_polQ_i) / num_accs)
    level_polI_r = numpy.sqrt(float(sum_polI_r) / num_accs)
    level_polI_i = numpy.sqrt(float(sum_polI_i) / num_accs)

    rms_polQ = numpy.sqrt(((level_polQ_r)**2) + ((level_polQ_i)**2))
    rms_polI = numpy.sqrt(((level_polI_r)**2) + ((level_polI_i)**2))

    return {'pkt_mcnt': pkt_mcnt,\
            'pkt_ant': pkt_ant,\
            'pkt_freq': pkt_freq,\
            'rms_polQ': rms_polQ,\
            'rms_polI': rms_polI,\
            'level_polQ_r': level_polQ_r,\
            'level_polQ_i': level_polQ_i,\
            'level_polI_r': level_polI_r,\
            'level_polI_i': level_polI_i}
Example 28
def conv_ofc(a,b):
    la = np.int8(len(a))  # length of the input vector
    lb = np.int8(len(b))  # length of the impulse-response vector
    ly = la + lb - 1  # length of the system-response vector
    y = np.zeros(ly, dtype=np.int8)  # fill the output vector with zeros

    for n in np.arange(0, la):  # n ranges over [0, la)
        for k in np.arange(0, lb):  # k ranges over [0, lb)
            y[n + k] += a[n] * b[k]  # accumulate the convolution of a[n] and b[k]
    return y
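A quick sanity check against NumPy's reference convolution; bear in mind that the int8 accumulator in conv_ofc wraps as soon as any partial sum exceeds 127:

import numpy as np

a, b = [1, 2, 3], [1, 1]
print(conv_ofc(a, b))     # [1 3 5 3]
print(np.convolve(a, b))  # [1 3 5 3]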
Example 29
def VD(pre: np.ndarray, label: np.ndarray):
    '''
    Relative volume difference.
    :param pre: segmentation result
    :param label: ground-truth label
    :return: error rate, float
    '''
    pre_mask = np.int8(pre > 0)
    label_mask = np.int8(label > 0)
    return (np.sum(pre_mask) - np.sum(label_mask)) * 1.0 / np.sum(label_mask)
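A tiny usage sketch with made-up masks: three foreground pixels in the prediction against two in the label gives a relative volume difference of 0.5:

import numpy as np

pre = np.array([[0, 1], [1, 1]])
label = np.array([[0, 1], [1, 0]])
print(VD(pre, label))  # (3 - 2) / 2 = 0.5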
Example 30
def conv_int8( value ):
    try:
        if( len(value) == 0 ):
            rval = imiss
        else:
            rval = int( value )
        assert np.int8( rval ) == np.int32( rval ) , " conv_int8: value out of range"
        return np.int8( rval )
    except:
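        # NB: the bare except also swallows the AssertionError raised above,
        # so an out-of-range value quietly becomes imiss instead of failing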
        return imiss
Example 31
 def _init_quantized_weight(self, _, arr):
     _arr = random.randint(-127, 127, dtype='int32').asnumpy()
     arr[:] = np.int8(_arr)
Example 32
    def apply_augmentation(self, example, is_train, output_size):

        im = cv.imread(example[0]['image'], 1)
        height, width = im.shape[:2]

        crop_pos = [width // 2, height // 2]
        max_d = max(height, width)

        scales = [output_size / float(max_d), output_size / float(max_d)]
        centers = []
        corners = []

        m = np.zeros((height, width))
        for j in example:
            if j['iscrowd']:
                rle = mask.frPyObjects(j['mask'], height, width)
                m += mask.decode(rle)
        m = m < 0.5
        m = m.astype(np.float32)

        for ann in example:
            if ann['iscrowd']:
                continue
            poly = ann['mask']
            if len(poly[0]) < 25:
                continue
            N = len(poly[0])
            X = poly[0][0:N:2]
            Y = poly[0][1:N:2]
            c = np.ones((3, 1))
            x1, y1 = ann['bbox'][0], ann['bbox'][1]
            w, h = ann['bbox'][2], ann['bbox'][3]
            c[0], c[1] = x1 + w / 2, y1 + h / 2
            centers.append(c)
            corner = np.ones((3, len(X)))
            corner[0, :] = X
            corner[1, :] = Y
            corners.append(corner)

        param = {'rot': 0,
                 'scale': scales[0],  # scale,
                 'flip': 0,
                 'tx': 0,
                 'ty': 0}

        if is_train:
            np.random.seed()
            param['scale'] = scales[0] * (np.random.random() + .5)  # scale * (np.random.random() + .5)
            param['flip'] = np.random.binomial(1, 0.5)
            param['rot'] = (np.random.random() * (40 * 0.0174532)) - 20 * 0.0174532
            param['tx'] = np.int8((np.random.random() * 10) - 5)
            param['ty'] = np.int8((np.random.random() * 10) - 5)

        a = param['scale'] * np.cos(param['rot'])
        b = param['scale'] * np.sin(param['rot'])

        shift_to_upper_left = np.identity(3)
        shift_to_center = np.identity(3)

        t = np.identity(3)
        t[0][0] = a
        if param['flip']:
            t[0][0] = -a

        t[0][1] = -b
        t[1][0] = b
        t[1][1] = a

        shift_to_upper_left[0][2] = -crop_pos[0] + param['tx']
        shift_to_upper_left[1][2] = -crop_pos[1] + param['ty']
        shift_to_center[0][2] = output_size / 2
        shift_to_center[1][2] = output_size / 2
        t_form = np.matmul(t, shift_to_upper_left)
        t_form = np.matmul(shift_to_center, t_form)

        centers_warped = []
        corners_warped = []

        im_cv = cv.warpAffine(im, t_form[0:2, :], (output_size, output_size))

        for i, c in enumerate(centers):
            ct = np.matmul(t_form, c)
            cr = np.matmul(t_form, corners[i])
            cr[0, :] *= cr[0, :] > -1
            cr[0, :] *= cr[0, :] < output_size
            cr[1, :] *= cr[1, :] > 0
            cr[1, :] *= cr[1, :] < output_size

            x1, x2 = min(cr[0, :]), max(cr[0, :])
            y1, y2 = min(cr[1, :]), max(cr[1, :])
            if x1 == -1 or y1 == -1 or x2 >= output_size or y2 >= output_size:
                print("prob")
            w = np.abs(x1 - x2)
            h = np.abs(y1 - y2)
            m_dim = np.maximum(w, h)
            if m_dim == 0:
                continue
            corners_warped.append(np.log(m_dim))
            centers_warped.append(ct)

        mw = cv.warpAffine(m*255, t_form[0:2, :], (output_size, output_size))/255
        mw = cv.resize(mw, (self.heatmapres, self.heatmapres))
        mw = (mw > 0.5).astype(np.float32)
        hms = self.generate_heatmaps(centers_warped)
        votes, masks = self.generate_offsets(centers_warped, corners_warped)

        img = cv.cvtColor(im_cv, cv.COLOR_BGR2RGB)
        img = torch.from_numpy(img).float()
        img = torch.transpose(img, 1, 2)
        img = torch.transpose(img, 0, 1)
        img /= 255
        hms = torch.from_numpy(hms).float()
        mw = torch.from_numpy(mw)
        mw = mw.view(1, self.heatmapres, self.heatmapres)

        return img, hms, mw, votes, masks
Example 33
    QDOUBLE: 'd',
    QSTRING: 's',
    QSYMBOL: 'S',
    QCHAR: 'b',
    QMONTH: 'i',
    QDATE: 'i',
    QDATETIME: 'd',
    QMINUTE: 'i',
    QSECOND: 'i',
    QTIME: 'i',
    QTIMESPAN: 'q',
    QTIMESTAMP: 'q',
}

# null definitions
_QNULL1 = numpy.int8(-2**7)
_QNULL2 = numpy.int16(-2**15)
_QNULL4 = numpy.int32(-2**31)
_QNULL8 = numpy.int64(-2**63)
_QNAN32 = numpy.frombuffer(b'\x00\x00\xc0\x7f', dtype=numpy.float32)[0]
_QNAN64 = numpy.frombuffer(b'\x00\x00\x00\x00\x00\x00\xf8\x7f',
                           dtype=numpy.float64)[0]
_QNULL_BOOL = numpy.bool_(False)
_QNULL_SYM = numpy.string_('')
_QNULL_GUID = uuid.UUID('00000000-0000-0000-0000-000000000000')

QNULLMAP = {
    QGUID: ('0Ng', _QNULL_GUID, lambda v: v == _QNULL_GUID),
    QBOOL: ('0b', _QNULL_BOOL, lambda v: v == numpy.bool_(False)),
    QBYTE: ('0x00', _QNULL1, lambda v: v == _QNULL1),
    QSHORT: ('0Nh', _QNULL2, lambda v: v == _QNULL2),
Example 34
folder = '/home/mateus/Downloads/iCloud_Photos/'
filename = 'imagem8.jpeg'
analise = cv2.imread(os.path.join(folder, filename))

analise_data = analise.reshape(
    [analise.shape[0] * analise.shape[1], analise.shape[2]])

#%%

random_forest = RandomForestClassifier(max_depth=12, random_state=0)

random_forest.fit(data[:, 0:3], data[:, 3])

#%%

result = np.int8(random_forest.predict(analise_data))

#%%

codebook = np.linspace(0, 1, n_labels)

#codebook = shuffle(colors, random_state=0)[:n_labels]

w, h = analise.shape[0:2]
image_class = np.zeros([w, h])
label_idx = 0
for i in range(w):
    for j in range(h):
        image_class[i, j] = codebook[result[label_idx]]
        label_idx += 1
Example 35
    def calculate_sim_v_list(self, user, users_area, score_thr,
                             items_types_weights_list):
        """ To calculate user's similarity vectors for each items_types_weights """
        # Validation
        if user < 0 or user >= self._USERS_SIZE:
            raise Exception(
                'SimVCalculator: calculate_sim_v_list: user is incorrect')
        if np.any(users_area < 0) or np.any(users_area >= self._USERS_SIZE):
            raise Exception(
                'SimVCalculator: calculate_sim_v_list: users_area contains incorrect user'
            )
        if score_thr <= np.int8(0) or score_thr > np.int8(100):
            raise Exception(
                'SimVCalculator: calculate_sim_v_list: score_thr should be in (0, 100]'
            )
        if items_types_weights_list.shape[0] == 0:
            raise Exception(
                'SimVCalculator: calculate_sim_v_list: empty items_types_weights_list'
            )
        if items_types_weights_list.shape[1] != 4:
            raise Exception(
                'SimVCalculator: calculate_sim_v_list: incorrect items_types_weights_list'
            )
        if np.any(items_types_weights_list < np.float32(0)):
            raise Exception(
                'SimVCalculator: calculate_sim_v_list: items_types_weights_list contains weight < 0 (it should be >= 0)'
            )

        # Get user's corresponding items and scores
        items_1, scores_1 = self._scores_data.get_user_data(user=user)
        # Create empty NumPy Array for similarity vectors' list
        sim_v_list = np.empty(shape=(items_types_weights_list.shape[0],
                                     users_area.shape[0]),
                              dtype=np.float32)
        # For each user_2 in users' area
        for users_area_index in np.arange(users_area.shape[0]):
            user_2 = users_area[users_area_index]
            # Get user_2's corresponding items and scores
            items_2, scores_2 = self._scores_data.get_user_data(user=user_2)
            # To find items, which are scored by user and user_2
            intersect1d, comm1, comm2 = np.intersect1d(items_1,
                                                       items_2,
                                                       assume_unique=False,
                                                       return_indices=True)
            cmp_v = get_p_v(scores=scores_1[comm1],
                            score_thr=score_thr) * get_p_v(
                                scores=scores_2[comm2], score_thr=score_thr)
            # For each items_types_weights in items_types_weights_list
            for items_types_weights_index in np.arange(
                    items_types_weights_list.shape[0]):
                items_types_weights = items_types_weights_list[
                    items_types_weights_index]
                p_weighted_scalar = np.sum(
                    cmp_v *
                    get_items_weights(items=intersect1d,
                                      items_types=self._items_types,
                                      items_types_weights=items_types_weights))
                p_weighted_mod_1 = get_p_weighted_mod(
                    items=items_1,
                    items_types=self._items_types,
                    items_types_weights=items_types_weights)
                p_weighted_mod_2 = get_p_weighted_mod(
                    items=items_2,
                    items_types=self._items_types,
                    items_types_weights=items_types_weights)
                sim_v_list[items_types_weights_index,
                           users_area_index] = p_weighted_scalar / (
                               p_weighted_mod_1 * p_weighted_mod_2)
        return sim_v_list
Example 36
def main():
    args = docopt("""
    Usage:
      {} [options]

    Options:
      --unit UNIT  [default: 100]
      --out-unit OUT_UNIT  [default: 10]
      --font FONT  [default: /usr/share/fonts/truetype/takao-gothic/TakaoExGothic.ttf]
      --batch-size BATCH_SIZE  [default: 100]
      --epoch EPOCH  [default: 20]
      --out OUT  [default: result]
      --resume RESUME
      --data-size DATA_SIZE  [default: 1000]
      --test DATA_SIZE  [default: 0]
    """.format(sys.argv[0]))

    unit = int(args['--unit'])
    out_unit = int(args['--out-unit'])
    batch_size = int(args['--batch-size'])
    epoch = int(args['--epoch'])
    out = args['--out']
    resume = args['--resume']
    data_size = int(args['--data-size'])
    test_size = int(args['--test'])

    fonts = list(glob.glob(r'C:\Windows\Fonts/meiryo.ttc'))
    test_fonts = list(glob.glob(r'C:\Windows\Fonts/meiryo.ttc'))

    font_min = 8
    font_max = 12
    # create dataset
    candidates = '0123456789.'  #ABCDEFGHIJKLMNOPQRSTUVWXYZ'
    print(candidates, len(candidates))
    out_unit = len(candidates)
    gen = DataGenerator(fonts,
                        font_min,
                        font_max,
                        number=data_size,
                        candidates=candidates,
                        displacement=1,
                        noise=False)
    test_gen = DataGenerator(test_fonts,
                             font_min,
                             font_max,
                             number=data_size,
                             candidates=candidates,
                             displacement=1,
                             noise=False)

    model = MLP(font_max**2, unit, out_unit)

    if test_size:
        chainer.serializers.load_npz('./model.npz', model)

        N = test_size
        hit = 0
        for x, y in [gen.get() for _ in range(N)]:
            v = model(np.array([x], dtype=np.float32))
            _y = np.argmax(v.data)
            print(y, _y, v.data)
            if y == _y:
                hit += 1
            img = Image.fromarray(
                np.int8(np.array([x], dtype=np.float32)).reshape((32, 32)))
            # img.show()
        print('HitRate: {}%'.format(hit / N * 100))

        return
    train_iter = chainer.iterators.SerialIterator(gen,
                                                  batch_size,
                                                  repeat=True,
                                                  shuffle=False)
    test_iter = chainer.iterators.SerialIterator(test_gen,
                                                 batch_size,
                                                 repeat=False,
                                                 shuffle=False)

    train_model(model, train_iter, test_iter, batch_size, epoch, -1, out,
                resume)
    chainer.serializers.save_npz('./model.npz', model)
Example 37
from functions.network.densenet_angio_perf import _densenet

import SimpleITK as sitk

eps = 1E-5
plot_tag = 'log'
# densnet_unet_config = [1, 3, 3, 3, 1]
# ct_cube_size = 255
# db_size1 = np.int32(ct_cube_size - 2)
# db_size2 = np.int32(db_size1 / 2)
# db_size3 = np.int32(db_size2 / 2)
# crop_size1 = np.int32(((db_size3 - 2) * 2 + 1.0))
# crop_size2 = np.int32((crop_size1 - 2) * 2 + 1)
in_dim = 101
in_size0 = (0)
in_size1 = np.int8(in_dim)
in_size2 = np.int8(in_size1)  # conv stack
in_size3 = np.int8((in_size2 - 2))  # level_design1
in_size4 = np.int8(in_size3 / 2 - 2)  # downsampling1+level_design2
in_size5 = np.int8(in_size4 / 2 - 2)  # downsampling2+level_design3
# in_size6 = int(in_size5 / 2 -2)  # downsampling2+level_design3
crop_size0 = (0)
crop_size1 = np.int8(2 * in_size5 + 1)
crop_size2 = np.int8(2 * crop_size1 + 1)
final_layer = crop_size2
label_patchs_size = final_layer
patch_window = in_dim  # 89
#
# in_size0 = (0)
# in_size1 = (input_dim)
# in_size2 = (in_size1)  # conv stack
Example 38
        else:
            E -= np.log( 1 - Y[i] )
    return E

# list for class_0 or others, class_1 or others, and class_2 or others
w_list = []
learning_rate = 0.001
l2_regularization = 0.1

#
# Logistic Regression for each class
#
for i in range(3): # there are 3 classes in wine data
    class_name = 'class_' + str(i)
    w = np.random.randn( nXb.shape[1] )
    T = np.int8( y==class_name )
    z = sigmoid( nXb.dot(w) )
    cost_hist = [ cross_entropy( T, z ) ]
    for j in range(10000):
        if j % 100 == 0:
            print( cross_entropy( T, z ) )
        w -= learning_rate * ( nXb.T.dot( z - T ) - l2_regularization*w )
        z  = sigmoid( nXb.dot(w) )
        cost_hist.append( cross_entropy( T, z ) )
    w_list.append( w )

T[ y=='class_0' ] = 0
T[ y=='class_1' ] = 1
T[ y=='class_2' ] = 2
from mpl_toolkits.mplot3d import Axes3D
fig = plt.figure()
Example 39
def generate_train_test(ml_method, data_version, data_process, period):
    if ml_method == 'tsc':
        training_dataset = h5py.File('data_set/{}.h5'.format(data_version),
                                     'r')
    elif ml_method == 'mlp':
        training_dataset = h5py.File('data_set/mlp_{}.h5'.format(data_version),
                                     'r')
    else:
        raise ValueError('The ml_method is not recognized')

    x_train = training_dataset['x_{}'.format(period)][()]
    y_train = training_dataset['y_{}'.format(period)][()]
    x_test = training_dataset['x_2019'][()]
    y_test = training_dataset['y_2019'][()]

    if 'no_flux' in data_process:
        # remove the last 5 features
        x_train = x_train[..., :-5]
        x_test = x_test[..., :-5]

    if 'fusion' in data_process:
        y_train[y_train == 7] = 2
        y_train[y_train == 9] = 2
        #y_train[y_train == 8] = 0
        y_train[y_train == 8] = 7

        y_test[y_test == 7] = np.int8(2)
        y_test[y_test == 9] = np.int8(2)
        #y_test[y_test == 8] = np.int8(0)
        y_test[y_test == 8] = np.int8(7)

    if 'shuffle' in data_process:
        x_total = np.concatenate((x_train, x_test), axis=0)
        y_total = np.concatenate((y_train, y_test), axis=0)

        x_train, x_test, y_train, y_test = train_test_split(x_total,
                                                            y_total,
                                                            test_size=0.25,
                                                            random_state=1)
        x_train, x_val, y_train, y_val = train_test_split(x_train,
                                                          y_train,
                                                          test_size=0.25,
                                                          random_state=1)

        if 'oversampling' in data_process:
            x_train_foreshock = x_train[y_train == 5]
            y_train_foreshock = y_train[y_train == 5]
            print(x_train_foreshock.shape)

            x_train = np.concatenate((x_train, x_train_foreshock))
            y_train = np.concatenate((y_train, y_train_foreshock))

            x_train, x_empty, y_train, y_empty = train_test_split(
                x_train, y_train, test_size=0.001, random_state=1)

    else:
        x_val = x_test
        y_val = y_test

    if 'minmax' in data_process:
        x_train_normalize = normalize_ts(x_train, x_train)
        x_val_normalize = normalize_ts(x_val, x_train)
        x_test_normalize = normalize_ts(x_test, x_train)

        x_train = x_train_normalize
        x_val = x_val_normalize
        x_test = x_test_normalize

    elif 'znorm' in data_process:
        x_train_normalize = standardize_ts(x_train, x_train)
        x_val_normalize = standardize_ts(x_val, x_train)
        x_test_normalize = standardize_ts(x_test, x_train)

        x_train = x_train_normalize
        x_val = x_val_normalize
        x_test = x_test_normalize

    return x_train, y_train, x_test, y_test, x_val, y_val
Example 40
class A:
    def __float__(self) -> float:
        return 4.0


np.complex64(3j)
np.complex64(A())
np.complex64(C())
np.complex128(3j)
np.complex128(C())
np.complex128(None)
np.complex64("1.2")
np.complex128(b"2j")

np.int8(4)
np.int16(3.4)
np.int32(4)
np.int64(-1)
np.uint8(B())
np.uint32()
np.int32("1")
np.int64(b"2")

np.float16(A())
np.float32(16)
np.float64(3.0)
np.float64(None)
np.float32("1")
np.float16(b"2.5")