Example no. 1
 def set_proj_plane_info(self,xsize,ysize,lonra,latra):
     if lonra is None: lonra = [-180.,180.]
     if latra is None: latra = [-90.,90.]
     if (len(lonra)!=2 or len(latra)!=2 or lonra[0]<-180. or lonra[1]>180.
         or latra[0]<-90 or latra[1]>90 or lonra[0]>=lonra[1] or latra[0]>=latra[1]):
         raise TypeError("Wrong argument lonra or latra. Must be lonra=[a,b],latra=[c,d] "
                         "with a<b, c<d, a>=-180, b<=180, c>=-90, d<=+90")
     lonra = self._flip*np.float64(lonra)[::self._flip]
     latra = np.float64(latra)
     xsize = np.long(xsize)
     if ysize is None:
         ratio = (latra[1]-latra[0])/(lonra[1]-lonra[0])
         ysize = np.long(round(ratio*xsize))
     else:
         ysize = np.long(ysize)
         ratio = float(ysize)/float(xsize)
     if max(xsize,ysize) > 2000:
         if max(xsize,ysize) == xsize:
             xsize = 2000
             ysize = np.long(round(ratio*xsize))
         else:
             ysize = 2000
             xsize = np.long(round(ysize/ratio))
     super(CartesianProj,self).set_proj_plane_info(xsize=xsize, lonra=lonra, latra=latra, 
                                                     ysize=ysize, ratio=ratio)
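Note: a quick standalone check of the 2000-pixel clamp above, using plain int in place of np.long (values are illustrative):

lonra, latra = [-180.0, 180.0], [-90.0, 90.0]
xsize = 4000
ratio = (latra[1] - latra[0]) / (lonra[1] - lonra[0])   # 180 / 360 = 0.5
ysize = int(round(ratio * xsize))                       # 2000
if max(xsize, ysize) > 2000:
    if max(xsize, ysize) == xsize:
        xsize = 2000
        ysize = int(round(ratio * xsize))               # 1000
    else:
        ysize = 2000
        xsize = int(round(ysize / ratio))
print(xsize, ysize)  # -> 2000 1000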
Example no. 2
 def set_proj_plane_info(self, xsize, ysize, lonra, latra):
     if lonra is None:
         lonra = [-180., 180.]
     else:
         # shift lonra[1] into the range [lonra[0], lonra[0]+360]
         lonra_span = np.mod(lonra[1] - lonra[0], 360)
         if lonra_span == 0:
             lonra_span = 360
         lonra[1] = lonra[0] + lonra_span
     if latra is None:
         latra = [-90., 90.]
     if (
         len(lonra) != 2
         or len(latra) != 2
         or latra[0] < -90
         or latra[1] > 90
         or latra[0] >= latra[1]
     ):
         raise TypeError(
             "Wrong argument lonra or latra. Must be lonra=[a,b],latra=[c,d] "
             "c<d, c>=-90, d<=+90"
         )
     lonra = self._flip * np.float64(lonra)[:: self._flip]
     latra = np.float64(latra)
     xsize = np.long(xsize)
     if ysize is None:
         ratio = (latra[1] - latra[0]) / (lonra[1] - lonra[0])
         ysize = np.long(round(ratio * xsize))
     else:
         ysize = np.long(ysize)
         ratio = float(ysize) / float(xsize)
     super(CartesianProj, self).set_proj_plane_info(
         xsize=xsize, lonra=lonra, latra=latra, ysize=ysize, ratio=ratio
     )
Example no. 3
def ExpandDims(inputs, axis=-1, **kwargs):
    """ExpandDims interface of NDArray.

    Parameters
    ----------
    inputs : Tensor
        The input tensor.
    axis : int
        The insert position of new dimension. Default is ``-1`` (Push Back).

    Returns
    -------
    Tensor
        The output tensor.

    Examples
    --------
    >>> a = Tensor(shape=[1, 2, 3, 4]).Variable()
    >>> print(ExpandDims(a).shape)
    >>> print(ExpandDims(a, axis=2).shape)

    """
    CheckInputs(inputs, 1)
    arguments = ParseArguments(locals())

    output = Tensor.CreateOperator(nout=1, op_type='ExpandDims', **arguments)

    if inputs.shape is not None:
        output.shape = inputs.shape[:]
        if axis == -1 or axis >= len(inputs.shape):
            output.shape.append(np.long(1))
        else: output.shape.insert(axis, np.long(1))

    return output
Example no. 4
 def test_intp(self,level=rlevel):
     """Ticket #99"""
     i_width = np.int_(0).nbytes*2 - 1
     long('0x' + 'f'*i_width,16)
     #self.failUnlessRaises(OverflowError,np.intp,'0x' + 'f'*(i_width+1),16)
     #self.failUnlessRaises(ValueError,np.intp,'0x1',32)
     assert_equal(255,np.long('0xFF',16))
     assert_equal(1024,np.long(1024))
Example no. 5
    def _create_objects(self, diaobject_data):
        """
        Create a dict of diaObjects formatted according to the
        appropriate avro schema

        Parameters
        ----------
        diaobject_data is a numpy recarray containing all of the
        data needed for the diaObject

        Returns
        -------
        A dict keyed on uniqueId (the CatSim unique identifier for each
        astrophysical source).  Each value is a properly formatted
        diaObject corresponding to its key.
        """
        diaobject_dict = {}
        for i_object in range(len(diaobject_data)):
            diaobject = diaobject_data[i_object]

            avro_diaobject = {}
            avro_diaobject['flags'] = np.long(self._rng.randint(10, 1000))
            avro_diaobject['diaObjectId'] = np.long(diaobject['uniqueId'])
            avro_diaobject['ra'] = diaobject['ra']
            avro_diaobject['decl'] = diaobject['dec']

            ra_dec_cov = {}
            ra_dec_cov['raSigma'] = self._rng.random_sample()*0.001
            ra_dec_cov['declSigma'] = self._rng.random_sample()*0.001
            ra_dec_cov['ra_decl_Cov'] = self._rng.random_sample()*0.001

            avro_diaobject['ra_decl_Cov'] = ra_dec_cov
            avro_diaobject['radecTai'] = diaobject['TAI']

            avro_diaobject['pmRa'] = diaobject['pmRA']
            avro_diaobject['pmDecl'] = diaobject['pmDec']
            avro_diaobject['parallax'] = diaobject['parallax']
            pm_parallax_cov = {}

            for field in ('pmRaSigma', 'pmDeclSigma', 'parallaxSigma', 'pmRa_pmDecl_Cov',
                          'pmRa_parallax_Cov', 'pmDecl_parallax_Cov'):
                pm_parallax_cov[field] = 0.0

            avro_diaobject['pm_parallax_Cov'] = pm_parallax_cov

            avro_diaobject['pmParallaxLnL'] = self._rng.random_sample()
            avro_diaobject['pmParallaxChi2'] = self._rng.random_sample()
            avro_diaobject['pmParallaxNdata'] = 0

            diaobject_dict[diaobject['uniqueId']] = avro_diaobject
        return diaobject_dict
Example no. 6
 def run(self):
     # Run until turned off
     while 1:
         if self.on():
             # Read bytes in chunks of meaningful size
             if self.ser.inWaiting() > 3:
                 bytes = bytearray(3)
                 self.ser.readinto(bytes)
                 height = (bytes[0] << 16) + (bytes[1] << 8) + bytes[2]
                     
                 # convert to signed long
                 if (height >= 0x800000): # = 2^23
                     height = height - 0x1000000 # = 2^24 
                 
                 height = np.long(height)
                 sampleLock.acquire()
                 samples[self.pos, self.channel] = height
                 sampleLock.release()
                 
                 # Update array indices
                 self.channel += 1
                 if self.channel == channels:
                     self.channel = 0
                     self.pos += 1
                     if self.pos == BUF_LEN:
                         self.pos = 0
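Note: the "convert to signed long" step in this and the later serial-reader examples is a standard 24-bit two's-complement decode; a minimal standalone sketch (the helper name is made up):

def decode_signed_24bit(b0, b1, b2):
    """Reassemble a big-endian 24-bit sample and sign-extend it (two's complement)."""
    value = (b0 << 16) + (b1 << 8) + b2
    if value >= 0x800000:       # 2**23: sign bit set
        value -= 0x1000000      # subtract 2**24
    return value                # the thread above then wraps this in np.long

print(decode_signed_24bit(0xFF, 0xFF, 0xFF))  # -1
print(decode_signed_24bit(0x7F, 0xFF, 0xFF))  # 8388607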
Example no. 7
    def detectionOutput_fprop(self, conf_view, loc_view, detection, prior_boxes,
                              proposals, nms_top_k, image_top_k, score_threshold, nms_threshold):
        conf = c_longlong(conf_view._tensor.ctypes.data)
        loc = c_longlong(loc_view._tensor.ctypes.data)
        detection = c_longlong(detection._tensor.ctypes.data)
        prior_boxes = c_longlong(prior_boxes._tensor.ctypes.data)
        L, num_class, bs = conf_view.shape
        proposals = c_longlong(proposals._tensor.ctypes.data)
        result = np.zeros((bs, image_top_k, 6), dtype=np.float32)
        result_ptr = c_longlong(result.ctypes.data)
        result_len = np.zeros(bs, dtype=np.int64)
        result_len_ptr = c_longlong(result_len.ctypes.data)

        self.mklEngine.detection_fprop(conf, loc, result_ptr, prior_boxes,
                                       result_len_ptr, c_longlong(L), c_longlong(num_class),
                                       c_longlong(bs), c_longlong(nms_top_k),
                                       c_longlong(image_top_k),
                                       c_float(score_threshold),
                                       c_float(nms_threshold))
        batch_all_detections = [None] * self.bsz
        for i in range(bs):
            leng = np.long(result_len[i])
            res_batch = np.zeros((leng, 6))
            res_batch[:] = result[i, 0:leng, :]
            batch_all_detections[i] = res_batch
        return batch_all_detections
Example no. 8
def produce_regions(masks, visualize=False):
    """given the proposal segmentation masks for an image as a [width, height, proposal_num]
    matrix outputs all regions in the image"""
    width, height, n_prop = masks.shape
    t = ('u8,'*int(np.math.ceil(float(n_prop) / 64)))[:-1]
    bv = np.zeros((width, height), dtype=np.dtype(t))
        
    for i in range(n_prop):
        m = masks[:, :, i]
        a = 'f%d' % (i // 64)
        h = m * np.long(2 ** (i % 64))
        if n_prop >= 64:
            bv[a] += h
        else:
            bv += h


    un = np.unique(bv)
    regions = np.zeros((width, height), dtype="uint16")
    for i, e in enumerate(un):
        regions[bv == e] = i
    if visualize:
        plt.figure()
        plt.imshow(regions)
        plt.set_cmap('prism')
        plt.colorbar()
    return regions
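Note: the bit-vector trick above gives every pixel one bit per proposal mask, so pixels with identical bit patterns end up in the same region. A small self-contained sketch of the same idea with plain 64-bit integers (no field splitting), on made-up masks:

import numpy as np

# Three tiny 4x4 binary proposal masks (made-up data).
masks = np.zeros((4, 4, 3), dtype=np.uint8)
masks[:2, :, 0] = 1      # mask 0: top half
masks[:, :2, 1] = 1      # mask 1: left half
masks[1:3, 1:3, 2] = 1   # mask 2: centre block

# Pack membership into one integer per pixel: bit i <-> mask i.
codes = np.zeros((4, 4), dtype=np.int64)
for i in range(masks.shape[2]):
    codes += masks[:, :, i].astype(np.int64) << i

# Pixels that share a code form one region (same relabelling as the np.unique loop above).
regions = np.searchsorted(np.unique(codes), codes)
print(regions)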
Example no. 9
    def _get_window_sub(self, window='None'):
        """Returns the window time series and amplitude normalization term

        :param window: window string
        :return: w, amplitude_norm
        """
        window = window.split(':')

        if window[0] in ['Hamming', 'Hann']:
            w = np.hanning(self.samples)
        elif window[0] == 'Force':
            w = np.zeros(self.samples)
            force_window = float(window[1])
            to1 = np.long(force_window * self.samples)
            w[:to1] = 1.
        elif window[0] == 'Exponential':
            w = np.arange(self.samples)
            exponential_window = float(window[1])
            w = np.exp(np.log(exponential_window) * w / (self.samples - 1))
        else:  # window = 'None'
            w = np.ones(self.samples)


        if window[0] == 'Force':
            amplitude_norm = 2 / len(w)
        else:
            amplitude_norm = 2 / np.sum(w)

        return w, amplitude_norm
Example no. 10
def order_paths_by_preference(sorts_list,id_list,paths_list,file_type_list,data_node_list,check_dimensions,
                                semaphores=dict(),time_var='time',session=None,remote_netcdf_kwargs=dict()):
    #FIND ORDERING:
    paths_desc = []
    for id in sorts_list:
        paths_desc.append((id,np.int64))
    for id in id_list:
        paths_desc.append((id,'a255'))
    paths_ordering = np.empty((len(paths_list),), dtype=paths_desc)

    if check_dimensions:
        dimension_type_list = ['unqueryable',]

    for file_id, file in enumerate(paths_list):
        paths_ordering['path'][file_id] = file['path'].split('|')[0]
        #Convert path name to 'unique' integer using hash.
        #The integer will not really be unique but collisions
        #should be extremely rare for similar strings with only small variations.
        paths_ordering['path_id'][file_id] = hash(
                                                paths_ordering['path'][file_id]
                                                    )
        for unique_file_id in unique_file_id_list:
            paths_ordering[unique_file_id][file_id] = file['path'].split('|')[unique_file_id_list.index(unique_file_id)+1]
        paths_ordering['version'][file_id] = np.long(file['version'][1:])

        paths_ordering['file_type'][file_id] = file['file_type']
        paths_ordering['data_node'][file_id] = remote_netcdf.get_data_node(file['path'],paths_ordering['file_type'][file_id])
        
        if check_dimensions:
            #Dimensions types. Find the different dimensions types:
            if not paths_ordering['file_type'][file_id] in queryable_file_types:
                paths_ordering['dimension_type_id'][file_id] = dimension_type_list.index('unqueryable')
            else:
                remote_data = remote_netcdf.remote_netCDF(paths_ordering['path'][file_id],
                                                        paths_ordering['file_type'][file_id],
                                                        semaphores=semaphores,
                                                        session=session,
                                                        **remote_netcdf_kwargs)
                dimension_type = remote_data.safe_handling(netcdf_utils.find_dimension_type,time_var=time_var)
                if not dimension_type in dimension_type_list: dimension_type_list.append(dimension_type)
                paths_ordering['dimension_type_id'][file_id] = dimension_type_list.index(dimension_type)

    if check_dimensions:
        #Sort by increasing number. Later when we sort, we should get a uniform type:
        dimension_type_list_number = [ sum(paths_ordering['dimension_type_id']==dimension_type_id)
                                        for dimension_type_id,dimension_type in enumerate(dimension_type_list)]
        sort_by_number = np.argsort(dimension_type_list_number)[::-1]
        paths_ordering['dimension_type_id'] = sort_by_number[paths_ordering['dimension_type_id']]

    #Sort paths from most desired to least desired:
    #First order desiredness for least to most:
    data_node_order = copy.copy(data_node_list)[::-1]#list(np.unique(paths_ordering['data_node']))
    file_type_order = copy.copy(file_type_list)[::-1]#list(np.unique(paths_ordering['file_type']))
    for file_id, file in enumerate(paths_list):
        paths_ordering['data_node_id'][file_id] = data_node_order.index(paths_ordering['data_node'][file_id])
        paths_ordering['file_type_id'][file_id] = file_type_order.index(paths_ordering['file_type'][file_id])
    #'version' is implicitly from least to most

    #sort and reverse order to get from most to least:
    return np.sort(paths_ordering,order = sorts_list)[::-1]
Example no. 11
    def run(self):
        self.pos = 0
        # This t is a global variable shared with display
        while 1:
            if self.on():
                dataLock.acquire()
                packages = len(data)
                dataLock.release()
                if packages > channels:
                    for i in range(channels):
                        # Don't read and write data simultaneously; acquire lock
                        dataLock.acquire()
                        bytes = data.popleft()
                        dataLock.release()
                        # construct y value
                        height = (bytes[0] << 16) + (bytes[1] << 8) + bytes[2]
                        
                        # convert to signed long
                        if (height >= 0x800000): # = 2^23
                            height = height - 0x1000000 # = 2^24 
                        
                        height = np.long(height)
                        sampleLock.acquire()
                        samples[self.pos,i] = height
                        sampleLock.release()

                    self.pos += 1
                    if (self.pos == BUF_LEN):
                        self.pos = 0
Example no. 12
    def run(self):
        self.pos = 0
        samples = np.ctypeslib.as_array(self.raw_samples.get_obj())
        samples = samples.reshape(self.BUF_LEN, self.channels)
        # This t is a global variable shared with display
        while self.on:
            if not self.cmds.empty():
                cmd = self.cmds.get()
                if cmd == '+':
                    self.ready()
                elif cmd == '-':
                    self.reset()
                elif cmd == "Exit!":
                    self.on = False
                else:
                    print(cmd)
            if not self.paused:
                for i in range(self.channels):
                    bytes = self.data.recv()
                    # construct y value
                    height = (bytes[0] << 16) + (bytes[1] << 8) + bytes[2]
                    
                    # convert to signed long
                    if (height >= 0x800000): # = 2^23
                        height = height - 0x1000000 # = 2^24 
                    
                    height = np.long(height)
                    samples[self.pos, i] = height

                self.pos += 1
                if (self.pos == self.BUF_LEN):
                    self.pos = 0
Example no. 13
def Stack(inputs, axis=0, **kwargs):
    """Stack the inputs along the given axis.

    All dimensions of inputs should be same.

    The ``axis`` can be negative.

    Parameters
    ----------
    inputs : list of Tensor
        The inputs.
    axis : int
        The axis to stack.

    Returns
    -------
    Tensor
        The output tensor.

    """
    CheckInputs(inputs, 1, INT_MAX)
    arguments = ParseArguments(locals())
    arguments['num_input'] = len(inputs)

    output = Tensor.CreateOperator(nout=1, op_type='Stack', **arguments)

    if all(input.shape is not None for input in inputs):
        while axis < 0: axis += (len(inputs[0].shape) + 1)
        output.shape = inputs[0].shape
        output.shape.insert(axis, np.long(len(inputs)))

    return output
Example no. 14
    def xml2field(self, elem, name=None):
        typElem = elem.find('datatype')
        dType = typElem.text
        dDim = typElem.attrib['length']
        dDim = np.asarray([np.long(d) for d in dDim.split()])[::-1]
        dLen = np.prod(dDim)

        if name is None:
            name = elem.attrib['name']

        valElem = elem.find('value')
        if dType == 'pointer':
            self.xml2field(valElem.find('parameter'), elem.attrib['name'])
            return

        if dType == 'struct':
            o = Xml2Py(None, valElem[0])
            setattr(self, name, o)
            return

        val = elem.find('value').text
        if dLen > 1:
            val = val.strip('[]').split(',')

        conv = {'int': np.int, 'long': np.long, 'float': np.float, 'double': np.double, 'string': lambda s: s}
        try:
            if (dLen > 1):
                val = np.asarray([conv[dType](v) for v in val]).reshape(dDim)
            else:
                val = conv[dType](val)
        except KeyError:
            print('WARNING: Unsupported data type {} in field {}! Ignoring...'.format(dType, name))
            return

        setattr(self, name, val)
Example no. 15
    def order_paths_by_preference(self):
        #FIND ORDERING:
        paths_desc=[]
        for id in self.sorts_list:
            paths_desc.append((id,np.int32))
        for id in self.id_list:
            paths_desc.append((id,'a255'))
        paths_ordering=np.empty((len(self.paths_list),), dtype=paths_desc)
        for file_id, file in enumerate(self.paths_list):
            paths_ordering['path'][file_id]=file['path'].split('|')[0]
            #Convert path name to 'unique' integer using hash.
            #The integer will not really be unique but collisions
            #should be extremely rare for similar strings with only small variations.
            paths_ordering['path_id'][file_id]=hash(
                                                    paths_ordering['path'][file_id]
                                                        )
            paths_ordering['checksum'][file_id]=file['path'].split('|')[1]
            paths_ordering['version'][file_id]=np.long(file['version'][1:])

            paths_ordering['file_type'][file_id]=file['file_type']
            paths_ordering['data_node'][file_id]=retrieval_utils.get_data_node(file['path'],paths_ordering['file_type'][file_id])

        #Sort paths from most desired to least desired:
        #First order desiredness for least to most:
        data_node_order=copy.copy(self.data_node_list)[::-1]#list(np.unique(paths_ordering['data_node']))
        file_type_order=copy.copy(self.file_type_list)[::-1]#list(np.unique(paths_ordering['file_type']))
        for file_id, file in enumerate(self.paths_list):
            paths_ordering['data_node_id'][file_id]=data_node_order.index(paths_ordering['data_node'][file_id])
            paths_ordering['file_type_id'][file_id]=file_type_order.index(paths_ordering['file_type'][file_id])
        #'version' is implicitly from least to most

        #sort and reverse order to get from most to least:
        return np.sort(paths_ordering,order=self.sorts_list)[::-1]
Example no. 16
def OneHot(inputs, depth, on_value=1, off_value=0, **kwargs):
    """Generate the one-hot representation of inputs.

    Parameters
    ----------
    inputs : Tensor
        The input tensor.
    depth : int
        The depth of one-hot representation.
    on_value : int
        The value when ``indices[j] = i``.
    off_value : int
        The value when ``indices[j] != i``.

    Returns
    -------
    Tensor
        The output tensor.

    """
    CheckInputs(inputs, 1)
    arguments = ParseArguments(locals())

    output = Tensor.CreateOperator(nout=1, op_type='OneHot', **arguments)

    if inputs.shape is not None:
        output.shape = inputs.shape[:]
        output.shape.append(np.long(depth))

    return output
Example no. 17
File: runner.py Project: kklmn/xrt
def start_jobs():
    """
    Restores the plots if requested and if the persistent files exist and
    starts the qt timer of the 1st plot.
    """
    for plot in _plots:
        if plot.persistentName:
            plot.restore_plots()
        plot.fig.canvas.set_window_title(plot.title)

    runCardVals.iteration = np.long(0)
    noTimer = len(_plots) == 0 or\
        (plt.get_backend().lower() in (x.lower() for x in
                                       mpl.rcsetup.non_interactive_bk))
    if noTimer:
        print("The job is running... ")
        while True:
            msg = '{0} of {1}'.format(
                runCardVals.iteration+1, runCardVals.repeats)
            if os.name == 'posix':
                sys.stdout.write("\r\x1b[K " + msg)
            else:
                sys.stdout.write("\r  ")
                print(msg+' ')
            sys.stdout.flush()
            res = dispatch_jobs()
            if res:
                return
    else:
        plot = _plots[0]
        plot.areProcessAlreadyRunning = False
        plot.timer = plot.fig.canvas.new_timer()
        plot.timer.add_callback(plot.timer_callback)
        plot.timer.start()
Example no. 18
def ref_mjd(fits_file, hdu=1):
    """Read MJDREFF+ MJDREFI or, if failed, MJDREF, from the FITS header.

    Parameters
    ----------
    fits_file : str

    Returns
    -------
    mjdref : numpy.longdouble
        the reference MJD

    Other Parameters
    ----------------
    hdu : int
    """
    import collections

    if isinstance(fits_file, collections.Iterable) and\
            not is_string(fits_file):  # pragma: no cover
        fits_file = fits_file[0]
        logging.info("opening %s" % fits_file)

    try:
        ref_mjd_int = np.long(read_header_key(fits_file, 'MJDREFI'))
        ref_mjd_float = np.longdouble(read_header_key(fits_file, 'MJDREFF'))
        ref_mjd_val = ref_mjd_int + ref_mjd_float
    except:  # pragma: no cover
        ref_mjd_val = np.longdouble(read_header_key(fits_file, 'MJDREF'))
    return ref_mjd_val
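Note: reading MJDREFI and MJDREFF separately and summing in np.longdouble keeps more of the fractional day than a single 64-bit float would; a quick check of the available time resolution (the epoch value is illustrative, and np.longdouble may equal float64 on some platforms):

import numpy as np

mjd = 55197.0  # an illustrative reference epoch
# Smallest representable time step (in seconds) near this epoch for each dtype:
print(np.spacing(np.float64(mjd)) * 86400)     # ~6e-7 s with 64-bit floats
print(np.spacing(np.longdouble(mjd)) * 86400)  # smaller where longdouble is wider than 64 bits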
Example no. 19
    def dimshuffle(self, *args, **kwargs):
        """Shuffle the dimensions. [**Theano Style**]

        Parameters
        ----------
        dimensions : list
            The desired dimensions.

        Returns
        -------
        Tensor
            The dimshuffled output.

        """
        dimensions = list(args)
        perms = []
        for dim in dimensions:
            if dim != 'x':
                if not isinstance(dim, int):
                    raise ValueError('The type of dimension should be int.')
                perms.append(dim)

        # transpose
        output = Tensor.CreateOperator(inputs=self, nout=1,
                                       op_type='Transpose', perms=perms, **kwargs)
        if self.shape is not None:
            if len(self.shape) != len(perms):
                raise ValueError('The ndim of inputs is {}, but perms provide {}'. \
                                 format(len(self.shape), len(perms)))
            output.shape = self.shape[:]
            for i, axis in enumerate(perms):
                output.shape[i] = self.shape[axis]

        # expand dims
        for i in xrange(len(dimensions) - len(perms)):
            flag = False
            input_shape = output.shape
            axis = -1
            for idx in xrange(len(dimensions)):
                if idx >= len(perms): continue
                cur_dim = perms[idx]; exp_dim = dimensions[idx]
                if cur_dim != exp_dim:
                    axis = idx
                    output = Tensor.CreateOperator(inputs=output, nout=1,
                                    op_type='ExpandDims', axis=axis)
                    perms.insert(axis, 'x')
                    flag = True
                    break
            if not flag:
                axis = len(perms)
                output = Tensor.CreateOperator(inputs=output, nout=1,
                                    op_type='ExpandDims', axis=len(perms))
                perms.append('x')

            if self.shape is not None:
                output.shape = input_shape[:]
                output.shape.insert(axis, np.long(1))

        return output
Example no. 20
def const_rebin(x, y, factor, yerr=None, normalize=True):
    """Rebin any pair of variables.

    Might be time and counts, or freq and pds.
    Also possible to rebin the error on y.

    Parameters
    ----------
    x : array-like
    y : array-like
    factor : int
        Rebin factor
    yerr : array-like, optional
        Uncertainties of y values (it is assumed that the y are normally
        distributed)

    Returns
    -------
    new_x : array-like
        The rebinned x array
    new_y : array-like
        The rebinned y array
    new_err : array-like
        The rebinned yerr array

    Other Parameters
    ----------------
    normalize : bool
    """
    arr_dtype = y.dtype
    if factor <= 1:
        res = [x, y]
        if yerr is not None:
            res.append(yerr)
        else:
            res.append(np.zeros(len(y), dtype=arr_dtype))
        return res
    factor = np.long(factor)
    nbin = len(y)

    new_nbins = np.int(nbin / factor)

    y_resh = np.reshape(y[:new_nbins * factor], (new_nbins, factor))
    x_resh = np.reshape(x[:new_nbins * factor], (new_nbins, factor))

    new_y = np.sum(y_resh, axis=1)
    new_x = np.sum(x_resh, axis=1) / factor

    if yerr is not None:
        yerr_resh = np.reshape(yerr[:new_nbins * factor], (new_nbins, factor))
        new_yerr = np.sum(yerr_resh ** 2, axis=1)
    else:
        new_yerr = np.zeros(len(new_x), dtype=arr_dtype)

    if normalize:
        return new_x, new_y / factor, np.sqrt(new_yerr) / factor
    else:
        return new_x, new_y, np.sqrt(new_yerr)
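Note: a quick usage check of const_rebin on synthetic arrays (this assumes the function above is importable and a NumPy version that still provides np.long):

import numpy as np

x = np.arange(8, dtype=np.float64)   # bin centres 0..7
y = np.ones(8, dtype=np.float64)     # one count per bin

new_x, new_y, new_yerr = const_rebin(x, y, 4, normalize=False)
print(new_x)     # [1.5 5.5] -> mean time of each group of 4 bins
print(new_y)     # [4. 4.]   -> summed counts
print(new_yerr)  # [0. 0.]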
Example no. 21
def run(data, p, m, n, b, c):
    """
    Starts the main LSH process.
    Parameters
    ----------
    data : RDD[Vector]
        RDD of data points. Acceptable vector types are numpy.ndarray
        or PySpark SparseVector.
    p : integer, larger than the largest value in data.
    m : integer, number of bins for hashing.
    n : integer, number of rows to split the signatures into.
    b : integer, number of bands.
    c : integer, minimum allowable cluster size.
    """
    #data.zipWithIndex()
    zdata = data
    seeds = np.vstack([np.random.random_integers(p, size = n), np.random.random_integers(0, p, size = n)]).T
    hashes = [functools.partial(minhash, a = s[0], b = s[1], p = p, m = m) for s in seeds]

    # Start by generating the signatures for each data point.
    # Output format is:
    # <(vector idx, band idx), minhash>
    sigs = zdata.flatMap(lambda x: [[(x[1], i % b), hashes[i](x[0])] for i, h in enumerate(hashes)]).cache()

    # Put together the vector minhashes in the same band.
    # Output format is:
    # <(band idx, minhash list), vector idx>
    bands = sigs.groupByKey() \
        .map(lambda x: [(x[0][1], hash(frozenset(x[1].data))), x[0][0]]) \
        .groupByKey().cache()

    # Should we filter?
    if c > 0:
        bands = bands.filter(lambda x: len(x[1]) > c).cache()

    # Remaps each element to a cluster / bucket index.
    # Output format is:
    # <vector idx, bucket idx>
    vector_bucket = bands.map(lambda x: frozenset(sorted(x[1]))).distinct() \
        .zipWithIndex().flatMap(lambda x: map(lambda y: (np.long(y), x[1]), x[0])) \
        .cache()

    # Reverses indices, to key the vectors by their buckets.
    # Output format is:
    # <bucket idx, vector idx>
    bucket_vector = vector_bucket.map(lambda x: (x[1], x[0])).cache()

    # Joins indices up with original data to provide clustering results.
    # Output format is:
    # <bucket idx, list of vectors>
    buckets = zdata.map(lambda x: (x[1], x[0])).join(vector_bucket) \
        .map(lambda x: (x[1][1], x[1][0])).groupByKey().cache()

    # Computes Jaccard similarity of each bucket.
    scores = buckets.map(distance_metric).cache()

    # Return a wrapper object around the metrics of interest.
    return PyLSHModel(sigs, bands, vector_bucket, bucket_vector, buckets, scores)
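Note: the minhash helper partially applied above is not shown in this snippet. A plausible sketch following the usual (a*x + b) mod p mod m construction over a vector's non-zero indices (the name, signature and behaviour are assumptions, not the project's actual code):

import numpy as np

def minhash(v, a, b, p, m):
    # Hypothetical helper: universal hash of each non-zero index, keep the minimum.
    idx = np.nonzero(np.asarray(v))[0]
    if idx.size == 0:
        return m  # sentinel for an empty vector (assumption)
    return int(np.min((a * idx + b) % p % m))

print(minhash([0, 1, 0, 1, 1], a=3, b=7, p=101, m=10))  # -> 0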
Example no. 22
def get_coincidences(fp, st_start_ch0, st_start_ch1, st_len, pulse_sep):

    pqf  = h5py.File(fp,  'r')
    sp = pqf['/PQ_special-1'].value      
    ch = pqf['/PQ_channel-1'].value
    sn = pqf['/PQ_sync_number-1'].value
    st = pqf['/PQ_sync_time-1'].value
    tt = pqf['/PQ_time-1'].value 
    pqf.close()

    fltr_w1_ch0 = (((st_start_ch0 <= st)  & (st < (st_start_ch0 + st_len))) & (ch == 0) & (sp == 0))
    fltr_w1_ch1 = (((st_start_ch1 <= st)  & (st < (st_start_ch1 + st_len))) & (ch == 1) & (sp == 0)) 
    fltr_w2_ch0 = (((st_start_ch0 +  pulse_sep <= st) & (st < (st_start_ch0 + pulse_sep + st_len))) & (ch == 0) & (sp == 0)) 
    fltr_w2_ch1 = (((st_start_ch1 +  pulse_sep <= st) & (st < (st_start_ch1 + pulse_sep + st_len))) & (ch == 1) & (sp == 0))
    fltr0 = fltr_w1_ch0 | fltr_w2_ch0
    fltr1 = fltr_w1_ch1 | fltr_w2_ch1

    st0 = st[fltr0]
    t0  = tt[fltr0]
    sn0 = sn[fltr0]
    
    st1 = st[fltr1]
    t1 =  tt[fltr1]
    sn1 = sn[fltr1]

    samesync0 = np.in1d(sn0, sn1)
    samesync1 = np.in1d(sn1, sn0)
    
    c_st0 = st0[samesync0]
    c_st1 = st1[samesync1]
    c_t0 = t0[samesync0]
    c_sn0 = sn0[samesync0]
    c_t1 = t1[samesync1]
    c_sn1 = sn1[samesync1]
    
    coincidences = np.empty((0,4), dtype = np.int64)
    for _sn0, _t0, _st0 in zip(c_sn0, c_t0, c_st0):
        _c = c_sn1==_sn0
        
        for _t1, _st1 in zip(c_t1[_c], c_st1[_c]):
            dt = np.long(_t0) - np.long(_t1)
            coincidences = np.vstack((coincidences, np.array([dt, _st0, _st1, _sn0], dtype = _tpqi_dtype)))

    return coincidences
Example no. 23
    def get_ts(self, idx):
        """
        Get a time series from the binary file.

        Parameters
        ----------
        idx : tuple of ints, or a list of a tuple of ints
            idx can be (layer, row, column) or it can be a list in the form
            [(layer, row, column), (layer, row, column), ...].  The layer,
            row, and column values must be zero based.

        Returns
        ----------
        out : numpy array
            Array has size (ntimes, ncells + 1).  The first column in the
            data array will contain time (totim).

        See Also
        --------

        Notes
        -----

        The layer, row, and column values must be zero-based, and must be
        within the following ranges: 0 <= k < nlay; 0 <= i < nrow; 0 <= j < ncol

        Examples
        --------

        """
        kijlist = self._build_kijlist(idx)
        nstation = self._get_nstation(idx, kijlist)

        # Initialize result array and put times in first column
        result = self._init_result(nstation)

        istat = 1
        for k, i, j in kijlist:
            recordlist = []
            ioffset = (i * self.ncol + j) * self.realtype(1).nbytes
            for irec, header in enumerate(self.recordarray):
                ilay = header['ilay'] - 1  # change ilay from header to zero-based
                if ilay != k:
                    continue
                ipos = self.iposarray[irec]

                # Calculate offset necessary to reach intended cell
                self.file.seek(ipos + np.long(ioffset), 0)

                # Find the time index and then put value into result in the
                # correct location.
                itim = np.where(result[:, 0] == header['totim'])[0]
                result[itim, istat] = binaryread(self.file, self.realtype)
            istat += 1
        return result
Example no. 24
    def predict_entry(self, entry):
        inner = torch.ones(self.rank)
        for dim, col in enumerate(entry):
            col = Variable(torch.LongTensor([np.long(col)]))
            inner *= self.factors[dim](col)[0]

        if self.datatype == "real":
            return float(torch.sum(inner))
        elif self.datatype == "binary":
            return 1 if torch.sum(inner) > 0 else 0
        elif self.datatype == "count":
            return float(torch.sum(inner))
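Note: predict_entry above is the standard CP/PARAFAC prediction (one factor row per tensor mode, elementwise product, sum over the rank). The same computation in plain NumPy, with made-up factor matrices:

import numpy as np

rank = 4
factors = [np.random.rand(5, rank),   # mode 0: 5 indices
           np.random.rand(6, rank),   # mode 1: 6 indices
           np.random.rand(7, rank)]   # mode 2: 7 indices

def predict_entry(entry):
    inner = np.ones(rank)
    for dim, col in enumerate(entry):
        inner *= factors[dim][int(col)]  # one embedding row per tensor mode
    return float(inner.sum())            # sum over the rank

print(predict_entry((2, 0, 5)))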
Example no. 25
    def run(self):
        self.pos = 0
        self.channel = 0
        
        samples = np.ctypeslib.as_array(self.raw_samples.get_obj())
        samples = samples.reshape(self.BUF_LEN, self.channels)
        
        self.ser = serial.Serial(6, baudrate=57600, timeout=1)
        self.ser.flushInput()
        self.reset()
        self.ready()
        ctrl_bytes = bytearray(3)
        bytes = bytearray(3 * self.channels)
        # Run until turned off
        while self.on:
            # Read and execute control commands from main process
            if not self.cmds.empty():
                cmd = self.cmds.get()
                if cmd == '+':
                    self.ready()
                elif cmd == '-':
                    self.reset()
                elif cmd == "Exit!":
                    self.on = False
                else:
                    print(cmd)
            # While on, continuously empty serial port
            if not self.paused:
                # Read bytes in chunks of meaningful size
                if self.ser.inWaiting() > 27:
                    self.ser.readinto(ctrl_bytes)
                    
                    if (ctrl_bytes[0] == 192 and ctrl_bytes[1] == 0 and ctrl_bytes[2] == 0):
                        self.ser.readinto(bytes)
                        for channel in range(self.channels):
                            offset = 3 * channel
                            height = (bytes[offset] << 16) + (bytes[offset + 1] << 8) + bytes[offset + 2]
                                
                            # convert to signed long
                            if (height >= 0x800000): # = 2^23
                                height = height - 0x1000000 # = 2^24
                            
                            height = np.long(height)
                            samples[self.pos, channel] = height

                        self.pos += 1
                        self.sample_count.value += 1
                        if self.pos == self.BUF_LEN:
                            self.pos = 0
                            
                    else:
                        shift = bytearray(1)
                        self.ser.readinto(shift)
Example no. 26
def communicate_smart_meter(conn):
    global sm_conns, NUM_TIME_INSTANCES, ppn,recv_shares_count,e,s,times

    lock.acquire()
    s = time.time()
    string = ""
    string += receive_shares(conn)
    shares_time_instances = string.split(" ")
    # print(shares_time_instances)
    meter_id = int(shares_time_instances[0])
    ppn.update_billing_dict(meter_id, shares_time_instances[1])
    ppn.update_spatial_counter(shares_time_instances[1])
    constant = long(ppn.get_spatial_counter()) * long(ppn.get_lagrange_multiplier())
    ppn.calc_sum(constant)
    # print("const", constant)
    ppn.set_total_consumption(ppn.sumofshares)
    recv_shares_count += 1
    # time.sleep(0.015)
    e = time.time()
    # print( e - s)
    lock.release()
Example no. 27
def getHeader(f):
    """Extract scan information such as
        "scanSize" (number of energy points in the XAS scan
        "detSize"  (number of detector pixels for multi-element dets.; e.g. 100)
    Returns:
    list of above parameters
    """

    # work through header to get data dimensions (as LONGintegers)
    #  (do step-by-step for better code readability)
    #
    found = searchStrStarts(f, '# Total req')
    info = found.split('=')[1]
    info = info.split('x')
    scanSize = np.long(info[0])     # "scanSize" = number of energy points in XAFS scan
    if len(info) > 1:
        detSize = np.long(info[1])  # check for 2nd dimension
    else:                           # if not present, then detSize = 0
        detSize = 0                 # (i.e., transmission XAS scan only)

    return scanSize, detSize
Example no. 28
def master_wigner3j2(il1, il2, il3, lnwpro):
    l1 = np.long(il1)
    l2 = np.long(il2)
    l3 = il3  #np.int(il3)
    L = l1 + l2 + l3
    L_2 = L / 2
    L_2 = L_2.astype(int)
    min = abs(l1 - l2)
    max = l1 + l2
    c = l3 * 0.
    w = np.logical_and(np.logical_and((L_2 * 2 - L) == 0., l3 >= min),
                       l3 <= max)
    good = np.where(w == True)[0]
    if len(w[good]) > 0:
        lnw1 = lnwpro[L_2[good] - l1]
        lnw2 = lnwpro[L_2[good] - l2]
        lnw3 = lnwpro[L_2[good] - l3[good]]
        lnwl = lnwpro[L_2[good]]
        lnc = -np.log(L[good] + 1.0) - lnwl + lnw1 + lnw2 + lnw3
        c[good] = np.exp(lnc)
    return c
Example no. 29
 def __init__(self,
              bit,
              num_filter,
              kernel,
              stride=[],
              pad=[],
              no_bias=False,
              workspace=1024,
              name=None):
     super(IntConvProp, self).__init__(True)
     # we use constant bias here to illustrate how to pass arguments
     # to operators. All arguments are in string format so you need
     # to convert them back to the type you want.
     self.num_bit = np.long(bit)
     self.num_filter = np.long(num_filter)
     self.kernel = kernel
     self.stride = stride
     self.pad = pad
     self.no_bias = no_bias
     self.workspace = workspace
     self.name = name
Example no. 30
def de_weight(face_info, cache):
    appid = face_info['app_id']
    people_id = face_info['id']

    #    print(people_id, type(people_id))
    #    if type(people_id)
    #    print(people_id)
    if np.long(people_id) not in cache:
        cache[people_id] = appid
        return True
    else:
        return False
Example no. 31
    def __getitem__(self, index):
        'Generates one sample of data'
        # Select sample
        ID = self.list_IDs[index, :, :, :]

        #Load data and get label
        X = torch.from_numpy(ID).float()
        X_data = X.permute(2, 0, 1)

        y = (np.long(self.labels[index]))

        return X_data, y
Example no. 32
def bin_intervals_from_gtis(gtis, chunk_length, time):
    """Similar to intervals_from_gtis, but given an input time array.

    Used to start each FFT/PDS/cospectrum from the start of a GTI,
    and stop before the next gap in data (end of GTI).
    In this case, it is necessary to specify the time array containing the
    times of the light curve bins.
    Returns start and stop bins of the intervals to use for the PDS

    Parameters
    ----------
    gtis : 2-d float array
        [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]
    chunk_length : float
        Length of the chunks
    time : array-like
        Times of light curve bins

    Returns
    -------
    spectrum_start_bins : array-like
        List of starting bins in the original time array to use in spectral
        calculations.

    spectrum_stop_bins : array-like
        List of end bins to use in the spectral calculations.
    """
    bintime = time[1] - time[0]
    nbin = np.long(chunk_length / bintime)

    spectrum_start_bins = np.array([], dtype=np.long)
    for g in gtis:
        if g[1] - g[0] < chunk_length:
            continue
        good = (time - bintime / 2 > g[0]) & (time + bintime / 2 < g[1])
        t_good = time[good]
        if len(t_good) == 0:
            continue
        startbin = np.argmin(np.abs(time - bintime / 2 - g[0]))
        stopbin = np.argmin(np.abs(time + bintime / 2 - g[1]))

        if time[startbin] < g[0]: startbin += 1
        if time[stopbin] < g[1] + bintime/2: stopbin += 1

        newbins = np.arange(startbin, stopbin - nbin + 1, nbin,
                            dtype=np.long)
        spectrum_start_bins = \
            np.append(spectrum_start_bins,
                      newbins)
    assert len(spectrum_start_bins) > 0, \
        ("No GTIs are equal to or longer than chunk_length.")
    return spectrum_start_bins, spectrum_start_bins + nbin
Example no. 33
def getHeader(f):
    """Extract scan information such as
        "scanSize" (number of energy points in the XAS scan
        "detSize"  (number of detector pixels for multi-element dets.; e.g. 100)
    Returns:
    list of above parameters
    """

    # work through header to get data dimensions (as LONGintegers)
    #  (do step-by-step for better code readability)
    #
    found = searchStrStarts(f, '# Total req')
    info = found.split('=')[1]
    info = info.split('x')
    scanSize = np.long(
        info[0])  # "scanSize" = number of energy points in XAFS scan
    if len(info) > 1:
        detSize = np.long(info[1])  # check for 2nd dimension
    else:  # if not present, then detSize = 0
        detSize = 0  # (i.e., transmission XAS scan only)

    return scanSize, detSize
Example no. 34
def load_model(projection_matrix_txt, vocab_file, latent_dims=100):
    
    v2i = {}
    with codecs.open(vocab_file, 'r', 'utf-8') as vf:
        for idx, line in enumerate(vf):
            v2i[line.strip()] = idx

    with open(projection_matrix_txt, 'r') as pf:
        header_items = pf.readline().strip().split(' ')
        assert len(header_items) == 2
        rows, cols = np.long(header_items[0]), np.long(header_items[1])
        mat = np.empty((rows, cols), dtype=np.double)
        for row, line in enumerate(pf):
            assert row < rows
            elems = [np.double(el) for el in line.strip().split(' ')]
            assert len(elems) == cols
            mat[row,:] = elems

    P = mat.T 


    return WtmfModel(P, v2i, latent_dims=latent_dims)
Example no. 35
 def to_binary(self, val):
     #        """
     #        PWM value (100% == 156)
     #        Bit select for PWM repetition which have value PWM+1
     #        """
     val = val / 1.8
     assert val >= 0
     assert val <= 1
     out = intbv(0, min=0, max=2**32)
     tmp = np.long(np.round(val * (17 * 157 - 1)))
     out[32:16] = tmp // 17
     out[16:0] = 2**tmp % 17 - 1
     return out[32:]._val
Example no. 36
    def to_binary(self, val):
#        """
#        PWM value (100% == 156)
#        Bit select for PWM repetition which have value PWM+1
#        """
        val = val/1.8
        assert val>=0
        assert val<=1
        out = intbv(0, min=0, max=2**32)
        tmp = np.long(np.round(val*(17*157-1)))
        out[32:16] = tmp//17
        out[16:0] = 2**tmp%17-1
        return out[32:]._val
Example no. 37
def bin_intervals_from_gtis(gtis, chunk_length, time):
    """Similar to intervals_from_gtis, but given an input time array.

    Used to start each FFT/PDS/cospectrum from the start of a GTI,
    and stop before the next gap in data (end of GTI).
    In this case, it is necessary to specify the time array containing the
    times of the light curve bins.
    Returns start and stop bins of the intervals to use for the PDS

    Parameters
    ----------
    gtis : 2-d float array
        [[gti0_0, gti0_1], [gti1_0, gti1_1], ...]
    chunk_length : float
        Length of the chunks
    time : array-like
        Times of light curve bins

    Returns
    -------
    spectrum_start_bins : array-like
        List of starting bins in the original time array to use in spectral
        calculations.

    spectrum_stop_bins : array-like
        List of end bins to use in the spectral calculations.
    """
    bintime = time[1] - time[0]
    nbin = np.long(chunk_length / bintime)

    spectrum_start_bins = np.array([], dtype=np.long)
    for g in gtis:
        if g[1] - g[0] < chunk_length:
            continue
        good = (time - bintime / 2 > g[0]) & (time + bintime / 2 < g[1])
        t_good = time[good]
        if len(t_good) == 0:
            continue
        startbin = np.argmin(np.abs(time - bintime / 2 - g[0]))
        stopbin = np.argmin(np.abs(time + bintime / 2 - g[1]))

        if time[startbin] < g[0]: startbin += 1
        if time[stopbin] < g[1] + bintime / 2: stopbin += 1

        newbins = np.arange(startbin, stopbin - nbin + 1, nbin, dtype=np.long)
        spectrum_start_bins = \
            np.append(spectrum_start_bins,
                      newbins)
    assert len(spectrum_start_bins) > 0, \
        ("No GTIs are equal to or longer than chunk_length.")
    return spectrum_start_bins, spectrum_start_bins + nbin
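Note: a small usage check of bin_intervals_from_gtis (this assumes the function above is importable and a NumPy version that still provides np.long):

import numpy as np

time = np.arange(0.5, 10.5, 1.0)        # 10 bins of 1 s, centres 0.5 .. 9.5
gtis = np.array([[0.0, 10.0]])

start, stop = bin_intervals_from_gtis(gtis, 4.0, time)
print(start)  # [0 4] -> two full 4-bin chunks fit inside the GTI
print(stop)   # [4 8] -> bins 8 and 9 are dropped (incomplete chunk)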
Example no. 38
def get_simple_landmarks(image):

	faces = faceCascade.detectMultiScale(image, scaleFactor=1.3, minNeighbors=4, minSize=(100, 100), flags=cv2.CASCADE_SCALE_IMAGE)

	if len(faces) == 0:
		return []

	for (x,y,w,h) in faces:
		dlib_rect = dlib.rectangle( np.long(x),np.long(y),np.long(x+w),np.long(y+h))
		detected_landmarks = predictor(image, dlib_rect).parts()
		#print(detected_landmarks)

		points = []
		points = [[p.x, p.y] for p in detected_landmarks]
		# points to array
		landmarks = np.matrix(points)

		# Calculate NoseAngle from landmarks
		noseAngle = head.CalculateNoseAngle(landmarks)
		#print(noseAngle)
		# we need a gray image copy first
		gray_copy = cv2.cvtColor(image,cv2.COLOR_RGB2GRAY)
		# perform Edge Detection 
		edgesImage = head.PerformEdgeDetection(gray_copy)

		topOfHeadCoords,ToH_YMin,ToH_YMax,ToH_YMaxEdge,ToH_final = head.GetTopOfHeadValues(landmarks,edgesImage)

		# append Toh to array
		points.append([ToH_final[0],ToH_final[1]])

		# create matrix 
		landmarks = np.matrix(points)

		# add new toh to landmark matrix

	#print(landmarks)

	return landmarks # TODOOOOOOOOOOO - return additional parameter as TOP OF HEAD AND REDO EVERYTHING !!!!!!!!!!!!!!!
Example no. 39
    def __data_generation(self, samples, fields, IDs):
        'Generates data containing batch_size samples'  # X : (n_samples, *dim, n_channels)
        # Initialization

        # Generate data
        NameA = fields[IDs]
        va = np.long(fields[IDs + 1])
        NameB = fields[IDs + 2]
        vb = np.long(fields[IDs + 3])
        Name = NameA[0:2]
        x = vb - va
        dis = 150
        win = x if x < dis else dis

        diff = x - dis

        start = 0 if diff <= 0 else rd.randrange(0, diff, 30)
        print('diff, start: %d,%d' % (diff, start))

        X = np.empty((self.samples, vb - va, *self.dim, self.n_channels))
        y = np.empty((self.samples), dtype=int)
        for i in range(0, win):
            # load the image
            fileName = self.root + samples + '/_Color_' + str(va + i +
                                                              start) + '.png'
            image = cv2.imread(fileName)
            # convert image to numpy array
            # Store sample
            gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            gray_image2 = cv2.resize(gray_image, (100, 100))
            data = np.expand_dims(gray_image2, axis=-1)
            X[0, i, ] = data

        # Store class
        y[0] = self.labels[Name]

        return X, tensorflow.keras.utils.to_categorical(
            y, num_classes=self.n_classes)
Example no. 40
    def _callback(res):
        nonlocal frame_count, last_frame, last_1_frame_cost, last_2_frame_cost, last_3_frame_cost, time_count, mach_time_factor, \
            jank_count, big_jank_count, jank_time_count, _list, count_time
        if type(res.plist) is InstrumentRPCParseError:
            for args in kperf_data(res.raw.get_selector()):
                _time, code = args[0], args[7]
                if code == 830472984:
                    if not last_frame:
                        last_frame = long(_time)
                    else:
                        this_frame_cost = (long(_time) - last_frame) * mach_time_factor
                        if all([last_3_frame_cost != 0, last_2_frame_cost != 0, last_1_frame_cost != 0]):
                            if this_frame_cost > mean([last_3_frame_cost, last_2_frame_cost, last_1_frame_cost]) * 2 \
                                    and this_frame_cost > MOVIE_FRAME_COST * NANO_SECOND * 2:
                                jank_count += 1
                                jank_time_count += this_frame_cost
                                if this_frame_cost > mean(
                                        [last_3_frame_cost, last_2_frame_cost, last_1_frame_cost]) * 3 \
                                        and this_frame_cost > MOVIE_FRAME_COST * NANO_SECOND * 3:
                                    big_jank_count += 1

                        last_3_frame_cost, last_2_frame_cost, last_1_frame_cost = last_2_frame_cost, last_1_frame_cost, this_frame_cost
                        time_count += this_frame_cost
                        last_frame = long(_time)
                        frame_count += 1
                else:
                    time_count = (datetime.now().timestamp() - count_time) * NANO_SECOND
                if time_count > NANO_SECOND:
                    callback(
                        {"currentTime": str(datetime.now()), "FPS": frame_count / time_count * NANO_SECOND,
                         "jank": jank_count,
                         "big_jank": big_jank_count, "stutter": jank_time_count / time_count})
                    jank_count = 0
                    big_jank_count = 0
                    jank_time_count = 0
                    frame_count = 0
                    time_count = 0
                    count_time = datetime.now().timestamp()
Example no. 41
def decrypt(encryptedBlock, key):
    encoded = blockConverter(encryptedBlock[0] + encryptedBlock[1] +
                             encryptedBlock[2] + encryptedBlock[3])
    enlength = len(encoded)
    A = long(encoded[0], 2)
    B = long(encoded[1], 2)
    C = long(encoded[2], 2)
    D = long(encoded[3], 2)
    cipher = []
    cipher.append(A)
    cipher.append(B)
    cipher.append(C)
    cipher.append(D)
    r = 10
    w = 32
    modulo = 2**32
    lgw = 5
    C = (C - key[2 * r + 3]) % modulo
    A = (A - key[2 * r + 2]) % modulo
    for j in range(1, r + 1):
        i = r + 1 - j
        (A, B, C, D) = (D, A, B, C)
        u_temp = (D * (2 * D + 1)) % modulo
        u = ROL(u_temp, lgw, 32)
        t_temp = (B * (2 * B + 1)) % modulo
        t = ROL(t_temp, lgw, 32)
        tmod = t % 32
        umod = u % 32
        C = (ROR((C - key[2 * i + 1]) % modulo, tmod, 32) ^ u)
        A = (ROR((A - key[2 * i]) % modulo, umod, 32) ^ t)
    D = (D - key[1]) % modulo
    B = (B - key[0]) % modulo
    orgi = []
    orgi.append(A)
    orgi.append(B)
    orgi.append(C)
    orgi.append(D)
    return cipher, orgi
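Note: ROL, ROR and blockConverter are not part of this snippet. A plausible sketch of the 32-bit rotate helpers the loop relies on, matching the (value, shift, width) argument order of the calls above (an assumption, not the project's code):

def ROL(value, shift, width=32):
    # Rotate a width-bit word left by shift bits (hypothetical helper).
    shift %= width
    mask = (1 << width) - 1
    return ((value << shift) | (value >> (width - shift))) & mask

def ROR(value, shift, width=32):
    # Rotate a width-bit word right by shift bits (hypothetical helper).
    shift %= width
    mask = (1 << width) - 1
    return ((value >> shift) | (value << (width - shift))) & mask

print(hex(ROL(0x80000001, 1)))  # 0x3
print(hex(ROR(0x3, 1)))         # 0x80000001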
Example no. 42
def enable_auto_reply():
    """Enable auto reply.
    Returns:Draft object, including reply message and response meta data.

    Load pre-authorized user credentials from the environment.
    TODO(developer) - See https://developers.google.com/identity
    for guides on implementing OAuth2 for the application.
    """
    creds, _ = google.auth.default()

    try:
        # create gmail api client
        service = build('gmail', 'v1', credentials=creds)

        epoch = datetime.utcfromtimestamp(0)
        now = datetime.now()
        start_time = (now - epoch).total_seconds() * 1000
        end_time = (now + timedelta(days=7) - epoch).total_seconds() * 1000
        vacation_settings = {
            'enableAutoReply': True,
            'responseBodyHtml': "I am on vacation and will reply when I am "
            "back in the office. Thanks!",
            'restrictToDomain': True,
            'startTime': long(start_time),
            'endTime': long(end_time)
        }

        # pylint: disable=E1101
        response = service.users().settings().updateVacation(
            userId='me', body=vacation_settings).execute()
        print(F'Enabled AutoReply with message: '
              F'{response.get("responseBodyHtml")}')

    except HttpError as error:
        print(F'An error occurred: {error}')
        response = None

    return response
Example no. 43
def ExpandDims(inputs, axis, **kwargs):
    """Expand the new dimension with size 1 to specific axis.

    Negative ``axis`` is equal to ``axis = axis + num_axes + 1``.

    Parameters
    ----------
    inputs : Tensor
        The input tensor.
    axis : int
        The insert axis of new dimension.

    Returns
    -------
    Tensor
        The output tensor.

    Examples
    --------
    >>> a = Tensor(shape=[1, 2, 3, 4]).Variable()
    >>> print(ExpandDims(a).shape)
    >>> print(ExpandDims(a, axis=2).shape)

    """
    CheckInputs(inputs, 1)
    arguments = ParseArguments(locals())

    output = Tensor.CreateOperator(nout=1, op_type='ExpandDims', **arguments)

    if inputs.shape is not None:
        output.shape = inputs.shape[:]
        axis += (0 if axis >= 0 else len(inputs.shape) + 1)
        if axis < 0 or axis >= len(inputs.shape):
            output.shape.append(np.long(1))
        else:
            output.shape.insert(axis, np.long(1))

    return output
Example no. 44
def arduino_dataIn(channel, data):
    # read the available data
    data_available = arduino_in.decode(data)
    global sensors_ir, encoders, motor_status, extra_io, Data_Unpacked, sensor_distance

    # update the IR sensors values
    sensors_ir = np.array([data_available.extreme_left, data_available.left, data_available.center, data_available.right, data_available.extreme_right])

    # update the encoders
    encoders = np.array([np.long(data_available.encoder_left), np.long(data_available.encoder_right)])

    # update the motor status
    motor_status = data_available.motorEnable

    # update the extra IOs data
    extra_io = np.array([data_available.extra_io1, data_available.extra_io2])

    # update the sensor distance read
    temp = data_available.distance
    if temp < 450:
        sensor_distance = temp
    # for debugging
    Data_Unpacked = str(sensors_ir) + ',' + str(encoders) + ',' + str(motor_status) + ',' + str(extra_io) + ',' + str(sensor_distance)
Example no. 45
def get_rng_states(size, seed=1):
    "Return `size` number of CUDA random number generator states."
    rng_states = driver.mem_alloc(np.long(size * SIZEOF_GENERATOR))

    init_rng = mod.get_function('init_rng')

    init_rng(np.int32(size),
             rng_states,
             np.uint64(seed),
             np.uint64(0),
             block=(BLOCK_SIZE, 1, 1),
             grid=(math.ceil(float(size) / BLOCK_SIZE), 1))

    return rng_states
Example no. 46
 def set_proj_plane_info(self, xsize, ysize, lonra, latra):
     if lonra is None: lonra = [-180., 180.]
     if latra is None: latra = [-90., 90.]
     if (len(lonra) != 2 or len(latra) != 2 or lonra[0] < -180.
             or lonra[1] > 180. or latra[0] < -90 or latra[1] > 90
             or lonra[0] >= lonra[1] or latra[0] >= latra[1]):
         raise TypeError(
             "Wrong argument lonra or latra. Must be lonra=[a,b],latra=[c,d] "
             "with a<b, c<d, a>=-180, b<=180, c>=-90, d<=+90")
     lonra = self._flip * np.float64(lonra)[::self._flip]
     latra = np.float64(latra)
     xsize = np.long(xsize)
     if ysize is None:
         ratio = (latra[1] - latra[0]) / (lonra[1] - lonra[0])
         ysize = np.long(round(ratio * xsize))
     else:
         ysize = np.long(ysize)
         ratio = float(ysize) / float(xsize)
     super(CartesianProj, self).set_proj_plane_info(xsize=xsize,
                                                    lonra=lonra,
                                                    latra=latra,
                                                    ysize=ysize,
                                                    ratio=ratio)
Example no. 47
 def expand_rpeaks(self, rpeaks):
     if len(rpeaks) == 0:
         return np.zeros(self.all_ws), np.zeros(self.all_ws)
     normal_rpeaks = np.zeros(self.all_ws)
     onehot_rpeaks = np.zeros(self.all_ws)
     for rpeak in rpeaks:
         nor_rpeaks = st.norm.pdf(np.arange(0, self.all_ws),
                                  loc=rpeak,
                                  scale=self.sigma)
         normal_rpeaks = normal_rpeaks + nor_rpeaks
         # onthot expands (-50 ~ +70)
         st_ix = np.clip(rpeak - 50 // (1000 / self.sampling_rate), 0,
                         self.all_ws)
         ed_ix = np.clip(rpeak + 70 // (1000 / self.sampling_rate), 0,
                         self.all_ws)
         st_ix = np.long(st_ix)
         ed_ix = np.long(ed_ix)
         onehot_rpeaks[st_ix:ed_ix] = 1
     # scale to [0, 1]
     if self.scaled_exp:
         normal_rpeaks = (normal_rpeaks - np.min(normal_rpeaks))
         normal_rpeaks = normal_rpeaks / np.ptp(normal_rpeaks)
     return np.array(normal_rpeaks), np.array(onehot_rpeaks)
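
A minimal sketch of the (-50 ~ +70) window arithmetic above, assuming a hypothetical 500 Hz sampling rate: 1000 / sampling_rate is the duration of one sample in milliseconds, so the one-hot window covers 50 // 2 = 25 samples before and 70 // 2 = 35 samples after the R-peak.

import numpy as np

sampling_rate, all_ws, rpeak = 500, 1000, 400
ms_per_sample = 1000 / sampling_rate                          # 2.0 ms per sample
st_ix = int(np.clip(rpeak - 50 // ms_per_sample, 0, all_ws))  # 375
ed_ix = int(np.clip(rpeak + 70 // ms_per_sample, 0, all_ws))  # 435
onehot = np.zeros(all_ws)
onehot[st_ix:ed_ix] = 1                                       # 60 samples (120 ms) marked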
Esempio n. 48
0
 def __getitem__(self, index):
     """Get images and target for data loader.
     Args:
         index (int): Index
     Returns:
         tuple: (image, target) where target is index of the target class.
     """
     img, label = self.train_data[index, ::], self.train_labels[index]
     if self.transform is not None:
         img = self.transform(img)
     label = np.long(label)
     # label = torch.LongTensor([np.int64(label).item()])
     # label = torch.FloatTensor([label.item()])
     return img, label
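
For reference, np.long here does nothing more than convert the label to a plain Python integer; a hedged drop-in that behaves the same way:

label = int(label)   # DataLoader's default collate_fn batches plain ints into a
                     # LongTensor, the target type nn.CrossEntropyLoss expects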
Esempio n. 49
0
def Arange(start, stop=None, step=1, dtype='FLOAT32', **kwargs):
    """Return a vector of elements by arange.

    If ``stop`` is None, use the range: [0, start).

    Parameters
    ----------
    start : int or Tensor
        The start of the range.
    stop : int or Tensor
        The end of the range (exclusive).
    step : int or Tensor
        The interval between two elements.
    dtype : str
        The data type. ``FLOAT32`` or ``INT32``.

    Returns
    -------
    Tensor
        The vector.

    """
    arguments = ParseArguments(locals())
    arguments['extra_inputs'] = []
    if not isinstance(start, Tensor):
        arguments['static_start'] = int(start)
    else:
        arguments['dynamic_start'] = start.name
        arguments['extra_inputs'].append(start)
    if stop is not None:
        if not isinstance(stop, Tensor):
            arguments['static_stop'] = int(stop)
        else:
            arguments['dynamic_stop'] = stop.name
            arguments['extra_inputs'].append(stop)
        del arguments['stop']
    if not isinstance(step, Tensor):
        arguments['static_step'] = int(step)
    else:
        arguments['dynamic_step'] = step.name
        arguments['extra_inputs'].append(step)
    del arguments['start']
    del arguments['step']

    output = Tensor.CreateOperator([], nout=1, op_type='Arange', **arguments)

    if 'static_start' in arguments and \
       'static_step' in arguments:
        if 'dynamic_stop' not in arguments:
            if stop is None:
                stop, start = start, 0
            # element count, equal to ceil((stop - start) / step) for positive integer steps
            count = (stop - start - 1) // step + 1
            output.shape = [np.long(count)]

    return output
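
A quick check of the static shape arithmetic above, independent of the Tensor API: for positive integer steps, floor((stop - start - 1) / step) + 1 equals the length of the generated range.

import numpy as np

start, stop, step = 0, 10, 3
count = (stop - start - 1) // step + 1              # 4
assert count == len(np.arange(start, stop, step))   # [0, 3, 6, 9]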
Esempio n. 50
0
    def read_keyword(self, ii):
        ''' --------------------------------------------------------------
        Read the content of keyword of given index.

        Parameters:
        ----------
        - ii: index of the keyword to read
        -------------------------------------------------------------- '''
        kwsz = self.kwsz  # keyword SHM data structure size
        k0 = self.im_offset + self.img_len + ii * kwsz  # kword offset

        # ------------------------------------------
        #             read from SHM
        # ------------------------------------------
        kwlen = struct.calcsize(self.kwfmt0)
        kname, ktype = struct.unpack(self.kwfmt0, self.buf[k0:k0 + kwlen])

        # ------------------------------------------
        # depending on type, select parsing strategy
        # ------------------------------------------
        kwfmt = '16s 80s'

        if ktype == b'L':  # keyword value is int64
            kwfmt = 'q 8x 80s'
        elif ktype == b'D':  # keyword value is double
            kwfmt = 'd 8x 80s'
        elif ktype == b'S':  # keyword value is string
            kwfmt = '16s 80s'
        elif ktype == b'N':  # keyword is unused
            kwfmt = '16s 80s'

        kval, kcomm = struct.unpack(kwfmt, self.buf[k0 + kwlen:k0 + kwsz])

        if kwfmt == '16s 80s':
            kval = kval.decode('ascii').strip('\x00')

        # ------------------------------------------
        #    fill in the dictionary of keywords
        # ------------------------------------------
        if (ktype == b'L'):
            self.kwds[ii]['value'] = np.long(kval)
        elif (ktype == b'D'):
            self.kwds[ii]['value'] = np.double(kval)
        else:
            self.kwds[ii]['value'] = kval

        self.kwds[ii]['name'] = kname.decode('ascii').strip('\x00')
        self.kwds[ii]['type'] = ktype.decode('ascii')
        self.kwds[ii]['comment'] = kcomm.decode('ascii').strip('\x00')
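
A self-contained illustration of the struct layout assumed above for an integer ('L') keyword, using a hypothetical in-memory buffer rather than the real shared-memory segment: the value is packed as an int64, followed by 8 pad bytes and an 80-byte comment.

import struct

buf = struct.pack('q 8x 80s', 42, b'a hypothetical comment')
kval, kcomm = struct.unpack('q 8x 80s', buf)
value = int(kval)                               # np.long(kval) in the snippet above
comment = kcomm.decode('ascii').strip('\x00')   # 'a hypothetical comment'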
Esempio n. 51
0
    def test_type_aliases(self):
        # from builtins
        self.assert_deprecated(lambda: np.bool(True))
        self.assert_deprecated(lambda: np.int(1))
        self.assert_deprecated(lambda: np.float(1))
        self.assert_deprecated(lambda: np.complex(1))
        self.assert_deprecated(lambda: np.object())
        self.assert_deprecated(lambda: np.str('abc'))

        # from np.compat
        self.assert_deprecated(lambda: np.long(1))
        self.assert_deprecated(lambda: np.unicode('abc'))

        # from np.core.numerictypes
        self.assert_deprecated(lambda: np.typeDict)
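
For context, these builtin-shadowing aliases were deprecated in NumPy 1.20 and have since been removed, so on current NumPy the replacements are simply the Python built-ins (or an explicit fixed-width NumPy scalar type when that is what was actually meant). Note that NumPy 2.0 reintroduces np.long, but as the C ``long`` scalar type rather than this alias.

# np.bool     -> bool      (or np.bool_)
# np.int      -> int       (or e.g. np.int64)
# np.float    -> float     (or np.float64)
# np.complex  -> complex   (or np.complex128)
# np.object   -> object
# np.str      -> str       (or np.str_)
# np.long     -> int
# np.unicode  -> str
# np.typeDict -> np.sctypeDict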
Esempio n. 52
0
def ExpandDims(inputs, axis=-1, **kwargs):
    """ExpandDims interface of NDArray.

    Parameters
    ----------
    inputs : Tensor
        The input tensor.
    axis : int
        The insert position of new dimension. Default is ``-1`` (Push Back).

    Returns
    -------
    Tensor
        The output tensor.

    Examples
    --------
    >>> a = Tensor(shape=[1, 2, 3, 4]).Variable()
    >>> print ExpandDims(a).shape

    >>> print ExpandDims(a, axis=2).shape

    """
    CheckInputs(inputs, 1)
    arguments = ParseArguments(locals())

    output = Tensor.CreateOperator(nout=1, op_type='ExpandDims', **arguments)

    if inputs.shape is not None:
        output.shape = inputs.shape[:]
        if axis == -1 or axis >= len(inputs.shape):
            output.shape.append(np.long(1))
        else:
            output.shape.insert(axis, np.long(1))

    return output
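
A standalone sketch of the shape rule encoded above, with plain Python lists standing in for tensor shapes: axis=-1 (or any axis at or beyond the input rank) appends the new dimension, anything else inserts it in place.

def expand_dims_shape(shape, axis=-1):
    out = list(shape)
    if axis == -1 or axis >= len(shape):
        out.append(1)
    else:
        out.insert(axis, 1)
    return out

print(expand_dims_shape([1, 2, 3, 4]))           # [1, 2, 3, 4, 1]
print(expand_dims_shape([1, 2, 3, 4], axis=2))   # [1, 2, 1, 3, 4]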
Esempio n. 53
0
def metatimes_to_seconds_since_start(metatimes):
    """Convert metatime array to seconds since first datetime in array.

    Inputs:
        metatimes [np.array or pd.DatetimeIndex]: Array of metatimes.
                                                  metatimes are either:
                                                  np.array of datetime.datetime,
                                                  np.array of np.datetime64,
                                                  np.array of pd.Timestamp,
                                                  pd.DatetimeIndex.

    Optional Inputs:
        None.

    Outputs:
        seconds_since_start [np.array]: Array of long integers
                                        indicating number of seconds since the first time in the array.

    Optional Outputs:
        None.

    Example:
        seconds_since_start = metatimes_to_seconds_since_start(metatimes)
    """

    # Check type of input and do conversion accordingly
    if isinstance(metatimes[0], datetime.datetime):
        dt_since_start = metatimes - metatimes[0]
        return np.array(
            [np.long(metatime.total_seconds()) for metatime in dt_since_start])
    elif isinstance(metatimes[0], (np.datetime64, pd.Timestamp)):
        npdts_since_start = metatimes - metatimes[0]
        return np.array([
            np.long(npdt_since_start / np.timedelta64(1, 's'))
            for npdt_since_start in npdts_since_start
        ])
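
A hedged usage sketch with a small array of hypothetical np.datetime64 timestamps, which exercises the datetime64/Timestamp branch above:

import numpy as np

metatimes = np.array(['2024-01-01T00:00:00',
                      '2024-01-01T00:00:30',
                      '2024-01-01T00:01:30'], dtype='datetime64[s]')
print(metatimes_to_seconds_since_start(metatimes))   # [ 0 30 90]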
Esempio n. 54
0
def apols(lin, amaxin):

# adapted from IDL procedure apols.pro written by Jesper Schou

  l=np.long(lin)
  amax=np.minimum(amaxin,2*l)

  pols=np.zeros((2*l+1,amaxin+1))
# It is ESSENTIAL that x is set up such that x(-m)=-x(m) to full machine
# accuracy or that the re-orthogonalization is done with respect to all
# previous polynomials (second option below).
# x=(dindgen(2*l+1)-l)/l
  x=np.linspace(-1,1,2*l+1)

  pols[:,0]=1/np.sqrt(2*l+1.0)
  if (amax >= 1):
    pols[:,1]=x/np.sqrt((l+1.0)*(2*l+1.0)/(3.0*l))
# for n=2l,amax do begin
# Set up polynomials using exact recurrence relation.
  for n in range(2,amax+1):
    a0=2.0*l*(2*n-1.0)/(n*(2*l-n+1))
    b0=-(n-1.0)/n*(2*l+n)/(2*l-n+1)
    a=a0*np.sqrt((2*n+1.0)/(2*n-1.0)*(2*l-n+1.0)/(2*l+n+1.0))
    b=b0*np.sqrt((2*n+1.0)/(2*n-3.0)*(2*l-n+1.0)*(2*l-n+2.0)/(2*l+n+1.0)/(2*l+n))
    help=a*x*pols[:,n-1]+b*pols[:,n-2]
# Turns out that roundoff kills the algorithm, so we have to re-orthogonalize.
# Two choices here. First one is twice as fast and more accurate, but
# depends on the rounding being done in the standard IEEE way.
#  for j=n-2,0,-2 do begin 
    for j in range(n-2,-1,-2):    
# This choice is robust to the roundoff, but twice as slow and generally
# somewhat less accurate
# for j=n-1,0,-1 do begin
      help=help-pols[:,j]*np.sum(help*pols[:,j])
#  end
# Reset norm to 1.
    pols[:,n]=help/np.sqrt(np.sum(np.square(help)))

# Reset polynomials to have P_l(l)=l by setting overall norm.
# Note that this results in more accurate overall normalization, but
# that P_l(l) may be very far from l. This is the key to the algorithm.
  c=l**2*(2*l+1.0)
# for n=0l,amax do begin
  for n in range(0,amax+1):
    c=c*(2*l+n+1.0)/(2*l-n+1.0)
    pols[:,n]=pols[:,n]*np.sqrt(c/(2*n+1))

  return pols
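
A quick sanity check on the result (a hedged sketch with hypothetical l and amax): the array holds one column per polynomial order on a 2l+1 point grid, and the order-1 polynomial stays antisymmetric because the grid satisfies x(-m) = -x(m).

import numpy as np

pols = apols(50, 4)
print(pols.shape)                               # (101, 5): 2l+1 points, orders 0..4
print(np.allclose(pols[::-1, 1], -pols[:, 1]))  # True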
Esempio n. 55
0
 def find_maximum_weight_graph(self):
     vertices = {-1: 0, 0: 0, 1: self.data[0]}
     for i in range(1, len(self.data)):
         vertices[i] = np.long(
             max(vertices[i - 1], vertices[i - 2] + self.data[i - 1]))
     maximum_weight_graph = set()
     position = len(self.data)
     while position >= 1:
         if vertices[position - 1] >= (vertices[position - 2] +
                                       self.data[position - 1]):
             position = position - 1
         else:
             maximum_weight_graph.add(position)
             position = position - 2
     self.maximum_weight_graph = maximum_weight_graph
     return maximum_weight_graph
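
The method above is the standard maximum-weight independent set recurrence on a path graph; a standalone sketch of the same idea with hypothetical weights and no class wrapper:

def max_weight_independent_set(weights):
    best_prev, best = 0, 0          # best totals over the first i-1 and first i vertices
    for w in weights:
        best_prev, best = best, max(best, best_prev + w)
    return best

print(max_weight_independent_set([1, 4, 5, 4]))   # 8: pick the two 4s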
Esempio n. 56
0
def sim_drift(v, V, a, z, Z, t, T, size=512, dt=1e-4, update=False, return_gpu=False):
    global _generator, _out, _generators_per_block, _block_count
    size = np.long(size)
    if _generator is None or update:
        _generator = pycuda.curandom.XORWOWRandomNumberGenerator()
        _block_count = _generator.block_count
        _generators_per_block = _generator.generators_per_block
    if _out is None or update:
        _out = gpuarray.empty(size, dtype=np.float32)

    _sim_drift_cuda(_generator.state,
                    np.float32(v), np.float32(V), np.float32(a), np.float32(z),
                    np.float32(Z), np.float32(t), np.float32(T),
                    np.float32(dt), np.float32(1), _out, np.uint32(size),
                    block=(_generators_per_block, 1, 1), grid=(_block_count, 1))

    if return_gpu:
        return _out
    else:
        return _out.get()
Esempio n. 57
0
 def __init__(self, x_csv_file, y_csv_file):
     self.x_scv_file_name = x_csv_file
     self.y_scv_file_name = y_csv_file
     self.len = 0
     self.x_matrix = []
     self.y_matrix = []
     for line in open(self.x_scv_file_name):
         self.len += 1
         x = line.split(',')
         x = [float(i) / 255 for i in x]
         self.x_matrix.append(x)
     for label in open(self.y_scv_file_name):
         self.y_matrix.append(np.long(label))
     self.x_matrix = np.array(self.x_matrix)
     self.y_matrix = np.array(self.y_matrix)
     pass
Esempio n. 58
0
    def __getitem__(self, index):
        img = Image.open(self.img_path[index]).convert('RGB')
        # img = ImageEnhance.Contrast(img)
        # img.enhance(2.0)  # bump the contrast of every image to 2

        if self.transform is not None:
            img = self.transform(img)

        # in the original SVHN labels, class 10 stands for the digit 0
        lbl = []
        if len(self.img_label) > 0:
            lbl = [np.long(x) for x in self.img_label[index]]
        for i in range(4 - len(lbl)):
            lbl.append(10)  # pad to 4 characters with class 10 for fixed-length recognition

        return img, torch.from_numpy(np.array(lbl)).long()  # this part has issues
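
A standalone sketch of the fixed-length label handling above for a hypothetical house number "25": shorter labels are padded with the filler class 10 up to 4 digits before being turned into a LongTensor.

import numpy as np
import torch

img_label = [2, 5]                      # e.g. house number "25"
lbl = [int(x) for x in img_label]
lbl += [10] * (4 - len(lbl))            # pad with the filler class 10
target = torch.from_numpy(np.array(lbl)).long()
print(target)                           # tensor([ 2,  5, 10, 10])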
Esempio n. 59
0
    def _create_buffers(self, parallelism=5):
        """
        Define the necessary input and ouptut Buffer objects for the Kernel,
        store in self.input_buffers and self.output_buffers. Subclasses should
        overwrite this.
        """
        mf = cl.mem_flags

        self.size = parallelism

        parallelism = numpy.long(parallelism)
        #self.buffers.append((parallelism,))
        # local_size = cl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=parallelism)
        # self.buffers.append(local_size)

        output = numpy.zeros(parallelism)
        self.buffers.append(cl.Buffer(self.ctx, mf.WRITE_ONLY, output.nbytes))