def peaks_ridges1d(wavetrset, ridgesset, scalevals, padfirstdimintoarray=True, minridgelength=3, minchildlength=0., maxscale_localmax=None, minridgewtsum=100., minchildwtsum=0., verbose=False): 
    """
    peaks_ridges1d(wavetrset, ridgesset, scalevals, padfirstdimintoarray=True, minridgelength=3, minchildlength=0., maxscale_localmax=None, minridgewtsum=100., minchildwtsum=0., verbose=False)
    wavetrset, ridgesset, scalevals are from previous function runs, see example.py
    minridgelength: minimum length (i.e. number of wavelet scales) of ridge for it to count as a peak
    minchildlength: same thing but for the length of the child ridge alone, not counting its mother's length
    maxscale_localmax: the largest scale in which the ridge can be locally maximum and still count as a peak
    minridgewtsum: critical value for the sum of the wavelet transform over the ridge. This will depend on the number of wavelet scales used and the nominal intensities of the original data.
    minchildwtsum: same thing except for the child ridge. The child ridge must have this total intensity to count as a peak, but for the "minridgewtsum", the child and mother intensities are added together.
    """
    minridgelength=max(1, minridgelength)

    ridgescalevals=scalevals[::-1] #ordered big->small
    if maxscale_localmax is None:
        maxscale_localmax=max(scalevals)
    ridgescalecritind=numpy.where(ridgescalevals<=maxscale_localmax)[0]
    if len(ridgescalecritind)<2:
        print 'aborted: the set of qscales does not include more than 1 point in the specified qwidthrange'
        return 'aborted: the set of qscales does not include more than 1 point in the specified qwidthrange'
    ridgescalecritind=ridgescalecritind[0]

    peaks=[perform_peaks_ridges1d(wt, ridges, ridgescalecritind=ridgescalecritind, minridgelength=minridgelength, minchildlength=minchildlength, minridgewtsum=minridgewtsum, minchildwtsum=minchildwtsum, verbose=verbose) for wt, ridges in zip(wavetrset, ridgesset)]
    
    numpks=[len(p) for p in peaks]
    maxnp=max(numpks)
    filler=[[32767]*2]*maxnp
    
    if padfirstdimintoarray:
        for p in peaks:
            p+=filler[:len(filler)-len(p)]
        return numpy.uint32([pksort(numpy.uint32(p).T) for p in peaks])
    else:
        return [pksort(numpy.uint32(p).T) for p in peaks]#list of 2 x numpeaks arrays, the 2 are scaleind and posnind
Example #2
    def calculateComplexDerefOpAddress(complexDerefOp, registerMap):

        match = re.match("((?:\\-?0x[0-9a-f]+)?)\\(%([a-z0-9]+),%([a-z0-9]+),([0-9]+)\\)", complexDerefOp)
        if match != None:
            offset = 0L
            if len(match.group(1)) > 0:
                offset = long(match.group(1), 16)

            regA = RegisterHelper.getRegisterValue(match.group(2), registerMap)
            regB = RegisterHelper.getRegisterValue(match.group(3), registerMap)

            mult = long(match.group(4), 16)

            # If we're missing either of the two register values, return None
            if regA == None or regB == None:
                if regA == None:
                    return (None, "Missing value for register %s" % match.group(2))
                else:
                    return (None, "Missing value for register %s" % match.group(3))

            if RegisterHelper.getBitWidth(registerMap) == 32:
                val = int32(uint32(regA)) + int32(uint32(offset)) + (int32(uint32(regB)) * int32(uint32(mult)))
            else:
                # Assume 64 bit width
                val = int64(uint64(regA)) + int64(uint64(offset)) + (int64(uint64(regB)) * int64(uint64(mult)))
            return (long(val), None)

        return (None, "Unknown failure.")
Example #3
    def testInt(self):
        num = np.int(2562010)
        self.assertEqual(np.int(ujson.decode(ujson.encode(num))), num)

        num = np.int8(127)
        self.assertEqual(np.int8(ujson.decode(ujson.encode(num))), num)

        num = np.int16(2562010)
        self.assertEqual(np.int16(ujson.decode(ujson.encode(num))), num)

        num = np.int32(2562010)
        self.assertEqual(np.int32(ujson.decode(ujson.encode(num))), num)

        num = np.int64(2562010)
        self.assertEqual(np.int64(ujson.decode(ujson.encode(num))), num)

        num = np.uint8(255)
        self.assertEqual(np.uint8(ujson.decode(ujson.encode(num))), num)

        num = np.uint16(2562010)
        self.assertEqual(np.uint16(ujson.decode(ujson.encode(num))), num)

        num = np.uint32(2562010)
        self.assertEqual(np.uint32(ujson.decode(ujson.encode(num))), num)

        num = np.uint64(2562010)
        self.assertEqual(np.uint64(ujson.decode(ujson.encode(num))), num)
Example #4
 def radixSortKeysOnly(self, keyBits):
     i = numpy.uint32(0)
     bitStep = self.bitStep
     
     while (keyBits > i*bitStep):
         self.radixSortStepKeysOnly(bitStep, i*bitStep)
         i+=numpy.uint32(1)
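
A quick sketch of the pass arithmetic implied by the loop above (hypothetical values for keyBits and bitStep, not taken from the class):

# with keyBits = 32 and bitStep = 4 the while-loop performs 8 radix passes,
# one per 4-bit digit, at bit offsets 0, 4, 8, ..., 28
key_bits, bit_step = 32, 4
offsets = [i * bit_step for i in range((key_bits + bit_step - 1) // bit_step)]
print(offsets)  # [0, 4, 8, 12, 16, 20, 24, 28]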
Example #5
    def testIntMax(self):
        num = np.int(np.iinfo(np.int).max)
        self.assertEqual(np.int(ujson.decode(ujson.encode(num))), num)

        num = np.int8(np.iinfo(np.int8).max)
        self.assertEqual(np.int8(ujson.decode(ujson.encode(num))), num)

        num = np.int16(np.iinfo(np.int16).max)
        self.assertEqual(np.int16(ujson.decode(ujson.encode(num))), num)

        num = np.int32(np.iinfo(np.int32).max)
        self.assertEqual(np.int32(ujson.decode(ujson.encode(num))), num)

        num = np.uint8(np.iinfo(np.uint8).max)
        self.assertEqual(np.uint8(ujson.decode(ujson.encode(num))), num)

        num = np.uint16(np.iinfo(np.uint16).max)
        self.assertEqual(np.uint16(ujson.decode(ujson.encode(num))), num)

        num = np.uint32(np.iinfo(np.uint32).max)
        self.assertEqual(np.uint32(ujson.decode(ujson.encode(num))), num)

        if platform.architecture()[0] != '32bit':
            num = np.int64(np.iinfo(np.int64).max)
            self.assertEqual(np.int64(ujson.decode(ujson.encode(num))), num)

            # uint64 max will always overflow as it's encoded to signed
            num = np.uint64(np.iinfo(np.int64).max)
            self.assertEqual(np.uint64(ujson.decode(ujson.encode(num))), num)
def load_stack(folder_name):

    stack = None
    input_files = sorted(glob.glob(os.path.join(folder_name, '*')))

    for i, file_name in enumerate(input_files):

        print file_name

        if file_name.endswith('h5') or file_name.endswith('hdf5'):
            infile = h5py.File(file_name)
            im = infile['/probabilities'][...]
        else:
            im = mahotas.imread(file_name)
            if len(im.shape) == 3:
                im = np.uint32(im[ :, :, 0 ]) + np.uint32(im[ :, :, 1 ]) * 2**8 + np.uint32(im[ :, :, 2 ]) * 2**16

        if im.shape[0] > 400:
            im = im[60:60+400, 210:210+400]

        if rot != 0:
            im = np.rot90(im, rot)

        if stack is None:
            stack = np.zeros((len(input_files), im.shape[0], im.shape[1]), dtype=im.dtype)
            print 'Stack size={0}, dtype={1}.'.format(stack.shape, stack.dtype)
        stack[i,:,:] = im

        #print file_name

    return stack
Example #7
    def _set_kernel_arguments(self):
        """Tie arguments of OpenCL kernel-functions to the actual kernels

        set_kernel_arguments() is a private method, called by configure().
        It uses the dictionary _cl_kernel_args.

        Note that by default, since TthRange is disabled, the
        integration kernels have tth_min_max tied to the tthRange
        argument slot.

        When setRange is called it replaces that argument with
        tthRange low and upper bounds. When unsetRange is called, the
        argument slot is reset to tth_min_max.
        """
        self._cl_kernel_args["bsort_vertical"] = [self._cl_mem["input_data"].data, None]
        self._cl_kernel_args["bsort_horizontal"] = [self._cl_mem["input_data"].data, None]

        self._cl_kernel_args["filter_vertical"] = [self._cl_mem["input_data"].data,
                                                   self._cl_mem["vector_vertical"].data,
                                                   numpy.uint32(self.npt_width),
                                                   numpy.uint32(self.npt_height),
                                                   numpy.float32(0), numpy.float32(0.5), ]
        self._cl_kernel_args["filter_horizontal"] = [self._cl_mem["input_data"].data,
                                                     self._cl_mem["vector_horizontal"].data,
                                                     numpy.uint32(self.npt_width),
                                                     numpy.uint32(self.npt_height),
                                                     numpy.float32(0), numpy.float32(0.5), ]
Example #8
def add_bucket_entry(uhash, pieces, first_bucket_vector, second_bucket_vector, point_index):
    h_index = np.uint64(first_bucket_vector[0]) + np.uint64(second_bucket_vector[0 + 2])
    if h_index >= const.prime_default:
        h_index -= const.prime_default
    assert(h_index < const.prime_default)
    h_index = np.uint32(h_index)
    h_index = h_index % uhash.table_size
    
    control = np.uint64(first_bucket_vector[1]) + np.uint64(second_bucket_vector[1 + 2])
    if control >= const.prime_default:
        control -= const.prime_default
    assert(control < const.prime_default)
    control = np.uint32(control)

    if uhash.t == 1:
        b = uhash.ll_hash_table[h_index] 
        while b and b.control_value != control:
            b = b.next_bucket_in_chain
        # if bucket does not exist
        if b is None:
            uhash.buckets += 1
            uhash.ll_hash_table[h_index] = lsh_structs.bucket(control, point_index, uhash.ll_hash_table[h_index])
        else:
            bucket_entry = lsh_structs.bucket_entry(point_index, b.first_entry.next_entry)
            b.first_entry.next_entry = bucket_entry
    uhash.points += 1
Example #9
def get_bucket(uhash, pieces, first_bucket_vector, second_bucket_vector):
    h_index = np.uint64(first_bucket_vector[0]) + np.uint64(second_bucket_vector[0 + 2])
    if h_index >= const.prime_default:
        h_index -= const.prime_default
    assert(h_index < const.prime_default)
    h_index = np.uint32(h_index)
    h_index = h_index % uhash.table_size

    control = np.uint64(first_bucket_vector[1]) + np.uint64(second_bucket_vector[1 + 2])
    if control >= const.prime_default:
        control -= const.prime_default
    assert(control < const.prime_default)
    control = np.uint32(control)
    
    if uhash.t == 2:
        index_hybrid = uhash.hybrid_hash_table[h_index]

        while index_hybrid:
            if index_hybrid.control_value == control:
                index_hybrid = C.pointer(index_hybrid)[1]
                return index_hybrid
            else:
                index_hybrid = C.pointer(index_hybrid)[1]
                if index_hybrid.point.is_last_bucket:
                    return None
                l = index_hybrid.point.bucket_length
                index_hybrid = C.pointer(index_hybrid)[l]
        return None
Example #10
    def arm_timed_latch(self, latch_name, time=None, force=False):
        ''' Arm a timed latch. Use force=True to force even if already armed 
        @param fpga host
        @param latch_name: base name for latch
        @param time: time in adc samples since epoch
        @param force: force arm of latch that is already armed
        '''
        status = self.host.device_by_name('%s_status' %(latch_name)).read()['data']

        # get armed, arm and load counts
        armed_before = status['armed']
        arm_count_before = status['arm_count']
        load_count_before = status['load_count']

        # if not forcing it, check for already armed first
        if armed_before == True:
            if force == False:
                LOGGER.error('timed latch %s already armed, use force=True' %latch_name)
                return
            else:
                LOGGER.info('forcing arm of already armed timed latch %s' %latch_name)

        # we load immediate if not given time
        if time == None:
            self.host.device_by_name('%s_control0' %(latch_name)).write(arm=0, load_immediate=1)
            self.host.device_by_name('%s_control0' %(latch_name)).write(arm=1, load_immediate=1)
                
            LOGGER.info('Timed latch %s arm-for-immediate-loading attempt' %latch_name)
            
        else: 
            # TODO check time values
            time_samples = numpy.uint64(time)
            time_msw = numpy.uint32((time_samples & 0x0000FFFFFFFFFFFF) >> 32)
            time_lsw = numpy.uint32((time_samples & 0x00000000FFFFFFFF))
            self.host.device_by_name('%s_control' %(latch_name)).write(load_time_lsw=time_lsw)
            
            self.host.device_by_name('%s_control0' %(latch_name)).write(arm=0, load_immediate=0, load_time_msw=time_msw)
            self.host.device_by_name('%s_control0' %(latch_name)).write(arm=1, load_immediate=0, load_time_msw=time_msw)

            LOGGER.info('Timed latch %s arm-for-loading-at-time attempt' %latch_name)

        # TODO check that arm count increased as expected
        status = self.host.device_by_name('%s_status' %(latch_name)).read()['data']
        
        # get armed, arm and load counts
        armed_after = status['armed']
        arm_count_after = status['arm_count']
        load_count_after = status['load_count']
        
        # arm count did not increase as expected
        if arm_count_after != (arm_count_before+1):
            # TODO check time
            LOGGER.error('Timed latch %s arm count at %i instead of %i' %(latch_name, arm_count_after, (arm_count_before+1)))
        else:
            # check load count increased as expected
            if time == None: 
                if load_count_after != (load_count_before+1):
                    LOGGER.error('Timed latch %s load count at %i instead of %i' %(latch_name, load_count_after, (load_count_before+1)))
            else:
                LOGGER.info('Timed latch %s successfully armed' %(latch_name))
Example #11
def combine_scaled(r, g, b, a):
    """Combine components in [0, 1] to rgba uint32"""
    r2 = min(255, np.uint32(r * 255))
    g2 = min(255, np.uint32(g * 255))
    b2 = min(255, np.uint32(b * 255))
    a2 = min(255, np.uint32(a * 255))
    return np.uint32((a2 << 24) | (b2 << 16) | (g2 << 8) | r2)
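
A minimal usage sketch for combine_scaled above (assumes numpy is imported as np, as in the surrounding code): pack pure red at full opacity and read the channels back out of the packed value.

packed = combine_scaled(1.0, 0.0, 0.0, 1.0)
red = packed & 0xFF            # red is stored in the lowest byte
alpha = (packed >> 24) & 0xFF  # alpha is stored in the highest byte
print(red, alpha)              # 255 255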
Example #12
    def test_valid(self):
        prop = bcpp.Int()

        assert prop.is_valid(None)

        assert prop.is_valid(0)
        assert prop.is_valid(1)

        assert prop.is_valid(np.int8(0))
        assert prop.is_valid(np.int8(1))
        assert prop.is_valid(np.int16(0))
        assert prop.is_valid(np.int16(1))
        assert prop.is_valid(np.int32(0))
        assert prop.is_valid(np.int32(1))
        assert prop.is_valid(np.int64(0))
        assert prop.is_valid(np.int64(1))
        assert prop.is_valid(np.uint8(0))
        assert prop.is_valid(np.uint8(1))
        assert prop.is_valid(np.uint16(0))
        assert prop.is_valid(np.uint16(1))
        assert prop.is_valid(np.uint32(0))
        assert prop.is_valid(np.uint32(1))
        assert prop.is_valid(np.uint64(0))
        assert prop.is_valid(np.uint64(1))

        # TODO (bev) should fail
        assert prop.is_valid(False)
        assert prop.is_valid(True)
    def __call__(self, queue, tgt, src, shape):
        w, h = shape
        assert w % block_size == 0
        assert h % block_size == 0

        return self.kernel(queue, (w, h), None,
            tgt, src, numpy.uint32(w), numpy.uint32(h))
Example #14
    def set_data(self, time, lcids=None, pbids=None, nsamples=None, exptimes=None):
        mf = cl.mem_flags

        if self._b_time is not None:
            self._b_time.release()
            self._b_lcids.release()
            self._b_pbids.release()
            self._b_nsamples.release()
            self._b_etimes.release()

        self.nlc = uint32(1 if lcids is None else unique(lcids).size)
        self.npb = uint32(1 if pbids is None else unique(pbids).size)
        self.nptb = time.size

        self.time = asarray(time, dtype='float32')
        self.lcids = zeros(time.size, 'uint32') if lcids is None else asarray(lcids, dtype='uint32')
        self.pbids = zeros(self.nlc, 'uint32') if pbids is None else asarray(pbids, dtype='uint32')
        self.nsamples = ones(self.nlc, 'uint32') if nsamples is None else asarray(nsamples, dtype='uint32')
        self.exptimes = ones(self.nlc, 'float32') if exptimes is None else asarray(exptimes, dtype='float32')

        self._b_time = cl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=self.time)
        self._b_lcids = cl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=self.lcids)
        self._b_pbids = cl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=self.pbids)
        self._b_nsamples = cl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=self.nsamples)
        self._b_etimes = cl.Buffer(self.ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=self.exptimes)
    def pad(self, max_pad):
        """
        Pad the timeseries data so that there are no missing values. We fill in
        missing power values using the previous power value in the series.
        """
        width = self.times[-1] - self.times[0] + 1
        padded_array = np.rec.array((0, 2), dtype=[('time', np.uint32),
                                                   ('power', np.float32)])
        padded_array.resize(width)

        cnt = 0
        for i in xrange(len(self.times)-1):
            padded_array[cnt] = (np.uint32(self.times[i]), self.powers[i])
            cnt += 1

            if self.times[i+1] - self.times[i] > max_pad:
                continue

            for t in xrange(self.times[i]+1, self.times[i+1]):
                padded_array[cnt] = (np.uint32(t), self.powers[i])
                cnt += 1

        padded_array[cnt] = (np.uint32(self.times[-1]), self.powers[-1])
        padded_array.resize(cnt + 1)

        self.array = padded_array
 def X_div_Y(self, alpha, X, x0, Y, y0, beta, result = None):
     '''
     return alpha*(X[,] + x0) / (Y[,] + y0) + beta
     
     X_div_Y(float alpha, float* X, float x0, float* Y, float y0, float beta,
             float* result, uint Y_col, uint Y_row)
     '''
     if result is None:
         Y_col = Y.shape[0]
         Y_row = Y.shape[1]
         self.X_div_Y_kernel(np.float32(alpha), X.gpudata, np.float32(x0), \
                             Y.gpudata, np.float32(y0), \
                             np.float32(beta), Y.gpudata, \
                             np.uint32(Y_col), np.uint32(Y_row), \
                             block = (32, 32, 1), \
                             grid = (int(Y_row / 32) + 1, int(Y_col / 32) + 1) \
                             )
     else:
         Y_col = Y.shape[0]
         Y_row = Y.shape[1]
         self.X_div_Y_kernel(np.float32(alpha), X.gpudata, np.float32(x0), \
                             Y.gpudata, np.float32(y0), \
                             np.float32(beta), result.gpudata, \
                             np.uint32(Y_col), np.uint32(Y_row), \
                             block = (32, 32, 1), \
                             grid = (int(Y_row / 32) + 1, int(Y_col / 32) + 1) \
                             )    
 def index_to_array(self, index, dim, result = None):
     '''
     index_to_array(float* index, float* result, uint r_col, uint r_row)
     '''
     if result is None:
         r_col = index.size
         r_row = dim
         result = gpuarray.GPUArray([index.size, dim], dtype = self.dtype)
           
         self.index_to_array_kernel(index.gpudata, result.gpudata, \
                                    np.uint32(r_col), np.uint32(r_row), \
                                    block = self._2d_block, \
                                    grid = self._2d_grid(r_col, r_row) \
                                    )     
         return result   
     
     else:
         r_col = result.shape[0]
         r_row = result.shape[1]
         
         if r_col != index.size or r_row != dim:
              raise ValueError('index_to_array: the dims of the result do not match the input')
           
         self.index_to_array_kernel(index.gpudata, result.gpudata, \
                                    np.uint32(r_col), np.uint32(r_row), \
                                    block = self._2d_block, \
                                    grid = self._2d_grid(r_col, r_row) \
                                    )      
def _fold_exp_and_coh(t_array, w, tz, tau_arr):
    if tz != 0.:
        t_array -= tz

    shape = t_array.shape
    t_array = t_array.astype(np.float32)

    t_arr_gpu = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR, hostbuf=t_array)
    tau_buf = cl.Buffer(ctx, mf.READ_ONLY | mf.COPY_HOST_PTR,
                        hostbuf=(1/tau_arr).astype(np.float32))
    shape = (shape[0], shape[1], tau_arr.size)
    shape_coh = (shape[0], shape[1], 3)
    out = cl_array.empty(queue, shape=shape, dtype=np.float32)
    out_coh = cl_array.empty(queue, shape=shape_coh, dtype=np.float32)

    global_work_size = t_array.size + (work_size[0] - t_array.size % work_size[0])

    prg.fold_exp(queue, (global_work_size, tau_arr.size), work_size, t_arr_gpu, np.float32(w),
                 tau_buf, out.data, np.uint32(t_array.size))

    coh_no_div.coh_gauss(queue, (global_work_size, 3), work_size, t_arr_gpu,
                   np.float32(w/1.4142), out_coh.data, np.uint32(t_array.size))

    queue.finish()
    a = out.get(async_=True)
    b = out_coh.get(async_=True)
    b /= np.abs(b).max(0)
    queue.finish()
    return a, b
Example #19
    def geo_mask(self):
        """Masking the space pixels from geometry info."""
        cfac = np.uint32(self.proj_info['CFAC'])
        lfac = np.uint32(self.proj_info['LFAC'])
        coff = np.float32(self.proj_info['COFF'])
        loff = np.float32(self.proj_info['LOFF'])
        nlines = int(self.data_info['number_of_lines'])
        ncols = int(self.data_info['number_of_columns'])

        # count starts at 1
        local_coff = 1
        local_loff = (self.total_segments - self.segment_number) * nlines + 1

        xmax, ymax = get_geostationary_angle_extent(self.area)

        pixel_cmax = np.rad2deg(xmax) * cfac * 1.0 / 2**16
        pixel_lmax = np.rad2deg(ymax) * lfac * 1.0 / 2**16

        def ellipse(line, col):
            return ((line / pixel_lmax) ** 2) + ((col / pixel_cmax) ** 2) <= 1

        cols_idx = da.arange(-(coff - local_coff),
                             ncols - (coff - local_coff),
                             dtype=np.float, chunks=CHUNK_SIZE)
        lines_idx = da.arange(nlines - (loff - local_loff),
                              -(loff - local_loff),
                              -1,
                              dtype=np.float, chunks=CHUNK_SIZE)
        return ellipse(lines_idx[:, None], cols_idx[None, :])
Example #20
def _minmax_impl(a_gpu, axis, min_or_max, stream=None):
    ''' Returns both max and argmax (min/argmin) along an axis.'''
    assert len(a_gpu.shape) < 3
    if iscomplextype(a_gpu.dtype):
        raise ValueError("Cannot compute min/max of complex values")

    if axis is None:  ## Note: PyCUDA doesn't have an overall argmax/argmin!
        if min_or_max == 'max':
            return gpuarray.max(a_gpu).get()
        else:
            return gpuarray.min(a_gpu).get()
    else:
        if axis < 0:
            axis += 2
    assert axis in (0, 1)

    global _global_cublas_allocator
    alloc = _global_cublas_allocator

    n, m = a_gpu.shape if a_gpu.flags.c_contiguous else (a_gpu.shape[1], a_gpu.shape[0])
    col_kernel, row_kernel = _get_minmax_kernel(a_gpu.dtype, min_or_max)
    if (axis == 0 and a_gpu.flags.c_contiguous) or (axis == 1 and a_gpu.flags.f_contiguous):
        target = gpuarray.empty(m, dtype=a_gpu.dtype, allocator=alloc)
        idx = gpuarray.empty(m, dtype=np.uint32, allocator=alloc)
        col_kernel(a_gpu, target, idx, np.uint32(m), np.uint32(n),
                   block=(32, 1, 1), grid=(m, 1, 1), stream=stream)
    else:
        target = gpuarray.empty(n, dtype=a_gpu.dtype, allocator=alloc)
        idx = gpuarray.empty(n, dtype=np.uint32, allocator=alloc)
        row_kernel(a_gpu, target, idx, np.uint32(m), np.uint32(n),
                block=(32, 1, 1), grid=(n, 1, 1), stream=stream)
    return target, idx
Example #21
	def _build_synth_assignments(self,f_lo,phase_offset=0,tag='a'):
		"""
		Build phase increment words for synthesizer.
		
		Parameters
		----------
		f_lo : float
		    LO frequency.
		phase_offset : float
		    LO phase offset in radians.
		tag : string
		    Tag associated with the downconverter, should be one of
		    {'a','b','c','d','e','f'}. Default is 'a'.
		
		Returns
		-------
		assign_synth : dict
		    Each (key,val) pair can be used to assign devices as in
		    roach2.write_int(key,val)
		"""
		# calculate discretized parameter values
		phase_res = 2**self.PHASE_LOOKUP_DEPTH
		mask = phase_res-1
		dphi = uint32(f_lo/self.ADC_SAMPLE_RATE*phase_res)
		dphi_demux = uint32((dphi*self.DEMUX) % phase_res)
		phi0 = uint32(phase_offset*phase_res/(2*pi))
		# build single 30bit value
		word = ((dphi_demux&mask)<<(self.PHASE_LOOKUP_DEPTH*2)) | ((dphi&mask)<<self.PHASE_LOOKUP_DEPTH) | (phi0&mask)
		assign_synth = {'ddc_1st_' + tag + '_synth_input_dphi':word}
		return assign_synth
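
A worked sketch of the word packing used above, with a hypothetical PHASE_LOOKUP_DEPTH of 10 (so phase_res = 1024 and the word holds three 10-bit fields); the field values are made up for illustration.

PHASE_LOOKUP_DEPTH = 10              # hypothetical, not the class constant
phase_res = 2**PHASE_LOOKUP_DEPTH    # 1024
mask = phase_res - 1
dphi, dphi_demux, phi0 = 37, 148, 5  # example 10-bit field values
word = ((dphi_demux & mask) << (PHASE_LOOKUP_DEPTH * 2)) \
       | ((dphi & mask) << PHASE_LOOKUP_DEPTH) | (phi0 & mask)
print(hex(word))  # 0x9409405: dphi_demux in bits 29:20, dphi in bits 19:10, phi0 in bits 9:0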
Example #22
 def getTimeBaseIndices(self, name, tBegin, tEnd):
     """ Return time indices of name corresponding to tBegin and tEnd """
     if not self.status:
         raise Exception('Shotfile not open!')
     try:
         sigName = ctypes.c_char_p(name)
     except TypeError:
         sigName = ctypes.c_char_p(name.encode())
     error = ctypes.c_int32(0)
     info = self.getTimeBaseInfo(name)
     if tEnd < tBegin:
         temp = tEnd
         tEnd = tBegin
         tBegin = temp
     if tBegin < info.tBegin:
         tBegin = info.tBegin
     if tEnd > info.tEnd:
         tEnd = info.tEnd
     try:
         time1 = ctypes.c_float(tBegin)
     except TypeError:
         time1 = ctypes.c_float(tBegin.value)
     try:
         time2 = ctypes.c_float(tEnd)
     except TypeError:
         time2 = ctypes.c_float(tEnd.value)
     k1 = ctypes.c_uint32(0)
     k2 = ctypes.c_uint32(0)
     lname = ctypes.c_uint64(len(name))
     __libddww__.ddtindex_(ctypes.byref(error), ctypes.byref(self.diaref), sigName, ctypes.byref(time1), 
                           ctypes.byref(time2), ctypes.byref(k1), ctypes.byref(k2), lname)
     getError(error.value)
     return numpy.uint32(k1.value), numpy.uint32(k2.value)
Example #23
	def decode(self, server, block_header, target, job_id = None, extranonce2 = None):
		if block_header:
			job = Object()
	
			binary_data = block_header.decode('hex')
			data0 = np.zeros(64, np.uint32)
			data0 = np.insert(data0, [0] * 16, unpack('IIIIIIIIIIIIIIII', binary_data[:64]))
	
			job.target	  = np.array(unpack('IIIIIIII', target.decode('hex')), dtype=np.uint32)
			job.header	  = binary_data[:68]
			job.merkle_end  = np.uint32(unpack('I', binary_data[64:68])[0])
			job.time		= np.uint32(unpack('I', binary_data[68:72])[0])
			job.difficulty  = np.uint32(unpack('I', binary_data[72:76])[0])
			job.state	   = sha256(STATE, data0)
			job.f		   = np.zeros(8, np.uint32)
			job.state2	  = partial(job.state, job.merkle_end, job.time, job.difficulty, job.f)
			job.targetQ	 = 2**256 / int(''.join(list(chunks(target, 2))[::-1]), 16)
			job.job_id	  = job_id
			job.extranonce2 = extranonce2
			job.server	  = server
	
			calculateF(job.state, job.merkle_end, job.time, job.difficulty, job.f, job.state2)

			if job.difficulty != self.difficulty:
				self.set_difficulty(job.difficulty)
	
			return job
Example #24
	def calculate_pressure(self):
		# Calculate atmospheric pressure in [Pa]
		self.B6 = self.B5 - 4000
		print("B6=",self.B6)
		X1 = (self.B2 * (self.B6 * self.B6 / 2**12)) / 2**11
		print("X1=",X1)
		X2 = self.AC2 * self.B6 / 2**11
		print("X2=",X2)
		X3 = X1 + X2
		print("X3=",X3)
		self.B3 = (((self.AC1 * 4 + X3) << self.BMP183_CMD['OVERSAMPLE_3']) + 2 ) / 4
		print("B3=",self.B3)
		X1 = self.AC3 * self.B6 / 2**13
		print("X1=",X1)
		X2 = (self.B1 * (self.B6 * self.B6 / 2**12)) / 2**16
		print("X2=",X2)
		X3 = ((X1 + X2) + 2) / 2**2
		print("X3=",X3)
		self.B4 = numpy.uint32 (self.AC4 * (X3 + 32768) / 2**15)
		print("B4=",self.B4)
		self.B7 = (numpy.uint32 (self.UP) - self.B3) * (50000 >> self.BMP183_CMD['OVERSAMPLE_3'])
		print("B7=",self.B7)
		p = numpy.uint32 ((self.B7 * 2) / self.B4)
		print("p=",p)
		X1 = (p / 2**8) * ( p / 2**8)
		print("X1=",X1)
		X1 = int (X1 * 3038) / 2**16
		print("X1=",X1)
		X2 = int (-7357 * p) / 2**16
		print("X2=",X2)
		self.pressure = p + (X1 + X2 +3791) / 2**4
		print("pressure=",self.pressure)
Example #25
def communion_encode(msg):
    assert msg["mode"] in ("request", "response")
    m = 'SEAMLESS'.encode()
    tip = b'\x00' if msg["mode"] == "request" else b'\x01'
    m += tip

    m += np.uint32(msg["id"]).tobytes()
    remainder = msg.copy()
    remainder.pop("mode")
    remainder.pop("id")
    remainder.pop("content")    
    if len(remainder.keys()):
        rem = json.dumps(remainder).encode()
        nrem = np.uint32(len(rem))
        m += nrem.tobytes()
        m += rem
    else:
        m += b'\x00\x00\x00\x00'
    content = msg["content"]
    if content is None:
        m += b'\x00'
    else:
        assert isinstance(content, (str, bytes, bool)), content
        if isinstance(content, bool):
            is_str = b'\x01'
        else:
            is_str = b'\x03' if isinstance(content, str) else b'\x02'
        m += is_str
        if isinstance(content, str):
            content = content.encode()
        elif isinstance(content, bool):
            content = b'\x01' if content else b'\x00'
        m += content
    #assert communion_decode(m) == msg, (communion_decode(m), msg)
    return m
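
A minimal usage sketch for communion_encode above (hypothetical message, assumes numpy is imported as np): encode a request and check the fixed header layout, i.e. the 8-byte magic, the mode byte and the native-endian uint32 id.

msg = {"mode": "request", "id": 42, "content": "hello"}
buf = communion_encode(msg)
assert buf[:8] == b'SEAMLESS'                        # magic
assert buf[8:9] == b'\x00'                           # request marker
assert np.frombuffer(buf[9:13], np.uint32)[0] == 42  # message id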
Example #26
 def set_dac(self):
     @command()
     def set_dac_data(self, data):
         pass
     dac_data_1 = np.uint32(np.mod(np.floor(8192 * self.dac[0, :]) + 8192, 16384) + 8192)
     dac_data_2 = np.uint32(np.mod(np.floor(8192 * self.dac[1, :]) + 8192, 16384) + 8192)
     set_dac_data(self, dac_data_1 + 65536 * dac_data_2)
Example #27
 def set_demod(self, data):
     @command()
     def set_demod_buffer(self, data): 
         pass
     data1 = np.uint32(np.mod(np.floor(8192 * data[0, :]) + 8192,16384) + 8192)
     data2 = np.uint32(np.mod(np.floor(8192 * data[1, :]) + 8192,16384) + 8192)
     set_demod_buffer(self, data1 + data2 * 2**16)
Example #28
def color_count(image):
    '''Considering a (w,h,3) image of (dtype=uint8),
       compute the number of unique colors
    
       Encoding (i,j,k) into single index N = i+R*j+R*C*k
       Decoding N into (i,j,k) = (N-k*R*C-j*R, (N-k*R*C)/R, N/(R*C))
       using integer division\n

        Inputs:  image
        Returns: NumPy array of unique colors,
                 number of pixels of each unique color in image
    '''
    #Need to convert image to uint32 before multiplication so numbers are not truncated
    F = np.uint32(image[...,0])*256*256 + np.uint32(image[...,1])*256 + np.uint32(image[...,2])
    unique, counts = np.unique(F, return_counts=True)
    colors = np.empty(shape=(len(unique),3),dtype=np.uint32)
    numcol = np.empty(len(unique),dtype=np.uint32)
    i = 0
    for col,num in zip(unique,counts):
        R = col//(256*256)
        G = (col-R*256*256)//256
        B = (col-R*256*256-G*256)
        colors[i] = (R,G,B)
        numcol[i] = num
        i+=1
    return colors, numcol
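
A small sketch of color_count above on a hypothetical 1x3 image (assumes numpy is imported as np): two red pixels plus one other color give two unique colors with counts 2 and 1.

img = np.uint8([[[255, 0, 0], [255, 0, 0], [0, 128, 64]]])
colors, numcol = color_count(img)
print(colors)  # unique (R, G, B) rows, here [[  0 128  64] [255   0   0]]
print(numcol)  # pixel counts per color, here [1 2]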
Example #29
 def createSplitPerm(self, size, subset_ratio=0.8, seed=None):
     rng = np.random.RandomState(np.uint32((time.time())))
     if seed is not None:
         rng.seed(np.uint32(seed))
     perm = rng.permutation(size)
     k = int(size * subset_ratio)
     return perm[:k]
 def __extractRegionIndices(self):
     ''' Function generates, and returns, a dictionary of indices for regions in
     self.regionFile.'''
     # Create variable to store region indices and loop through file
     regionIndices = {}
     with open(self.regionFile, 'r') as infile:
         for line in infile:
             # Extract region data and find indices of matching bins
             chrom, start, end, region = line.strip().split('\t')
             acceptableBins = (
                 (self.binChr == chrom)
                 & (self.binStart >= np.uint32(start))
                 & (self.binEnd <= np.uint32(end)))
             indices = np.where(acceptableBins)[0]
             # Add region indices to dictionary
             if region in regionIndices:
                 regionIndices[region] = np.concatenate(
                     (regionIndices[region], indices))
             else:
                 regionIndices[region] = indices
     # Check region dictionary for absent or erroneous regions
     for region in regionIndices:
         # Extract indices, sort, and update dictionary
         indices = regionIndices[region]
         indices.sort()
         regionIndices[region] = indices
         # Check for absent or duplicate indices
         if len(indices) == 0:
             raise IOError('{} has no bins'.format(region))
         if len(set(indices)) != len(indices):
             raise IOError('{} has overlapping segments'.format(region))
     # Return region index data
     return(regionIndices)
Example #31
def xorshift32(t):
    x=t
    x=x^(x<<np.uint32(13))
    x=x^(x>>np.uint32(17))
    x=x^(x<<np.uint32(5))
    return np.uint32(x)
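
A brief usage sketch for xorshift32 above (assumes numpy is imported as np): starting from a non-zero seed, repeated calls yield a deterministic pseudo-random sequence of uint32 values (a zero seed stays zero).

state = np.uint32(123)
for _ in range(3):
    state = xorshift32(state)  # each call returns the next 32-bit state
print(state)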
Example #32
    def _pq_compute_single_core(proc_id, gt_jsons_set, pred_jsons_set,
                                gt_pans_set, pred_pans_set, gt_image_jsons_set,
                                categories):
        OFFSET = 256 * 256 * 256
        VOID = 0
        pq_stat = PQStat()
        for idx, (gt_json, pred_json, gt_pan, pred_pan,
                  gt_image_json) in enumerate(
                      zip(gt_jsons_set, pred_jsons_set, gt_pans_set,
                          pred_pans_set, gt_image_jsons_set)):
            # if idx % 100 == 0:
            #     logger.info('Compute pq -> Core: {}, {} from {} images processed'.format(proc_id, idx, len(gt_jsons_set)))
            gt_pan, pred_pan = np.uint32(gt_pan), np.uint32(pred_pan)
            pan_gt = gt_pan[:, :,
                            0] + gt_pan[:, :, 1] * 256 + gt_pan[:, :,
                                                                2] * 256 * 256
            pan_pred = pred_pan[:, :,
                                0] + pred_pan[:, :,
                                              1] * 256 + pred_pan[:, :,
                                                                  2] * 256 * 256

            gt_segms = {el['id']: el for el in gt_json['segments_info']}
            pred_segms = {el['id']: el for el in pred_json['segments_info']}

            # predicted segments area calculation + prediction sanity checks
            pred_labels_set = set(el['id']
                                  for el in pred_json['segments_info'])
            labels, labels_cnt = np.unique(pan_pred, return_counts=True)
            for label, label_cnt in zip(labels, labels_cnt):
                if label not in pred_segms:
                    if label == VOID:
                        continue
                    raise KeyError(
                        'In the image with ID {} segment with ID {} is presented in PNG and not presented in JSON.'
                        .format(gt_ann['image_id'], label))
                pred_segms[label]['area'] = label_cnt
                pred_labels_set.remove(label)
                if pred_segms[label]['category_id'] not in categories:
                    raise KeyError(
                        'In the image with ID {} segment with ID {} has unknown category_id {}.'
                        .format(gt_ann['image_id'], label,
                                pred_segms[label]['category_id']))
            if len(pred_labels_set) != 0:
                raise KeyError(
                    'In the image with ID {} the following segment IDs {} are presented in JSON and not presented in PNG.'
                    .format(gt_ann['image_id'], list(pred_labels_set)))

            # confusion matrix calculation
            pan_gt_pred = pan_gt.astype(np.uint64) * OFFSET + pan_pred.astype(
                np.uint64)
            gt_pred_map = {}
            labels, labels_cnt = np.unique(pan_gt_pred, return_counts=True)
            for label, intersection in zip(labels, labels_cnt):
                gt_id = label // OFFSET
                pred_id = label % OFFSET
                gt_pred_map[(gt_id, pred_id)] = intersection

            # count all matched pairs
            gt_matched = set()
            pred_matched = set()
            tp = 0
            fp = 0
            fn = 0

            for label_tuple, intersection in gt_pred_map.items():
                gt_label, pred_label = label_tuple
                if gt_label not in gt_segms:
                    continue
                if pred_label not in pred_segms:
                    continue
                if gt_segms[gt_label]['iscrowd'] == 1:
                    continue
                if gt_segms[gt_label]['category_id'] != pred_segms[pred_label][
                        'category_id']:
                    continue

                union = pred_segms[pred_label]['area'] + gt_segms[gt_label][
                    'area'] - intersection - gt_pred_map.get(
                        (VOID, pred_label), 0)
                iou = intersection / union
                if iou > 0.5:
                    pq_stat[gt_segms[gt_label]['category_id']].tp += 1
                    pq_stat[gt_segms[gt_label]['category_id']].iou += iou
                    gt_matched.add(gt_label)
                    pred_matched.add(pred_label)
                    tp += 1

            # count false negatives
            crowd_labels_dict = {}
            for gt_label, gt_info in gt_segms.items():
                if gt_label in gt_matched:
                    continue
                # crowd segments are ignored
                if gt_info['iscrowd'] == 1:
                    crowd_labels_dict[gt_info['category_id']] = gt_label
                    continue
                pq_stat[gt_info['category_id']].fn += 1
                fn += 1

            # count false positives
            for pred_label, pred_info in pred_segms.items():
                if pred_label in pred_matched:
                    continue
                # intersection of the segment with VOID
                intersection = gt_pred_map.get((VOID, pred_label), 0)
                # plus intersection with corresponding CROWD region if it exists
                if pred_info['category_id'] in crowd_labels_dict:
                    intersection += gt_pred_map.get(
                        (crowd_labels_dict[pred_info['category_id']],
                         pred_label), 0)
                # predicted segment is ignored if more than half of the segment corresponds to VOID and CROWD regions
                if intersection / pred_info['area'] > 0.5:
                    continue
                pq_stat[pred_info['category_id']].fp += 1
                fp += 1
        # logger.info('Compute pq -> Core: {}, all {} images processed'.format(proc_id, len(gt_jsons_set)))
        return pq_stat
Example #33
    def _converter_2ch_single_core(proc_id, pan_2ch_set, color_gererator):
        sys.path.insert(
            0,
            os.path.join(os.path.abspath(os.path.dirname(__file__)), '..',
                         '..', 'lib', 'dataset_devkit'))
        from panopticapi.utils import rgb2id
        OFFSET = 1000
        VOID = 255
        annotations, pan_all = [], []
        for idx in range(len(pan_2ch_set)):
            pan_2ch = np.uint32(pan_2ch_set[idx])
            pan = OFFSET * pan_2ch[:, :, 0] + pan_2ch[:, :, 1]
            pan_format = np.zeros((pan_2ch.shape[0], pan_2ch.shape[1], 3),
                                  dtype=np.uint8)

            l = np.unique(pan)
            segm_info = []
            for el in l:
                sem = el // OFFSET
                if sem == VOID:
                    continue
                mask = pan == el
                if vis_panoptic:
                    color = color_gererator.categories[sem]['color']
                else:
                    color = color_gererator.get_color(sem)
                pan_format[mask] = color
                index = np.where(mask)
                x = index[1].min()
                y = index[0].min()
                width = index[1].max() - x
                height = index[0].max() - y
                segm_info.append({
                    "category_id":
                    sem.item(),
                    "iscrowd":
                    0,
                    "id":
                    int(rgb2id(color)),
                    "bbox": [x.item(),
                             y.item(),
                             width.item(),
                             height.item()],
                    "area":
                    mask.sum().item()
                })
            annotations.append({"segments_info": segm_info})
            if vis_panoptic:
                pan_format = Image.fromarray(pan_format)
                draw = ImageDraw.Draw(pan_format)
                for el in l:
                    sem = el // OFFSET
                    if sem == VOID:
                        continue
                    if color_gererator.categories[sem][
                            'isthing'] and el % OFFSET != 0:
                        mask = ((pan == el) * 255).astype(np.uint8)
                        _, contour, _ = cv2.findContours(
                            mask.copy(), cv2.RETR_EXTERNAL,
                            cv2.CHAIN_APPROX_NONE)
                        for c in contour:
                            c = c.reshape(-1).tolist()
                            if len(c) < 4:
                                print('warning: invalid contour')
                                continue
                            draw.line(c, fill='white', width=2)
                pan_format = np.array(pan_format)
            pan_all.append(pan_format)
        return annotations, pan_all
Example #34
 def process(self, val):
     if self.processNumber(val):
         self.trace(self.__format.format(np.uint32(self.value)))
         return True
     else:
         return False
Example #35
def img_spikes_from_to(path,
                       num_neurons,
                       start_file_idx,
                       end_file_idx,
                       on_time_ms,
                       off_time_ms,
                       start_time,
                       delete_before=0,
                       ext='txt.bz2',
                       noise=True,
                       noise_prob=0.):
    start = start_file_idx
    end = end_file_idx
    spikes = []

    spk_files = glob.glob(os.path.join(path, "*.%s" % (ext)))
    spk_files.sort()
    # print(path)
    # print(len(spk_files))
    f = None
    spks = [[] for i in range(num_neurons)]
    if len(spk_files) == 0:
        raise Exception("Unable to locate files in dir:\n\t\t%s" % path)

    t = float(start_time)
    max_t = start_time
    for fname in spk_files[start:end]:

        # print(fname)
        # spks[:] = [ [] for i in range(num_neurons) ]
        n_lines = file_len(fname, compressed=True)

        # f = open(fname, 'r')

        f = bz2.BZ2File(fname, 'rb')
        line_n = 2
        np.random.seed()

        for line in f:
            vals = line.split(' ')
            nrn_id, spk_time = np.uint32(float(vals[0])), int(float(vals[1]))

            line_n += 1

            rand_dt = np.random.randint(-2, 3)  #[-2, -1, 0, 1, 2] or [..., 3)

            # print("id = %s, t = %s"%(vals[0], vals[1]))
            if nrn_id > num_neurons:
                raise Exception(
                    "Neuron Id from file is greater than number of "
                    "neurons given in the argument (%d > %d)" %
                    (nrn_id, num_neurons))

            if noise:
                np.random.seed()
                dice_roll = np.random.uniform(0., 1.)
                if dice_roll <= noise_prob:
                    continue

            rspk_time = spk_time + rand_dt

            if rspk_time <= delete_before:
                continue

            rspk_time += t
            rspk_time -= delete_before

            if rspk_time < 0:
                continue

            sys.stdout.write(
                "\r%05.2f%%\tbase t %06d\tneuron %06d\ttime %06d\tmax t %06d" %
                (100. * float(line_n) / n_lines, t, nrn_id, rspk_time,
                 on_time_ms))
            sys.stdout.flush()

            if rspk_time > on_time_ms:
                break

            if rspk_time in spks[nrn_id]:
                continue

            spks[nrn_id].append(rspk_time)

            if rspk_time > max_t:
                max_t = rspk_time

        f.close()
        # print(fname, t)
        # t += on_time_ms + off_time_ms
        t = max_t
        t += off_time_ms
        # spikes.append(spks)
        sys.stdout.write("\n")
        sys.stdout.flush()

    for nrn_id in range(num_neurons):
        nspk = len(spks[nrn_id])

        if nspk == 0:
            continue

        spks[nrn_id][:] = list(set(spks[nrn_id]))  # remove duplicates

        # random noise (spike loss)
        # np.random.seed(np.uint32(time.time()*(10**10)))
        # to_remove = np.random.choice(np.arange(nspk), size=int(nspk*0.1),
        # replace=False)
        # for i in sorted(to_remove, reverse=True):
        # del spks[nrn_id][i]

        spks[nrn_id].sort()

    print()
    return spks
Example #36
def qc_fpga_task(fpga_trials, alf_trials):
    """
    :fpga_task is the dictionary output of
    ibllib.io.extractors.ephys_fpga.extract_behaviour_sync
    : bpod_trials is the dictionary output of ibllib.io.extractors.ephys_trials.extract_all
    : alf_trials is the ALF _ibl_trials object after extraction (alf.io.load_object)
    :return: qc_session, qc_trials, True means QC passes while False indicates a failure
    """

    GOCUE_STIMON_DELAY = 0.01  # -> 0.1
    FEEDBACK_STIMFREEZE_DELAY = 0.01  # -> 0.1
    VALVE_STIM_OFF_DELAY = 1
    VALVE_STIM_OFF_JITTER = 0.1
    ITI_IN_STIM_OFF_JITTER = 0.1
    ERROR_STIM_OFF_DELAY = 2
    ERROR_STIM_OFF_JITTER = 0.1
    RESPONSE_FEEDBACK_DELAY = 0.0005

    def strictly_after(t0, t1, threshold):
        """ returns isafter, iswithinthreshold"""
        return (t1 - t0) > 0, np.abs((t1 - t0)) <= threshold

    ntrials = fpga_trials['stimOn_times'].size
    qc_trials = Bunch({})
    """
    First check consistency of the dataset: within each trial, all events happening after trial
    start should not be NaN and should be increasing. This is not a QC but an assertion.
    """
    status = True
    for k in [
            'response_times', 'stimOn_times', 'response_times',
            'goCueTrigger_times', 'goCue_times', 'feedback_times'
    ]:
        if k.endswith('_bpod'):
            tstart = alf_trials['intervals_bpod'][:, 0]
        else:
            tstart = alf_trials['intervals'][:, 0]
        selection = ~np.isnan(alf_trials[k])
        status &= np.all(alf_trials[k][selection] - tstart[selection] > 0)
        status &= np.all(np.diff(alf_trials[k][selection]) > 0)
    assert status
    """
    This part of the function uses only fpga_trials information
    """
    # check number of feedbacks: should always be one
    qc_trials['n_feedback'] = (
        np.uint32(~np.isnan(fpga_trials['valveOpen_times'])) +
        np.uint32(~np.isnan(fpga_trials['errorCue_times'])))

    # check for non-Nans
    qc_trials['stimOn_times_nan'] = ~np.isnan(fpga_trials['stimOn_times'])
    qc_trials['goCue_times_nan'] = ~np.isnan(fpga_trials['goCue_times'])

    # stimOn before goCue
    qc_trials['stimOn_times_before_goCue_times'], qc_trials['stimOn_times_goCue_times_delay'] =\
        strictly_after(fpga_trials['stimOn_times'], fpga_trials['goCue_times'], GOCUE_STIMON_DELAY)

    # stimFreeze before feedback
    qc_trials['stim_freeze_before_feedback'], qc_trials['stim_freeze_feedback_delay'] = \
        strictly_after(fpga_trials['stimFreeze_times'], fpga_trials['feedback_times'],
                       FEEDBACK_STIMFREEZE_DELAY)

    # stimOff 1 sec after valve, with 0.1 as acceptable jitter
    qc_trials['stimOff_delay_valve'] = np.less(
        np.abs(fpga_trials['stimOff_times'] - fpga_trials['valveOpen_times'] -
               VALVE_STIM_OFF_DELAY),
        VALVE_STIM_OFF_JITTER,
        out=np.ones(ntrials, dtype=np.bool),
        where=~np.isnan(fpga_trials['valveOpen_times']))

    # iti_in within 0.1 sec of stimOff
    qc_trials['iti_in_delay_stim_off'] = \
        np.abs(fpga_trials['stimOff_times'] - fpga_trials['itiIn_times']) < ITI_IN_STIM_OFF_JITTER

    # stimOff 2 secs after errorCue_times with jitter
    # noise off happens 2 secs after stim, with 0.1 as acceptable jitter
    qc_trials['stimOff_delay_noise'] = np.less(
        np.abs(fpga_trials['stimOff_times'] - fpga_trials['errorCue_times'] -
               ERROR_STIM_OFF_DELAY),
        ERROR_STIM_OFF_JITTER,
        out=np.ones(ntrials, dtype=np.bool),
        where=~np.isnan(fpga_trials['errorCue_times']))
    """
    This part uses only alf_trials information
    """
    # TEST  Response times (from session start) should be increasing continuously
    #       Note: RT are not durations but time stamps from session start
    #       1. check for non-Nans
    qc_trials['response_times_nan'] = ~np.isnan(alf_trials['response_times'])
    #       2. check for positive increase
    qc_trials['response_times_increase'] = \
        np.diff(np.append([0], alf_trials['response_times'])) > 0
    # TEST  Response times (from goCue) should be positive
    qc_trials['response_times_goCue_times_diff'] = \
        alf_trials['response_times'] - alf_trials['goCue_times'] > 0
    # TEST  1. Response_times should be before feedback
    qc_trials['response_before_feedback'] = \
        alf_trials['feedback_times'] - alf_trials['response_times'] > 0
    #       2. Delay between wheel reaches threshold (response time) and
    #       feedback is 100us, acceptable jitter 500 us
    qc_trials['response_feedback_delay'] = \
        alf_trials['feedback_times'] - alf_trials['response_times'] < RESPONSE_FEEDBACK_DELAY

    # Test output at session level
    qc_session = {k: np.all(qc_trials[k]) for k in qc_trials}

    return qc_session, qc_trials
Example #37
        da.arange(10, chunks=-1, whatsthis=1)

    assert da.arange(10).chunks == ((10, ), )


@pytest.mark.parametrize(
    "start,stop,step,dtype",
    [
        (0, 1, 1, None),  # int64
        (1.5, 2, 1, None),  # float64
        (1, 2.5, 1, None),  # float64
        (1, 2, 0.5, None),  # float64
        (np.float32(1), np.float32(2), np.float32(1),
         None),  # promoted to float64
        (np.int32(1), np.int32(2), np.int32(1), None),  # promoted to int64
        (np.uint32(1), np.uint32(2), np.uint32(1), None),  # promoted to int64
        (np.uint64(1), np.uint64(2), np.uint64(1),
         None),  # promoted to float64
        (np.uint32(1), np.uint32(2), np.uint32(1), np.uint32),
        (np.uint64(1), np.uint64(2), np.uint64(1), np.uint64),
        # numpy.arange gives unexpected results
        # https://github.com/numpy/numpy/issues/11505
        # (1j, 2, 1, None),
        # (1, 2j, 1, None),
        # (1, 2, 1j, None),
        # (1+2j, 2+3j, 1+.1j, None),
    ],
)
def test_arange_dtypes(start, stop, step, dtype):
    a_np = np.arange(start, stop, step, dtype=dtype)
    a_da = da.arange(start, stop, step, dtype=dtype, chunks=-1)
Example #38
def get_mydata_correl(data_pars):
    state_num = data_pars['state_num']
    time_len = data_pars['time_len']
    signal_dimension = data_pars['signal_dimension']
    CNR = data_pars['CNR']
    window_len = data_pars['window_len']
    half_window_len = data_pars['half_window_len']
    a = np.ones(shape=(state_num, state_num))
    alpha = np.ones(10) * 10
    alpha[5:] = 1
    base_prob = np.random.dirichlet(alpha) * 0.1
    for t in range(state_num):
        a[t, :] = base_prob
        a[t, t] += 0.9

    # simulate states
    state = np.zeros(time_len, dtype=np.uint8)
    p = np.random.uniform()
    state[0] = np.floor(p * state_num)
    for t in range(0, time_len - 1):
        p = np.random.uniform()
        for s in range(state_num):
            if (p <= np.sum(a[state[t], :s + 1])):
                state[t + 1] = s
                break

    freq = np.zeros(state_num)
    for t in range(state_num):
        freq[t] = np.sum(state == t)
    loading = np.random.randint(-1, 2, size=(state_num, signal_dimension))

    cov = np.zeros((state_num, signal_dimension, signal_dimension))
    for t in range(state_num):
        cov[t, :, :] = np.matmul(np.transpose([loading[t, :]]),
                                 [loading[t, :]])

    # generate BOLD signal
    signal = np.zeros((time_len, signal_dimension))
    for t in range(0, time_len):
        signal[t, :] = np.random.multivariate_normal(
            np.zeros((signal_dimension)), cov[state[t], :, :])
    signal += np.random.normal(size=signal.shape) / CNR
    original_dim = np.uint32(signal_dimension * (signal_dimension - 1) / 2)

    x_train = np.zeros(shape=(time_len - window_len * 2,
                              np.uint32(original_dim)))
    sum_corr = np.zeros(shape=(state_num, original_dim))
    occupancy = np.zeros(state_num)

    for t in range(window_len, time_len - window_len):
        corr_matrix = np.corrcoef(
            np.transpose(signal[t - half_window_len:t + half_window_len +
                                1, :]))
        upper = corr_matrix[np.triu_indices(signal_dimension, k=1)]
        x_train[t - window_len, :] = np.squeeze(upper)
        if (np.sum(state[t - half_window_len:t + half_window_len +
                         1] == state[t]) == window_len):
            sum_corr[state[t], :] += x_train[t - window_len, :]
            occupancy[state[t]] += 1

    return x_train
Example #39
n_workers = (cat.size[0] * cat.size[1], )
prg.sep_channels(queue, n_workers, None, pix_buf, r_buf, g_buf, b_buf)

wgs = cl.Kernel(prg, 'blur_channel').get_work_group_info(
    cl.kernel_work_group_info.WORK_GROUP_SIZE,
    ctx.get_info(cl.context_info.DEVICES)[0])
n_local = (16, 12)
if n_local[0] * n_local[1] > wgs:
    print "Reduce the n_local variable size please!"

nn_buf = cl.LocalMemory((n_local[0] + 2) * (n_local[1] + 2))
n_workers = (cat.size[0], cat.size[1])

prg.blur_channel(queue, n_workers, n_local, r_buf, rb_buf, nn_buf,
                 np.uint32(cat.size[0]), np.uint32(cat.size[1]))
prg.blur_channel(queue, n_workers, n_local, g_buf, gb_buf, nn_buf,
                 np.uint32(cat.size[0]), np.uint32(cat.size[1]))
prg.blur_channel(queue, n_workers, n_local, b_buf, bb_buf, nn_buf,
                 np.uint32(cat.size[0]), np.uint32(cat.size[1]))

#show_single_buffer(queue, n_pix, r_buf)
show_single_buffer(queue, n_pix, rb_buf)

n_workers = (cat.size[0] * cat.size[1], )
prg.mer_channels(queue, n_workers, None, pixb_buf, rb_buf, gb_buf, bb_buf)

cl.enqueue_copy(queue, result, pixb_buf)

im_data = [(p[0], p[1], p[2], p[3]) for p in result]
cat.putdata(im_data)
Example #40
    

    ind=ind+h2

h=4
h2=h*h

static_plain=True

if (static_plain):
	s_plain = "HELLO WORLD!!!!!!!!!"
	size_mesg = len(s_plain)
else:	
	size_mesg=36

lenH=np.uint32((size_mesg+h2-1)/h2)

print(lenH)
rp=1    

seed=123 #np.uint32(time.time())
#seed=xorshift32(seed)
#print(seed)
#seed=xorshift32(seed)
#print(seed)
#sys.exit(0)

DK = np.empty([LSC_SKEY],dtype=np.uint8)
Nonce = np.empty([LSC_SKEY],dtype=np.uint8)
sc = np.empty([256],dtype=np.uint8)
PboxRM = np.empty([h2],dtype=np.uint8)
Example #41
'''chroma.bvh: Bounding Volume Hierarchy generation and manipulation.'''

import numpy as np
from pycuda.gpuarray import vec

uint4 = vec.uint4  # pylint: disable-msg=C0103,E1101

CHILD_BITS = 28
NCHILD_MASK = np.uint32(0xFFFF << CHILD_BITS)


def unpack_nodes(nodes):
    '''Creates a numpy record array with the contents of nodes
    array unpacked into separate fields.

      ``nodes``: ndarray(shape=n, dtype=uint4)
         BVH node array in the packed x,y,z,w format.

    Returns ndarray(shape=n, dtype=[('xlo', np.uint16), ('xhi', np.uint16),
                                    ('ylo', np.uint16), ('yhi', np.uint16),
                                    ('zlo', np.uint16), ('zhi', np.uint16),
                                    ('child', np.uint32), ('nchild', np.uint16)])
    '''
    unpacked_dtype = np.dtype([('xlo', np.uint16), ('xhi', np.uint16),
                               ('ylo', np.uint16), ('yhi', np.uint16),
                               ('zlo', np.uint16), ('zhi', np.uint16),
                               ('child', np.uint32), ('nchild', np.uint16)])
    unpacked = np.empty(shape=len(nodes), dtype=unpacked_dtype)

    for axis in ['x', 'y', 'z']:
        unpacked[axis + 'lo'] = nodes[axis] & 0xFFFF
        unpacked[axis + 'hi'] = nodes[axis] >> 16

    # The excerpt was cut off here; the remaining fields are reconstructed from
    # the packed layout described in the docstring and the CHILD_BITS /
    # NCHILD_MASK constants above.
    unpacked['child'] = nodes['w'] & ~NCHILD_MASK
    unpacked['nchild'] = nodes['w'] >> CHILD_BITS

    return unpacked
def feature_vecs_DOC_W2V(train_pos, train_neg, test_pos, test_neg):
    """
    Returns the feature vectors for all text in the train and test datasets.
    """
    # Load the pre-trained word2vec model
    word2vec_model = word2vec.Word2Vec.load(path_to_pretrained_w2v)

    # Doc2Vec requires TaggedDocument objects as input.
    # Turn the datasets from lists of words to lists of TaggedDocument objects.
    labeled_train_pos = [
        TaggedDocument(words, ["TRAIN_POS_" + str(i)])
        for i, words in enumerate(train_pos)
    ]
    labeled_train_neg = [
        TaggedDocument(words, ["TRAIN_NEG_" + str(i)])
        for i, words in enumerate(train_neg)
    ]
    labeled_test_pos = [
        TaggedDocument(words, ["TEST_POS_" + str(i)])
        for i, words in enumerate(test_pos)
    ]
    labeled_test_neg = [
        TaggedDocument(words, ["TEST_NEG_" + str(i)])
        for i, words in enumerate(test_neg)
    ]

    sentences = labeled_train_pos + labeled_train_neg + labeled_test_pos + labeled_test_neg

    # Use modified doc2vec codes for applying the pre-trained word2vec model
    model = doc2vec_modified.Doc2Vec(dm=0,
                                     dm_mean=1,
                                     alpha=0.025,
                                     min_alpha=0.0001,
                                     min_count=1,
                                     size=1000,
                                     hs=1,
                                     workers=4,
                                     train_words=False,
                                     train_lbls=True)
    model.reset_weights()

    # Copy wiki word2vec model into doc2vec model
    model.vocab = word2vec_model.vocab
    model.syn0 = word2vec_model.syn0
    model.syn1 = word2vec_model.syn1
    model.index2word = word2vec_model.index2word

    print("# of pre-trained vocab = " + str(len(model.vocab)))

    # Extract sentence labels for the training and test data
    train_pos_labels = [
        "TRAIN_POS_" + str(i) for i in range(len(labeled_train_pos))
    ]
    train_neg_labels = [
        "TRAIN_NEG_" + str(i) for i in range(len(labeled_train_neg))
    ]
    test_pos_labels = [
        "TEST_POS_" + str(i) for i in range(len(labeled_test_pos))
    ]
    test_neg_labels = [
        "TEST_NEG_" + str(i) for i in range(len(labeled_test_neg))
    ]

    sentence_labels = train_pos_labels + train_neg_labels + test_pos_labels + test_neg_labels

    new_syn0 = empty((len(sentences), model.layer1_size), dtype=REAL)
    new_syn1 = empty((len(sentences), model.layer1_size), dtype=REAL)

    syn_index = 0

    # Initialize and add a vector of syn0 (i.e. input vector) and syn1 (i.e. output vector) for a vector of a label
    for label in sentence_labels:
        v = model.append_label_into_vocab(
            label)  # I made this function in the doc2vec code

        random.seed(
            uint32(model.hashfxn(model.index2word[v.index] + str(model.seed))))

        new_syn0[syn_index] = (random.rand(model.layer1_size) -
                               0.5) / model.layer1_size
        new_syn1[syn_index] = zeros((1, model.layer1_size), dtype=REAL)

        syn_index += 1

    model.syn0 = vstack([model.syn0, new_syn0])
    model.syn1 = vstack([model.syn1, new_syn1])

    model.precalc_sampling()

    # Train the model
    # This may take a bit to run
    for i in range(5):
        start_time = time.time()

        print("Training iteration %d" % (i))
        random.shuffle(sentences)
        model.train(sentences)

        print("Done - Training")
        print("--- %s minutes ---" % ((time.time() - start_time) / 60))
        start_time = time.time()

        # Convert "nan" values into "0" in vectors
        indices_nan = isnan(model.syn0)
        model.syn0[indices_nan] = 0.0

        indices_nan = isnan(model.syn1)
        model.syn1[indices_nan] = 0.0

        # Extract the feature vectors for the training and test data
        train_pos_vec = [
            model.syn0[model.vocab["TRAIN_POS_" + str(i)].index]
            for i in range(len(labeled_train_pos))
        ]
        train_neg_vec = [
            model.syn0[model.vocab["TRAIN_NEG_" + str(i)].index]
            for i in range(len(labeled_train_neg))
        ]
        test_pos_vec = [
            model.syn0[model.vocab["TEST_POS_" + str(i)].index]
            for i in range(len(labeled_test_pos))
        ]
        test_neg_vec = [
            model.syn0[model.vocab["TEST_NEG_" + str(i)].index]
            for i in range(len(labeled_test_neg))
        ]

        print("Done - Extracting the feature vectors")
        print("--- %s minutes ---" % ((time.time() - start_time) / 60))

    # Return the four feature vectors
    return train_pos_vec, train_neg_vec, test_pos_vec, test_neg_vec
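# Hypothetical usage sketch (the variable names below are placeholders, not from
# the original): each argument is a list of token lists, and the four returned
# vector lists line up with the four inputs, ready to feed a classifier.
#
# train_pos = [review.split() for review in raw_train_pos]
# train_neg = [review.split() for review in raw_train_neg]
# test_pos = [review.split() for review in raw_test_pos]
# test_neg = [review.split() for review in raw_test_neg]
# train_pos_vec, train_neg_vec, test_pos_vec, test_neg_vec = \
#     feature_vecs_DOC_W2V(train_pos, train_neg, test_pos, test_neg)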
Beispiel #43
0
borealis_site_rawacf_data = OrderedDict([(str(1558583991060), {
    "borealis_git_hash": np.unicode_('v0.2-61-gc13ab34'),
    "experiment_id": np.int64(100000000),
    "experiment_name": np.unicode_('TestScheme9ACFs'),
    "experiment_comment": np.unicode_(''),
    "num_slices": np.int64(1),
    "slice_comment": np.unicode_(''),
    "station": np.unicode_('sas'),
    "num_sequences": np.int64(29),
    "range_sep": np.float32(44.96887),
    "first_range_rtt": np.float32(1200.8307),
    "first_range": np.float32(180.0),
    "rx_sample_rate": np.float64(3333.3333333333335),
    "scan_start_marker": np.bool_(True),
    "int_time": np.float32(3.000395),
    "tx_pulse_len": np.uint32(300),
    "tau_spacing": np.uint32(2400),
    "main_antenna_count": np.uint32(16),
    "intf_antenna_count": np.uint32(4),
    "freq": np.uint32(10500),
    "samples_data_type": np.unicode_('complex float'),
    "pulses": np.array([0, 9, 12, 20, 22, 26, 27]).astype(np.uint32),
    "lags": np.array([[0,  0],
                      [26, 27],
                      [20, 22],
                      [9, 12],
                      [22, 26],
                      [22, 27],
                      [20, 26],
                      [20, 27],
                      [12, 20],
Beispiel #44
0
    def eval(self, pars):

        tic()
        _ctx, queue = card()
        self.res[:] = 0
        cl.enqueue_copy(queue, self.res_b, self.res)
        radius, length, cyl_theta, cyl_phi = \
            [GaussianDispersion(int(pars[base+'_pd_n']), pars[base+'_pd'], pars[base+'_pd_nsigma'])
             for base in GpuCylinder.PD_PARS]

        #Get the weights for each
        radius.value, radius.weight = radius.get_weights(
            pars['radius'], 0, 10000, True)
        length.value, length.weight = length.get_weights(
            pars['length'], 0, 10000, True)
        cyl_theta.value, cyl_theta.weight = cyl_theta.get_weights(
            pars['cyl_theta'], -np.inf, np.inf, False)
        cyl_phi.value, cyl_phi.weight = cyl_phi.get_weights(
            pars['cyl_phi'], -np.inf, np.inf, False)

        #Perform the computation, with all weight points
        sum, norm, norm_vol, vol = 0.0, 0.0, 0.0, 0.0
        size = len(cyl_theta.weight)
        sub = pars['sldCyl'] - pars['sldSolv']

        real = np.float32 if self.qx.dtype == np.dtype(
            'float32') else np.float64
        #Loop over radius, length, theta, phi weight points
        for i in xrange(len(radius.weight)):
            for j in xrange(len(length.weight)):

                vol += radius.weight[i] * length.weight[j] * pow(
                    radius.value[i], 2) * length.value[j]
                norm_vol += radius.weight[i] * length.weight[j]

                for k in xrange(len(cyl_theta.weight)):
                    for l in xrange(len(cyl_phi.weight)):
                        self.prg.CylinderKernel(queue, self.qx.shape, None,
                                                self.qx_b, self.qy_b,
                                                self.res_b, real(sub),
                                                real(radius.value[i]),
                                                real(length.value[j]),
                                                real(pars['scale']),
                                                real(radius.weight[i]),
                                                real(length.weight[j]),
                                                real(cyl_theta.weight[k]),
                                                real(cyl_phi.weight[l]),
                                                real(cyl_theta.value[k]),
                                                real(cyl_phi.value[l]),
                                                np.uint32(self.qx.size),
                                                np.uint32(size))

                        norm += radius.weight[i] * length.weight[
                            j] * cyl_theta.weight[k] * cyl_phi.weight[l]

    # if size > 1:
    #    norm /= math.asin(1.0)
        cl.enqueue_copy(queue, self.res, self.res_b)
        sum = self.res
        if vol != 0.0 and norm_vol != 0.0:
            sum *= norm_vol / vol

        print toc() * 1000, self.qx.shape[0]
        return sum / norm + pars['background']
Beispiel #45
0
    def get_compute_kernels(self, runner, full_output, bulk):
        gpu_rho = runner.gpu_field(self.rho)
        gpu_phi = runner.gpu_field(self.phi)
        gpu_v = runner.gpu_field(self.v)
        gpu_map = runner.gpu_geo_map()

        gpu_dist1a = runner.gpu_dist(0, 0)
        gpu_dist1b = runner.gpu_dist(0, 1)
        gpu_dist2a = runner.gpu_dist(1, 0)
        gpu_dist2b = runner.gpu_dist(1, 1)

        options = 0
        if full_output:
            options |= 1
        if bulk:
            options |= 2

        options = np.uint32(options)
        # Primary.
        args1a = ([gpu_map, gpu_dist1a, gpu_dist1b, gpu_rho, gpu_phi] + gpu_v +
                  [options])
        args1b = ([gpu_map, gpu_dist2a, gpu_dist2b, gpu_rho, gpu_phi] + gpu_v +
                  [options])
        # Secondary.
        args2a = ([gpu_map, gpu_dist1b, gpu_dist1a, gpu_rho, gpu_phi] + gpu_v +
                  [options])
        args2b = ([gpu_map, gpu_dist2b, gpu_dist2a, gpu_rho, gpu_phi] + gpu_v +
                  [options])

        macro_args1 = ([gpu_map, gpu_dist1a, gpu_dist2a, gpu_rho, gpu_phi] +
                       gpu_v + [options])
        macro_args2 = ([gpu_map, gpu_dist1b, gpu_dist2b, gpu_rho, gpu_phi] +
                       gpu_v + [options])

        args_a_signature = 'P' * (len(args1a) - 1) + 'i'
        args_b_signature = 'P' * (len(args1b) - 1) + 'i'
        macro_signature = 'P' * (len(macro_args1) - 1) + 'i'

        if runner.gpu_scratch_space is not None:
            macro_args1.append(runner.gpu_scratch_space)
            macro_args2.append(runner.gpu_scratch_space)
            macro_signature += 'P'

            args1a.append(runner.gpu_scratch_space)
            args2a.append(runner.gpu_scratch_space)
            args1b.append(runner.gpu_scratch_space)
            args2b.append(runner.gpu_scratch_space)
            args_a_signature += 'P'
            args_b_signature += 'P'

        macro = runner.get_kernel(
            'ShanChenPrepareMacroFields',
            macro_args1,
            macro_signature,
            needs_iteration=self.config.needs_iteration_num)

        if self.config.access_pattern == 'AB':
            macro_secondary = runner.get_kernel(
                'ShanChenPrepareMacroFields',
                macro_args2,
                macro_signature,
                needs_iteration=self.config.needs_iteration_num)
            macro_pair = KernelPair(macro, macro_secondary)
        else:
            macro_pair = KernelPair(macro, macro)

        # TODO(michalj): These kernels can actually run in parallel.
        primary = [
            runner.get_kernel('ShanChenCollideAndPropagate0',
                              args1a,
                              args_a_signature,
                              needs_iteration=self.config.needs_iteration_num),
            runner.get_kernel('ShanChenCollideAndPropagate1',
                              args1b,
                              args_b_signature,
                              needs_iteration=self.config.needs_iteration_num)
        ]

        if self.config.access_pattern == 'AB':
            secondary = [
                runner.get_kernel(
                    'ShanChenCollideAndPropagate0',
                    args2a,
                    args_a_signature,
                    needs_iteration=self.config.needs_iteration_num),
                runner.get_kernel(
                    'ShanChenCollideAndPropagate1',
                    args2b,
                    args_b_signature,
                    needs_iteration=self.config.needs_iteration_num)
            ]
            sim_pair = KernelPair(primary, secondary)
        else:
            sim_pair = KernelPair(primary, primary)

        return zip(macro_pair, sim_pair)
    def write_uint32(self, value):
        return self.write(np.uint32(value))
Beispiel #47
0
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/master/LICENSE

from __future__ import absolute_import

import numpy

# used in unmarshaling
kByteCountMask = numpy.int64(0x40000000)
kByteCountVMask = numpy.int64(0x4000)
kClassMask = numpy.int64(0x80000000)
kNewClassTag = numpy.int64(0xFFFFFFFF)

kIsOnHeap = numpy.uint32(0x01000000)
kIsReferenced = numpy.uint32(1 << 4)

kMapOffset = 2

# not used?
kNullTag = 0
kNotDeleted = numpy.uint32(0x02000000)
kZombie = numpy.uint32(0x04000000)
kBitMask = numpy.uint32(0x00FFFFFF)
kDisplacementMask = numpy.uint32(0xFF000000)

############# core/zip/inc/Compression.h

kZLIB = 1
kLZMA = 2
kOldCompressionAlgo = 3
kLZ4 = 4
kZSTD = 5
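# Illustrative sketch (an assumption about typical use of the masks above, not
# code from this module): a serialized byte-count word carries kByteCountMask in
# its upper bits, so the payload length is recovered by clearing that bit.
_raw_count = numpy.int64(0x4000002A)             # example count field
_num_bytes = int(_raw_count & ~kByteCountMask)   # -> 42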
Beispiel #48
0
def threefry_2x32(keypair, count):
    """Apply the Threefry 2x32 hash.

  Args:
    keypair: a pair of 32bit unsigned integers used for the key.
    count: an array of dtype uint32 used for the counts.

  Returns:
    An array of dtype uint32 with the same shape as `count`.
  """
    # Based on ThreeFry2x32 by phawkins@ in //.../xla/client/lib/prng.cc
    key1, key2 = keypair
    if not lax._dtype(key1) == lax._dtype(key2) == lax._dtype(
            count) == onp.uint32:
        msg = "threefry_2x32 requires uint32 arguments, got {}"
        raise TypeError(
            msg.format([lax._dtype(x) for x in [key1, key2, count]]))

    rotate_left = _make_rotate_left(lax._dtype(count))

    def apply_round(v, rot):
        v = v[:]
        v[0] = v[0] + v[1]
        v[1] = rotate_left(v[1], rot)
        v[1] = v[0] ^ v[1]
        return v

    odd_size = count.size % 2
    if odd_size:
        x = list(np.split(np.concatenate([count.ravel(), onp.uint32([0])]), 2))
    else:
        x = list(np.split(count.ravel(), 2))

    rotations = [13, 15, 26, 6, 17, 29, 16, 24]
    ks = [key1, key2, key1 ^ key2 ^ onp.uint32(0x1BD11BDA)]

    x[0] = x[0] + ks[0]
    x[1] = x[1] + ks[1]

    for r in rotations[:4]:
        x = apply_round(x, r)
    x[0] = x[0] + ks[1]
    x[1] = x[1] + ks[2] + onp.uint32(1)

    for r in rotations[4:]:
        x = apply_round(x, r)
    x[0] = x[0] + ks[2]
    x[1] = x[1] + ks[0] + onp.uint32(2)

    for r in rotations[:4]:
        x = apply_round(x, r)
    x[0] = x[0] + ks[0]
    x[1] = x[1] + ks[1] + onp.uint32(3)

    for r in rotations[4:]:
        x = apply_round(x, r)
    x[0] = x[0] + ks[1]
    x[1] = x[1] + ks[2] + onp.uint32(4)

    for r in rotations[:4]:
        x = apply_round(x, r)
    x[0] = x[0] + ks[2]
    x[1] = x[1] + ks[0] + onp.uint32(5)

    out = np.concatenate(x)
    assert out.dtype == onp.uint32
    return lax.reshape(out[:-1] if odd_size else out, count.shape)
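# Minimal usage sketch (commented out; assumes the module-level np/onp aliases,
# with onp being plain NumPy as in the dtype checks above):
#
# key = (onp.uint32(0), onp.uint32(42))
# counts = onp.arange(4, dtype=onp.uint32)
# bits = threefry_2x32(key, counts)   # uint32 array with the same shape as counts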
plt.rc('text', usetex=True)
plt.rc('font', **font) # family='serif')
# params = {'text.latex.preamble': ['\usepackage{upgreek}']}
# plt.rcParams.update(params)
plt.rc

###############################
### data in 
nam_data = 'Sedimentation_velocities.dat'


###############################
### load data
data_In = np.loadtxt(nam_data)

n_meas_tot = np.uint32(data_In[:,0]) 
v_front = data_In[:,1] ## front velocity
uv_front = data_In[:,2]
v_xdfa = data_In[:,3]
uv_xdfa = data_In[:,4]

###### list of pumprates
pumprate = [750, 700, 650, 600, 550, 500, 450, 400, 350]
N_rate = len(pumprate)


N_meas = [3, 3, 3, 3, 3, 3, 3, 3, 3]  ### number of measurements per pump rate


##############################
### plot attenuation vs z_tilde 
Beispiel #50
0
    def get_compute_kernels(self, runner, full_output, bulk):
        gpu_rho = runner.gpu_field(self.rho)
        gpu_phi = runner.gpu_field(self.phi)
        gpu_lap = runner.gpu_field(self.phi_laplacian)
        gpu_v = runner.gpu_field(self.v)
        gpu_map = runner.gpu_geo_map()

        gpu_dist1a = runner.gpu_dist(0, 0)
        gpu_dist1b = runner.gpu_dist(0, 1)
        gpu_dist2a = runner.gpu_dist(1, 0)
        gpu_dist2b = runner.gpu_dist(1, 1)

        options = 0
        if full_output:
            options |= 1
        if bulk:
            options |= 2

        if hasattr(
                self,
                '_force_term_for_eq') and self._force_term_for_eq.get(1) == 0:
            phi_args = [gpu_rho, gpu_phi]
        else:
            phi_args = [gpu_phi]

        options = np.uint32(options)
        # Primary.
        args1a = ([gpu_map, gpu_dist1a, gpu_dist1b, gpu_rho, gpu_phi] + gpu_v +
                  [gpu_lap, options])
        args1b = ([gpu_map, gpu_dist2a, gpu_dist2b] + phi_args + gpu_v +
                  [gpu_lap, options])
        # Secondary.
        args2a = ([gpu_map, gpu_dist1b, gpu_dist1a, gpu_rho, gpu_phi] + gpu_v +
                  [gpu_lap, options])
        args2b = ([gpu_map, gpu_dist2b, gpu_dist2a] + phi_args + gpu_v +
                  [gpu_lap, options])

        macro_args1 = [
            gpu_map, gpu_dist1a, gpu_dist2a, gpu_rho, gpu_phi, options
        ]
        macro_args2 = [
            gpu_map, gpu_dist1b, gpu_dist2b, gpu_rho, gpu_phi, options
        ]

        args_a_signature = 'P' * (len(args1a) - 1) + 'i'
        args_b_signature = 'P' * (len(args1b) - 1) + 'i'
        macro_signature = 'P' * (len(macro_args1) - 1) + 'i'

        if runner.gpu_scratch_space is not None:
            macro_args1.append(runner.gpu_scratch_space)
            macro_args2.append(runner.gpu_scratch_space)
            macro_signature += 'P'

            args1a.append(runner.gpu_scratch_space)
            args2a.append(runner.gpu_scratch_space)
            args1b.append(runner.gpu_scratch_space)
            args2b.append(runner.gpu_scratch_space)
            args_a_signature += 'P'
            args_b_signature += 'P'

        macro = runner.get_kernel(
            'FreeEnergyPrepareMacroFields',
            macro_args1,
            macro_signature,
            needs_iteration=self.config.needs_iteration_num)

        if self.config.access_pattern == 'AB':
            macro_secondary = runner.get_kernel(
                'FreeEnergyPrepareMacroFields',
                macro_args2,
                macro_signature,
                needs_iteration=self.config.needs_iteration_num)
            macro_pair = KernelPair(macro, macro_secondary)
        else:
            macro_pair = KernelPair(macro, macro)

        # Note: these two kernels need to be executed in order.
        primary = [
            runner.get_kernel('FreeEnergyCollideAndPropagateFluid',
                              args1a,
                              args_a_signature,
                              needs_iteration=self.config.needs_iteration_num),
            runner.get_kernel('FreeEnergyCollideAndPropagateOrderParam',
                              args1b,
                              args_b_signature,
                              needs_iteration=self.config.needs_iteration_num)
        ]

        if self.config.access_pattern == 'AB':
            secondary = [
                runner.get_kernel(
                    'FreeEnergyCollideAndPropagateFluid',
                    args2a,
                    args_a_signature,
                    needs_iteration=self.config.needs_iteration_num),
                runner.get_kernel(
                    'FreeEnergyCollideAndPropagateOrderParam',
                    args2b,
                    args_b_signature,
                    needs_iteration=self.config.needs_iteration_num)
            ]
            sim_pair = KernelPair(primary, secondary)
        else:
            sim_pair = KernelPair(primary, primary)

        return zip(macro_pair, sim_pair)
Beispiel #51
0
#print("RM1")
#print(RM1)
rc4keyperm(DK[LSC_SKEY/4:2*LSC_SKEY/4], h2, rp, PboxRM, LSC_SKEY/4);
#print("PboxRM")
#print(PboxRM)
rc4key(DK[2*LSC_SKEY/4:3*LSC_SKEY/4], Sbox1, LSC_SKEY/4);
rc4key(DK[3*LSC_SKEY/4:LSC_SKEY], Sbox2, LSC_SKEY/4);
#print("Sbox1")
#print(Sbox1)
#print("Sbox2")
#print(Sbox2)

RM2=np.copy(RM1)
RMorig=np.copy(RM1)

myrand=np.uint32(0)

for i in range(min(LSC_SKEY,32)):
	myrand=myrand|(DK[i]&1);
	myrand=np.uint32(myrand<<1)

# with following setting:
#
# LSC_DETERMINISTIC=True
# LSC_SKEY=16
# LSC_STATIC_KEY=True
# LSC_WMIC=True
# LSC_MICv=2
#
# when fcount=0
#
def extract_data(oname, coords, obsname, T0, period, inst, SDSS,
                 comp_mags=None, myLoc='.', fnames=None,
                 lower_phase=-0.5, upper_phase=0.5,
                 no_calibration=False):
    '''
    Takes a set of *CAM observations (and data on the system), and produces a set of phase folded lightcurves.

    If we're in the SDSS field, each .log file needs a corresponding .coords file that contains the RA and Dec of each aperture:
        <CCD1 ap1 RA> <Dec>
        <CCD1 ap2 RA> <Dec>

        <CCD2 ap1 RA> <Dec>
        <CCD2 ap2 RA> <Dec>

        <CCD3 ap1 RA> <Dec>
        <CCD3 ap2 RA> <Dec>
        <CCD3 ap3 RA> <Dec>

    If not, I need a standard star reduction, and each .log file needs a corresponding .log reduction that uses the same parameters
    to ensure an accurate match. These should be specified in comp_fnames. If none are supplied, try searching for a


    Arguments:
    ----------
    oname: str
        Template for written files. Applied to

    coords: str
        RA, Dec of target. Must be in a format astropy can understand.

    obsname: str
        Observatory location. See astropy for a list of valid names

    T0: float
        Ephemeris data

    period: float
        Ephemeris data

    SDSS: bool, optional
        If True, I'll do an SDSS lookup for the comparison star magnitudes. If False, use a standard star to calibrate

    myLoc: str, optional
        Working directory. If not supplied, default to current working directory

    fnames: list, optional
        List of target reduction files. If not supplied, searches for log files

    Returns:
    --------
    written_files: list
        List of created .calib files.
    '''
    printer("\n\n# # # # # # # # # # # # # # # # # # # BEGIN BATCH CALIBRATION # # # # # # # # # # # # # # # # # # #\n\n")

    # Writing out
    lc_dir = os.path.join(myLoc, 'MCMC_LIGHTCURVES')
    try:
        os.mkdir(lc_dir)
    except: pass
    printer("Lightcurves will go in: {}".format(lc_dir))

    figs_dir = os.path.join(myLoc, 'MCMC_LIGHTCURVES', "FIGS")
    try:
        os.mkdir(figs_dir)
    except: pass
    printer("Figures will go in: {}".format(figs_dir))


    # Report the things we're working with
    printer("  Using these log files: ")
    for i, fname in enumerate(fnames):
        printer("    {:2d} - {}".format(i, fname))
    printer('  ')
    printer("  I'll write out to {}*\n".format(oname))

    #Correct to BMJD
    printer("  Correcting observations from MJD to BMJD (observed from '{}')".format(obsname))
    printer("  Phase folding data for a T0: {:}, period: {:}".format(T0, period))

    # Data masking stuff
    FLAG = np.uint32(0)
    for f in FLAGS_TO_IGNORE:
        FLAG = FLAG | f
    if FLAG:
        printer("  Ignoring bad data flags: {}".format(FLAGS_TO_IGNORE))
        printer("List of keys:")
        printer(hcam.FLAGS)

    # Where are we?
    try:
        observatory = coord.EarthLocation.of_site(obsname)
    except:
        lat, lon = obsname.split(',')
        printer("  Attempting to get the earth observatory from latitude and longitude")
        observatory = coord.EarthLocation.from_geodetic(lat=lat, lon=lon)

    star_loc = coord.SkyCoord(
        coords,
        unit=(u.hourangle, u.deg), frame='icrs'
    )

    # I want to know what instrument I'm using, since each has a different number of cameras
    inst = inst.lower()
    if inst == 'uspec':
        nCCD = 1
        bands = ['???']
        c    = ['black']
    elif inst == 'ucam':
        nCCD = 3
        bands = ['r', 'g', 'u']
        c = ['red', 'green', 'blue']
    elif inst == 'hcam':
        nCCD = 5
        bands = ['u', 'g', 'r', 'i', 'z']
        c = ['blue', 'green', 'red', 'magenta', 'black']

    printer("  I'm using the instrument {}, which has {} CCDS in the following order:".format(inst, nCCD))
    for n, col in zip(range(nCCD), c):
        printer("  -> CCD {}: plotted in {}".format(n+1, col))

    written_files = []


    #  Plotting #
    ADU_lightcurves = {fname: [] for fname in fnames}

    print("Making plotting area...", end='')
    plt.ion()
    fig, ax = plt.subplots(nCCD, figsize=[11.69, 8.27], sharex=True)
    # If we only have one CCD, axes still need to be a lists
    if nCCD == 1:
        ax = [ax]

    twinAx = []
    for i, a in enumerate(ax):
        a.set_ylabel('Flux, mJy')

        twinAx.append(a.twinx())
        twinAx[i].set_ylabel('Count Ratio')
        twinAx[i].yaxis.tick_right()

    ax[-1].set_xlabel('Phase, days')
    ax[0].set_title('Waiting for data...')
    fig.tight_layout()

    compFig, compAx = plt.subplots(nCCD, figsize=[11.69,8.27], sharex=True)

    # If we only have one CCD, axes still need to be a lists
    if nCCD == 1:
        compAx = [compAx]

    compFig.tight_layout()
    plt.show()
    print(" Done!")



    # I want a master pdf file with all the nights' lightcurves plotted
    pdfname = os.path.join(figs_dir, oname+"_all_nights.pdf")
    with PdfPages(pdfname) as pdf:
        for fname in fnames:
            printer("\n----------------------------------------------------------------\n----------------------------------------------------------------\n")
            printer("Calibrating lightcurves for {}".format(fname))
            printer("\n----------------------------------------------------------------\n----------------------------------------------------------------\n")

            print("CWD:  {}".format(os.getcwd()))
            print("File: {}".format(fname))
            data = hcam.hlog.Hlog.read(fname)
            if data == {}:
                data = hcam.hlog.Hlog.rulog(fname)
            if data == {}:
                data = hcam.hlog.Hlog.rfits(fname)
            if data == {}:
                raise Exception("Could not properly read in log file, {}".format(fname))

            printer("  Read the data file!")

            # Get the apertures of this data set
            aps = data.apnames
            CCDs = [str(i) for i in aps]
            CCDs = sorted(CCDs)
            if CCDs == []:
                printer("ERROR! No data in the file!")
            printer("  The observations have the following CCDs: {}".format([int(ccd) for ccd in CCDs]))

            printer("  Am I flux calibrating the data? {}".format(not no_calibration))
            if no_calibration:
                printer("\n!!! Not doing flux calibration! Setting reference magnitudes to correspond to a flux=1\n\n")

                # Reference stars is a dict, keyed by the CCD string
                reference_stars = {}
                for CCD in CCDs:
                    mags = []

                    # The comparison star fluxes get added together, so each should have an even share of 1 mJy.
                    individual_flx = 1.0 / len(aps[CCD][1:])
                    for ap in aps[CCD][1:]:
                        mags.append(sdss_flux2mag(individual_flx))
                    reference_stars[CCD] = np.array(mags)

                printer("'Unit' Reference stars have a magnitude of {:.2f}\n".format(sdss_flux2mag(1.0)))

            elif SDSS:
                printer("  Looking up SDSS magnitudes from the database")
                comparison_coord_files = comp_mags[fname]
                reference_stars = construct_reference(comparison_coord_files)

            else:
                reference_stars = {}
                comparisons = comp_mags[fname]
                bands = list(comparisons.keys())

                printer("  For each of these CCDs, I've been given comparison stars of the following magnitudes:")
                for i, (b, comps) in enumerate(comparisons.items()):
                    # I need to capture 'none' strings here, and store them as np.nans.
                    # Later, when I construct the comparison star, these apertures must be ignored!

                    comparison_list = []
                    for comp in comps:
                        try:
                            # Can I float? (we all float down here, georgie...)
                            comp = float(comp)
                            comparison_list.append(comp)
                        except:
                            # If I can't, Ignore me.
                            comparison_list.append(np.nan)
                    # Who doesn't love vectorised calculations?
                    reference_stars[str(i+1)] = np.array(comparison_list)

                printer("  My comparison stars have the following apparent mags:")
                for b, mags in reference_stars.items():
                    printer("    - CCD{}, mags: {}".format(b, mags))
            printer("\n\n")

            for a in ax:
                a.clear()
                a.set_ylabel('Flux, mJy')
            ax[-1].set_xlabel('Phase, days')
            ax[0].set_title('Waiting for data...')

            # Loop through the CCDs.
            # # For each CCD, grab the target lightcurve, and the comparisons
            for CCD in CCDs:
                lightcurve_metadata = '# This is data from the file {} CCD {}\n'.format(fname, CCD)

                CCD_int = int(CCD) - 1
                printer("-> CCD {}".format(CCD))

                # Plot the comparison we construct
                compAx[CCD_int].clear()
                compAx[CCD_int].set_title("CCD {}, comparison star".format(CCD))
                compAx[CCD_int].set_ylabel("Counts per frame")

                # Get this frame's apertures
                ap = aps[CCD]
                printer("  This CCD has the apertures: {}".format(ap))
                # Check that there is more than one aperture -- i.e. if a comparison star exists
                if len(ap) <= 1:
                    printer("I can't do relative photometry with only one aperture!")
                    printer("!!! Bad log file, '{}' !!!".format(fname))
                    raise LookupError("Not enough apertures in the log file!", fname)


                # Check for nans in the log files.
                for a in ap:
                    star = data.tseries(CCD, a)
                    if np.any(np.isnan(star.y)):
                        printer("!!! Log file cannot contain nan values! File {}".format(fname))
                        raise ValueError("Log file cannot contain nan values!", fname)


                # Get some data on the """quality""" of the observations
                metadata = '#\n# Reduction info:\n\n'
                to_proc = data[CCD]

                ap_x = [header for header in
                    to_proc.dtype.fields if "x_" in header]
                ap_y = [header for header in
                    to_proc.dtype.fields if "y_" in header]
                ap_fwhm = [header for header in
                    to_proc.dtype.fields if "fwhm_" in header]

                for x_label, y_label, fwhm_label in zip(ap_x, ap_y, ap_fwhm):
                    x_pix_loc = to_proc[x_label].mean()
                    y_pix_loc = to_proc[y_label].mean()
                    fwhm_pix_loc = to_proc[fwhm_label].mean()

                    aperture_number = x_label.replace("x_", "")

                    metadata += '#   Aperture {}\n'.format(aperture_number)
                    metadata += "#     x location: {:.0f}\n".format(x_pix_loc)
                    metadata += "#     y location: {:.0f}\n".format(y_pix_loc)
                    metadata += "#     fwhm:       {:.2f}\n#\n#\n".format(fwhm_pix_loc)
                lightcurve_metadata += metadata


                # Grab the target data
                target = data.tseries(CCD, '1')


                # mags is a list of the relevant comparison star magnitudes.
                # For non-SDSS fields, this is the clipped mean magnitude of each object.
                mags = reference_stars[CCD]
                fluxs = sdss_mag2flux(mags)
                sumFlux = np.nansum(fluxs)
                sumMag = sdss_flux2mag(sumFlux)

                printer("  Comparison star mags: {}".format(mags))
                if no_calibration:
                    lightcurve_metadata += "# No flux calibration being done!!\n"
                    lightcurve_metadata += "# simulated a dummy comparison magnitude of 1.00 mJy\n"
                else:
                    lightcurve_metadata += "# Comparison star mags: {}\n".format(mags)


                # Add up the reference star fluxes
                N_comparisons = 0
                comparison = "Dummy initialiser ( ͡° ͜ʖ ͡°)"
                count_ratios = []
                for a, mag in zip(ap[1:], mags):
                    if np.isnan(mag):
                        printer("  The reference star in ap {} is being ignored!".format(a))
                    else:
                        N_comparisons += 1
                        new_comparison = data.tseries(CCD, a)
                        r = sdss_mag2flux(mag) / new_comparison.y.mean()
                        r_err = sdss_mag2flux(mag) / new_comparison.y.std()
                        try:
                            comparison = comparison + new_comparison
                            printer("  The reference star now includes data from aperture {}".format(a))
                        except:
                            comparison = data.tseries(CCD, a)
                            printer("  The comparison was initialised with aperture {}".format(a))
                        printer("    This star has mean count/frame of {:.3f}".format(new_comparison.y.mean()))
                        printer("    and has a flux/count of {:.3g} +/- {:.3g} mJy/count".format(r, r_err))

                printer("  The 'comparison star' I've construced from {} apertures now has a mean count/frame of {:.3f}".format(N_comparisons, np.mean(comparison.y)))


                # If we have SDSS stars too bright, get their mags from flux calibrating those that aren't
                if SDSS and np.any(np.isinf(mags)):
                    printer("\n\n  I have some comparisons that saturated SDSS! Inferring their magnitudes from fainter stars.")

                    lightcurve_metadata += "# Some comparison stars saturated the SDSS image.\n# Their magnitudes were inferred from fainter stars\n"

                    printer("  Collecting fainter stars...")
                    calibComp = None
                    for mag, a in zip(mags, ap[1:]):
                        if np.isinf(mag):
                            printer("    Skipping aperture {}, as its magnitude is inf".format(a))
                        else:
                            if calibComp is None:
                                calibComp = data.tseries(CCD, a)
                            else:
                                calibComp += data.tseries(CCD, a)

                    if calibComp is None:
                        raise Exception("All comparison stars saturated SDSS! Pick at least one that doesn't!")
                    calibComp_counts = np.mean(calibComp.y)

                    printer("  My non-saturated SDSS stars have a mean count/frame of {:.3f}".format(calibComp_counts))
                    lightcurve_metadata += "# My non-saturated SDSS stars have a mean count/frame of {:.3f}\n".format(calibComp_counts)

                    printer("  My fluxes are {}".format([f for f in fluxs if not np.inf(f)]))
                    printer("    with a sum flux of {:.3f} mJy".format(sumFlux))
                    printer("     and a sum mag of  {:.3f} mag".format(sumMag))

                    lightcurve_metadata += "# My fluxes are {}\n".format([f for f in fluxs if not np.inf(f)])
                    lightcurve_metadata += "#   with a sum flux of {:.3f} mJy\n".format(sumFlux)
                    lightcurve_metadata += "#    and a sum mag of  {:.3f} mag\n".format(sumMag)

                    for i, (mag, a) in enumerate(zip(mags, ap[1:])):
                        if np.isinf(mag):
                            cnts = data.tseries(CCD, a)
                            meanCnts = np.mean(cnts.y)

                            if np.any(np.isnan(cnts.y)):
                                meanCnts = np.nanmean(cnts.y)

                                printer("The file {} has nan counts! That's weird, and you should fix that.".format(fname))
                                printer("I'll continue ignoring the nan, BUT FIX IT!")

                                lightcurve_metadata += "# The file {} has nan counts! That's wierd, and you should fix that.\n".format(fname)
                                lightcurve_metadata += "# I'll continue ignoring the nan, BUT FIX IT!\n"


                            # Calibrated against known SDSS stars. Observed through the same air column since they're the same frame, so no ext. corr.
                            mag = sumMag - 2.5*np.log10(meanCnts/calibComp_counts)
                            mags[i] = mag

                            printer("    Star {} had no SDSS magnitude. Computed a magnitude of {:.3f} from an e- flux of {}".format(a, mag, meanCnts))
                            lightcurve_metadata += "# Star {} had no SDSS magnitude. Computed a magnitude of {:.3f} from an e- flux of {}\n".format(a, mag, meanCnts)

                    printer("\n")

                # # # # # # # # # # # # # # # # # # # #
                # # Conversion of target lightcurve # #
                # # # # # # # # # # # # # # # # # # # #

                # Get the non-saturated fluxes
                fluxs = sdss_mag2flux(mags)
                comparison_flux = np.nansum(fluxs)

                ratio = target / comparison # counts / counts - ratio between target and comp

                printer("\n\n  Correcting data to BMJD time...")
                ratio = tcorrect(ratio, star_loc, obsname)

                # If we're the first CCD, figure out what eclipse cycle we are
                if CCD == '1':
                    meantime = np.mean(ratio.t)
                    E = calc_E(meantime, T0, period)
                    printer("  The mean time of this eclipse is {:.3f}.".format(meantime))
                    printer("  From ephemeris data, I get an eclipse Number,")
                    printer("    E = ({:.3f} - [T0={:.3f}]) / [P={:.5f}]".format(meantime, T0, period))
                    printer("    E = {:.3f}".format(E))

                    E = np.rint(E)
                    # The above can be off if the eclipse isn't the minimum; increment/decrement E until it's within bounds
                    while T0 + E*period < ratio.t[0]:
                        printer("    !!! Eclipse time not within these data! Incrimenting E...")
                        E += 1
                    while T0 + E*period > ratio.t[-1]:
                        printer("    !!! Eclipse time not within these data! Decrimenting E...")
                        E -= 1

                    printer("  I think that the eclipse spanning from {:.3f} to {:.3f} is cycle number {}".format(
                        ratio.t[0], ratio.t[-1], E)
                    )

                    eclTime = T0 + E*period
                    printer("  The eclipse is then at time {:.3f}".format(eclTime))
                    printer("")

                # slice out the data between phase -0.5 and 0.5
                printer("  Slicing out data between phase {} and {}".format(lower_phase, upper_phase))
                slice_time = (ratio.t - eclTime) / period
                slice_args = (slice_time < upper_phase)  *  (slice_time > lower_phase)

                ratio = hcam.hlog.Tseries(
                    slice_time[slice_args],
                    ratio.y[slice_args],
                    ratio.ye[slice_args],
                    ratio.mask[slice_args]
                    )

                # Bad data has error = -1
                mask = np.where(ratio.ye != -1)
                ratio = ratio[mask]

                meanRatio = np.mean(ratio.y)

                printer("  I sliced out {} data from the lightcurve about the eclipse.".format(len(ratio.t)))

                # Save the ratio for later
                ADU_lightcurves[fname].append(copy.deepcopy(ratio))

                # Convert the ratio from ADU to mJy
                printer("  Multiplying the target count flux / comparison count flux by the comparison flux, {:.3f}".format(comparison_flux))
                ratio = ratio * comparison_flux # Scale back up to actual flux.

                # Filter out flags I don't care about.
                ratio.mask = ratio.mask & (~ FLAG)

                #######################################################################################################
                ############ IGNORE ME I'M BORING AND HARD TO READ. WHY READ ANYTHING HARD? JUST TRUST ME. ############
                #######################################################################################################

                #  Reporting  #

                lightcurve_metadata += "# I calculated an eclipse time of {} BMJD, and phase-folded around that\n".format(eclTime)
                lightcurve_metadata += "# with a T0 of {}, and a period of {}, making this eclipse {}.\n".format(T0, period, E)
                lightcurve_metadata += "# I also sliced out the phase {} -> {}!!\n#\n#\n".format(lower_phase, upper_phase)

                printer("  Comparison star apparent SDSS magnitudes:")
                lightcurve_metadata += "# Comparison star apparent SDSS magnitudes:\n"

                for m, mag in enumerate(mags):
                    printer("    Star {} -> {:.3f} mag".format(m, mag))
                    lightcurve_metadata += "#   Star {} -> {:.3f} mag\n".format(m, mag)
                printer("")
                lightcurve_metadata += "#\n#\n#\n"

                printer("  Apparent fluxes of the comparison stars:")
                lightcurve_metadata += "# Apparent fluxes of the comparison stars:\n"

                for i, flux in enumerate(fluxs):
                    printer("    Star {} -> {:.3f} mJy".format(i, flux))
                    lightcurve_metadata += "#   Star {} -> {:.3f} mJy\n".format(i, flux)
                lightcurve_metadata += "#\n"

                printer('  Sum apparent Flux: {:.3f} mJy\n'.format(comparison_flux))

                printer("  Instrumental counts, summed per mean frame ({} frames) of {} comparison stars: {:.1f}".format(
                    len(comparison.y), N_comparisons, np.mean(comparison.y)
                ))

                printer("  Instrumental counts per mean frame ({} frames) of target: {:.1f}".format(
                    len(target.y), np.mean(target.y)
                ))
                printer("  Mean Target/comparison count ratio: {:.3f}".format(meanRatio))
                printer("  Mean target magnitude: {:.3f}".format(sdss_flux2mag(meanRatio * comparison_flux)))

                lightcurve_metadata += '# Sum apparent Flux: {:.3f} mJy\n#\n#\n'.format(comparison_flux)
                lightcurve_metadata += "# Instrumental summed counts per mean frame ({} frames) of {} comparison stars: {:.1f}\n".format(len(comparison.y), len(ap[1:]), np.mean(comparison.y))
                lightcurve_metadata += "# Instrumental counts per mean frame ({} frames) of target: {:.1f}\n#\n".format(len(target.y), np.mean(target.y))
                lightcurve_metadata += "# Mean Target/comparison count ratio: {:.3f}\n".format(meanRatio)
                lightcurve_metadata += "# Mean target magnitude: {:.3f}\n".format(sdss_flux2mag(meanRatio * comparison_flux))

                # Plotting management
                ax[CCD_int].clear()
                if CCD_int == 0:
                    title = os.path.split(fname)[1]

                    ax[0].set_title(title)
                    compAx[0].set_title("{}\nCCD {}, comparison stars, normalised.".format(title, CCD))

                ax[CCD_int].set_ylabel('Flux, mJy')

                # Plot the ratio
                ratio.mplot(ax[CCD_int], colour=c[CCD_int])

                # Scale the right side labels
                twinAx[CCD_int].set_ylim( ax[CCD_int].get_ylim() / comparison_flux )
                # Draw
                fig.canvas.draw_idle()

                compMin =  9e99
                compMax = -9e99
                if len(ap) == 2:
                    # # Plot the mean count flux on the figure -- only used when single aperture, as not as useful as ratios
                    compAx[CCD_int].errorbar(comparison.t, comparison.y, yerr=comparison.ye,
                        label='Mean', color='black', linestyle='', marker='o', capsize=0)
                    try:
                        mean, _, _ = sigma_clipped_stats(comparison.y, maxiters=2, sigma=3)
                    except:
                        mean, _, _ = sigma_clipped_stats(comparison.y, iters=2, sigma=3)
                    compAx[CCD_int].axhline(mean, linestyle='--', color='black')
                    compMin = np.min(comparison.y)
                    compMax = np.max(comparison.y)
                else:
                    # Plot each combination of comparison star ratios, i.e. for 3 comparisons: 2/3, 2/4, 3/4
                    j = 0
                    for i, a in enumerate(ap[1:-1]):
                        first = data.tseries(CCD, a)
                        for b in ap[i+2:]:
                            printer("  -> Plotting ap {}/{}".format(a, b))
                            toPlot = first / data.tseries(CCD, b)

                            # Filter out flags I don't care about.
                            toPlot.mask = toPlot.mask & (~ FLAG)

                            # Apply the mask to the data
                            if np.any(toPlot.mask):
                                mask = np.where(toPlot.mask == 0)
                                printer("  -> {} masked data!".format(np.sum(toPlot.mask != 0)))
                                printer("\nMasked data:")
                                printer(toPlot.mask)
                                printer("\n\n")
                                printer("Flags:")
                                printer(hcam.FLAGS)

                                if np.all(toPlot.mask != 0):
                                    print("ALL DATA ARE MASKED! Stopping...")
                                    exit()

                                toPlot.t  = toPlot.t[mask]
                                toPlot.y  = toPlot.y[mask]
                                toPlot.ye = toPlot.ye[mask]
                                toPlot.mask = toPlot.mask[mask]

                            toPlot.y = toPlot.y / np.mean(toPlot.y)
                            toPlot.y = toPlot.y + (j / 5)
                            j += 1

                            # Get min and max axis limits
                            if np.max(toPlot.y) > compMax:
                                compMax = np.max(toPlot.y)
                            if np.min(toPlot.y) < compMin:
                                compMin = np.min(toPlot.y)

                            # Fit a straight line to the data. Deviations indicate bad comparisons
                            A,B = curve_fit(straight_line, toPlot.t, toPlot.y)[0]
                            fit_X = np.linspace(toPlot.t[0], toPlot.t[-1], 3)
                            fit_Y = straight_line(fit_X, A, B)

                            # iters is deprecated. Try the new version, and if that fails do the old version. yay, flexibility!
                            try:
                                mean, _, _ = sigma_clipped_stats(toPlot.y, maxiters=2, sigma=3)
                            except:
                                mean, _, _ = sigma_clipped_stats(toPlot.y, iters=2, sigma=3)
                            compAx[CCD_int].axhline(mean, linestyle='--', color='black')
                            compAx[CCD_int].plot(fit_X, fit_Y, color='red', linestyle=':')
                            compAx[CCD_int].scatter(toPlot.t, toPlot.y,
                                s=10,
                                label="Aperture {}/{} - grad: {:.2f}".format(a, b, A),
                                alpha=0.6
                            )
                            compFig.canvas.draw_idle()

                # Add in legend artist
                compAx[CCD_int].legend()

                pad = 0.05 * (compMax - compMin)
                compAx[CCD_int].set_ylim([compMin-pad, compMax+pad])
                compAx[CCD_int].set_xlim([comparison.t[0], comparison.t[-1]])

                # File handling stuff
                b = bands[CCD_int]
                while b == '???':
                    b = input("What band are these data?: ")
                    if b == '':
                        print("PLEASE ENTER A BAND NAME for:\n{}\n".format(fname))
                        b = "???"

                date = time.Time(eclTime, format='mjd')
                date = date.strftime("%Y-%m-%d@%Hh%Mm")

                filename = oname
                filename = "{}_{}_{}.calib".format(filename, date, b)

                filename = os.path.join(lc_dir, filename)

                # Saving data
                printer("  These data have {} masked points.".format(np.sum(ratio.mask != 0)))
                if np.sum(ratio.mask != 0):
                    printer("\n\n{}\n\n".format(ratio.mask))
                with open(filename, 'w') as f:
                    f.write(lightcurve_metadata)
                    f.write("# Phase, Flux, Err_Flux\n")
                    for t, y, ye, mask in zip(ratio.t, ratio.y, ratio.ye, ratio.mask):
                        if not mask:
                            f.write("{} {} {}\n".format(t, y, ye))

                written_files.append(filename)
                printer("  Wrote data to {}".format(filename))
                printer("  Finished CCD {}\n".format(CCD))

            ax[-1].set_xlabel('Phase, days')

            x_range = [min(ratio.t), max(ratio.t)]
            ax[0].set_xlim(x_range)

            x_range = [min(comparison.t), max(comparison.t)]
            compAx[0].set_xlim(x_range)

            plt.tight_layout()
            plt.subplots_adjust(hspace=0.0)

            fig.canvas.draw_idle()
            compFig.canvas.draw_idle()

            input("\n  Hit enter for next file\r")
            print()
            pdf.savefig(fig)
            pdf.savefig(compFig)
        plt.close(compFig)

        # Plot the ADU lightcurves for the user.
        for a in ax:
            a.clear()
            a.set_ylabel('Flux, ADU')
        ax[-1].set_xlabel('Phase, days')
        ax[0].set_title('Waiting for data...')

        for fname, lightcurves in ADU_lightcurves.items():
            for i, tseries in enumerate(lightcurves):
                print("{} // CCD {}".format(fname, i))
                flx = tseries.y
                phase = tseries.t

                ax[i].step(phase, flx, label=fname)

        for a in ax:
            a.legend()
        ax[0].set_title("ADU Lightcurves of all files")
        plt.tight_layout()
        fig.canvas.draw_idle()
        ADU_name = os.path.join(figs_dir, oname+'_ADU_lightcurves.pdf')
        fig.savefig(ADU_name)

        input("Hit enter to continue... ")

    printer("  ")
    printer("  Saved the plots to {}".format(pdfname))

    plt.close('all')
    plt.ioff()

    return written_files
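# Hypothetical call (every value below is a placeholder, not from the original):
#
# written = extract_data(
#     oname='my_target', coords='20 29 17.13 -43 40 19.8',
#     obsname='lasilla', T0=58754.12345, period=0.0569,
#     inst='ucam', SDSS=True, comp_mags=comparison_mag_lookup,
#     myLoc='reduced', fnames=['2019-10-01_run015.log'],
# )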
Beispiel #53
0
np.complex64(3j)
np.complex64(A())
np.complex64(C())
np.complex128(3j)
np.complex128(C())
np.complex128(None)
np.complex64("1.2")
np.complex128(b"2j")

np.int8(4)
np.int16(3.4)
np.int32(4)
np.int64(-1)
np.uint8(B())
np.uint32()
np.int32("1")
np.int64(b"2")

np.float16(A())
np.float32(16)
np.float64(3.0)
np.float64(None)
np.float32("1")
np.float16(b"2.5")

if sys.version_info >= (3, 8):
    np.uint64(D())
    np.float32(D())
    np.complex64(D())
Beispiel #54
0
def LSC_process_pkt(lorapkt, dst, ptype, src, seq):

	#it seems that there is not a full reset when calling this function from a parent Python program
	#so we need to reset at the beginning of the function
	if LSC_DETERMINISTIC==True:
		RM1=np.copy(RMorig)
		RM2=np.copy(RM1)
	
	size_mesg = len(lorapkt)
	
	if LSC_WMIC:
		lenH=np.uint32((size_mesg+HEADER_SIZE+h2-1)/h2)
	else:		
		lenH=np.uint32((size_mesg+h2-1)/h2)

	plain = np.empty([lenH*h2],dtype=np.uint8)
	cipher = np.empty([lenH*h2],dtype=np.uint8)
	check = np.empty([lenH*h2],dtype=np.uint8)
	
	for i in range(lenH*h2):
		cipher[i]=0

	if LSC_WMIC:

		#add the packet header to compute MIC
		cipher[0]=dst
		cipher[1]=ptype
		cipher[2]=src
		cipher[3]=seq
		
		for i in range(size_mesg):   
			cipher[HEADER_SIZE+i]=lorapkt[i]		
		
		print "?LSC: received MIC: ",
		print (cipher[size_mesg+HEADER_SIZE-LSC_SMIC:size_mesg+HEADER_SIZE])
		
		#encrypt received content: HEADER+CIPHER
		#but with fcount=seq+1
		#here is use the plain buffer
		encrypt_ctr(cipher, plain, lenH, RM1, PboxRM, Sbox1, Sbox2, (seq+1) % 256)
		
		if LSC_MICv==1:
			#skip the first 4 bytes and take the next 4 bytes of encrypted HEADER+CIPHER
			plain[0]=plain[LSC_SMIC]
			plain[1]=plain[LSC_SMIC+1]
			plain[2]=plain[LSC_SMIC+2]
			plain[3]=plain[LSC_SMIC+3]	
		elif LSC_MICv==2:
			#first, compute byte-sum of encrypted HEADER+CIPHER
			myMIC=np.sum(plain[:size_mesg+HEADER_SIZE-LSC_SMIC])

			plain[0]=xorshift32(np.uint32(myMIC % 7))
			plain[1]=xorshift32(np.uint32(myMIC % 13))
			plain[2]=xorshift32(np.uint32(myMIC % 29))
			plain[3]=xorshift32(np.uint32(myMIC % 57))
		elif LSC_MICv==3:
			#should implement a better algorithm?
			#XTEA?: http://code.activestate.com/recipes/496737-python-xtea-encryption/	
			print "todo"
		
		print "?LSC: computed MIC: ",
		print (plain[:LSC_SMIC])
		
		if np.array_equal(plain[:LSC_SMIC], cipher[size_mesg+HEADER_SIZE-LSC_SMIC:size_mesg+HEADER_SIZE]):
	
			print "?LSC: valid MIC"
			
			#print "?LSC: [cipher]: ",
			#print (cipher[HEADER_SIZE:size_mesg+HEADER_SIZE-LSC_SMIC])
			
			#re-index cipher data to get rid of HEADER and MIC
			size_mesg -= LSC_SMIC
			for i in range(size_mesg):   
				cipher[i]=lorapkt[i]
		else:
			return "###BADMIC###"
	else:
		for i in range(size_mesg):   
			cipher[i]=lorapkt[i]		

	#notice the usage of RM2 to decrypt as RM1 has changed
	encrypt_ctr(cipher, check, lenH, RM2, PboxRM, Sbox1, Sbox2, seq)

	#print "?LSC: [plain]: ",
	#print (check)

	s_plain = check.tostring()
	print "?LSC: plain payload is "+replchars.sub(replchars_to_hex, s_plain[:size_mesg])
	return s_plain[:size_mesg]
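# For reference only: LSC_process_pkt relies on a module-level xorshift32() that
# is not shown in this excerpt.  The sketch below is a standard 32-bit xorshift
# step (Marsaglia's (13, 17, 5) shift triple); whether the module uses exactly
# these constants is an assumption.
def xorshift32_sketch(x):
    x = int(x) & 0xFFFFFFFF
    x ^= (x << 13) & 0xFFFFFFFF
    x ^= x >> 17
    x ^= (x << 5) & 0xFFFFFFFF
    return np.uint32(x)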
Beispiel #55
0
    def generateHeader(self, data, inShape):
        #data["values"] can be one of 3 shapes: dense 4d mat for activity, dense 6d mat for weights
        #scipy csr_sparse matrix for sparse activity

        header = {}

        values = data["values"]

        #If sparse matrix, write as sparse format
        if (sp.issparse(values)):
            if inShape is None:
                raise ValueError(
                    "Sparse values must have shape input when generating header"
                )
            if len(inShape) != 3:
                raise ValueError(
                    "Shape parameter must be a 3 tuple of (ny, nx, nf)")
            (ny, nx, nf) = inShape
            (numFrames, numFeat) = values.shape
            if (not numFeat == ny * nx * nf):
                raise ValueError(
                    "Shape provided does not match the data shape (" +
                    str(ny) + "*" + str(nx) + "*" + str(nf) + " vs " +
                    str(numFeat) + ")")

            header["headersize"] = np.uint32(80)
            header["numparams"] = np.uint32(20)
            header["filetype"] = np.uint32(6)
            header["nx"] = np.uint32(nx)
            header["ny"] = np.uint32(ny)
            header["nf"] = np.uint32(nf)
            header["numrecords"] = np.uint32(1)
            header["recordsize"] = np.uint32(0)  #Not used in sparse activity
            header["datasize"] = np.uint32(8)  #8 bytes per entry: uint32 index + float32 value
            header["datatype"] = np.uint32(4)  #Type is location-value pair
            header["nxprocs"] = np.uint32(1)  #No longer used
            header["nyprocs"] = np.uint32(1)
            header["nxExtended"] = np.uint32(nx)
            header["nyExtended"] = np.uint32(ny)
            header["kx0"] = np.uint32(0)
            header["ky0"] = np.uint32(0)
            header["nbatch"] = np.uint32(1)
            header["nbands"] = np.uint32(numFrames)
            header["time"] = np.float64(data["time"][0])

        #If 4d dense matrix, write as dense format
        elif (values.ndim == 4):
            (numFrames, ny, nx, nf) = values.shape
            header["headersize"] = np.uint32(80)
            header["numparams"] = np.uint32(20)
            header["filetype"] = np.uint32(4)
            header["nx"] = np.uint32(nx)
            header["ny"] = np.uint32(ny)
            header["nf"] = np.uint32(nf)
            header["numrecords"] = np.uint32(1)
            header["recordsize"] = np.uint32(nx * ny * nf)  #values per record for dense activity
            header["datasize"] = np.uint32(4)  #floats are 4 bytes
            header["datatype"] = np.uint32(3)  #Type is float
            header["nxprocs"] = np.uint32(1)  #No longer used
            header["nyprocs"] = np.uint32(1)
            header["nxExtended"] = np.uint32(nx)
            header["nyExtended"] = np.uint32(ny)
            header["kx0"] = np.uint32(0)
            header["ky0"] = np.uint32(0)
            header["nbatch"] = np.uint32(1)
            header["nbands"] = np.uint32(numFrames)
            header["time"] = np.float64(data["time"][0])

        #If 6d dense matrix, write as weights format
        elif (values.ndim == 6):
            (numFrames, numArbors, numKernels, nyp, nxp, nfp) = values.shape
            header["headersize"] = np.uint32(104)
            header["numparams"] = np.uint32(26)
            header["filetype"] = np.uint32(5)
            header["nx"] = np.uint32(1)  #size not used by weights
            header["ny"] = np.uint32(1)
            header["nf"] = np.uint32(numKernels)  #Pre nf
            header["numrecords"] = np.uint32(numArbors)
            #Each weight patch is preceded by nxp (2 bytes), nyp (2 bytes) and an offset (4 bytes)
            header["recordsize"] = np.uint32(0)  #weight files do not use recordsize
            header["datasize"] = np.uint32(4)  #floats are 4 bytes
            header["datatype"] = np.uint32(3)  #float type
            header["nxprocs"] = np.uint32(1)
            header["nyprocs"] = np.uint32(1)
            header["nxExtended"] = np.uint32(1)
            header["nyExtended"] = np.uint32(1)
            header["kx0"] = np.uint32(0)
            header["ky0"] = np.uint32(0)
            header["nbatch"] = np.uint32(1)
            header["nbands"] = np.uint32(
                numArbors
            )  #For weights, numArbors is stored in nbands, no field for numFrames
            #This field will be updated on write
            header["time"] = np.float64(data["time"][0])
            #Weights have extended header
            header["nxp"] = np.uint32(nxp)
            header["nyp"] = np.uint32(nyp)
            header["nfp"] = np.uint32(nfp)
            header["wMax"] = np.uint32(1)  #This field will be updated on write
            header["wMin"] = np.uint32(1)  #This field will be updated on write
            header["numpatches"] = np.uint32(numKernels)
        return header
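#Hedged usage sketch for generateHeader() above. "pvpWriter" is a hypothetical instance of the
#class this method belongs to (not shown here); shapes and values are illustrative. For a dense
#4-d activity batch, inShape is not needed (it is only required for sparse input).
import numpy as np
data = {"values": np.zeros((2, 8, 8, 3), dtype=np.float32),  #(numFrames, ny, nx, nf)
        "time": np.array([0.0, 1.0])}
header = pvpWriter.generateHeader(data, None)
#per the dense branch above: header["filetype"] == 4, header["nbands"] == 2,
#header["recordsize"] == 8 * 8 * 3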
Beispiel #56
0
  def __compile_kernels(self):
    ctype_indices = dtype_to_ctype(self.dtype_indices)
    ctype_labels = dtype_to_ctype(self.dtype_labels)
    ctype_counts = dtype_to_ctype(self.dtype_counts)
    ctype_samples = dtype_to_ctype(self.dtype_samples)
    n_labels = self.n_labels
    n_threads = self.COMPUTE_THREADS_PER_BLOCK
    n_shf_threads = self.RESHUFFLE_THREADS_PER_BLOCK
    
    """ DFS module """
    dfs_module = compile_module("dfs_module.cu", (n_threads, n_shf_threads, n_labels, 
      ctype_samples, ctype_labels, ctype_counts, ctype_indices, self.MAX_BLOCK_PER_FEATURE, 
      self.debug))
    
    const_stride = dfs_module.get_global("stride")[0]
    driver.memcpy_htod(const_stride, np.uint32(self.stride))

    self.find_min_kernel = dfs_module.get_function("find_min_imp")
    self.find_min_kernel.prepare("PPPi")
  
    self.fill_kernel = dfs_module.get_function("fill_table")
    self.fill_kernel.prepare("PiiP")
    
    self.scan_reshuffle_tex = dfs_module.get_function("scan_reshuffle")
    self.scan_reshuffle_tex.prepare("PPii")
    tex_ref = dfs_module.get_texref("tex_mark")
    self.mark_table.bind_to_texref_ext(tex_ref) 
      
    self.comput_total_2d = dfs_module.get_function("compute_2d")
    self.comput_total_2d.prepare("PPPPPPPii")

    self.reduce_2d = dfs_module.get_function("reduce_2d")
    self.reduce_2d.prepare("PPPPPi")
    
    self.scan_total_2d = dfs_module.get_function("scan_gini_large")
    self.scan_total_2d.prepare("PPPPii")
    
    self.scan_reduce = dfs_module.get_function("scan_reduce")
    self.scan_reduce.prepare("Pi")

    """ BFS module """
    bfs_module = compile_module("bfs_module.cu", (self.BFS_THREADS, n_labels, ctype_samples,
      ctype_labels, ctype_counts, ctype_indices,  self.debug))

    const_stride = bfs_module.get_global("stride")[0]
    const_n_features = bfs_module.get_global("n_features")[0]
    const_max_features = bfs_module.get_global("max_features")[0]
    driver.memcpy_htod(const_stride, np.uint32(self.stride))
    driver.memcpy_htod(const_n_features, np.uint16(self.n_features))
    driver.memcpy_htod(const_max_features, np.uint16(self.max_features))

    self.scan_total_bfs = bfs_module.get_function("scan_bfs")
    self.scan_total_bfs.prepare("PPPP")

    self.comput_bfs_2d = bfs_module.get_function("compute_2d")
    self.comput_bfs_2d.prepare("PPPPPPPPP")

    self.fill_bfs = bfs_module.get_function("fill_table")
    self.fill_bfs.prepare("PPPPP")

    self.reshuffle_bfs = bfs_module.get_function("scan_reshuffle")
    tex_ref = bfs_module.get_texref("tex_mark")
    self.mark_table.bind_to_texref_ext(tex_ref) 
    self.reshuffle_bfs.prepare("PPP") 

    self.reduce_bfs_2d = bfs_module.get_function("reduce")
    self.reduce_bfs_2d.prepare("PPPPPPi")
    
    self.get_thresholds = bfs_module.get_function("get_thresholds")
    self.get_thresholds.prepare("PPPPP")
   
    self.predict_kernel = mk_kernel(
        params = (ctype_indices, ctype_samples, ctype_labels), 
        func_name = "predict", 
        kernel_file = "predict.cu", 
        prepare_args = "PPPPPPPii")
  
    self.bfs_module = bfs_module
    self.dfs_module = dfs_module
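#Hedged sketch of the PyCUDA prepare()/prepared_call() pattern used by the kernels above. The
#kernel source and launch geometry here are illustrative only; the real kernels live in
#dfs_module.cu / bfs_module.cu, which are not part of this snippet.
import numpy as np
import pycuda.autoinit
import pycuda.gpuarray as gpuarray
from pycuda.compiler import SourceModule

mod = SourceModule("""
__global__ void scale(float *x, unsigned int n, float factor)
{
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] *= factor;
}
""")

scale_kernel = mod.get_function("scale")
scale_kernel.prepare("PIf")  #struct-style argument signature: pointer, unsigned int, float

x = gpuarray.to_gpu(np.arange(256, dtype=np.float32))
#prepared_call takes grid and block dimensions followed by the prepared arguments
scale_kernel.prepared_call((1, 1), (256, 1, 1), x.gpudata, np.uint32(x.size), np.float32(2.0))
print(x.get()[:4])  #first few scaled values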
Beispiel #57
0
"""
arrayops stores various array operations that will come in handy
"""

import bisect
import numpy as np

zero = np.uint32(0)
one = np.uint32(1)


def index(a, x):
    """
    Given a sorted list, locate the leftmost value equal to x and return its
    index. This is the same as a.index(x) but uses the additional information
    that a is a sorted list.

    Example:
        >>> a = [0,1,2,3,4]
        >>> ind = index(a, 3)
        >>> ind
        3

    :param a: List to search
    :param x: value to find
    :return: index of the leftmost occurrence of x in a
    """
    i = bisect.bisect_left(a, x)
    if i != len(a) and a[i] == x:
        return i
    raise ValueError("%r is not in list" % (x,))
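#Hedged usage sketch for index() above: a present value returns its position and a missing
#value raises ValueError, mirroring list.index().
if __name__ == "__main__":
    a = [0, 1, 2, 3, 4]
    print(index(a, 3))  #-> 3
    try:
        index(a, 7)
    except ValueError as e:
        print("not found: %s" % e)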
Beispiel #58
0
    def write(self, data, shape=None, useExistingHeader=False):
        if (self.mode != 'w' and self.mode != 'a'):
            raise Exception("File not opened for writing")

        #Seek to end of file
        self.pvpFile.seek(0, os.SEEK_END)

        #Check data structure
        self.checkData(data)

        if not 'header' in data.keys():
            if useExistingHeader:
                raise ValueError(
                    "Must specify a \"header\" field if using existing header")

        #Generate header if it doesn't exist
        if (not self.header):
            if (useExistingHeader):
                self.header = data["header"]
            else:
                self.header = self.generateHeader(data, shape)
            #Write out full header
            self.writeHeader()

        #Otherwise, check header fields
        else:
            if (useExistingHeader):
                self.checkHeaders(self.header, data["header"])
            else:
                self.checkHeaders(self.header,
                                  self.generateHeader(data, shape))
            #Change nbands for number of frames
            self.updateHeader(data)

        if self.header['numparams'] == 26:
            hPattern = extendedHeaderPattern
        else:
            hPattern = headerPattern

        #Write out files based on data
        if self.header['filetype'] == 2:
            raise Exception('Filetype 2 not yet supported for write pvp')

        elif self.header['filetype'] == 4:
            (numFrames, ny, nx, nf) = data["values"].shape
            for dataFrame in range(numFrames):
                self.pvpFile.write(data["time"][dataFrame])
                self.pvpFile.write(data["values"][dataFrame, :, :, :])

        elif self.header['filetype'] == 5:
            (numFrames, numArbors, numKernels, nyp, nxp,
             nfp) = data["values"].shape
            # Type 5's have a header in each frame
            #Make a copy of header dictionary to avoid changing
            #the header field
            tmpHeader = self.header.copy()
            for dataFrame in range(numFrames):
                #Set header fields that change from frame to frame
                tmpHeader["time"] = np.float64(data["time"][dataFrame])
                ##wMax and wMin are int32's, whereas the max and min might not be an int
                #tmpHeader["wMax"] = np.uint32(np.max(data["values"][dataFrame, :, :, :, :, :]))
                #tmpHeader["wMin"] = np.uint32(np.min(data["values"][dataFrame, :, :, :, :, :]))
                #We write headers here because we need a header per frame
                for headerEntry in hPattern:
                    self.pvpFile.write(headerEntry[1](
                        tmpHeader[headerEntry[0]]))
                #Within each patch, we write out each nxp, nyp, and offset
                for dataArbor in range(numArbors):
                    for dataKernel in range(numKernels):
                        self.pvpFile.write(np.uint16(nxp))
                        self.pvpFile.write(np.uint16(nyp))
                        self.pvpFile.write(
                            np.uint32(0))  #Offset is always 0 for kernels
                        self.pvpFile.write(data["values"][dataFrame, dataArbor,
                                                          dataKernel, :, :, :])
        #Sparse values
        elif self.header['filetype'] == 6:
            (numFrames, numData) = data["values"].shape
            for dataFrame in range(numFrames):
                frameVals = data["values"].getrow(dataFrame)
                count = frameVals.nnz
                index = frameVals.indices
                value = frameVals.data
                #Write time first, followed by count, followed by values
                self.pvpFile.write(data["time"][dataFrame])
                self.pvpFile.write(np.uint32(count))
                npOut = np.zeros((count, 2)).astype(np.uint32)
                npOut[:, 0] = np.uint32(index)
                npOut[:, 1] = np.float32(value).view(np.uint32)
                self.pvpFile.write(npOut.flatten())
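#Hedged illustration of the sparse (filetype 6) packing used above: each active element is
#stored as a (uint32 index, float32 value) pair, with the float's bit pattern reinterpreted
#as uint32 so the whole record can be written as a single uint32 array.
import numpy as np

index = np.array([3, 17, 42], dtype=np.uint32)
value = np.array([0.5, -1.25, 2.0], dtype=np.float32)

npOut = np.zeros((len(index), 2), dtype=np.uint32)
npOut[:, 0] = index
npOut[:, 1] = value.view(np.uint32)  #reinterpret bits, not a numeric cast

#round trip: the original float values can be recovered from the packed buffer
recovered = npOut[:, 1].copy().view(np.float32)
assert np.array_equal(recovered, value)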
Beispiel #59
0
 def test_3(self):
     sig = ['float64(float64, uint32)']
     func = self.funcs['func2']
     A = np.arange(100, dtype=np.float64)
     scale = np.uint32(3)
     self._run_and_compare(func, sig, A, scale, atol=1e-8)
Beispiel #60
0
def motif3struct_wei(W):
    '''
    Structural motifs are patterns of local connectivity. Motif frequency
    is the frequency of occurrence of motifs around a node. Motif intensity
    and coherence are weighted generalizations of motif frequency.

    Parameters
    ----------
    W : NxN np.ndarray
        weighted directed connection matrix (all weights between 0 and 1)

    Returns
    -------
    I : 13xN np.ndarray
        motif intensity matrix
    Q : 13xN np.ndarray
        motif coherence matrix
    F : 13xN np.ndarray
        motif frequency matrix

    Notes
    -----
    Average intensity and coherence are given by I./F and Q./F.
    '''
    from scipy import io
    import os
    fname = os.path.join(os.path.dirname(__file__), motiflib)
    mot = io.loadmat(fname)
    m3 = mot['m3']
    m3n = mot['m3n']
    id3 = mot['id3'].squeeze()
    n3 = mot['n3'].squeeze()

    n = len(W)  # number of vertices in W
    I = np.zeros((13, n))  # intensity
    Q = np.zeros((13, n))  # coherence
    F = np.zeros((13, n))  # frequency

    A = binarize(W, copy=True)  # create binary adjmat
    As = np.logical_or(A, A.T)  # symmetrized adjmat

    for u in range(n - 2):
        # v1: neighbors of u (>u)
        V1 = np.append(np.zeros((u, ), dtype=int), As[u, u + 1:n + 1])
        for v1 in np.where(V1)[0]:
            # v2: neighbors of v1 (>u)
            V2 = np.append(np.zeros((u, ), dtype=int), As[v1, u + 1:n + 1])
            V2[V1] = 0  # not already in V1
            # and all neighbors of u (>v1)
            V2 = np.logical_or(
                np.append(np.zeros((v1, )), As[u, v1 + 1:n + 1]), V2)
            for v2 in np.where(V2)[0]:
                a = np.array(
                    (A[v1, u], A[v2, u], A[u, v1], A[v2, v1], A[u, v2],
                     A[v1, v2]))
                s = np.uint32(np.sum(np.power(10, np.arange(5, -1, -1)) * a))
                ix = np.squeeze(s == m3n)

                w = np.array(
                    (W[v1, u], W[v2, u], W[u, v1], W[v2, v1], W[u, v2], W[v1,
                                                                          v2]))

                M = w * m3[ix, :]
                id = id3[ix] - 1
                l = n3[ix]
                x = np.sum(M, axis=1) / l  # arithmetic mean
                M[M == 0] = 1  # enable geometric mean
                i = np.prod(M, axis=1)**(1 / l)  # intensity
                q = i / x  # coherence

                # add to cumulative counts
                I[id, u] += i
                I[id, v1] += i
                I[id, v2] += i
                Q[id, u] += q
                Q[id, v1] += q
                Q[id, v2] += q
                F[id, u] += 1
                F[id, v1] += 1
                F[id, v2] += 1

    return I, Q, F
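#Hedged usage sketch: this function matches the bctpy implementation (available on PyPI as
#"bctpy", imported as bct), which ships the motif library it loads internally. The random
#matrix below is only for illustration; weights must lie in [0, 1].
import numpy as np
import bct

rng = np.random.RandomState(0)
W = rng.rand(12, 12)
np.fill_diagonal(W, 0)  #connection matrices have no self-connections
W[W < 0.7] = 0          #sparsify so that only some 3-node motifs occur

I, Q, F = bct.motif3struct_wei(W)  #each output is 13 x N (13 motif classes, N nodes)
print(F.sum(axis=1))               #total frequency of each motif class over all nodes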