def get_StartTime(self):
    """
    Uses the SWMM5 conversion functions to pull the DateTime string and
    convert it to a Python datetime.

    :return: Simulation start time.
    :rtype: datetime

    Examples:

    >>> OutputFile = SWMMBinReader()
    >>> OutputFile.OpenBinFile("outputfile.out")
    >>> OutputFile.get_StartTime()
    datetime.datetime(2016, 10, 4, 12, 4, 0)
    """
    _StartTime = self._get_StartTimeSWMM()
    _date = int(_StartTime)
    _time = _StartTime - _date

    # Pull date string
    DateStr = ctypes.create_string_buffer(50)
    self.SWMMdateToStr(ctypes.c_double(_date), ctypes.byref(DateStr))
    DATE = DateStr.value

    # Pull time string
    TimeStr = ctypes.create_string_buffer(50)
    self.SWMMtimeToStr(ctypes.c_double(_time), ctypes.byref(TimeStr))
    TIME = TimeStr.value

    DTime = datetime.strptime(DATE + ' ' + TIME, '%Y-%b-%d %H:%M:%S')
    return DTime
def getNodePos(self, i, j, k):
    """Will return the (x, y, z) for the node given by (i, j, k).

    Observe that this method does not consider cells, but the nodes in
    the grid. This means that the valid input range for i, j and k is
    upper-end inclusive. To get the four bounding points of the lower
    layer of the grid:

        p0 = grid.getNodePos(0, 0, 0)
        p1 = grid.getNodePos(grid.getNX(), 0, 0)
        p2 = grid.getNodePos(0, grid.getNY(), 0)
        p3 = grid.getNodePos(grid.getNX(), grid.getNY(), 0)
    """
    if not 0 <= i <= self.getNX():
        raise IndexError("Invalid I value:%d - valid range: [0,%d]" % (i, self.getNX()))
    if not 0 <= j <= self.getNY():
        raise IndexError("Invalid J value:%d - valid range: [0,%d]" % (j, self.getNY()))
    if not 0 <= k <= self.getNZ():
        raise IndexError("Invalid K value:%d - valid range: [0,%d]" % (k, self.getNZ()))

    x = ctypes.c_double()
    y = ctypes.c_double()
    z = ctypes.c_double()
    cfunc.get_corner_xyz(self, i, j, k,
                         ctypes.byref(x), ctypes.byref(y), ctypes.byref(z))
    return (x.value, y.value, z.value)
def create_yamr(self, stor_file, port, info_filename, n_fail_max,
                overdue_resched_fac, overdue_giveup_fac):
    func = self.runlib.rmic_create_yamr
    func.restype = ctypes.c_void_p
    self.obj = func(
        ctypes.c_char_p(stor_file),
        ctypes.c_char_p(port),
        ctypes.c_char_p(info_filename),
        n_fail_max,
        ctypes.c_double(overdue_resched_fac),
        ctypes.c_double(overdue_giveup_fac))
def gamma3(pulsar, tobs, harmonic):
    """S/N ratio decrease in an accel & jerk pulsar search."""
    gamma = C.c_double(0.)
    harm = float(harmonic)
    m1 = pulsar.pulsar_mass_msolar
    m2 = pulsar.companion_mass_msolar
    period_s = pulsar.period / 1000.
    long_peri = pulsar.long_peri_degrees
    inc_deg = pulsar.inclination_degrees
    ecc = pulsar.ecc
    orb_p = pulsar.orbital_period_days
    gammalib.gamma3_(
        C.byref(C.c_double(harm)),
        C.byref(C.c_double(tobs)),
        C.byref(C.c_double(m1)),
        C.byref(C.c_double(m2)),
        C.byref(C.c_double(period_s)),
        C.byref(C.c_double(long_peri)),
        C.byref(C.c_double(inc_deg)),
        C.byref(C.c_double(ecc)),
        C.byref(C.c_double(orb_p)),
        C.byref(gamma)
    )
    return gamma.value
def call_forward_cic(nx1, nx2, boxsize, yif1, yif2):
    img_in = np.array(np.ones(len(yif1)), dtype=ct.c_double)
    yif1 = np.array(yif1, dtype=ct.c_double)
    yif2 = np.array(yif2, dtype=ct.c_double)
    img_out = np.zeros((nx1, nx2))
    fcic.forward_cic(img_in, yif1, yif2,
                     ct.c_double(boxsize), ct.c_double(boxsize),
                     ct.c_int(nx1), ct.c_int(nx2), ct.c_int(len(yif1)),
                     img_out)
    return img_out.T
def mul(self, a, b, out, alpha=1.0, beta=0.0):
    w = self._wrappers

    # Ensure the matrices are compatible
    if a.nrow != out.nrow or a.ncol != b.nrow or b.ncol != out.ncol:
        raise ValueError('Incompatible matrices for out = a*b')

    # CUBLAS expects inputs to be column-major (or Fortran order in
    # numpy parlance).  However, as C = A*B => C^T = (A*B)^T
    # = (B^T)*(A^T), with a little trickery we can multiply our
    # row-major matrices directly.
    m, n, k = b.ncol, a.nrow, a.ncol
    A, B, C = b, a, out

    # Do not transpose either A or B
    opA = opB = w.CUBLAS_OP_N

    # α and β factors for C = α*(A*op(B)) + β*C
    if a.dtype == np.float64:
        cublasgemm = w.cublasDgemm
        alpha_ct, beta_ct = c_double(alpha), c_double(beta)
    else:
        cublasgemm = w.cublasSgemm
        alpha_ct, beta_ct = c_float(alpha), c_float(beta)

    class MulKernel(ComputeKernel):
        def run(iself, queue):
            w.cublasSetStream(self._handle, queue.cuda_stream_comp.handle)
            cublasgemm(self._handle, opA, opB, m, n, k,
                       alpha_ct, A, A.leaddim, B, B.leaddim,
                       beta_ct, C, C.leaddim)

    return MulKernel()
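# Why the operand swap above works: cuBLAS sees a row-major C-order buffer
# as its column-major transpose, and C = A*B is equivalent to
# C^T = (B^T)*(A^T), so calling gemm with (B, A) and no transposes yields C
# laid out row-major.  A small numpy sanity check of the identity:
import numpy as np

A = np.random.rand(3, 4)
B = np.random.rand(4, 2)
assert np.allclose((A @ B).T, B.T @ A.T)  # the transpose identity used above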
def _dgemm(trans_a, trans_b, m, n, k, a, b, c, alpha=1, beta=0,
           offseta=0, offsetb=0, offsetc=0):
    if a.size == 0 or b.size == 0:
        if beta == 0:
            c[:] = 0
        else:
            c[:] *= beta
        return c

    assert a.flags.c_contiguous
    assert b.flags.c_contiguous
    assert c.flags.c_contiguous

    _np_helper.NPdgemm(ctypes.c_char(trans_b.encode('ascii')),
                       ctypes.c_char(trans_a.encode('ascii')),
                       ctypes.c_int(n), ctypes.c_int(m), ctypes.c_int(k),
                       ctypes.c_int(b.shape[1]), ctypes.c_int(a.shape[1]),
                       ctypes.c_int(c.shape[1]),
                       ctypes.c_int(offsetb), ctypes.c_int(offseta),
                       ctypes.c_int(offsetc),
                       b.ctypes.data_as(ctypes.c_void_p),
                       a.ctypes.data_as(ctypes.c_void_p),
                       c.ctypes.data_as(ctypes.c_void_p),
                       ctypes.c_double(alpha), ctypes.c_double(beta))
    return c
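# A minimal usage sketch for _dgemm, assuming the compiled _np_helper
# library above is importable (pyscf-style).  The swapped trans/shape
# arguments mean the call effectively computes c = alpha*a.dot(b) + beta*c
# on row-major arrays:
import numpy as np

a = np.random.rand(4, 6)
b = np.random.rand(6, 5)
c = np.zeros((4, 5))
_dgemm('N', 'N', 4, 5, 6, a, b, c)
# expected (if NPdgemm matches BLAS dgemm semantics): c ~ a @ b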
def goMapping(freq=30):
    global myMap, q

    # Parameters
    RED_BALL = 4
    GREEN_BALL = 5
    YELLOW_WALL = 2

    myMap.initMapping()
    while not q:
        start = time.time()
        sensorPoints = ct.getSensorPoints()
        pose = ct.getPose()
        balls = getBalls()
        knownBalls = [v.getBallCoords(b, pose) for b in getKnownBalls(balls)]
        specialWallPoints = [getPoint(pose, v.getAreaAngle(a, pose), 640)
                             for a in getWalls()]

        # Mapping update
        myMap.robotPositioned(ctypes.c_double(pose[0]), ctypes.c_double(pose[1]))
        for a in knownBalls:
            myMap.ballDetected(ctypes.c_double(a[0]), ctypes.c_double(a[1]),
                               ctypes.c_int(RED_BALL))
        for wp in specialWallPoints:
            myMap.specialWall(ctypes.c_double(wp[0]), ctypes.c_double(wp[1]),
                              ctypes.c_int(YELLOW_WALL))
        for s in sensorPoints:
            if s[2]:
                myMap.wallDetected(ctypes.c_double(s[0]), ctypes.c_double(s[1]))
            else:
                myMap.wallNotDetected(ctypes.c_double(s[0]), ctypes.c_double(s[1]))

        time.sleep(max(0, 1.0 / float(freq) - (time.time() - start)))
def __getitem__(self, key):
    self._update()
    dx = c_double()
    dy = c_double()
    dz = c_double()
    m = self.__len__()
    has_z = self._ndim == 3
    if isinstance(key, int):
        if key + m < 0 or key >= m:
            raise IndexError("index out of range")
        if key < 0:
            i = m + key
        else:
            i = key
        lgeos.GEOSCoordSeq_getX(self._cseq, i, byref(dx))
        lgeos.GEOSCoordSeq_getY(self._cseq, i, byref(dy))
        if has_z:
            lgeos.GEOSCoordSeq_getZ(self._cseq, i, byref(dz))
            return (dx.value, dy.value, dz.value)
        else:
            return (dx.value, dy.value)
    elif isinstance(key, slice):
        res = []
        start, stop, stride = key.indices(m)
        for i in range(start, stop, stride):
            lgeos.GEOSCoordSeq_getX(self._cseq, i, byref(dx))
            lgeos.GEOSCoordSeq_getY(self._cseq, i, byref(dy))
            if has_z:
                lgeos.GEOSCoordSeq_getZ(self._cseq, i, byref(dz))
                res.append((dx.value, dy.value, dz.value))
            else:
                res.append((dx.value, dy.value))
        return res
    else:
        raise TypeError("key must be an index or slice")
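# A hedged usage sketch: assuming `seq` is one of these GEOS coordinate
# sequences (e.g. a shapely-style LineString.coords), integer indexing and
# slicing both route through __getitem__ above.
# seq = LineString([(0, 0), (1, 1), (2, 0)]).coords   # hypothetical setup
# seq[0]      # -> (0.0, 0.0)
# seq[-1]     # -> (2.0, 0.0); negative indices wrap as in plain lists
# seq[0:2]    # -> [(0.0, 0.0), (1.0, 1.0)]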
def _get_bounds(handle, bounds_fn, interleaved):
    pp_mins = ctypes.pointer(ctypes.c_double())
    pp_maxs = ctypes.pointer(ctypes.c_double())
    dimension = ctypes.c_uint32(0)

    bounds_fn(handle,
              ctypes.byref(pp_mins),
              ctypes.byref(pp_maxs),
              ctypes.byref(dimension))
    if dimension.value == 0:
        return None

    mins = ctypes.cast(pp_mins, ctypes.POINTER(ctypes.c_double * dimension.value))
    maxs = ctypes.cast(pp_maxs, ctypes.POINTER(ctypes.c_double * dimension.value))

    results = [mins.contents[i] for i in range(dimension.value)]
    results += [maxs.contents[i] for i in range(dimension.value)]

    p_mins = ctypes.cast(mins, ctypes.POINTER(ctypes.c_double))
    p_maxs = ctypes.cast(maxs, ctypes.POINTER(ctypes.c_double))
    core.rt.Index_Free(ctypes.cast(p_mins, ctypes.POINTER(ctypes.c_void_p)))
    core.rt.Index_Free(ctypes.cast(p_maxs, ctypes.POINTER(ctypes.c_void_p)))

    if interleaved:  # they want bbox order.
        return results
    return Index.deinterleave(results)
def calc_fdtd(ey, hz, dx, dt, hist_ey):
    """FDTD main routine."""
    last_step = int(Const.STOP_TIME / dt)

    fp1 = open('fdtd_point.csv', 'w')
    print_point_value_header(fp1)
    fp2 = open('fdtd_line.csv', 'w')

    resource = ctypes.CDLL('./utility.so')
    tstart = ctypes.c_double()
    resource.get_current_time_by_sec(ctypes.byref(tstart))

    # Main loop
    step = 0
    while step <= last_step:
        if step % 100 == 0:
            print("%d / %d (%.2f %%)" % (step, last_step,
                                         float(step) / last_step * 100))

        # Magnetic current source: place the source at (0.01 mm, 0.05 mm)
        stimulus = calc_stimulus(step, dt)
        hz[500] += stimulus * dt

        # Update Ey
        calc_ey(ey, hz, dx, dt)

        # Boundary condition
        ey_boundary(ey, hist_ey, dt)
        hist_update(ey, hist_ey)

        # Update Hz
        calc_hz(hz, ey, dx, dt)

        # Waveform output
        # print_point_value(fp1, hz, step * dt, stimulus)
        # if step % 100 == 0:
        #     print_line_value(fp2, hz, step)

        step += 1

    tend = ctypes.c_double()
    resource.get_current_time_by_sec(ctypes.byref(tend))
    memsize = ctypes.c_long()
    resource.get_use_memory_size_from_mac(ctypes.byref(memsize))

    fp1.close()
    fp2.close()

    print("All User Time: %.2f [sec], %.2f [min], %.2f [hour]" %
          (tend.value - tstart.value,
           (tend.value - tstart.value) / 60,
           (tend.value - tstart.value) / 3600))
    if 0 <= memsize.value < 1024:
        print("Memory : %.2f [B]" % float(memsize.value))
    elif 1024 <= memsize.value and memsize.value / 1024 < 1024:
        print("Memory : %.2f [KB]" % (float(memsize.value) / 1024))
    elif 1024 <= memsize.value / 1024 and memsize.value / 1024 / 1024 < 1024:
        print("Memory : %.2f [MB]" % (float(memsize.value) / 1024 / 1024))
    elif 1024 <= memsize.value / 1024 / 1024:
        print("Memory : %.2f [GB]" % (float(memsize.value) / 1024 / 1024 / 1024))
def goRelative(self, dx, dy):
    if self.good:
        self.jog(0.0, 0.0)
        dX = ctypes.c_double(dx * self.um_to_unit)
        dY = ctypes.c_double(dy * self.um_to_unit)
        dZA = ctypes.c_double(0.0)
        tango.LSX_MoveRel(self.LSID, dX, dY, dZA, dZA, self.wait)
def get_coordinate_pointers(self, coordinates):
    try:
        iter(coordinates)
    except TypeError:
        raise TypeError('Bounds must be a sequence')

    dimension = self.properties.dimension
    mins = ctypes.c_double * dimension
    maxs = ctypes.c_double * dimension

    if not self.interleaved:
        coordinates = Index.interleave(coordinates)

    # If it's a point, make it into a bbox: [x, y] => [x, y, x, y]
    if len(coordinates) == dimension:
        coordinates += coordinates

    if len(coordinates) != dimension * 2:
        raise core.RTreeError("Coordinates must be in the form "
                              "(minx, miny, maxx, maxy) or (x, y) for 2D indexes")

    # So here all coords are in the form:
    # [xmin, ymin, zmin, xmax, ymax, zmax]
    for i in range(dimension):
        if not coordinates[i] <= coordinates[i + dimension]:
            raise core.RTreeError("Coordinates must not have minimums more than maximums")

    p_mins = mins(*[ctypes.c_double(coordinates[i])
                    for i in range(dimension)])
    p_maxs = maxs(*[ctypes.c_double(coordinates[i + dimension])
                    for i in range(dimension)])

    return (p_mins, p_maxs)
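# Hedged usage sketch for get_coordinate_pointers, assuming an rtree-style
# Index with the default 2D interleaved properties.  A 2-tuple point is
# promoted to a degenerate bbox before the ctypes arrays are built.
# idx = index.Index()                                       # hypothetical setup
# p_mins, p_maxs = idx.get_coordinate_pointers((0.0, 0.0, 10.0, 10.0))
# list(p_mins), list(p_maxs)   # -> [0.0, 0.0], [10.0, 10.0]
# idx.get_coordinate_pointers((5.0, 5.0))                   # point -> bbox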
def setFrom(self, val, name=None):
    """Create an argument using a value as input.

    Arguments:
    val -- an OpenSCAD value
    name -- the name of the value (string)
    """
    if name:
        self.name = ctypes.c_char_p(name)
    else:
        self.name = ctypes.c_char_p(0)

    if isinstance(val, bool):
        self.type = 'b'
        self.boolValue = ctypes.c_bool(val)
    elif isinstance(val, int) or isinstance(val, float):
        self.type = 'd'
        self.dblValue = ctypes.c_double(val)
    elif isinstance(val, str):
        self.type = 's'
        self.strValue = ctypes.c_char_p(val)
    elif isinstance(val, list) or isinstance(val, tuple):
        self.type = 'v'
        self.vecLen = ctypes.c_int(len(val))
        arr = (ctypes.c_double * len(val))()
        for i, v in enumerate(val):
            arr[i] = ctypes.c_double(v)
        self.vecValue = arr
def trigger_IQ_bandwidth_set(self, dBandWidth, iOption=0):
    afDigitizerDll_Trigger_SetIntIQTriggerDigitalBandwidth = getDllObject(
        'afDigitizerDll_Trigger_SetIntIQTriggerDigitalBandwidth',
        argtypes=[afDigitizerInstance_t, c_double, c_int, POINTER(c_double)])
    dValue = c_double()
    error = afDigitizerDll_Trigger_SetIntIQTriggerDigitalBandwidth(
        self.session, c_double(dBandWidth), c_int(iOption), byref(dValue))
    self.check_error(error)
    return dValue.value
def util_lon_lat(orig_lon, orig_lat, x, y):
    """
    Transform x, y [km] to decimal degrees relative to orig_lon and orig_lat.

    >>> util_lon_lat(12.0, 48.0, 0.0, 0.0)
    (12.0, 48.0)
    >>> lon, lat = util_lon_lat(12.0, 48.0, 73.9041, 111.1908)
    >>> print("%.4f, %.4f" % (lon, lat))
    13.0000, 49.0000

    :param orig_lon: Longitude of reference origin
    :param orig_lat: Latitude of reference origin
    :param x: value [km] to calculate relative coordinate in degree
    :param y: value [km] to calculate relative coordinate in degree
    :return: lon, lat coordinate in degree (absolute)
    """
    # 2009-10-11 Moritz
    clibsignal.utl_lonlat.argtypes = [C.c_double, C.c_double, C.c_double,
                                      C.c_double, C.POINTER(C.c_double),
                                      C.POINTER(C.c_double)]
    clibsignal.utl_lonlat.restype = C.c_void_p

    lon = C.c_double()
    lat = C.c_double()

    clibsignal.utl_lonlat(orig_lon, orig_lat, x, y, C.byref(lon), C.byref(lat))
    return lon.value, lat.value
def callback(x, comm):
    """Callback that calculates the Black-Scholes option price for a
    given volatility."""
    fail = NagError()
    p_userdata = cast(comm[0].p, py_object)
    userdata = p_userdata.value

    time = c_double(userdata[0])
    callput = userdata[1]
    strike = c_double(userdata[2])
    underlying = userdata[3]
    current_price = userdata[4]
    out = c_double(0.0)

    # NAG function call.
    # Update input with risk-free (r) and dividend (q) rate if any:
    # (Nag_RowMajor, callput, 1, 1, strike, underlying, time, x, r, q, out, fail)
    s30aac(Nag_RowMajor, callput, 1, 1, strike, underlying, time, x,
           0.0, 0.0, out, fail)

    if fail.code == 0:
        return out.value - current_price

    print(fail.message)
    return 0.0
def __init__(self, machine, name, **keys):
    super(self.__class__, self).__init__(machine, name)
    self.AddInput("signal")
    self.AddOutput("out")

    self.Gain = math.pi * 0.5
    if 'gain' in keys:
        self.Gain = keys['gain']
    else:
        print("WARNING! No gain given, using default gain = " + str(self.Gain))

    self.Q = math.sqrt(2.0) * 0.5
    if 'Q' in keys:
        self.Q = keys['Q']
    else:
        print("WARNING! No Q given, using default Q = " + str(self.Q))

    self.fc = 0
    if 'fc' in keys:
        self.fc = keys['fc']
    else:
        raise NameError("Missing fc!")

    self.cCoreID = Circuit.cCore.Add_SKLP(self.machine.cCoreID,
                                          c_double(self.fc),
                                          c_double(self.Q),
                                          c_double(self.Gain))
    self.SetInputs(**keys)
def d_line_setter(d_line, model, packet):
    packet.mu = c_double(0.0)
    scale = d_line * 1e1
    model.time_explosion = c_double(INVERSE_C * scale)
    packet.nu = c_double(1.0)
    nu_line = (1. - d_line / scale)
    packet.nu_line = c_double(nu_line)
def configure(self, n_samples, seconds_per_point):
    """
    Configures the sampling length and rate.

    n == 0:       single point
    0 < n < Inf:  single waveform
    """
    if n_samples == 0:
        CHK(dll.DAQmxSetSampTimingType(self.ao_task, DAQmx_Val_OnDemand))
    elif n_samples < numpy.inf:
        f = 1. / seconds_per_point
        CHK(dll.DAQmxSetSampTimingType(self.ao_task, DAQmx_Val_SampClk))
        CHK(dll.DAQmxCfgSampClkTiming(self.ao_task,
                                      self.co_dev + 'InternalOutput',
                                      ctypes.c_double(f),
                                      DAQmx_Val_Falling,
                                      DAQmx_Val_FiniteSamps,
                                      ctypes.c_ulonglong(n_samples)))
        CHK(dll.DAQmxSetCOPulseFreq(self.co_task, self.co_dev,
                                    ctypes.c_double(f)))
        CHK(dll.DAQmxCfgImplicitTiming(self.co_task, DAQmx_Val_ContSamps,
                                       ctypes.c_ulonglong(n_samples + 1)))
        CHK(dll.DAQmxCfgImplicitTiming(self.ci_task, DAQmx_Val_FiniteSamps,
                                       ctypes.c_ulonglong(n_samples + 1)))
        self.ci_data = numpy.empty((n_samples + 1,), dtype=numpy.uint32)
def __init__(self, machine, name, **keys):
    super(self.__class__, self).__init__(machine, name)
    self.AddInput("signal")
    self.AddOutput("out")

    self.Gain = math.pi * 0.5
    if 'gain' in keys:
        self.Gain = keys['gain']
    else:
        print("WARNING! No gain given, using default gain = " + str(self.Gain))

    self.fc = 0
    if 'fcut' in keys:
        self.fc = keys['fcut']
    else:
        raise NameError("Missing fcut!")

    self.band = 0
    if 'band' in keys:
        self.band = keys['band']
    else:
        raise NameError("Missing band!")

    self.cCoreID = Circuit.cCore.Add_SKBP(self.machine.cCoreID,
                                          c_double(self.fc),
                                          c_double(self.band),
                                          c_double(self.Gain))
def hscryptkdf(password, dklen, maxmem, maxmemfrac, maxtime,
               params=None, saltsz=32, nocheck=False):
    dk = create_string_buffer(dklen)

    # get the lib's param size
    psz = _hgetparamsize()

    # check length of params
    if params is not None and len(params) < (psz + saltsz):
        raise Exception('params is %s bytes but this build of the scrypt lib '
                        'needs %s (salt size %s).'
                        % (len(params), psz + saltsz, saltsz))

    if params is None:
        print('creating param bytes')
        params = create_string_buffer(psz + saltsz)
        recover = 0
    else:
        print('using param bytes')
        params = c_char_p(params)
        recover = 1

    nocheck = 1 if nocheck else 0

    rcode = _hscryptkdf(
        c_char_p(password), c_size_t(len(password)),
        dk, c_size_t(dklen), c_size_t(saltsz),
        c_double(maxmem), c_double(maxmemfrac), c_double(maxtime),
        params, c_uint8(recover), c_uint8(nocheck)
    )

    if recover == 0:
        # convert from string buffer into a bytes object
        params = bytes(params)

    return (rcode, bytes(dk), params)
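# Hedged usage sketch: derive a 32-byte key, then re-derive it from the
# returned params blob (which carries the scrypt parameters plus salt).
# _hscryptkdf and _hgetparamsize are the ctypes bindings assumed above; the
# (maxmem, maxmemfrac, maxtime) limits follow scrypt's usual meaning.
# rcode, dk, params = hscryptkdf(b'correct horse', 32, 0, 0.125, 0.5)
# rcode2, dk2, _ = hscryptkdf(b'correct horse', 32, 0, 0.125, 0.5, params=params)
# assert dk == dk2   # same password + same params -> same key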
def __init__(self, uid, timebase=None):
    if timebase is None:
        logger.debug("Capture will run with default system timebase")
        self.timebase = c_double(0)
    elif hasattr(timebase, 'value'):
        logger.debug("Capture will run with app wide adjustable timebase")
        self.timebase = timebase
    else:
        logger.error("Invalid timebase variable type. Will use default system timebase")
        self.timebase = c_double(0)

    # self.use_hw_ts = self.check_hw_ts_support()
    self.use_hw_ts = False
    self._last_timestamp = self.get_now()

    self.capture = uvc.Capture(uid)
    self.uid = uid
    logger.debug('available modes %s' % self.capture.avaible_modes)

    controls_dict = dict([(c.display_name, c) for c in self.capture.controls])
    try:
        controls_dict['Auto Focus'].value = 0
    except KeyError:
        pass
    try:
        # Auto Exposure Priority = 1 leads to reduced framerates under low
        # light and corrupt timestamps.
        controls_dict['Auto Exposure Priority'].value = 0
    except KeyError:
        pass

    self.sidebar = None
    self.menu = None
def Vegas(integrand, ndim, userdata=NULL, epsrel=EPSREL, epsabs=EPSABS,
          verbose=0, ncomp=1, seed=None, mineval=MINEVAL, maxeval=MAXEVAL,
          nstart=NSTART, nincrease=NINCREASE, nbatch=NBATCH,
          gridno=GRIDNO, statefile=NULL):
    neval = c_int()
    fail = c_int()
    comp = c_int()
    ARR = c_double * ncomp
    integral = ARR()
    error = ARR()
    prob = ARR()

    if seed is None:
        seed = 0

    lib.Vegas(ndim, ncomp, integrand_type(integrand), userdata,
              c_double(epsrel), c_double(epsabs), verbose, seed,
              mineval, maxeval, nstart, nincrease, nbatch,
              gridno, statefile,
              byref(neval), byref(fail), integral, error, prob)

    return dict(neval=neval.value, fail=fail.value, comp=comp.value,
                results=[{
                    'integral': integral[comp],
                    'error': error[comp],
                    'prob': prob[comp]
                } for comp in range(ncomp)])
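# Hedged usage sketch for the Vegas wrapper above.  Cuba integrates over
# the unit hypercube [0,1]^ndim, and integrand_type is assumed to be the
# module's ctypes callback factory with the usual Cuba signature.
def xy_integrand(ndim, xx, ncomp, ff, userdata):
    ff[0] = xx[0] * xx[1]   # f(x, y) = x*y on [0,1]^2, exact integral 0.25
    return 0

res = Vegas(xy_integrand, ndim=2)
print(res['results'][0]['integral'])   # ~0.25 within the reported error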
def Suave(integrand, ndim, nnew=1000, flatness=25., userdata=NULL,
          epsrel=EPSREL, epsabs=EPSABS, verbose=0, ncomp=1, seed=None,
          mineval=MINEVAL, maxeval=MAXEVAL):
    neval = c_int()
    fail = c_int()
    comp = c_int()
    nregions = c_int()
    ARR = c_double * ncomp
    integral = ARR()
    error = ARR()
    prob = ARR()

    if seed is None:
        seed = 0

    lib.Suave(ndim, ncomp, integrand_type(integrand), userdata,
              c_double(epsrel), c_double(epsabs), verbose, seed,
              mineval, maxeval, nnew, c_double(flatness),
              byref(nregions), byref(neval), byref(fail),
              integral, error, prob)

    return dict(neval=neval.value, fail=fail.value, comp=comp.value,
                nregions=nregions.value,
                results=[{
                    'integral': integral[comp],
                    'error': error[comp],
                    'prob': prob[comp]
                } for comp in range(ncomp)])
def computeEigenVectorsOfHessianImage(imgStack, zAnisotropyFactor, sigma=3.5):
    """Computes an orientation matrix (3x3) per pixel."""
    if imgStack.dtype != np.dtype("uint8"):
        raise RuntimeError("image must be of uint8 type")
    if not imgStack.flags["C_CONTIGUOUS"]:
        raise RuntimeError("image must be C_CONTIGUOUS, and must be provided in z-y-x order.")
    if imgStack.ndim != 3:
        raise RuntimeError("image must be 3D.")

    # Data is C-style memory layout (z-y-x)
    width = imgStack.shape[2]
    height = imgStack.shape[1]
    depth = imgStack.shape[0]

    ptr_address = libPtr.computeEigenVectorsOfHessianImage(
        ctypes.c_void_p(imgStack.ctypes.data),
        ctypes.c_int(width), ctypes.c_int(height), ctypes.c_int(depth),
        ctypes.c_double(zAnisotropyFactor), ctypes.c_double(sigma))

    return EigenVectorsOfHessianImage(ptr_address, imgStack.shape + (3, 3))
def generate_photo_current(mesh, e_field, problem):
    current = 0
    accumulated_charge = 0.
    total_photons = len(mesh.coordinates())
    particles = mpc.CParticles(2000, mesh.c_mesh, mesh.dim)
    e_field = np.array(mc.pre_compute_field(mesh, e_field))
    nextDensity = problem.density_funcs.combined_density.vector().array().astype('int')

    for point in range(len(mesh.coordinates())):
        new_particle(point, particles, problem, -1, mesh)
        new_particle(point, particles, problem, +1, mesh)

    for rep in range(300):
        accumulated_charge += mpc.lib.photocurrentC(
            particles.ptr, nextDensity.ctypes.data, e_field.ctypes.data,
            mesh.c_mesh, ctypes.c_double(mesh.dt),
            ctypes.c_double(mesh.length_scale))
        print("accumulated_charge", accumulated_charge, total_photons)

    charge_per_photon = constants.eC * accumulated_charge / total_photons
    power = 1000.  # per meter squared
    photons_sec = power * photons_per_watt(400 * 10**-9)
    current = charge_per_photon * photons_sec
    print("photocurrent:", current)
    return current
def lagrange_interpol_2D_td(points1, points2, coefficients, x1, x2):
    points1 = np.require(points1, dtype=np.float64, requirements=["F_CONTIGUOUS"])
    points2 = np.require(points2, dtype=np.float64, requirements=["F_CONTIGUOUS"])
    coefficients = np.require(coefficients, dtype=np.float64,
                              requirements=["F_CONTIGUOUS"])

    # Should be safe enough. This was never raised while extracting a lot of
    # seismograms.
    assert len(points1) == len(points2)

    N = len(points1) - 1
    nsamp = coefficients.shape[0]

    interpolant = np.zeros(nsamp, dtype="float64", order="F")

    lib.lagrange_interpol_2D_td(
        C.c_int(N), C.c_int(nsamp),
        points1.ctypes.data_as(C.POINTER(C.c_double)),
        points2.ctypes.data_as(C.POINTER(C.c_double)),
        coefficients.ctypes.data_as(C.POINTER(C.c_double)),
        C.c_double(x1), C.c_double(x2),
        interpolant.ctypes.data_as(C.POINTER(C.c_double)))

    return interpolant
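# Hedged usage sketch: interpolate a time series of nodal coefficients at
# reference coordinates (x1, x2).  The coefficient layout of
# (nsamp, N+1, N+1) is an assumption from the wrapper's N/nsamp handling;
# the compiled `lib` is assumed loaded.
# import numpy as np
# gll = np.array([-1.0, 0.0, 1.0])                 # N = 2
# coeff = np.random.rand(100, 3, 3)                # nsamp x (N+1) x (N+1)
# trace = lagrange_interpol_2D_td(gll, gll, coeff, 0.25, -0.5)
# trace.shape                                      # -> (100,)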
def Cuhre(integrand, ndim, key=0, mineval=MINEVAL, maxeval=MAXEVAL,
          ncomp=1, userdata=NULL, seed=None, epsrel=EPSREL, epsabs=EPSABS,
          verbose=0, statefile=NULL, nvec=1):
    """
    *key* chooses the basic integration rule:
    key = 7, 9, 11, 13 selects the cubature rule of degree key. Note that
    the degree-11 rule is available only in 3 dimensions, the degree-13
    rule only in 2 dimensions.

    For other values, the default rule is taken, which is the degree-13
    rule in 2 dimensions, the degree-11 rule in 3 dimensions, and the
    degree-9 rule otherwise.
    """
    neval = c_int()
    fail = c_int()
    comp = c_int()
    nregions = c_int()
    ARR = c_double * ncomp
    integral = ARR()
    error = ARR()
    prob = ARR()

    if seed is None:
        seed = 0

    lib.Cuhre(ndim, ncomp, wrap_integrand(integrand), userdata, c_int(nvec),
              c_double(epsrel), c_double(epsabs), verbose,
              mineval, maxeval, key, statefile, spin,
              byref(nregions), byref(neval), byref(fail),
              integral, error, prob)

    return dict(neval=neval.value, fail=fail.value, comp=comp.value,
                nregions=nregions.value,
                results=[{
                    'integral': integral[comp],
                    'error': error[comp],
                    'prob': prob[comp]
                } for comp in range(ncomp)])
def __init__(self, rf_freq, rf_dur):
    # Constants corresponding to various NI options, defined with ctypes
    vi = c_int(1)
    o1 = c_int(1)
    o2 = c_int(101)
    o3 = c_int(1000000 + 150000 + 219)
    o4 = c_double(0)
    length = len(rf_freq)

    # Loading the driver to call niFgen functions
    try:
        self.nfg = windll.LoadLibrary("niFgen_32.dll")
    except OSError:
        self.nfg = windll.LoadLibrary("niFgen_64.dll")

    # The way to initialize niFgen can be found in the NI examples.
    # This is copied from there, only translated into ctypes.
    self.nfg.niFgen_init(b'Dev3', c_bool(1), c_bool(1), byref(vi))
    self.nfg.niFgen_ConfigureChannels(vi, b"0")
    self.nfg.niFgen_ConfigureOutputMode(vi, o2)

    # This is the way to convert a numpy array into a C array
    rff = rf_freq.ctypes.data
    rfd = rf_dur.ctypes.data
    rf = c_int(0)
    self.nfg.niFgen_CreateFreqList(vi, o1, length, rff, rfd, byref(rf))
    self.nfg.niFgen_ConfigureFreqList(vi, b"0", rf, c_double(4), o4, o4)
    self.nfg.niFgen_ConfigureDigitalEdgeStartTrigger(vi, b"PFI1", o2)
    self.nfg.niFgen_ConfigureTriggerMode(vi, b"0", c_int(2))
    self.nfg.niFgen_ConfigureOutputEnabled(vi, b"0", c_bool(1))

    # These two lines export the internal clock of the niFgen (100 MHz)
    # divided by 400 (250 kHz) on the RTSI1 channel. It's the only way
    # to be sure that all cards are synchronized on the same clock.
    self.nfg.niFgen_SetAttributeViInt32(vi, b"", o3, c_long(400))
    self.nfg.niFgen_ExportSignal(vi, o2, b"", b"RTSI1")
    # self.niFgen.niFgen_ExportSignal(vi, c_int(1000 + 4), b"", b"RTSI2")
    self.nfg.niFgen_Commit(vi)
    self.vi = vi
def get_parameter_double(key):
    val_c = c_double()
    getparameter_double_c(c_char_p(key.encode()), byref(val_c))
    return val_c.value
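# Hedged companion sketch for the out-parameter pattern above: the C side
# (getparameter_double_c is the binding assumed by this snippet) writes the
# result through the pointer, and .value unwraps it back into a Python float.
# val = get_parameter_double("time_step")   # hypothetical key
# isinstance(val, float)                    # -> True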
def _get_y(self, index):
    return capi.cs_gety(self.ptr, index, byref(c_double()))
def set_cosmology(self, H0=67.0, cosmomc_theta=None, ombh2=0.022, omch2=0.12,
                  omk=0.0, Num_drf=0.2, neutrino_hierarchy='degenerate',
                  num_massive_neutrinos=1, mnu=0.06, nnu=3.046, YHe=None,
                  meffsterile=0.0, standard_neutrino_neff=3.046,
                  TCMB=constants.COBE_CMBTemp, tau=None, deltazrei=None,
                  bbn_predictor=None, theta_H0_range=[10, 100]):  # ZP drf
    """
    Sets cosmological parameters in terms of physical densities and
    parameters used in the Planck 2015 analysis. Default settings give a
    single distinct neutrino mass eigenstate, by default one neutrino with
    mnu = 0.06 eV. Set the neutrino_hierarchy parameter to normal or
    inverted to use a two-eigenstate model that is a good approximation to
    the known mass splittings seen in oscillation measurements. If you
    require more fine-grained control, you can set the neutrino parameters
    directly rather than using this function.

    :param H0: Hubble parameter (in km/s/Mpc)
    :param cosmomc_theta: The CosmoMC theta parameter. You must set H0=None
        to solve for H0 given cosmomc_theta
    :param ombh2: physical density in baryons
    :param omch2: physical density in cold dark matter
    :param omk: Omega_K curvature parameter
    :param neutrino_hierarchy: 'degenerate', 'normal', or 'inverted'
        (1 or 2 eigenstate approximation)
    :param num_massive_neutrinos: number of massive neutrinos (ignored
        unless hierarchy == 'degenerate')
    :param mnu: sum of neutrino masses (in eV)
    :param nnu: N_eff, effective relativistic degrees of freedom
    :param YHe: Helium mass fraction. If None, set from BBN consistency.
    :param meffsterile: effective mass of sterile neutrinos
    :param standard_neutrino_neff: default value for N_eff in standard
        cosmology (non-integer to allow for partial heating of neutrinos at
        electron-positron annihilation and QED effects)
    :param TCMB: CMB temperature (in Kelvin)
    :param tau: optical depth; if None, current Reion settings are not changed
    :param deltazrei: redshift width of reionization; if None, uses default
    :param bbn_predictor: :class:`.bbn.BBNPredictor` instance used to get
        YHe from BBN consistency if YHe is None
    :param theta_H0_range: if cosmomc_theta is specified, the min, max
        interval of H0 values to map to; outside this range it will raise
        an exception.
    """
    if YHe is None:
        # use BBN prediction
        bbn_predictor = bbn_predictor or bbn.get_default_predictor()
        YHe = bbn_predictor.Y_He(ombh2, nnu - standard_neutrino_neff)
    self.YHe = YHe

    if cosmomc_theta is not None:
        if not (0.001 < cosmomc_theta < 0.1):
            raise CAMBParamRangeError('cosmomc_theta looks wrong (parameter '
                                      'is just theta, not 100*theta)')
        kw = locals()
        for x in ['self', 'H0', 'cosmomc_theta']:
            kw.pop(x)
        if H0 is not None:
            raise CAMBError('Set H0=None when setting cosmomc_theta.')
        try:
            from scipy.optimize import brentq
        except ImportError:
            raise CAMBError('You need SciPy to set cosmomc_theta.')
        from . import camb

        def f(H0):
            self.set_cosmology(H0=H0, **kw)
            return camb.get_background(self, no_thermo=True).cosmomc_theta() \
                - cosmomc_theta

        try:
            self.H0 = brentq(f, theta_H0_range[0], theta_H0_range[1], rtol=1e-4)
        except ValueError:
            raise CAMBParamRangeError('No solution for H0 inside of theta_H0_range')
    else:
        self.H0 = H0

    self.TCMB = TCMB
    fac = (self.H0 / 100.0) ** 2
    self.omegab = ombh2 / fac
    self.omegac = omch2 / fac
    self.Num_drf = Num_drf  # ZP drf

    neutrino_mass_fac = 94.07  # conversion factor for thermal with Neff=3, TCMB=2.7255
    omnuh2 = mnu / neutrino_mass_fac * (standard_neutrino_neff / 3.0) ** 0.75
    omnuh2_sterile = meffsterile / neutrino_mass_fac
    if omnuh2_sterile > 0 and nnu < standard_neutrino_neff:
        raise CAMBError('sterile neutrino mass required Neff>3.046')
    if omnuh2 and not num_massive_neutrinos:
        raise CAMBError('non-zero mnu with zero num_massive_neutrinos')

    if isinstance(neutrino_hierarchy, six.string_types):
        if neutrino_hierarchy not in neutrino_hierarchies:
            raise CAMBError('Unknown neutrino_hierarchy {0:s}'.format(neutrino_hierarchy))
        neutrino_hierarchy = neutrino_hierarchies.index(neutrino_hierarchy) + 1

    omnuh2 = omnuh2 + omnuh2_sterile
    self.omegan = omnuh2 / fac
    self.omegav = 1 - omk - self.omegab - self.omegac - self.omegan
    # self.share_delta_neff = False
    # self.nu_mass_eigenstates = 0
    # self.num_nu_massless = nnu
    # self.nu_mass_numbers[0] = 0
    # self.num_nu_massive = 0
    # if omnuh2 > omnuh2_sterile:
    #     neff_massive_standard = num_massive_neutrinos * standard_neutrino_neff / 3.0
    #     self.num_nu_massive = num_massive_neutrinos
    #     self.nu_mass_eigenstates = self.nu_mass_eigenstates + 1
    #     if nnu > neff_massive_standard:
    #         self.num_nu_massless = nnu - neff_massive_standard
    #     else:
    #         self.num_nu_massless = 0
    #         neff_massive_standard = nnu
    #     self.nu_mass_numbers[self.nu_mass_eigenstates - 1] = num_massive_neutrinos
    #     self.nu_mass_degeneracies[self.nu_mass_eigenstates - 1] = neff_massive_standard
    #     self.nu_mass_fractions[self.nu_mass_eigenstates - 1] = (omnuh2 - omnuh2_sterile) / omnuh2
    # else:
    #     neff_massive_standard = 0

    if omnuh2_sterile > 0:
        if nnu < standard_neutrino_neff:
            raise CAMBError('nnu < 3.046 with massive sterile')
        # self.num_nu_massless = standard_neutrino_neff - neff_massive_standard
        # self.num_nu_massive = self.num_nu_massive + 1
        # self.nu_mass_eigenstates = self.nu_mass_eigenstates + 1
        # self.nu_mass_numbers[self.nu_mass_eigenstates - 1] = 1
        # self.nu_mass_degeneracies[self.nu_mass_eigenstates - 1] = max(1e-6, nnu - standard_neutrino_neff)
        # self.nu_mass_fractions[self.nu_mass_eigenstates - 1] = omnuh2_sterile / omnuh2

    CAMB_SetNeutrinoHierarchy(byref(self), byref(c_double(omnuh2)),
                              byref(c_double(omnuh2_sterile)),
                              byref(c_double(nnu)),
                              byref(c_int(neutrino_hierarchy)),
                              byref(c_int(num_massive_neutrinos)))

    if tau is not None:
        self.Reion.set_tau(tau, delta_redshift=deltazrei)
    elif deltazrei:
        raise CAMBError('must set tau if setting deltazrei')

    return self
def checkFWver():
    FWver = c_double()
    lib.nScope_check_FW_version(byref(FWver))
    return FWver.value
def checkAPIver():
    APIver = c_double()
    lib.nScope_check_API_version(byref(APIver))
    return APIver.value
def test_DynTipDispRot_2v3noded(self):
    """@brief Compare dynamic tip displacement/rotation of the goland wing
    between 2-noded and 3-noded elements."""
    import NonlinearDynamic  # imported after clean/make process

    Settings.OutputFileRoot = 'PyBeamGolandFree2noded'

    # beam options
    XBOPTS = DerivedTypes.Xbopts(FollowerForce=ct.c_bool(False),
                                 MaxIterations=ct.c_int(99),
                                 PrintInfo=ct.c_bool(True),
                                 NumLoadSteps=ct.c_int(25),
                                 Solution=ct.c_int(312),
                                 MinDelta=ct.c_double(1e-4),
                                 NewmarkDamp=ct.c_double(1e-2))

    # beam inputs
    XBINPUT = DerivedTypes.Xbinput(2, 24)
    XBINPUT.BeamLength = 6.096
    XBINPUT.BeamStiffness[0, 0] = 1.0e+09
    XBINPUT.BeamStiffness[1, 1] = 1.0e+09
    XBINPUT.BeamStiffness[2, 2] = 1.0e+09
    XBINPUT.BeamStiffness[3, 3] = 0.9875e+06
    XBINPUT.BeamStiffness[4, 4] = 9.77e+06
    XBINPUT.BeamStiffness[5, 5] = 9.77e+08
    XBINPUT.BeamStiffness[:, :] = 1.0 * XBINPUT.BeamStiffness[:, :]
    XBINPUT.BeamMass[0, 0] = 35.709121
    XBINPUT.BeamMass[1, 1] = 35.709121
    XBINPUT.BeamMass[2, 2] = 35.709121
    XBINPUT.BeamMass[3, 3] = 8.6405832
    XBINPUT.BeamMass[4, 4] = 0.001
    XBINPUT.BeamMass[5, 5] = 0.001

    # pitch-plunge coupling term (b-frame coordinates),
    # using the skew-symmetric operator
    c = 1.8288
    cg = np.array([0.0, -0.1, 0.0]) * c
    cgSkew = np.array([[0.0, -cg[2], cg[1]],
                       [cg[2], 0.0, -cg[0]],
                       [-cg[1], cg[0], 0.0]])
    XBINPUT.BeamMass[:3, 3:] = -XBINPUT.BeamMass[0, 0] * cgSkew
    XBINPUT.BeamMass[3:, :3] = XBINPUT.BeamMass[:3, 3:].T
    # set (0,5) and (5,0) to zero
    XBINPUT.BeamMass[0, 5] = 0.0
    XBINPUT.BeamMass[5, 0] = 0.0

    # dynamic parameters
    XBINPUT.t0 = 0.0
    XBINPUT.tfin = 1.0
    XBINPUT.dt = 0.01
    XBINPUT.Omega = 0.0
    XBINPUT.ForceDyn[-1, 2] = 6e03
    XBINPUT.ForcingType = '1-cos'

    NonlinearDynamic.Solve_Py(XBINPUT, XBOPTS)

    # read from file
    Dyn2noded = np.loadtxt(Settings.OutputDir + Settings.OutputFileRoot +
                           '_SOL' + str(XBOPTS.Solution.value) + '_dyn.dat',
                           skiprows=3)

    # run with 3-noded elements
    Settings.OutputFileRoot = 'PyBeamGolandFree2noded'

    # beam inputs
    XBINPUT = DerivedTypes.Xbinput(3, 12)
    XBINPUT.BeamLength = 6.096
    XBINPUT.BeamStiffness[0, 0] = 1.0e+09
    XBINPUT.BeamStiffness[1, 1] = 1.0e+09
    XBINPUT.BeamStiffness[2, 2] = 1.0e+09
    XBINPUT.BeamStiffness[3, 3] = 0.9875e+06
    XBINPUT.BeamStiffness[4, 4] = 9.77e+06
    XBINPUT.BeamStiffness[5, 5] = 9.77e+08
    XBINPUT.BeamStiffness[:, :] = 1.0 * XBINPUT.BeamStiffness[:, :]
    XBINPUT.BeamMass[0, 0] = 35.709121
    XBINPUT.BeamMass[1, 1] = 35.709121
    XBINPUT.BeamMass[2, 2] = 35.709121
    XBINPUT.BeamMass[3, 3] = 8.6405832
    XBINPUT.BeamMass[4, 4] = 0.001
    XBINPUT.BeamMass[5, 5] = 0.001

    # pitch-plunge coupling term (b-frame coordinates),
    # using the skew-symmetric operator
    c = 1.8288
    cg = np.array([0.0, -0.1, 0.0]) * c
    cgSkew = np.array([[0.0, -cg[2], cg[1]],
                       [cg[2], 0.0, -cg[0]],
                       [-cg[1], cg[0], 0.0]])
    XBINPUT.BeamMass[:3, 3:] = -XBINPUT.BeamMass[0, 0] * cgSkew
    XBINPUT.BeamMass[3:, :3] = XBINPUT.BeamMass[:3, 3:].T
    # set (0,5) and (5,0) to zero
    XBINPUT.BeamMass[0, 5] = 0.0
    XBINPUT.BeamMass[5, 0] = 0.0

    # dynamic parameters
    XBINPUT.t0 = 0.0
    XBINPUT.tfin = 1.0
    XBINPUT.dt = 0.001
    XBINPUT.Omega = 0.0
    XBINPUT.ForceDyn[-1, 2] = 6e03
    XBINPUT.ForcingType = '1-cos'

    NonlinearDynamic.Solve_Py(XBINPUT, XBOPTS)

    # read from file
    Dyn3noded = np.loadtxt(Settings.OutputDir + Settings.OutputFileRoot +
                           '_SOL' + str(XBOPTS.Solution.value) + '_dyn.dat',
                           skiprows=3)

    PlotThings = False
    if PlotThings:
        # plot displacements
        plt.figure(1)
        plt.subplot(311)
        plt.plot(Dyn3noded[:, 0], Dyn3noded[:, 1])
        plt.plot(Dyn2noded[:, 0], Dyn2noded[:, 1])
        plt.grid()
        plt.ylabel('R_1')
        # plt.ylim(4.95, 5.05)
        plt.subplot(312)
        plt.plot(Dyn3noded[:, 0], Dyn3noded[:, 2])
        plt.plot(Dyn2noded[:, 0], Dyn2noded[:, 2])
        plt.grid()
        plt.ylabel('R_2')
        plt.subplot(313)
        plt.plot(Dyn3noded[:, 0], Dyn3noded[:, 3])
        plt.plot(Dyn2noded[:, 0], Dyn2noded[:, 3])
        plt.grid()
        plt.ylabel('R_3')
        plt.xlabel('time')
        plt.legend(("3-noded", "2-noded"), loc='lower left',
                   shadow=True, prop={'size': 12})

        # plot rotations
        plt.figure(2)
        plt.subplot(311)
        plt.plot(Dyn3noded[:, 0], Dyn3noded[:, 4])
        plt.plot(Dyn2noded[:, 0], Dyn2noded[:, 4])
        plt.grid()
        plt.ylabel('Psi_1')
        # plt.ylim(4.95, 5.05)
        plt.subplot(312)
        plt.plot(Dyn3noded[:, 0], Dyn3noded[:, 5])
        plt.plot(Dyn2noded[:, 0], Dyn2noded[:, 5])
        plt.grid()
        plt.ylabel('Psi_2')
        plt.subplot(313)
        plt.plot(Dyn3noded[:, 0], Dyn3noded[:, 6])
        plt.plot(Dyn2noded[:, 0], Dyn2noded[:, 6])
        plt.grid()
        plt.ylabel('Psi_3')
        plt.xlabel('time')
        plt.legend(("3-noded", "2-noded"), loc='lower left',
                   shadow=True, prop={'size': 12})

        plt.show()

    for iTime in range(Dyn3noded.shape[0]):
        # assert z-displacement
        Delz = 0.001 * np.max(Dyn3noded[:, 3])
        self.assertAlmostEqual(Dyn2noded[iTime, 3], Dyn3noded[iTime, 3],
                               places=None,
                               msg='z-displacement does not match',
                               delta=Delz)
class Process(object):
    _tree_buf = c_double()
    _ewcc_buf = c_double()
    _ct_buf = c_double()
    _pt_buf = c_double()
    _ptloop_buf = c_double()
    _loop_buf = (3 * c_double)()
    _ir1_buf = (3 * c_double)()
    _loop2_buf = (5 * c_double)()
    _ir2_buf = (5 * c_double)()
    _acc_buf = c_double()

    def __init__(self, process, amptype=default_amptype, qcd=-1, ew=-1):
        """Register a process with given amptype (default=LOOP).

        Use 'qcd' resp. 'ew' to request a process in a certain order in
        alpha_s resp. alpha.

        Public members:
        * process -- the process name as registered
        * id (int) -- process id
        * amptype (int) -- amptype
        * contains -- LibraryContent() instance
        * n -- number of external particles

        Process instances can be passed to C functions as id.
        """
        typearg = amptype
        needs_tree = False
        needs_pt = False
        if isinstance(amptype, strtype):
            try:
                amptype = AMPTYPES[amptype.lower()]
            except KeyError:
                pass
        if isinstance(amptype, strtype):
            if not set(typearg) - set(loopspec_flags):
                if 's' in typearg and 'l' in typearg:
                    amptype = LOOP2
                elif 'l' in amptype:
                    amptype = LOOP
                elif 't' in typearg:
                    amptype = TREE
                if 't' in typearg:
                    needs_tree = True
                if 'p' in typearg:
                    needs_pt = True
        if amptype not in AMPTYPES.values():
            raise RegisterProcessError(
                'Process(): illegal amptype \'{}\''.format(typearg))
        if qcd >= 0:
            set_parameter('order_qcd', qcd)
        if ew >= 0:
            set_parameter('order_ew', ew)
        self.id = register_process(process, amptype)
        if self.id <= 0:
            raise RegisterProcessError(
                'Failed to load process \'{}\' with amptype {}'.format(
                    process, typearg))
        self.contains = library_content(self.id)
        if needs_tree and not self.contains.tree:
            raise RegisterProcessError(
                ('Dedicated tree matrix elements are not available ' +
                 'in process {}.').format(process))
        if needs_pt and not self.contains.pt:
            raise RegisterProcessError(
                ('Pseudo-tree matrix elements are not available ' +
                 'in process {}.').format(process))
        self.process = process
        self.amptype = amptype
        self.n = n_external(self.id)
        self._as_parameter_ = self.id
        self._pp_buf = ((5 * self.n) * c_double)()
        if amptype == CC:
            self._cc_buf = (((self.n * (self.n - 1)) // 2) * c_double)()
        if amptype == SC:
            self._sc_buf = ((2 * self.n**2) * c_double)()
        if amptype == SCPV:
            self._sc_polvect_buf = ((self.n) * c_double)()

    def psp(self, sqrt_s=default_energy):
        """Generate a random phase space point for the process."""
        phase_space_point_c(self.id, sqrt_s, self._pp_buf)
        return PhaseSpacePoint(self._pp_buf, self.n)

    def evaluate(self, pp_or_sqrt_s=default_energy, amptype=None):
        """Calculate matrix elements either for a given phase space point
        or for a random phase space point of given or default energy.

        'amptype' overrides the amptype of the process -- if you use this,
        it's up to you to figure out if what you do makes sense.
        'ct' calculates the counterterms only."""
        ct_only = False
        r2_only = False
        if amptype == 'ct':
            ct_only = True
            amptype = None
        elif amptype == 'r2':
            r2_only = True
            amptype = None
        if not amptype:
            amptype = self.amptype
        if not start.started:
            start()
            start.started = True
        if isinstance(pp_or_sqrt_s, (int, float)):
            psp = self.psp(pp_or_sqrt_s)
        elif isinstance(pp_or_sqrt_s, PhaseSpacePoint):
            psp = pp_or_sqrt_s
        else:
            psp = PhaseSpacePoint(pp_or_sqrt_s, self.n)
        if amptype in (LOOP, LOOP2):
            if ct_only:
                evaluate_ct_c(self.id, psp, Process._tree_buf, Process._ct_buf)
                me = MatrixElement('ct', psp,
                                   tree=Process._tree_buf.value,
                                   ct=Process._ct_buf.value)
            elif r2_only:
                evaluate_r2_c(self.id, psp, Process._tree_buf, Process._ct_buf)
                me = MatrixElement('r2', psp,
                                   tree=Process._tree_buf.value,
                                   r2=Process._ct_buf.value)
            else:
                evaluate_full_c(self.id, psp, Process._tree_buf,
                                Process._loop_buf, Process._ir1_buf,
                                Process._loop2_buf, Process._ir2_buf,
                                Process._acc_buf)
                me = MatrixElement(amptype, psp,
                                   tree=Process._tree_buf.value,
                                   loop=LoopME(*Process._loop_buf),
                                   iop=IOperator(*Process._ir1_buf),
                                   loop2=Loop2ME(*Process._loop2_buf),
                                   acc=Process._acc_buf.value)
        elif amptype == TREE:
            evaluate_tree_c(self.id, psp, Process._tree_buf)
            me = MatrixElement(amptype, psp, tree=Process._tree_buf.value)
        else:
            raise OpenLoopsError(
                'Process() amptype {} not implemented yet'.format(amptype))
        return me
''' DATA TYPE '''
data_integer = 1234567890      # whole numbers
data_float = 13.7              # numbers with a decimal point
data_string = "*****@*****.**"
data_boolean = True            # boolean: True or False
data_complex = complex(5, 6)   # complex number

# import a data type from C; to use a C data type such as c_double:
from ctypes import c_double
data_c_double = c_double(10.8)

print("Data type introduction")
print("data :", data_integer, "data type", type(data_integer))
print("data :", data_float, "data type", type(data_float))
print("data :", data_string, "data type", type(data_string))
print("data :", data_boolean, "data type", type(data_boolean))
print("data :", data_complex, "data type", type(data_complex))
print("data :", data_c_double, "data type", type(data_c_double))
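# A small follow-on sketch: a ctypes c_double wraps a C double, so the
# Python float comes back out through .value (unlike the plain float above),
# and the wrapped value is mutable in place.
from ctypes import c_double

wrapped = c_double(10.8)
print(wrapped)         # c_double(10.8)
print(wrapped.value)   # 10.8, a plain Python float
wrapped.value = 2.5    # rebind the underlying C double in place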
def mainLoopViaCLib_On(self, nsweep=1000, nthermal=5000, ninterval=-1,
                       algo='Metropolis', On=3, flunc=0.0, h=0.,
                       binGraph=False):
    self.nsweep = nsweep
    self.nthermal = nthermal
    ninterval = self.totOrbs if ninterval <= 0 else ninterval

    # initial spin, single-ion anisotropy and number of linkings
    initSpin = (c_double * self.totOrbs)()
    initD = (c_double * (3 * self.totOrbs))()
    nlinking = (c_int * self.totOrbs)()
    nlinking_list = []
    for iorb, orb in enumerate(self.lattice):
        initSpin[iorb] = c_double(orb.spin)
        initD[iorb * 3] = c_double(orb.D[0])
        initD[iorb * 3 + 1] = c_double(orb.D[1])
        initD[iorb * 3 + 2] = c_double(orb.D[2])
        nlinking[iorb] = c_int(len(orb.linkedOrb))
        nlinking_list.append(len(orb.linkedOrb))

    # link strength, padded so that the nlinking of every orb is the same
    ignoreNonDiagonalJ = 1
    maxNLinking = np.max(nlinking_list)
    linkStrength = (c_double * (self.totOrbs * maxNLinking * 9))()
    cnt = 0
    for iorb, orb in enumerate(self.lattice):
        for ilinking in range(maxNLinking):
            if ilinking >= nlinking_list[iorb]:
                for i in range(9):
                    linkStrength[cnt] = c_double(0.)
                    cnt += 1
            else:
                for i in range(9):
                    linkStrength[cnt] = c_double(orb.linkStrength[ilinking][i])
                    if abs(orb.linkStrength[ilinking][i]) > 1e-6 and i >= 3:
                        ignoreNonDiagonalJ = 0
                    cnt += 1
    ignoreNonDiagonalJ = c_int(ignoreNonDiagonalJ)

    # linking info.
    linkData = (c_int * (self.totOrbs * maxNLinking))()

    # ---------------------------------------------------------------- #
    # linking info. for the renormalized lattice:
    # count total sites in the shrunk lattice
    totOrb_rnorm = 0
    norbInCluster = -1
    for orb in self.lattice:
        if orb.chosen:
            totOrb_rnorm += 1
            norbInCluster = len(orb.orb_cluster)
    rOrb = (c_int * totOrb_rnorm)()  # store ids of renormalized orbs
    # store ids of renormalized orbs in each cluster
    rOrbCluster = (c_int * (totOrb_rnorm * norbInCluster))()
    cnt = 0
    for orb in self.lattice:
        if orb.chosen:
            rOrb[cnt] = orb.id
            cnt += 1
    cnt = 0
    for orb in self.lattice:
        if orb.chosen:
            for orbInCluster in orb.orb_cluster:
                rOrbCluster[cnt] = orbInCluster.id
                cnt += 1
    linkData_rnorm = (c_int * (totOrb_rnorm * maxNLinking))()  # their link info
    cnt = 0
    for orb in self.lattice:
        if orb.chosen:
            for iorb in range(maxNLinking):
                linkData_rnorm[cnt] = (c_int(orb.linkedOrb_rnorm[iorb].id)
                                       if iorb < len(orb.linkedOrb_rnorm)
                                       else c_int(-1))
                cnt += 1
    # ---------------------------------------------------------------- #

    # field
    h = c_double(self.h)

    cnt = 0
    for iorb, orb in enumerate(self.lattice):
        for ilinking in range(maxNLinking):
            if ilinking >= nlinking_list[iorb]:
                linkData[cnt] = -1
                cnt += 1
            else:
                linkData[cnt] = orb.linkedOrb[ilinking].id
                cnt += 1
    maxNLinking_ = c_int(maxNLinking)

    # correlated info.
    nLat = len(self.correlatedOrbitalPair)
    corrOrbitalPair = (c_int * (nLat * 2))()
    for ipair, pair in enumerate(self.correlatedOrbitalPair):
        corrOrbitalPair[ipair * 2] = pair[0]
        corrOrbitalPair[ipair * 2 + 1] = pair[1]

    # orb group
    nOrbGroup = len(self.orbGroup)
    maxOrbGroupSize = 1
    if nOrbGroup > 0:
        maxOrbGroupSize = len(self.orbGroup[0])
    if nOrbGroup > 1:
        maxOrbGroupSize = np.max([len(subGroup) for subGroup in self.orbGroup])
    orbGroupList = (c_int * (nOrbGroup * maxOrbGroupSize))()
    cnt = 0
    for subGroup in self.orbGroup:
        for iorb in range(maxOrbGroupSize):
            if iorb < len(subGroup):
                orbGroupList[cnt] = subGroup[iorb].id
            else:
                orbGroupList[cnt] = -1
            cnt += 1
    nOrbGroup = c_int(nOrbGroup)
    maxOrbGroupSize = c_int(maxOrbGroupSize)

    flunc_ = c_double(flunc)

    if win.libPool[1] is not None:
        dll_path = win.libPool[1] if On == 2 else win.libPool[2]
    else:
        dll_name = "xylib.so" if On == 2 else "heisenberglib.so"
        if getattr(sys, 'frozen', False):
            application_path = os.path.dirname(sys.executable)
        elif __file__:
            application_path = os.path.dirname(__file__)
        dll_path = os.path.join(application_path, dll_name)
    print('loading dynamic library in %s' % dll_path)
    mylib = CDLL(dll_path)

    spinFrame = c_int(int(self.spinFrame))

    updateAlgorithm = 0  # default Metropolis algorithm
    if algo == 'Wolff':
        updateAlgorithm = 1
    updateAlgorithm = c_int(updateAlgorithm)

    cMC = mylib.MCMainFunction
    cMC.restype = py_object
    data = cMC(updateAlgorithm, self.totOrbs, initSpin, initD, nthermal,
               nsweep, maxNLinking_, nlinking, linkStrength, linkData,
               ninterval, nLat, corrOrbitalPair, nOrbGroup, maxOrbGroupSize,
               orbGroupList, flunc_, h, totOrb_rnorm, norbInCluster, rOrb,
               rOrbCluster, linkData_rnorm, spinFrame, ignoreNonDiagonalJ)
    (spin_i_x, spin_i_y, spin_i_z, spin_j_x, spin_j_y, spin_j_z, spin_ij,
     autoCorr, E, E2, U4, spin_i_r_x, spin_i_r_y, spin_i_r_z, spin_j_r_x,
     spin_j_r_y, spin_j_r_z, spin_ij_r, E_r, E2_r, spin_i_tot_z,
     spin_j_tot_z, spin_tot_z, spin_i_h, spin_j_h, spin_tot_h,
     spinDistributionList, spinDotSpinBetweenGroups) = data

    # recover the real energies
    E *= self.T
    E2 *= self.T**2
    E_r *= self.T
    E2_r *= self.T**2
    C = E2 - E * E
    C_r = E2_r - E_r * E_r

    spin_i = np.array([spin_i_x, spin_i_y, spin_i_z])
    spin_j = np.array([spin_j_x, spin_j_y, spin_j_z])
    spin_i_len = np.sqrt(np.dot(spin_i, spin_i))
    spin_j_len = np.sqrt(np.dot(spin_j, spin_j))
    spin_i_r = np.array([spin_i_r_x, spin_i_r_y, spin_i_r_z])
    spin_j_r = np.array([spin_j_r_x, spin_j_r_y, spin_j_r_z])

    # T <i> <j> <ij> <autoCorr> <E> <E2> <U4> <E_r> <E2_r> C C_v
    line = ('%.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.6f '
            '%.3f %.3f %.3f %.3f %.3f %.3f %.3f' %
            (self.T, self.h, spin_i_tot_z, spin_j_tot_z, spin_tot_z,
             spin_i_h, spin_j_h, spin_tot_h, spin_i_len, spin_j_len,
             spin_ij, np.dot(spin_i, spin_j), E, E2, U4, spin_ij_r,
             np.dot(spin_i_r, spin_j_r), E_r, E2_r))
    print(line)
    with open('./out', 'a') as fout:
        fout.write(line + '\n')

    if self.spinFrame == nsweep:
        self.outputSpinWaveSpetra(spinDistributionList)
    if self.spinFrame > 0:
        self.outputSpinDistributionForOn(spinDistributionList)
    if len(self.orbGroup) > 0:
        self.outputSpinGroup(spinDotSpinBetweenGroups)

    return spin_i, spin_j, spin_ij, autoCorr, E, E2, U4
def Plan(self, sx, sy, sphi, ex, ey, ephi, XYbounds):
    return lib.Plan(self.planner, self.obstacles, self.result,
                    c_double(sx), c_double(sy), c_double(sphi),
                    c_double(ex), c_double(ey), c_double(ephi),
                    POINTER(c_double)(XYbounds))
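# Hedged usage sketch (all names are assumptions from the wrapper above):
# the six scalars are the start and end poses (x, y, heading); XYbounds
# carries [xmin, xmax, ymin, ymax].  The exact buffer type XYbounds must
# have depends on lib.Plan's C signature, so it is shown here only as a
# commented illustration.
# bounds = (c_double * 4)(-20.0, 20.0, -10.0, 10.0)
# ok = planner.Plan(0.0, 0.0, 0.0, 15.0, 5.0, 1.57, bounds)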
def train(arg1, arg2=None, arg3=None):
    """
    train(y, x [, options]) -> model | ACC

    y: a list/tuple/ndarray of l true labels (type must be int/double).

    x: 1. a list/tuple of l training instances. Feature vector of
          each training instance is a list/tuple or dictionary.
       2. an l * n numpy ndarray or scipy spmatrix (n: number of features).

    train(prob [, options]) -> model | ACC
    train(prob, param) -> model | ACC

    Train a model from data (y, x) or a problem prob using
    'options' or a parameter param.

    If '-v' is specified in 'options' (i.e., cross validation)
    either accuracy (ACC) or mean-squared error (MSE) is returned.

    options:
        -s type : set type of solver (default 1)
          for multi-class classification
            0 -- L2-regularized logistic regression (primal)
            1 -- L2-regularized L2-loss support vector classification (dual)
            2 -- L2-regularized L2-loss support vector classification (primal)
            3 -- L2-regularized L1-loss support vector classification (dual)
            4 -- support vector classification by Crammer and Singer
            5 -- L1-regularized L2-loss support vector classification
            6 -- L1-regularized logistic regression
            7 -- L2-regularized logistic regression (dual)
          for regression
            11 -- L2-regularized L2-loss support vector regression (primal)
            12 -- L2-regularized L2-loss support vector regression (dual)
            13 -- L2-regularized L1-loss support vector regression (dual)
        -c cost : set the parameter C (default 1)
        -p epsilon : set the epsilon in loss function of SVR (default 0.1)
        -e epsilon : set tolerance of termination criterion
            -s 0 and 2
                |f'(w)|_2 <= eps*min(pos,neg)/l*|f'(w0)|_2,
                where f is the primal function (default 0.01)
            -s 11
                |f'(w)|_2 <= eps*|f'(w0)|_2 (default 0.001)
            -s 1, 3, 4, and 7
                Dual maximal violation <= eps; similar to libsvm (default 0.1)
            -s 5 and 6
                |f'(w)|_inf <= eps*min(pos,neg)/l*|f'(w0)|_inf,
                where f is the primal function (default 0.01)
            -s 12 and 13
                |f'(alpha)|_1 <= eps |f'(alpha0)|,
                where f is the dual function (default 0.1)
        -B bias : if bias >= 0, instance x becomes [x; bias];
                  if < 0, no bias term added (default -1)
        -wi weight: weights adjust the parameter C of different classes
                    (see README for details)
        -v n: n-fold cross validation mode
        -q : quiet mode (no outputs)
    """
    prob, param = None, None
    if isinstance(arg1, (list, tuple)) or (scipy and isinstance(arg1, scipy.ndarray)):
        assert isinstance(arg2, (list, tuple)) or \
            (scipy and isinstance(arg2, (scipy.ndarray, sparse.spmatrix)))
        y, x, options = arg1, arg2, arg3
        prob = problem(y, x)
        param = parameter(options)
    elif isinstance(arg1, problem):
        prob = arg1
        if isinstance(arg2, parameter):
            param = arg2
        else:
            param = parameter(arg2)
    if prob is None or param is None:
        raise TypeError("Wrong types for the arguments")

    prob.set_bias(param.bias)
    liblinear.set_print_string_function(param.print_func)
    err_msg = liblinear.check_parameter(prob, param)
    if err_msg:
        raise ValueError('Error: %s' % err_msg)

    if param.flag_find_C:
        nr_fold = param.nr_fold
        best_C = c_double()
        best_rate = c_double()
        max_C = 1024
        if param.flag_C_specified:
            start_C = param.C
        else:
            start_C = -1.0
        liblinear.find_parameter_C(prob, param, nr_fold, start_C, max_C,
                                   best_C, best_rate)
        print("Best C = %lf  CV accuracy = %g%%\n" %
              (best_C.value, 100.0 * best_rate.value))
        return best_C.value, best_rate.value
    elif param.flag_cross_validation:
        l, nr_fold = prob.l, param.nr_fold
        target = (c_double * l)()
        liblinear.cross_validation(prob, param, nr_fold, target)
        ACC, MSE, SCC = evaluations(prob.y[:l], target[:l])
        if param.solver_type in [L2R_L2LOSS_SVR, L2R_L2LOSS_SVR_DUAL,
                                 L2R_L1LOSS_SVR_DUAL]:
            print("Cross Validation Mean squared error = %g" % MSE)
            print("Cross Validation Squared correlation coefficient = %g" % SCC)
            return MSE
        else:
            print("Cross Validation Accuracy = %g%%" % ACC)
            return ACC
    else:
        m = liblinear.train(prob, param)
        m = toPyModel(m)
        return m
def mainLoopViaCLib(self, nsweep=1000, nthermal=500, ninterval=-1, algo='Wolff'):
    self.nsweep = nsweep
    self.nthermal = nthermal
    ninterval = self.totOrbs if ninterval <= 0 else ninterval

    # initial spin
    initSpin = (c_double * self.totOrbs)()
    nlinking = (c_int * self.totOrbs)()
    nlinking_list = []
    for iorb, orb in enumerate(self.lattice):
        initSpin[iorb] = c_double(orb.spin)
        nlinking[iorb] = c_int(len(orb.linkedOrb))
        nlinking_list.append(len(orb.linkedOrb))

    # link strength, padded so that the nlinking of every orb is the same
    maxNLinking = np.max(nlinking_list)
    linkStrength = (c_double * (self.totOrbs * maxNLinking))()
    cnt = 0
    for iorb, orb in enumerate(self.lattice):
        for ilinking in range(maxNLinking):
            if ilinking >= nlinking_list[iorb]:
                linkStrength[cnt] = c_double(0.)
                cnt += 1
            else:
                linkStrength[cnt] = c_double(orb.linkStrength[ilinking])
                cnt += 1

    # linking info.
    linkData = (c_int * (self.totOrbs * maxNLinking))()
    cnt = 0
    for orb in self.lattice:
        for iorb in range(maxNLinking):
            linkData[cnt] = orb.linkedOrb[iorb].id if iorb < len(orb.linkedOrb) else -1
            cnt += 1

    # ---------------------------------------------------------------- #
    # linking info. for the renormalized lattice:
    # count total sites in the shrunk lattice
    totOrb_rnorm = 0
    norbInCluster = -1
    for orb in self.lattice:
        if orb.chosen:
            totOrb_rnorm += 1
            norbInCluster = len(orb.orb_cluster)
    rOrb = (c_int * totOrb_rnorm)()  # store ids of renormalized orb cores
    # store ids of renormalized orbs in each cluster
    rOrbCluster = (c_int * (totOrb_rnorm * norbInCluster))()
    cnt = 0
    for orb in self.lattice:
        if orb.chosen:
            rOrb[cnt] = orb.id
            cnt += 1
    cnt = 0
    for orb in self.lattice:
        if orb.chosen:
            for orbInCluster in orb.orb_cluster:
                rOrbCluster[cnt] = orbInCluster.id
                cnt += 1
    linkData_rnorm = (c_int * (totOrb_rnorm * maxNLinking))()  # their link info
    cnt = 0
    for orb in self.lattice:
        if orb.chosen:
            for iorb in range(maxNLinking):
                linkData_rnorm[cnt] = (c_int(orb.linkedOrb_rnorm[iorb].id)
                                       if iorb < len(orb.linkedOrb_rnorm)
                                       else c_int(-1))
                cnt += 1
    # ---------------------------------------------------------------- #

    maxNLinking = c_int(maxNLinking)

    # field info.
    h = c_double(self.h)

    # correlated info.
    nLat = len(self.correlatedOrbitalPair)
    corrOrbitalPair = (c_int * (nLat * 2))()
    for ipair, pair in enumerate(self.correlatedOrbitalPair):
        corrOrbitalPair[ipair * 2] = pair[0]
        corrOrbitalPair[ipair * 2 + 1] = pair[1]

    # renormalization calc. switch
    renormOn = c_int(0) if self.dipoleCorrection else c_int(1)

    # num. of returned spin frames
    spinFrame = c_int(int(self.spinFrame))

    if win.libPool[0] is not None:
        dll_path = win.libPool[0]
    else:
        dll_name = "isinglib.so"
        if getattr(sys, 'frozen', False):
            application_path = os.path.dirname(sys.executable)
        elif __file__:
            application_path = os.path.dirname(__file__)
        dll_path = os.path.join(application_path, dll_name)
    print('loading dynamic library in %s' % dll_path)
    mylib = CDLL(dll_path)

    cMC = mylib.localUpdateMC  # default Ising solver
    if algo == 'Wolff':  # other choices
        if abs(self.h) > 1e-5:
            print("WARNING: an external field is not supported by the Wolff "
                  "algorithm (not efficient); the job continues but the field "
                  "is skipped. Please switch to the Metropolis algorithm to "
                  "include field effects.")
        cMC = mylib.blockUpdateMC

    # decode the returned data
    cMC.restype = py_object
    data = cMC(self.totOrbs, initSpin, nthermal, nsweep, maxNLinking,
               nlinking, linkStrength, linkData, ninterval, nLat,
               corrOrbitalPair, h, renormOn, totOrb_rnorm, norbInCluster,
               rOrb, rOrbCluster, linkData_rnorm, spinFrame)
    (spin_i, spin_j, spin_ij, autoCorr, E, E2, E_rnorm, E2_rnorm, U4,
     spin_tot, spinDistributionList) = data

    # recover the real energies
    E *= self.T
    E2 *= self.T**2
    E_rnorm *= self.T
    E2_rnorm *= self.T**2

    print("%.3f %.3f %.3f %.3f %.3f %.3f %.6f %.3f %.3f %.3f %.3f %.6f %.6f %.6f" %
          (self.T, self.h, spin_i, spin_j, spin_tot, spin_ij, autoCorr,
           E, E2, E_rnorm, E2_rnorm, E2 - E**2, E2_rnorm - E_rnorm**2, U4))
    with open('./out', 'a') as fout:
        fout.write("%.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.3f %.6f\n" %
                   (self.T, self.h, spin_i, spin_j, spin_tot, spin_ij,
                    E, E2, E_rnorm, E2_rnorm, U4))

    if self.spinFrame > 0:
        self.outputSpinDistributionForIsing(spinDistributionList)

    return spin_i, spin_j, spin_ij, autoCorr, E, E2, U4
def Set_FPar_Double(self, Index, Value):
    """Set_FPar_Double sets a global double variable to a specified value."""
    _val = ctypes.c_double(Value)
    self.dll.e_Set_ADBFPar_Double(Index, _val, self.DeviceNo, self.__errPointer)
    self.__checkError('Set_FPar_Double')
def write(self, byte_array: bytes, lsb_tx_first=False, lsb_rx_first=False,
          err_clks=0) -> bytes:
    '''Write a byte sequence and return the read-back value.'''
    sts = c_byte()
    dwf = self.dwf
    hdwf = self.hdwf
    byte_count = len(byte_array)
    # Maximum effective byte length is 63:
    # 1024 samples / 2 = 512 bits = 64 bytes;
    # 2 samples are consumed => 63 bytes (12 words)
    assert byte_count <= 63
    bit_count = byte_count * 8 + err_clks
    sample_count = bit_count * 2 + 2  # sample at clock freq to cater for cpha
    assert sample_count < self.max_output_samples, \
        "Attempted write would exceed the output buffer length"

    # serialization time length
    logger.debug("Bit count: {}".format(bit_count))
    # Add just slightly less than half a bit so we don't get an extra
    # active clock edge but still allow enough time before CS goes high
    runset = (bit_count + 0.49) * self.bit_period
    logger.debug("Runset: {}".format(runset))
    dwf.FDwfDigitalOutRunSet(hdwf, c_double(runset))

    # set number of samples to acquire
    dwf.FDwfDigitalInBufferSizeSet(hdwf, sample_count)
    # number of samples after trigger
    dwf.FDwfDigitalInTriggerPositionSet(hdwf, sample_count)

    if lsb_tx_first:
        data = (c_byte * byte_count)(*byte_array)
    else:
        new_bytes = []
        for i, b in enumerate(byte_array):
            new_byte = 0
            for bit_pos in range(8):
                # reorder bits, MSB first
                new_byte |= ((b >> bit_pos) & 1) << (7 - bit_pos)
            new_bytes.append(new_byte)
        data = (c_byte * byte_count)(*new_bytes)

    dwf.FDwfDigitalOutDataSet(hdwf, self.pin_cfg.MOSI, byref(data), bit_count)

    # begin acquisition: reconfigure, start acquisition, then start output
    dwf.FDwfDigitalInConfigure(hdwf, 0, 1)
    dwf.FDwfDigitalOutConfigure(hdwf, 1)
    while True:
        dwf.FDwfDigitalInStatus(hdwf, 1, byref(sts))
        if sts.value == stsDone.value:
            break
        time.sleep(0.001)

    # get samples (two bytes per sample)
    rgwSamples = (c_uint16 * sample_count)()
    dwf.FDwfDigitalInStatusData(hdwf, byref(rgwSamples), 2 * sample_count)

    byte_array = bytearray()
    b = 0
    logger.debug("Number of samples collected: {}".format(len(rgwSamples)))
    for i, sample in enumerate(rgwSamples):
        rx_bit = (sample >> self.pin_cfg.MISO) & 1
        logger.debug("Sample {:2}: {:2}, mosi: {}, miso: {}".format(
            i, sample, (sample >> self.pin_cfg.MOSI) & 1, rx_bit))

    # take every other sample (one per clock edge) and repack into bytes
    Slice = rgwSamples[:bit_count * 2:2]
    for i, sample in enumerate(Slice):
        i_mod_8 = i % 8
        if i_mod_8 == 0 and i != 0:
            # a new byte is ready
            byte_array.append(b)
            b = 0
        rx_bit = (sample >> self.pin_cfg.MISO) & 1
        if lsb_rx_first:
            b |= rx_bit << i_mod_8
        else:
            b <<= 1
            b |= rx_bit
    byte_array.append(b)

    return bytes(byte_array)
syncDivider = 4      # you can change this, observe mode! READ MANUAL!
CFDZeroCross0 = 10   # you can change this (in mV)
CFDLevel0 = 50       # you can change this (in mV)
CFDZeroCross1 = 10   # you can change this (in mV)
CFDLevel1 = 150      # you can change this (in mV)

# Variables to store information read from DLLs
buffer = (ctypes.c_uint * TTREADMAX)()
dev = []
libVersion = ctypes.create_string_buffer(b"", 8)
hwSerial = ctypes.create_string_buffer(b"", 8)
hwPartno = ctypes.create_string_buffer(b"", 8)
hwVersion = ctypes.create_string_buffer(b"", 8)
hwModel = ctypes.create_string_buffer(b"", 16)
errorString = ctypes.create_string_buffer(b"", 40)
resolution = ctypes.c_double()
countRate0 = ctypes.c_int()
countRate1 = ctypes.c_int()
flags = ctypes.c_int()
nactual = ctypes.c_int()
ctcDone = ctypes.c_int()
warnings = ctypes.c_int()
warningstext = ctypes.create_string_buffer(b"", 16384)

phlib = ctypes.CDLL("phlib64.dll")


def closeDevices():
    for i in range(0, MAXDEVNUM):
        phlib.PH_CloseDevice(ctypes.c_int(i))
    exit(0)
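# Sketch of the device-enumeration loop that typically follows this setup,
# modeled on PicoQuant's own PHLib demo scripts; the PH_OpenDevice and
# PH_GetErrorString signatures are assumed from those demos rather than
# shown in this file.
for i in range(0, MAXDEVNUM):
    retcode = phlib.PH_OpenDevice(ctypes.c_int(i), hwSerial)
    if retcode == 0:  # device found and opened
        print("  %1d        S/N %s" % (i, hwSerial.value.decode("utf-8")))
        dev.append(i)
    else:
        phlib.PH_GetErrorString(errorString, ctypes.c_int(retcode))
        print("  %1d        %s" % (i, errorString.value.decode("utf-8")))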
def geos_linearring_from_py(ob, update_geom=None, update_ndim=0):
    try:
        # From array protocol
        array = ob.__array_interface__
        assert len(array['shape']) == 2
        m = array['shape'][0]
        n = array['shape'][1]
        if m < 3:
            raise ValueError(
                "A LinearRing must have at least 3 coordinate tuples")
        assert n == 2 or n == 3

        # Make pointer to the coordinate array
        try:
            cp = cast(array['data'][0], POINTER(c_double))
        except ArgumentError:
            cp = array['data']

        # Add closing coordinates to sequence?
        if cp[0] != cp[m * n - n] or cp[1] != cp[m * n - n + 1]:
            M = m + 1
        else:
            M = m

        # Create a coordinate sequence
        if update_geom is not None:
            cs = lgeos.GEOSGeom_getCoordSeq(update_geom)
            if n != update_ndim:
                raise ValueError(
                    "Wrong coordinate dimensions; this geometry has "
                    "dimensions: %d" % update_ndim)
        else:
            cs = lgeos.GEOSCoordSeq_create(M, n)

        # add to coordinate sequence
        for i in xrange(m):
            dx = c_double(cp[n * i])
            dy = c_double(cp[n * i + 1])
            dz = None
            if n == 3:
                dz = c_double(cp[n * i + 2])

            # Because of a bug in the GEOS C API,
            # always set X before Y
            lgeos.GEOSCoordSeq_setX(cs, i, dx)
            lgeos.GEOSCoordSeq_setY(cs, i, dy)
            if n == 3:
                lgeos.GEOSCoordSeq_setZ(cs, i, dz)

        # Add closing coordinates to sequence?
        if M > m:
            dx = c_double(cp[0])
            dy = c_double(cp[1])
            dz = None
            if n == 3:
                dz = c_double(cp[2])

            # Because of a bug in the GEOS C API,
            # always set X before Y
            lgeos.GEOSCoordSeq_setX(cs, M - 1, dx)
            lgeos.GEOSCoordSeq_setY(cs, M - 1, dy)
            if n == 3:
                lgeos.GEOSCoordSeq_setZ(cs, M - 1, dz)

    except AttributeError:
        # Fall back on list
        m = len(ob)
        n = len(ob[0])
        if m < 3:
            raise ValueError(
                "A LinearRing must have at least 3 coordinate tuples")
        assert (n == 2 or n == 3)

        # Add closing coordinates if not provided
        if m == 3 or ob[0][0] != ob[-1][0] or ob[0][1] != ob[-1][1]:
            M = m + 1
        else:
            M = m

        # Create a coordinate sequence
        if update_geom is not None:
            cs = lgeos.GEOSGeom_getCoordSeq(update_geom)
            if n != update_ndim:
                raise ValueError(
                    "Wrong coordinate dimensions; this geometry has "
                    "dimensions: %d" % update_ndim)
        else:
            cs = lgeos.GEOSCoordSeq_create(M, n)

        # add to coordinate sequence
        for i in xrange(m):
            coords = ob[i]
            dx = c_double(coords[0])
            dy = c_double(coords[1])
            dz = None
            if n == 3:
                dz = c_double(coords[2])

            # Because of a bug in the GEOS C API,
            # always set X before Y
            lgeos.GEOSCoordSeq_setX(cs, i, dx)
            lgeos.GEOSCoordSeq_setY(cs, i, dy)
            if n == 3:
                lgeos.GEOSCoordSeq_setZ(cs, i, dz)

        # Add closing coordinates to sequence?
        if M > m:
            coords = ob[0]
            dx = c_double(coords[0])
            dy = c_double(coords[1])
            dz = None
            if n == 3:
                dz = c_double(coords[2])

            # Because of a bug in the GEOS C API,
            # always set X before Y
            lgeos.GEOSCoordSeq_setX(cs, M - 1, dx)
            lgeos.GEOSCoordSeq_setY(cs, M - 1, dy)
            if n == 3:
                lgeos.GEOSCoordSeq_setZ(cs, M - 1, dz)

    if update_geom is not None:
        return None
    else:
        return lgeos.GEOSGeom_createLinearRing(cs), n
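# Sketch of the array-protocol branch above: __array_interface__ exposes the
# buffer address of a NumPy array, and cast() turns that address into a raw
# double* (standard NumPy/ctypes behavior; runnable without GEOS).
import numpy as np
from ctypes import POINTER, c_double, cast

coords = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0]])
array = coords.__array_interface__
cp = cast(array['data'][0], POINTER(c_double))
print(array['shape'], cp[0], cp[2])  # (3, 2) 0.0 1.0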
def read_segment(self, import_neuroshare_segment=True, lazy=False):
    """
    Arguments:
        import_neuroshare_segment: import neuroshare segment as SpikeTrain
            with associated waveforms or not imported at all.
    """
    assert not lazy, 'Do not support lazy'

    seg = Segment(file_origin=os.path.basename(self.filename), )

    if sys.platform.startswith('win'):
        neuroshare = ctypes.windll.LoadLibrary(self.dllname)
    elif sys.platform.startswith('linux'):
        neuroshare = ctypes.cdll.LoadLibrary(self.dllname)
    neuroshare = DllWithError(neuroshare)

    # elif sys.platform.startswith('darwin'):

    # API version
    info = ns_LIBRARYINFO()
    neuroshare.ns_GetLibraryInfo(ctypes.byref(info), ctypes.sizeof(info))
    seg.annotate(neuroshare_version=str(info.dwAPIVersionMaj) + '.'
                 + str(info.dwAPIVersionMin))

    # open file (c_char_p expects bytes)
    hFile = ctypes.c_uint32(0)
    neuroshare.ns_OpenFile(ctypes.c_char_p(self.filename.encode()),
                           ctypes.byref(hFile))
    fileinfo = ns_FILEINFO()
    neuroshare.ns_GetFileInfo(hFile, ctypes.byref(fileinfo),
                              ctypes.sizeof(fileinfo))

    # read all entities
    for dwEntityID in range(fileinfo.dwEntityCount):
        entityInfo = ns_ENTITYINFO()
        neuroshare.ns_GetEntityInfo(hFile, dwEntityID,
                                    ctypes.byref(entityInfo),
                                    ctypes.sizeof(entityInfo))

        # EVENT
        if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_EVENT':
            pEventInfo = ns_EVENTINFO()
            neuroshare.ns_GetEventInfo(hFile, dwEntityID,
                                       ctypes.byref(pEventInfo),
                                       ctypes.sizeof(pEventInfo))

            if pEventInfo.dwEventType == 0:  # TEXT
                pData = ctypes.create_string_buffer(
                    pEventInfo.dwMaxDataLength)
            elif pEventInfo.dwEventType == 1:  # CSV
                pData = ctypes.create_string_buffer(
                    pEventInfo.dwMaxDataLength)
            elif pEventInfo.dwEventType == 2:  # 8bit
                pData = ctypes.c_byte(0)
            elif pEventInfo.dwEventType == 3:  # 16bit
                pData = ctypes.c_int16(0)
            elif pEventInfo.dwEventType == 4:  # 32bit
                pData = ctypes.c_int32(0)
            pdTimeStamp = ctypes.c_double(0.)
            pdwDataRetSize = ctypes.c_uint32(0)

            ea = Event(name=str(entityInfo.szEntityLabel), )

            times = []
            labels = []
            for dwIndex in range(entityInfo.dwItemCount):
                neuroshare.ns_GetEventData(
                    hFile, dwEntityID, dwIndex,
                    ctypes.byref(pdTimeStamp), ctypes.byref(pData),
                    ctypes.sizeof(pData), ctypes.byref(pdwDataRetSize))
                times.append(pdTimeStamp.value)
                labels.append(str(pData.value))
            ea.times = times * pq.s
            ea.labels = np.array(labels, dtype='U')

            seg.events.append(ea)

        # analog
        if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_ANALOG':
            pAnalogInfo = ns_ANALOGINFO()
            neuroshare.ns_GetAnalogInfo(hFile, dwEntityID,
                                        ctypes.byref(pAnalogInfo),
                                        ctypes.sizeof(pAnalogInfo))
            dwIndexCount = entityInfo.dwItemCount

            pdwContCount = ctypes.c_uint32(0)
            pData = np.zeros((entityInfo.dwItemCount, ), dtype='float64')
            total_read = 0
            while total_read < entityInfo.dwItemCount:
                dwStartIndex = ctypes.c_uint32(total_read)
                dwStopIndex = ctypes.c_uint32(
                    entityInfo.dwItemCount - total_read)

                neuroshare.ns_GetAnalogData(
                    hFile, dwEntityID, dwStartIndex, dwStopIndex,
                    ctypes.byref(pdwContCount),
                    pData[total_read:].ctypes.data_as(
                        ctypes.POINTER(ctypes.c_double)))
                total_read += pdwContCount.value

            signal = pq.Quantity(pData, units=pAnalogInfo.szUnits,
                                 copy=False)

            # t_start
            dwIndex = 0
            pdTime = ctypes.c_double(0)
            neuroshare.ns_GetTimeByIndex(hFile, dwEntityID, dwIndex,
                                         ctypes.byref(pdTime))

            anaSig = AnalogSignal(
                signal,
                sampling_rate=pAnalogInfo.dSampleRate * pq.Hz,
                t_start=pdTime.value * pq.s,
                name=str(entityInfo.szEntityLabel), )
            anaSig.annotate(probe_info=str(pAnalogInfo.szProbeInfo))
            seg.analogsignals.append(anaSig)

        # segment
        if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_SEGMENT' \
                and import_neuroshare_segment:

            pdwSegmentInfo = ns_SEGMENTINFO()
            if not str(entityInfo.szEntityLabel).startswith('spks'):
                continue

            neuroshare.ns_GetSegmentInfo(hFile, dwEntityID,
                                         ctypes.byref(pdwSegmentInfo),
                                         ctypes.sizeof(pdwSegmentInfo))
            nsource = pdwSegmentInfo.dwSourceCount

            # create_string_buffer expects bytes or a size, not str
            pszMsgBuffer = ctypes.create_string_buffer(256)
            neuroshare.ns_GetLastErrorMsg(ctypes.byref(pszMsgBuffer), 256)

            for dwSourceID in range(pdwSegmentInfo.dwSourceCount):
                pSourceInfo = ns_SEGSOURCEINFO()
                neuroshare.ns_GetSegmentSourceInfo(
                    hFile, dwEntityID, dwSourceID,
                    ctypes.byref(pSourceInfo), ctypes.sizeof(pSourceInfo))

            pdTimeStamp = ctypes.c_double(0.)
            dwDataBufferSize = pdwSegmentInfo.dwMaxSampleCount * \
                pdwSegmentInfo.dwSourceCount
            pData = np.zeros((dwDataBufferSize), dtype='float64')
            pdwSampleCount = ctypes.c_uint32(0)
            pdwUnitID = ctypes.c_uint32(0)

            nsample = int(dwDataBufferSize)
            times = np.empty((entityInfo.dwItemCount), dtype='f')
            waveforms = np.empty(
                (entityInfo.dwItemCount, nsource, nsample), dtype='f')
            for dwIndex in range(entityInfo.dwItemCount):
                neuroshare.ns_GetSegmentData(
                    hFile, dwEntityID, dwIndex,
                    ctypes.byref(pdTimeStamp),
                    pData.ctypes.data_as(ctypes.POINTER(ctypes.c_double)),
                    dwDataBufferSize * 8,
                    ctypes.byref(pdwSampleCount),
                    ctypes.byref(pdwUnitID))

                times[dwIndex] = pdTimeStamp.value
                waveforms[dwIndex, :, :] = \
                    pData[:nsample * nsource].reshape(
                        nsample, nsource).transpose()

            sptr = SpikeTrain(
                times=pq.Quantity(times, units='s', copy=False),
                t_stop=times.max(),
                waveforms=pq.Quantity(waveforms,
                                      units=str(pdwSegmentInfo.szUnits),
                                      copy=False),
                left_sweep=nsample / 2. /
                float(pdwSegmentInfo.dSampleRate) * pq.s,
                sampling_rate=float(pdwSegmentInfo.dSampleRate) * pq.Hz,
                name=str(entityInfo.szEntityLabel), )
            seg.spiketrains.append(sptr)

        # neuralevent
        if entity_types[entityInfo.dwEntityType] == 'ns_ENTITY_NEURALEVENT':
            pNeuralInfo = ns_NEURALINFO()
            neuroshare.ns_GetNeuralInfo(hFile, dwEntityID,
                                        ctypes.byref(pNeuralInfo),
                                        ctypes.sizeof(pNeuralInfo))

            pData = np.zeros((entityInfo.dwItemCount, ), dtype='float64')
            dwStartIndex = 0
            dwIndexCount = entityInfo.dwItemCount
            neuroshare.ns_GetNeuralData(
                hFile, dwEntityID, dwStartIndex, dwIndexCount,
                pData.ctypes.data_as(ctypes.POINTER(ctypes.c_double)))
            times = pData * pq.s
            t_stop = times.max()

            sptr = SpikeTrain(times, t_stop=t_stop,
                              name=str(entityInfo.szEntityLabel), )
            seg.spiketrains.append(sptr)

    # close
    neuroshare.ns_CloseFile(hFile)

    seg.create_many_to_one_relationship()
    return seg
def _prepare_args(self, ty, val, stream, retr, kernelargs):
    """
    Convert arguments to ctypes and append to kernelargs
    """

    # map the arguments using any extension you've registered
    for extension in reversed(self.extensions):
        ty, val = extension.prepare_args(
            ty, val, stream=stream, retr=retr)

    if isinstance(ty, types.Array):
        devary = wrap_arg(val).to_device(retr, stream)

        c_intp = ctypes.c_ssize_t

        meminfo = ctypes.c_void_p(0)
        parent = ctypes.c_void_p(0)
        nitems = c_intp(devary.size)
        itemsize = c_intp(devary.dtype.itemsize)

        ptr = driver.device_pointer(devary)

        if driver.USE_NV_BINDING:
            ptr = int(ptr)

        data = ctypes.c_void_p(ptr)

        kernelargs.append(meminfo)
        kernelargs.append(parent)
        kernelargs.append(nitems)
        kernelargs.append(itemsize)
        kernelargs.append(data)
        for ax in range(devary.ndim):
            kernelargs.append(c_intp(devary.shape[ax]))
        for ax in range(devary.ndim):
            kernelargs.append(c_intp(devary.strides[ax]))

    elif isinstance(ty, types.Integer):
        cval = getattr(ctypes, "c_%s" % ty)(val)
        kernelargs.append(cval)

    elif ty == types.float64:
        cval = ctypes.c_double(val)
        kernelargs.append(cval)

    elif ty == types.float32:
        cval = ctypes.c_float(val)
        kernelargs.append(cval)

    elif ty == types.boolean:
        cval = ctypes.c_uint8(int(val))
        kernelargs.append(cval)

    elif ty == types.complex64:
        kernelargs.append(ctypes.c_float(val.real))
        kernelargs.append(ctypes.c_float(val.imag))

    elif ty == types.complex128:
        kernelargs.append(ctypes.c_double(val.real))
        kernelargs.append(ctypes.c_double(val.imag))

    elif isinstance(ty, (types.NPDatetime, types.NPTimedelta)):
        kernelargs.append(ctypes.c_int64(val.view(np.int64)))

    elif isinstance(ty, types.Record):
        devrec = wrap_arg(val).to_device(retr, stream)
        ptr = devrec.device_ctypes_pointer
        if driver.USE_NV_BINDING:
            ptr = ctypes.c_void_p(int(ptr))
        kernelargs.append(ptr)

    elif isinstance(ty, types.BaseTuple):
        assert len(ty) == len(val)
        for t, v in zip(ty, val):
            self._prepare_args(t, v, stream, retr, kernelargs)

    else:
        raise NotImplementedError(ty, val)
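# Sketch of the flattened argument layout the Array branch above produces,
# mimicked with plain ctypes and a host-side NumPy array (illustrative only;
# no GPU or numba required to run this).
import ctypes
import numpy as np

arr = np.zeros((4, 3), dtype=np.float64)
c_intp = ctypes.c_ssize_t
kernelargs = [
    ctypes.c_void_p(0),                # meminfo
    ctypes.c_void_p(0),                # parent
    c_intp(arr.size),                  # nitems
    c_intp(arr.dtype.itemsize),        # itemsize
    ctypes.c_void_p(arr.ctypes.data),  # data pointer
]
kernelargs += [c_intp(s) for s in arr.shape]    # shape, one arg per axis
kernelargs += [c_intp(s) for s in arr.strides]  # strides, one arg per axis
print(len(kernelargs))  # 5 + 2*ndim = 9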
def __call__(self, ar):
    # Interpolate the input variable to the new grid using the Gridspec
    # remap file generated when this GS_Remapper object was initialized.
    # >>> for now, ar is required to be a variable (MV) <<<
    # Here, convert ar into a file for gs_fregrid.
    # There's no special Gridspec way to write a variable; you write to any
    # *.nc file. The tough part is making sure you have there exactly what's
    # needed for the gs_fregrid call, especially because the grids are
    # supposedly supergrids and you just have some keywords to use to figure
    # out what goes where. And note that we'll need to check whether ar
    # really lives on self.ingrid. Supposedly a future API will work
    # better in this respect.

    # Write the variable to a temporary file, as required by gs_fregrid.
    # The remap path should be suitable.
    # >>> this should be done more carefully, e.g. deal with failures;
    # >>> more worth doing when we have mosaic variables.
    timestr = str(int(time.time()))
    varfile = cdms2.open(self.inpath + "/" + "invvar" + timestr + ".nc",
                         'w')
    varfile.write(ar)
    varfile.close()

    history = "GS_Regridder"
    mosaic_in = self.inpath + "/" + self.infile
    mosaic_out = self.outpath + "/" + self.outfile
    dir_in = 256 * "\x00"   # path is already encoded in input_file
    dir_out = 256 * "\x00"  # path is already encoded in output_file
    input_file = varfile.id + 256 * "\x00"
    nfiles = 1
    # keep the unpadded path and variable name; they are needed to read
    # the result back after the NUL-padded copies go to gs_fregrid
    outvar_path = self.outpath + '/' + "outvar" + timestr + ".nc"
    output_file = outvar_path + 256 * "\x00"
    nfiles_out = 1
    varname = ar.id
    scalar_name = varname + 256 * "\x00"
    nscalar = 1
    u_name = 256 * "\x00"
    v_name = 256 * "\x00"
    nvector = 0
    nvector2 = 0

    interp_method = "conserve_order2"
    test_case = None
    test_param = c_double(1.0)
    opcode = c_uint(0)
    AGRID = 64
    grid_type = AGRID
    finer_step = c_uint(0)
    fill_missing = 0
    nlon = 0
    nlat = 0
    check_conserve = 0
    y_at_center = 0
    lonbegin = c_double(0.)
    lonend = c_double(360.)
    latbegin = c_double(-90.)
    latend = c_double(90.)
    lbegin = 0
    lend = -1
    kbegin = 0
    kend = -1

    print "__call__ remapfile=", self.remapfile
    libcf.gs_fregrid(history, mosaic_in, mosaic_out, dir_in, dir_out,
                     input_file, nfiles, output_file, nfiles_out,
                     self.remapfile, scalar_name, nscalar, u_name, nvector,
                     v_name, nvector2, interp_method, test_case, test_param,
                     opcode, grid_type, finer_step, fill_missing, nlon,
                     nlat, check_conserve, y_at_center, lonbegin, lonend,
                     latbegin, latend, kbegin, kend, lbegin, lend)

    # Read the output file into a variable, and return the variable
    f = cdms2.open(outvar_path)
    vout = f(varname)
    f.close()
    return vout
def getCplexStyleArrays(
    self, lp, senseDict=None, LpVarCategories=None, LpObjSenses=None,
    infBound=1e20
):
    """returns the arrays suitable to pass to a cdll Cplex
    or other solvers that are similar

    Copyright (c) Stuart Mitchell 2007
    """
    if senseDict is None:
        senseDict = {
            const.LpConstraintEQ: "E",
            const.LpConstraintLE: "L",
            const.LpConstraintGE: "G",
        }
    if LpVarCategories is None:
        LpVarCategories = {const.LpContinuous: "C", const.LpInteger: "I"}
    if LpObjSenses is None:
        LpObjSenses = {const.LpMaximize: -1, const.LpMinimize: 1}

    import ctypes

    rangeCount = 0
    variables = list(lp.variables())
    numVars = len(variables)
    # associate each variable with an ordinal
    self.v2n = dict(((variables[i], i) for i in range(numVars)))
    self.vname2n = dict(((variables[i].name, i) for i in range(numVars)))
    self.n2v = dict((i, variables[i]) for i in range(numVars))
    # objective values
    objSense = LpObjSenses[lp.sense]
    NumVarDoubleArray = ctypes.c_double * numVars
    objectCoeffs = NumVarDoubleArray()
    for v, val in lp.objective.items():
        objectCoeffs[self.v2n[v]] = val
    # values for variables
    objectConst = ctypes.c_double(0.0)
    NumVarStrArray = ctypes.c_char_p * numVars
    colNames = NumVarStrArray()
    lowerBounds = NumVarDoubleArray()
    upperBounds = NumVarDoubleArray()
    initValues = NumVarDoubleArray()
    for v in lp.variables():
        colNames[self.v2n[v]] = to_string(v.name)
        initValues[self.v2n[v]] = 0.0
        if v.lowBound is not None:
            lowerBounds[self.v2n[v]] = v.lowBound
        else:
            lowerBounds[self.v2n[v]] = -infBound
        if v.upBound is not None:
            upperBounds[self.v2n[v]] = v.upBound
        else:
            upperBounds[self.v2n[v]] = infBound
    # values for constraints
    numRows = len(lp.constraints)
    NumRowDoubleArray = ctypes.c_double * numRows
    NumRowStrArray = ctypes.c_char_p * numRows
    NumRowCharArray = ctypes.c_char * numRows
    rhsValues = NumRowDoubleArray()
    rangeValues = NumRowDoubleArray()
    rowNames = NumRowStrArray()
    rowType = NumRowCharArray()
    self.c2n = {}
    self.n2c = {}
    i = 0
    for c in lp.constraints:
        rhsValues[i] = -lp.constraints[c].constant
        # for ranged constraints a <= constraint >= b
        rangeValues[i] = 0.0
        rowNames[i] = to_string(c)
        rowType[i] = to_string(senseDict[lp.constraints[c].sense])
        self.c2n[c] = i
        self.n2c[i] = c
        i = i + 1
    # return the coefficient matrix as a series of vectors
    coeffs = lp.coefficients()
    sparseMatrix = sparse.Matrix(list(range(numRows)), list(range(numVars)))
    for var, row, coeff in coeffs:
        sparseMatrix.add(self.c2n[row], self.vname2n[var], coeff)
    (
        numels,
        mystartsBase,
        mylenBase,
        myindBase,
        myelemBase,
    ) = sparseMatrix.col_based_arrays()
    elemBase = ctypesArrayFill(myelemBase, ctypes.c_double)
    indBase = ctypesArrayFill(myindBase, ctypes.c_int)
    startsBase = ctypesArrayFill(mystartsBase, ctypes.c_int)
    lenBase = ctypesArrayFill(mylenBase, ctypes.c_int)
    # MIP Variables
    NumVarCharArray = ctypes.c_char * numVars
    columnType = NumVarCharArray()
    if lp.isMIP():
        for v in lp.variables():
            columnType[self.v2n[v]] = to_string(LpVarCategories[v.cat])
    self.addedVars = numVars
    self.addedRows = numRows
    return (
        numVars,
        numRows,
        numels,
        rangeCount,
        objSense,
        objectCoeffs,
        objectConst,
        rhsValues,
        rangeValues,
        rowType,
        startsBase,
        lenBase,
        indBase,
        elemBase,
        lowerBounds,
        upperBounds,
        initValues,
        colNames,
        rowNames,
        columnType,
        self.n2v,
        self.n2c,
    )
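# Sketch of the "type * length" pattern used above to build fixed-size C
# arrays for a solver DLL (standard ctypes; runs without any solver).
import ctypes

numVars = 3
NumVarDoubleArray = ctypes.c_double * numVars
lowerBounds = NumVarDoubleArray()  # zero-initialized double[3]
lowerBounds[0] = -1e20
print(list(lowerBounds), ctypes.sizeof(lowerBounds))  # [-1e+20, 0.0, 0.0] 24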
def test_ctypes_double(self):
    data = ctypes.c_double(1.234)
    sz = driver.host_memory_size(data)
    self.assertTrue(ctypes.sizeof(data) == sz)
def __init__(self, ingrid, outgrid, infile=None, outfile=None,
             remapfile=None):
    # Save the grids, make and save a Gridspec remap file.
    # For now, we are using the libCF/Gridspec API which operates only on
    # files; thus temporary files are written out and read back in. That
    # requires the input grids to support a "write_gridspec" method.
    # If a path is provided, it prefixes whatever filenames were input.
    # If there is no path provided, each filename must include its path.
    # Note: it's a bit messy to keep filenames and paths separate
    # internally, but the present Gridspec function expects lots of
    # directories.
    self.ingrid = ingrid
    self.outgrid = outgrid
    if not hasattr(ingrid, "gsfile"):
        ingrid.gsfile = None
        ingrid.gspath = None
    if infile is None:
        self.infile = ingrid.gsfile
        self.inpath = ingrid.gspath
    else:
        self.infile = os.path.basename(infile)
        self.inpath = os.path.dirname(infile)
    if not os.path.isfile(self.inpath + "/" + self.infile):
        raise OSError, "cannot open infile " + self.inpath + "/" + self.infile

    if not hasattr(outgrid, "gsfile"):
        outgrid.gsfile = None
        outgrid.gspath = None
    if outfile is None:
        self.outfile = outgrid.gsfile
        self.outpath = outgrid.gspath
    else:
        self.outfile = os.path.basename(outfile)
        self.outpath = os.path.dirname(outfile)
    if not os.path.isfile(self.outpath + "/" + self.outfile):
        raise OSError, "cannot open outfile " + self.outpath + "/" + self.outfile

    if remapfile is None:
        timestr = str(int(time.time()))
        self.remapfile = "remap" + timestr
        self.remappath = "/tmp"
    else:
        self.remapfile = os.path.basename(remapfile)
        self.remappath = os.path.dirname(remapfile)
    if not os.path.isdir(self.remappath + "/"):
        raise OSError, "cannot open remapfile directory " + self.remappath + "/"

    ingrid.write_gridspec(self.inpath + "/" + self.infile)
    outgrid.write_gridspec(self.outpath + "/" + self.outfile)

    history = "GS_Regridder"
    mosaic_in = self.inpath + "/" + self.infile
    mosaic_out = self.outpath + "/" + self.outfile

    # No variables to interpolate; the gs_fregrid call will be just to
    # make a remap file...
    dir_in = 256 * "\x00"
    dir_out = 256 * "\x00"
    input_file = 256 * "\x00"
    nfiles = 0
    output_file = 256 * "\x00"
    nfiles_out = 0
    scalar_name = 256 * "\x00"
    nscalar = 0
    u_name = 256 * "\x00"
    v_name = 256 * "\x00"
    nvector = 0
    nvector2 = 0

    # For a call which only writes the remap files, gs_fregrid
    # expects remapfile to be a full path. For a call in which
    # remapping takes place, gs_fregrid expects remapfile to be
    # a pure filename, in a path it gets from elsewhere, maybe dir_in.
    # (ARRGH - but with the API slated to be replaced, I'll live with it)
    remapf = os.path.abspath(self.remappath + "/" + self.remapfile) + \
        "\0" * 256

    interp_method = "conserve_order2"
    test_case = None
    test_param = c_double(1.0)
    opcode = c_uint(0)
    AGRID = 64
    grid_type = AGRID
    finer_step = c_uint(0)
    fill_missing = 0
    nlon = 0
    nlat = 0
    check_conserve = 0
    y_at_center = 0
    lonbegin = c_double(0.)
    lonend = c_double(360.)
    latbegin = c_double(-90.)
    latend = c_double(90.)
    lbegin = 0
    lend = -1
    kbegin = 0
    kend = -1

    libcf.gs_fregrid(history, mosaic_in, mosaic_out, dir_in, dir_out,
                     input_file, nfiles, output_file, nfiles_out, remapf,
                     scalar_name, nscalar, u_name, nvector, v_name,
                     nvector2, interp_method, test_case, test_param,
                     opcode, grid_type, finer_step, fill_missing, nlon,
                     nlat, check_conserve, y_at_center, lonbegin, lonend,
                     latbegin, latend, kbegin, kend, lbegin, lend)
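# The gs_fregrid calls above build fixed-width, NUL-padded character buffers
# by string concatenation. A small sketch showing that
# ctypes.create_string_buffer produces the same layout directly (standalone;
# runs on both Python 2 and 3):
import ctypes

buf = ctypes.create_string_buffer(b"conserve_order2", 256)  # NUL-padded
assert len(buf) == 256 and buf.value == b"conserve_order2"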
def sync_parameters(self) -> None:
    """
    Sync parameters sets the configuration of the instrument using the
    parameters specified in the Qcodes instrument.

    Sync parameters consists of five parts
        1. Center span configuration (freqs and span)
        2. Acquisition configuration
           lin-scale/log-scale
           avg/max power
        3. Reference level configuration
        4. Configuring the external 10MHz reference
        5. Sweep coupling and device mode. At the moment only `sweeping`
           is implemented

    This does not currently implement Configuration of the tracking
    generator used in VNA mode
    """
    # 1. CenterSpan Configuration
    center = ct.c_double(self.frequency())
    span = ct.c_double(self.span())
    log.info('Setting device CenterSpan configuration.')

    err = self.dll.saConfigCenterSpan(self.deviceHandle, center, span)
    self.check_for_error(err, 'saConfigCenterSpan')

    # 2. Acquisition configuration
    detectorVals = {
        'min-max': ct.c_uint(self.hf.sa_MIN_MAX),
        'average': ct.c_uint(self.hf.sa_AVERAGE)
    }
    scaleVals = {
        'log-scale': ct.c_uint(self.hf.sa_LOG_SCALE),
        'lin-scale': ct.c_uint(self.hf.sa_LIN_SCALE),
        'log-full-scale': ct.c_uint(self.hf.sa_LOG_FULL_SCALE),
        'lin-full-scale': ct.c_uint(self.hf.sa_LIN_FULL_SCALE)
    }
    detector = detectorVals[self.acquisition_mode()]
    scale = scaleVals[self.scale()]
    err = self.dll.saConfigAcquisition(self.deviceHandle, detector, scale)
    self.check_for_error(err, 'saConfigAcquisition')

    # 3. Reference Level configuration
    log.info('Setting device reference level configuration.')
    err = self.dll.saConfigLevel(
        self.deviceHandle, ct.c_double(self.ref_lvl()))
    self.check_for_error(err, 'saConfigLevel')

    # 4. External Reference configuration
    if self.external_reference():
        external = self.hf.sa_REF_EXTERNAL_IN
        log.info('Setting reference frequency from external source.')
        err = self.dll.saSetTimebase(self.deviceHandle, external)
        self.check_for_error(err, 'saSetTimebase')

    # 5. Sweep coupling and device mode
    reject_var = ct.c_bool(self.reject_image())
    log.info('Setting device Sweeping configuration.')
    err = self.dll.saConfigSweepCoupling(
        self.deviceHandle, ct.c_double(self.rbw()),
        ct.c_double(self.vbw()), reject_var)
    self.check_for_error(err, 'saConfigSweepCoupling')

    modeOpts = {
        'sweeping': self.hf.sa_SWEEPING,
        'real_time': self.hf.sa_REAL_TIME,  # not implemented
        'IQ': self.hf.sa_IQ,  # not implemented
        'idle': self.hf.sa_IDLE
    }
    mode = modeOpts[self.device_mode()]
    # the third argument to saInitiate is a flag that is
    # currently not used
    err = self.dll.saInitiate(self.deviceHandle, mode, 0)
    if err == saStatus.saInvalidParameterErr:
        extrainfo = """
        In real-time mode, this value may be returned if the span
        limits defined in the API header are broken. Also in
        real-time mode, this error will be returned if the
        resolution bandwidth is outside the limits defined in
        the API header.
        In time-gate analysis mode this error will be returned if
        span limits defined in the API header are broken. Also in
        time gate analysis, this error is returned if the
        bandwidth provided requires more samples for processing
        than is allowed in the gate length. To fix this
        increase rbw/vbw.
        """
    elif err == saStatus.saBandwidthErr:
        extrainfo = 'RBW is larger than your span. (Sweep Mode)!'
    else:
        extrainfo = None
    self.check_for_error(err, 'saInitiate', extrainfo)

    self._parameters_synced = True
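# A minimal sketch of the check_for_error helper called throughout
# sync_parameters above. This is hypothetical: the real driver presumably
# maps saStatus codes to messages; here we only assume that saNoError == 0,
# as in the Signal Hound API headers.
def check_for_error(err, source, extrainfo=None):
    if err != 0:
        msg = 'Error in %s: status code %d' % (source, err)
        if extrainfo is not None:
            msg += '\n' + extrainfo
        raise IOError(msg)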
def event_loop(self, nev=-1):
    # nev is number of events to analyze, negative for all

    # input generated particles
    pdg = std.vector(int)()
    en = std.vector(float)()
    self.tree.SetBranchAddress("gen_pdg", pdg)
    self.tree.SetBranchAddress("gen_en", en)

    # spectrometer hits
    up_hits = ParticleCounterHits("up", self.tree)
    up_hits.local_from_geo(self.geo, "LumiSUbox")
    down_hits = ParticleCounterHits("down", self.tree)
    down_hits.local_from_geo(self.geo, "LumiSDbox")

    # photon detector hits
    phot_hits = ParticleCounterHits("phot", self.tree)
    phot_hits.local_from_geo(self.geo, "LumiDbox")

    # flow counters hits
    ew_front_hits = ParticleCounterHits("cnt_ew_front", self.tree)
    ew_rear_hits = ParticleCounterHits("cnt_ew_rear", self.tree)
    mag_front_hits = ParticleCounterHits("cnt_mag_front", self.tree)
    mag_rear_hits = ParticleCounterHits("cnt_mag_rear", self.tree)
    ew_front_hits.local_from_geo(self.geo, "ExitWinBox")
    ew_rear_hits.local_from_geo(self.geo, "ExitWinBox")
    mag_front_hits.local_from_geo(self.geo, "lumi_dipole")
    mag_rear_hits.local_from_geo(self.geo, "lumi_dipole")

    # outputs
    out = TFile(self.outfile, "recreate")

    # interaction tree
    otree = TTree("event", "event")
    gen_en = c_double(0)
    up_en = c_double(0)
    down_en = c_double(0)
    is_spect = c_bool(0)
    phot_en = c_double(0)
    otree.Branch("gen_en", gen_en, "gen_en/D")
    otree.Branch("up_en", up_en, "up_en/D")
    otree.Branch("down_en", down_en, "down_en/D")
    otree.Branch("is_spect", is_spect, "is_spect/O")
    otree.Branch("phot_en", phot_en, "phot_en/D")

    # hit trees
    up_hits.CreateOutput("up")
    down_hits.CreateOutput("down")
    phot_hits.CreateOutput("phot")

    # hit trees for flow counters
    ew_front_hits.CreateOutput("ew_front")
    ew_rear_hits.CreateOutput("ew_rear")
    mag_front_hits.CreateOutput("mag_front")
    mag_rear_hits.CreateOutput("mag_rear")

    # bunch crossing tree
    btree = TTree("bunch", "bunch")
    bun_ni = c_int(0)
    bun_up_en = c_double(0)
    bun_down_en = c_double(0)
    bun_phot_en = c_double(0)
    btree.Branch("bun_ni", bun_ni, "bun_ni/I")
    btree.Branch("bun_up_en", bun_up_en, "bun_up_en/D")
    btree.Branch("bun_down_en", bun_down_en, "bun_down_en/D")
    btree.Branch("bun_phot_en", bun_phot_en, "bun_phot_en/D")

    # Poisson distribution for bunch crossings
    lam = self.get_scale()["lambda"]
    print("Lambda:", lam)
    fPois = TF1(
        "Pois", "TMath::Power([0], Int_t(TMath::Floor(x)) )\
        *TMath::Exp(-[0])/TMath::Factorial( Int_t(TMath::Floor(x)) )",
        0, 12. * lam)
    fPois.SetParameter(0, lam)

    # number of interactions in bunch crossing
    nI = int(TMath.Floor(fPois.GetRandom()))
    bun_ni.value = nI

    # print period (guard against nev < 12, which would make iprint zero)
    if nev < 0:
        nev = self.tree.GetEntries()
    iprint = max(1, int(nev / 12))

    # interaction loop
    for ievt in range(nev):
        self.tree.GetEntry(ievt)

        if ievt % iprint == 0 and ievt > 0:
            print("{0:.1f} %".format(100. * ievt / nev))
            stdout.flush()

        gen_en.value = 0.
        up_en.value = 0.
        down_en.value = 0.
        is_spect.value = 0
        phot_en.value = 0.

        # generated photon energy
        for imc in range(pdg.size()):
            if pdg.at(imc) == 22:
                gen_en.value = en.at(imc)

        # flow counters hits
        ew_front_hits.LoopInLocal()
        ew_rear_hits.LoopInLocal()
        mag_front_hits.LoopInLocal()
        mag_rear_hits.LoopInLocal()

        # spectrometer hits
        for i in range(up_hits.GetN()):
            hit = up_hits.GetHit(i)
            hit.GlobalToLocal()
            up_en.value += hit.en
            up_hits.FillOutput()

        for i in range(down_hits.GetN()):
            hit = down_hits.GetHit(i)
            hit.GlobalToLocal()
            down_en.value += hit.en
            down_hits.FillOutput()

        # coincidence selection
        if up_en.value > self.emin and down_en.value > self.emin:
            is_spect.value = 1

        # photon hits
        for i in range(phot_hits.GetN()):
            hit = phot_hits.GetHit(i)
            hit.GlobalToLocal()
            phot_en.value += hit.en
            phot_hits.FillOutput()

        otree.Fill()

        # bunch crossing
        if nI == 0:
            btree.Fill()
            nI = int(TMath.Floor(fPois.GetRandom()))
            bun_ni.value = nI
            bun_up_en.value = 0.
            bun_down_en.value = 0.
            bun_phot_en.value = 0.
        else:
            nI -= 1
            bun_up_en.value += up_en.value
            bun_down_en.value += down_en.value
            bun_phot_en.value += phot_en.value

    # end of interaction loop

    otree.Write()
    up_hits.otree.Write()
    down_hits.otree.Write()
    phot_hits.otree.Write()
    ew_front_hits.otree.Write()
    ew_rear_hits.otree.Write()
    mag_front_hits.otree.Write()
    mag_rear_hits.otree.Write()
    btree.Write()
    self.print_stat(out)
    out.Close()

    print("Hit analysis done")
def read_base_temperature(self):
    "gets the cooler's hot side temperature in degrees Celsius"
    T = c_double()
    self._libfli.FLIReadTemperature(self._dev, FLI_TEMPERATURE_BASE,
                                    byref(T))
    return T.value
def _fitorpredict_ptr(
        self,
        source_dev,
        m_train,
        n,
        m_valid,
        double_precision,
        order,
        a,  # trainX_ptr or train_xptr
        b,  # trainY_ptr
        c,  # validX_ptr
        d,  # validY_ptr or valid_xptr -- keep consistent with later uses
        e,  # weight_ptr
        do_predict=0,
        free_input_data=0):
    """Train a GLM with pointers to data on the GPU
       (if fit_intercept, then you should have added 1's as
       last column to m_train)

    :param source_dev GPU ID of device

    :param m_train Number of rows in the training set

    :param n Number of columns in the training set

    :param m_valid Number of rows in the validation set

    :param double_precision float32 (0) or double point precision (1)
        of fit. No Default.

    :param order: Order of data.  Default is None and set elsewhere
        whether row 'r' or column 'c' major order.

    :param a Pointer to training features array

    :param b Pointer to training response array

    :param c Pointer to validation features

    :param d Pointer to validation response

    :param e Pointer to weight column

    :param int do_predict : Indicate if prediction should be done on
        validation set after train. Default is 0.

    :param int free_input_data : Indicate if input data should be freed
        at the end of fit(). Default is 0.
    """

    # store some things for later call to predict_ptr()
    self.source_dev = source_dev
    self.m_train = m_train
    self.n = n
    self.m_valid = m_valid
    self.a = a
    self.b = b
    self.c = c
    self.d = d
    self.e = e

    # if fitted earlier, clear;
    # otherwise don't clear solution, just use it
    if do_predict == 0 and self.did_fit_ptr == 1:
        free_sols(self)

    self.did_fit_ptr = 1

    # not calling with self.source_dev because want option to never use
    # default but instead input pointers from foreign code's pointers
    if order is not None:  # set order if not already set
        if order in ['r', 'c']:
            self.ord = ord(order)
        else:
            self.ord = order

    if hasattr(self, 'double_precision') and \
            self.double_precision is not None:
        which_precision = self.double_precision
    else:
        which_precision = double_precision
        self.double_precision = double_precision

    if do_predict == 0:
        # initialize if doing fit
        x_vs_alpha_lambda = c_void_p(0)
        x_vs_alpha = c_void_p(0)
        valid_pred_vs_alpha_lambda = c_void_p(0)
        valid_pred_vs_alpha = c_void_p(0)
        count_full = c_size_t(0)
        count_short = c_size_t(0)
        count_more = c_size_t(0)
    else:
        # restore if predict
        x_vs_alpha_lambda = self.x_vs_alpha_lambda
        x_vs_alpha = self.x_vs_alpha
        valid_pred_vs_alpha_lambda = self.valid_pred_vs_alpha_lambda
        valid_pred_vs_alpha = self.valid_pred_vs_alpha
        count_full = self.count_full
        count_short = self.count_short
        count_more = self.count_more

    c_size_t_p = POINTER(c_size_t)
    if which_precision == 1:
        c_elastic_net = self.lib.elastic_net_ptr_double
        self.dtype = np.float64
        self.myctype = c_double
        if self.verbose > 0:
            print('double precision fit')
            sys.stdout.flush()
    else:
        c_elastic_net = self.lib.elastic_net_ptr_float
        self.dtype = np.float32
        self.myctype = c_float
        if self.verbose > 0:
            print('single precision fit')
            sys.stdout.flush()

    # precision-independent commands
    if self.alphas_list is not None:
        pass_alphas = (self.alphas_list.astype(self.dtype, copy=False))
        c_alphas = pass_alphas.ctypes.data_as(POINTER(self.myctype))
    else:
        c_alphas = cast(0, POINTER(self.myctype))
    if self.lambdas_list is not None:
        pass_lambdas = (self.lambdas_list.astype(self.dtype, copy=False))
        c_lambdas = pass_lambdas.ctypes.data_as(POINTER(self.myctype))
    else:
        c_lambdas = cast(0, POINTER(self.myctype))

    # call elastic net in C backend
    c_elastic_net(
        c_int(self._family),
        c_int(do_predict),
        c_int(source_dev),
        c_int(1),
        c_int(self._shared_a),
        c_int(self.n_threads),
        c_int(self._gpu_id),
        c_int(self.n_gpus),
        c_int(self._total_n_gpus),
        c_int(self.ord),
        c_size_t(m_train),
        c_size_t(n),
        c_size_t(m_valid),
        c_int(self.fit_intercept),
        c_int(self._standardize),
        c_double(self.lambda_max),
        c_double(self.lambda_min_ratio),
        c_int(self.n_lambdas),
        c_int(self.n_folds),
        c_int(self.n_alphas),
        c_double(self.alpha_min),
        c_double(self.alpha_max),
        c_alphas,
        c_lambdas,
        c_double(self.tol),
        c_double(self.tol_seek_factor),
        c_int(self.lambda_stop_early),
        c_int(self.glm_stop_early),
        c_double(self.glm_stop_early_error_fraction),
        c_int(self.max_iter),
        c_int(self.verbose),
        a,
        b,
        c,
        d,
        e,
        self.store_full_path,
        pointer(x_vs_alpha_lambda),
        pointer(x_vs_alpha),
        pointer(valid_pred_vs_alpha_lambda),
        pointer(valid_pred_vs_alpha),
        cast(addressof(count_full), c_size_t_p),
        cast(addressof(count_short), c_size_t_p),
        cast(addressof(count_more), c_size_t_p),
    )
    # if should or user wanted to save or free data,
    # do that now that we are done using a, b, c, d, e.
    # This means we have to upload_data() again before fit_ptr
    # or predict_ptr, or only call fit and predict
    if free_input_data == 1:
        free_data(self)

    # PROCESS OUTPUT
    # save pointers
    self.x_vs_alpha_lambda = x_vs_alpha_lambda
    self.x_vs_alpha = x_vs_alpha
    self.valid_pred_vs_alpha_lambda = valid_pred_vs_alpha_lambda
    self.valid_pred_vs_alpha = valid_pred_vs_alpha
    self.count_full = count_full
    self.count_short = count_short
    self.count_more = count_more

    count_full_value = count_full.value
    count_short_value = count_short.value
    count_more_value = count_more.value

    if self.store_full_path == 1:
        num_all = int(count_full_value / (self.n_alphas * self.n_lambdas))
    else:
        num_all = int(count_short_value / self.n_alphas)

    num_all_other = num_all - n
    num_error = 3  # should be consistent w/ src/common/elastic_net_ptr.cpp
    num_other = num_all_other - num_error
    if num_other != 3:
        print('num_other=%d but expected 3' % num_other)
        print('count_full_value=%d '
              'count_short_value=%d '
              'count_more_value=%d '
              'num_all=%d num_all_other=%d' %
              (int(count_full_value), int(count_short_value),
               int(count_more_value), int(num_all), int(num_all_other)))
        sys.stdout.flush()
        # TODO raise an exception instead
        exit(0)

    if self.store_full_path == 1 and do_predict == 0:
        # x_vs_alpha_lambda contains solution (and other data)
        # for all lambda and alpha
        self.x_vs_alpha_lambdanew = \
            np.fromiter(cast(x_vs_alpha_lambda, POINTER(self.myctype)),
                        dtype=self.dtype, count=count_full_value)
        self.x_vs_alpha_lambdanew = \
            np.reshape(self.x_vs_alpha_lambdanew,
                       (self.n_lambdas, self.n_alphas, num_all))
        self.x_vs_alpha_lambdapure = \
            self.x_vs_alpha_lambdanew[:, :, 0:n]
        self.error_vs_alpha_lambda = \
            self.x_vs_alpha_lambdanew[:, :, n:n + num_error]
        self._lambdas = \
            self.x_vs_alpha_lambdanew[:, :,
                                      n + num_error:n + num_error + 1]
        self._alphas = \
            self.x_vs_alpha_lambdanew[:, :,
                                      n + num_error + 1:n + num_error + 2]
        self._tols = \
            self.x_vs_alpha_lambdanew[:, :,
                                      n + num_error + 2:n + num_error + 3]
        if self.fit_intercept == 1:
            self.intercept_ = self.x_vs_alpha_lambdapure[:, :, -1]
        else:
            self.intercept_ = None

    if self.store_full_path == 1 and do_predict == 1:
        thecount = int(count_full_value / (n + num_all_other) * m_valid)
        self.valid_pred_vs_alpha_lambdanew = \
            np.fromiter(cast(valid_pred_vs_alpha_lambda,
                             POINTER(self.myctype)),
                        dtype=self.dtype, count=thecount)
        self.valid_pred_vs_alpha_lambdanew = \
            np.reshape(self.valid_pred_vs_alpha_lambdanew,
                       (self.n_lambdas, self.n_alphas, m_valid))
        self.valid_pred_vs_alpha_lambdapure = \
            self.valid_pred_vs_alpha_lambdanew[:, :, 0:m_valid]

    if do_predict == 0:  # store_full_path == 0 or 1
        # x_vs_alpha contains only best of all lambda for each alpha
        self.x_vs_alphanew = np.fromiter(
            cast(x_vs_alpha, POINTER(self.myctype)),
            dtype=self.dtype, count=count_short_value)
        self.x_vs_alphanew = np.reshape(self.x_vs_alphanew,
                                        (self.n_alphas, num_all))
        self.x_vs_alphapure = self.x_vs_alphanew[:, 0:n]
        self.error_vs_alpha = self.x_vs_alphanew[:, n:n + num_error]
        self._lambdas2 = \
            self.x_vs_alphanew[:, n + num_error:n + num_error + 1]
        self._alphas2 = \
            self.x_vs_alphanew[:, n + num_error + 1:n + num_error + 2]
        self._tols2 = \
            self.x_vs_alphanew[:, n + num_error + 2:n + num_error + 3]
        if self.fit_intercept == 1:
            self.intercept2_ = self.x_vs_alphapure[:, -1]
        else:
            self.intercept2_ = None

    # preds exclusively operate for x_vs_alpha or x_vs_alpha_lambda
    if self.store_full_path == 0 and do_predict == 1:
        thecount = int(count_short_value / (n + num_all_other) * m_valid)
        if self.verbose > 0:
            print('thecount=%d '
                  'count_full_value=%d '
                  'count_short_value=%d '
                  'n=%d num_all_other=%d '
                  'm_valid=%d' % (
                      thecount,
                      count_full_value,
                      count_short_value,
                      n,
                      num_all_other,
                      m_valid,
                  ))
            sys.stdout.flush()
        self.valid_pred_vs_alphanew = \
            np.fromiter(cast(valid_pred_vs_alpha, POINTER(self.myctype)),
                        dtype=self.dtype, count=thecount)
        self.valid_pred_vs_alphanew = \
            np.reshape(self.valid_pred_vs_alphanew,
                       (self.n_alphas, m_valid))
        self.valid_pred_vs_alphapure = \
            self.valid_pred_vs_alphanew[:, 0:m_valid]

    return self
def get_temperature(self):
    "gets the camera's temperature in degrees Celsius"
    T = c_double()
    self._libfli.FLIGetTemperature(self._dev, byref(T))
    return T.value
def get_cooler_power(self):
    "gets the cooler's power in watts (undocumented API function)"
    P = c_double()
    self._libfli.FLIGetCoolerPower(self._dev, byref(P))
    return P.value
def set_temperature(self, T):
    "sets the camera's temperature target in degrees Celsius"
    self._libfli.FLISetTemperature(self._dev, c_double(T))
def read_CCD_temperature(self):
    "gets the CCD's temperature in degrees Celsius"
    T = c_double()
    self._libfli.FLIReadTemperature(self._dev, FLI_TEMPERATURE_CCD,
                                    byref(T))
    return T.value
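# All the FLI readers above share one ctypes idiom: allocate a c_double,
# pass it by reference, and let the C library write through the pointer.
# A self-contained sketch of that out-parameter pattern, using a CFUNCTYPE
# callback as a stand-in for the C library so it runs without libfli:
from ctypes import CFUNCTYPE, POINTER, byref, c_double

@CFUNCTYPE(None, POINTER(c_double))
def fake_read_temperature(out):
    out[0] = -20.5  # the "C side" writes through the pointer

T = c_double()
fake_read_temperature(byref(T))
print(T.value)  # -20.5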