def begin():
    """Spawn the logging process and the worker pool, then wait for all of them."""
    # Lock-free shared counters handed to every child process.
    htmls = sharedctypes.Value('l', 0, lock=False)
    items = sharedctypes.Value('l', 0, lock=False)
    seeds = sharedctypes.Value('l', 0, lock=False)
    shared = (htmls, items, seeds)

    procs = []

    # One dedicated process keeps the log updated.
    logger = Process(target=keep_log, args=shared)
    logger.start()
    procs.append(logger)

    # The configured number of crawler workers.
    for _ in range(settings.Num_process):
        worker = Process(target=pre_run, args=shared)
        worker.start()
        procs.append(worker)

    # Block until every child has finished.
    for child in procs:
        child.join()
def __init__(self, roaster, recipes):
    """Build the roast tab UI and start its periodic data refresh."""
    super(RoastTab, self).__init__()

    # Slider-interaction state.
    self.sectTimeSliderPressed = False
    self.tempSliderPressed = False

    # Blinker toggled while connect_state == CS_CONNECTING.
    self._connecting_blinker = True
    self.CONNECT_TXT_PLEASE_CONNECT = "Please connect your roaster."
    self.CONNECT_TXT_CONNECTING = "Found roaster, connecting. This could take >20 seconds "

    # Process-safe flag used to schedule a controller-variable update
    # from the recipe object.
    self._flag_update_controllers = sharedctypes.Value('i', 0)

    # Keep references to the shared roaster / recipes objects.
    self.roaster = roaster
    self.recipes = recipes

    # Build the widgets, then seed the time displays.
    self.create_ui()
    self.update_section_time()
    self.update_total_time()

    # Refresh the GUI data once per second.
    refresh = QtCore.QTimer()
    refresh.setInterval(1000)
    refresh.timeout.connect(self.update_data)
    refresh.start()
    self.timer = refresh

    # The tab starts out disabled (until a roaster is connected).
    self.setEnabled(False)
def __init__(self, update_data_func=None, state_transition_func=None,
             thermostat=False):
    """Initialize the process-shared packet fields for the roaster.

    update_data_func is called when a packet is opened;
    state_transition_func is used by the timer thread to know what to
    do next.  When thermostat is True, a background process running
    self.thermostat is started.  See wiki for more information on
    packet structure and fields.
    """
    self.update_data_func = update_data_func
    self.state_transition_func = state_transition_func

    # Fixed framing/control bytes of the serial packet.
    for attr, raw in (('_header', b'\xAA\xAA'),
                      ('_temp_unit', b'\x61\x74'),
                      ('_flags', b'\x63'),
                      ('_current_state', b'\x02\x01')):
        setattr(self, attr, sharedctypes.Array('c', raw))
    self._footer = b'\xAA\xFA'

    # Process-shared integer settings/readings with their defaults.
    for attr, default in (('_fan_speed', 1),
                          ('_heat_setting', 0),
                          ('_target_temp', 150),
                          ('_current_temp', 150),
                          ('_time_remaining', 0),
                          ('_total_time', 0),
                          ('_cont', 1)):
        setattr(self, attr, sharedctypes.Value('i', default))

    # Identity check: only the literal True spawns the thermostat process.
    if thermostat is True:
        self.thermostat_process = mp.Process(target=self.thermostat)
        self.thermostat_process.start()
def __init__(self, roaster, app, max_recipe_size_bytes=64 * 1024):
    """Process-safe recipe container.

    Instances are accessed from multiple processes (freshroastsr700
    calls Recipe.move_to_next_section() from a child process), so all
    mutable state is kept in shared memory.
    """
    # These references are not stored in a process-safe manner; only
    # their process-safe members may be used from this class.
    self.roaster = roaster
    self.app = app
    # Index of the recipe step currently being applied.
    self.currentRecipeStep = sharedctypes.Value('i', 0)
    # Flag (0/1) telling whether a recipe has been loaded.
    self.recipeLoaded = sharedctypes.Value('i', 0)
    # The recipe itself lives as a JSON string inside a fixed-size
    # shared buffer (multiprocessing.Manager proved unreliable for
    # this); the Array must stay alive for the lifetime of the object.
    self.recipe_str = Array(ctypes.c_char, max_recipe_size_bytes)
def __init__(self, settings,
             pipe_buffered_data_after_pause=True,
             chunk_size=10000):
    """Prepare the force-sensor worker process.

    pipe_buffered_data_after_pause: when True, the shared data queue is
    not written continuously; buffered data is pushed to the queue only
    after a pause (or stop).
    chunk_size: number of samples handled per chunk.
    """
    # Fail fast on a wrong settings type.
    if not isinstance(settings, SensorSettings):
        raise RuntimeError(
            "settings has to be force_sensor.Settings object")
    super(SensorProcess, self).__init__()

    self.sensor_settings = settings
    self._pipe_buffer_after_pause = pipe_buffered_data_after_pause
    self._chunk_size = chunk_size

    # IPC primitives shared with the parent process.
    self._pipe_i, self._pipe_o = Pipe()
    self._event_is_polling = Event()
    self._event_sending_data = Event()
    self._event_new_data = Event()
    self.event_bias_is_available = Event()
    self.event_trigger = Event()

    # Latest force/torque reading per axis (raw, lock-free floats).
    for axis in ('Fx', 'Fy', 'Fz', 'Tx', 'Ty', 'Tz'):
        setattr(self, '_last_' + axis, sharedctypes.RawValue(ct.c_float))

    self._buffer_size = sharedctypes.RawValue(ct.c_uint64)
    self._sample_cnt = sharedctypes.Value(ct.c_uint64)

    self._event_quit_request = Event()
    self._determine_bias_flag = Event()
    self._bias_n_samples = 200

    # Make sure the worker is joined when the interpreter exits.
    atexit.register(self.join)
def __init__(self, update_data_func=None, state_transition_func=None,
             thermostat=False, kp=0.06, ki=0.0075, kd=0.01,
             heater_segments=8, ext_sw_heater_drive=False):
    """Create variables used to send in packets to the roaster.

    update_data_func is called when a packet is opened.  The state
    transition function is used by the timer thread to know what to do
    next.  See wiki for more information on packet structure and fields.

    kp, ki, kd: PID parameters forwarded to the communication process.
    heater_segments: segment count forwarded as the bang-bang heater
        resolution.
    ext_sw_heater_drive: when True the internal thermostat flag is
        forced off (external software drives the heater instead).
    """
    # constants for protocol decoding (packet-parser states)
    self.LOOKING_FOR_HEADER_1 = 0
    self.LOOKING_FOR_HEADER_2 = 1
    self.PACKET_DATA = 2
    self.LOOKING_FOR_FOOTER_2 = 3
    # constants for connection state monitoring
    self.CS_NOT_CONNECTED = -2
    self.CS_ATTEMPTING_CONNECT = -1
    self.CS_CONNECTING = 0
    self.CS_CONNECTED = 1
    # constants for connection attempt type
    self.CA_NONE = 0
    self.CA_AUTO = 1
    self.CA_SINGLE_SHOT = 2
    # NOTE(review): these helpers presumably create the
    # self.update_data_event / self.state_transition_event objects used
    # below when the processes are built — confirm in their definitions.
    self._create_update_data_system(update_data_func)
    self._create_state_transition_system(state_transition_func)
    # Fixed framing/control bytes of the serial packet.
    self._header = sharedctypes.Array('c', b'\xAA\xAA')
    self._temp_unit = sharedctypes.Array('c', b'\x61\x74')
    self._flags = sharedctypes.Array('c', b'\x63')
    self._current_state = sharedctypes.Array('c', b'\x02\x01')
    self._footer = b'\xAA\xFA'
    # Process-shared roaster settings/readings.
    self._fan_speed = sharedctypes.Value('i', 1)
    self._heat_setting = sharedctypes.Value('i', 0)
    self._target_temp = sharedctypes.Value('i', 150)
    self._current_temp = sharedctypes.Value('i', 150)
    self._time_remaining = sharedctypes.Value('i', 0)
    self._total_time = sharedctypes.Value('i', 0)
    self._disconnect = sharedctypes.Value('i', 0)
    self._teardown = sharedctypes.Value('i', 0)
    self._cooling_for_pid_control = False
    # for SW PWM heater setting
    self._heater_level = sharedctypes.Value('i', 0)
    # the following vars are not process-safe, do not access them
    # from the comm or timer threads, nor from the callbacks.
    self._ext_sw_heater_drive = ext_sw_heater_drive
    if not self._ext_sw_heater_drive:
        self._thermostat = thermostat
    else:
        # external software drives the heater; internal thermostat off
        self._thermostat = False
    self._pid_kp = kp
    self._pid_ki = ki
    self._pid_kd = kd
    self._heater_bangbang_segments = heater_segments
    # initialize to 'not connected'
    self._connected = sharedctypes.Value('i', 0)
    self._connect_state = sharedctypes.Value('i', self.CS_NOT_CONNECTED)
    # initialize to 'not trying to connect'
    self._attempting_connect = sharedctypes.Value('i', self.CA_NONE)
    # create comm process (daemonized so it dies with the parent)
    self.comm_process = mp.Process(
        target=self._comm,
        args=(
            self._thermostat,
            self._pid_kp,
            self._pid_ki,
            self._pid_kd,
            self._heater_bangbang_segments,
            self._ext_sw_heater_drive,
            self.update_data_event,))
    self.comm_process.daemon = True
    self.comm_process.start()
    # create timer process that counts down time_remaining
    self.time_process = mp.Process(
        target=self._timer,
        args=(
            self.state_transition_event,))
    self.time_process.daemon = True
    self.time_process.start()
def func0(lock, a):
    """Under *lock*: print the shared value, bump it via func.add1,
    sleep one second, then report this process's name and pid."""
    # `with` guarantees the semaphore is released even if an exception
    # is raised (the original acquire()/release() pair did not).
    with lock:
        print(a.value)  # Python 3 print (original used Py2 print statements)
        func.add1(a)  # presumably does a.value += 1 -- confirm in func module
        time.sleep(1)
        proc = current_process()
        print(proc.name, proc.pid)


# NOTE(review): lock0, v and sv are created but never used below.
lock0 = Lock()
lock = Semaphore(2)  # Semaphore(1) == Lock()
v = Value('f', 0.0)
a = Array('i', range(10))
sv = sharedctypes.Value('f', 0.0)
sa = sharedctypes.Array('i', range(10))

# Three children share `sa` and the counting semaphore (so at most two
# of them hold it at once), then the parent runs func0 once itself.
sub_procs = [Process(target=func0, args=(lock, sa)) for _ in range(3)]
for p in sub_procs:
    p.start()
for p in sub_procs:
    p.join()
func0(lock, sa)
def modifiedGreedyInsertionCPU(asdGMM, xTilde, y, nPartial=10, speedUp=False,
                               regVal=1e-2, doOpt=True, add2Sigma=1e-3,
                               iterMax=100, relTolLogLike=1e-3,
                               absTolLogLike=1e-3, relTolMSE=1e-3,
                               absTolMSE=1e-3, mseConverged=0.,
                               convBounds=None, regValStep=None,
                               addKWARGS=None):
    """Greedily try nPartial random kernel insertions per kernel of asdGMM
    and return a new model built from the best (lowest-value) result.

    Points are assigned to their highest-weight kernel; kernels with
    fewer than asdGMM.nVarTot*4 points are skipped.  For each remaining
    kernel, nPartial distinct random point pairs (i, j) seed calls to
    modifiedPartialInsertionCPU; the candidate with the smallest
    returned value (MSE is prioritized over log-likelihood) wins.

    convBounds/regValStep default to [0.5, 0.3] and [0.08, 4]; None
    sentinels replace the original mutable default arguments, which were
    shared across calls (addKWARGS in particular is mutated below, so
    the old defaults leaked state between invocations).
    """
    # Rebuild the intended defaults fresh on every call.
    convBounds = [0.5, 0.3] if convBounds is None else convBounds
    regValStep = [0.08, 4] if regValStep is None else regValStep
    addKWARGS = {} if addKWARGS is None else addKWARGS

    addKWARGS.setdefault('JacSpace')  # ensure the key exists (value None)
    nPt = xTilde.shape[1]
    nK = asdGMM.nK
    weights = asdGMM._getWeightsCPU(xTilde)  # TBD or based on x only; I don't think so
    # Each point is assigned to its highest-weight kernel.
    indKernel = np.argmax(weights, axis=0)
    if mainDoParallel_:
        # NOTE(review): this branch references names that are undefined
        # here (x, relTol, absTol) and the parallel path below is dead
        # (`assert 0`) — it has never been runnable.  Left as-is.
        xL = nPartial*[x]
        doOptL = nPartial*[doOpt]
        add2SigmaL = nPartial*[add2Sigma]
        iterMaxL = nPartial*[iterMax]
        relTolL = nPartial*[relTol]
        absTolL = nPartial*[absTol]
        mseConvergedL = nPartial*[mseConverged]
    # Now split each of the kernels.
    # NOTE(review): the sharedctypes.Value is immediately rebound to the
    # float 1e200, so no shared memory is actually used for bestOverall
    # and downstream code receives a plain float (was it meant to be
    # `bestOverall.value = 1e200`?).  Preserved to keep behavior.
    bestOverall = sharedctypes.Value(ctypes.c_double)
    bestOverall = 1e200
    bestThetaOpt = sharedctypes.Array(ctypes.c_double,
                                      varsInMat(y.shape[0])*y.shape[1])
    addKWARGS.update({'bestOverall': bestOverall,
                      'bestThetaOpt': bestThetaOpt})
    resultList = []
    usedClass = asdGMM.__class__
    for k in range(nK):
        indKernelK = indKernel == k
        xTildeK = xTilde[:, indKernelK]
        nPtK = xTildeK.shape[1]
        if xTildeK.shape[1] < asdGMM.nVarTot*4:
            warnings.warn("Skipping kernel {0} in update process due to a lack of points".format(k))
            continue
        if speedUp:
            # Check consistency of the assumption that the base of each
            # kernel is disjoint from the others.
            parasitInfl = np.mean(weights[np.hstack((np.arange(0, k),
                                                     np.arange(k+1, nK))),
                                          indKernelK])
            if parasitInfl > 0.05:
                warnings.warn("Disjoint base assumption might not be valid for kernel {0}".format(k))
            del parasitInfl
        # Generate nPartial random sample pairs with i != j.
        randSampI = np.random.choice(nPtK, nPartial)
        randSampJ = np.random.choice(nPtK, nPartial)
        indReplace = randSampI == randSampJ
        while np.any(indReplace):
            randSampI[indReplace] = np.random.choice(nPtK, sum(indReplace))
            randSampJ[indReplace] = np.random.choice(nPtK, sum(indReplace))
            indReplace = randSampI == randSampJ
        if speedUp:
            assert 0, "TBD"
        else:
            if mainDoParallel_:
                # Dead code path: intentionally disabled below.
                assert 0, "TBD cuda driver error and other"
                GMMparsL = [asdGMM.toPars() for k in range(nPartial)]
                xTildeKL = nPartial*[xTildeK]
                # partialInsertionCPU(GMMpars,x,xK,k,i,j,doOpt=True,add2Sigma=1e-3,iterMax=100,relTol=1e-3,absTol=1e-3)
                with Pool(4) as p:
                    newList = p.starmap(
                        modifiedPartialInsertionCPU,
                        zip(asdGMMparsL, xTildeL, xTildeKL, y, nPartial*[k],
                            randSampI, randSampJ, doOptL, regValL, add2SigmaL,
                            iterMaxL, relTolL, absTolL, mseConvergedL))
                resultList += newList
            else:
                resultList += lmap(
                    lambda ij: modifiedPartialInsertionCPU(
                        asdGMM.toPars(), xTilde, xTildeK, y, k, ij[0], ij[1],
                        doOpt=doOpt, regVal=regVal, add2Sigma=add2Sigma,
                        iterMax=iterMax, relTolLogLike=relTolLogLike,
                        absTolLogLike=absTolLogLike, relTolMSE=relTolMSE,
                        absTolMSE=absTolMSE, mseConverged=mseConverged,
                        convBounds=convBounds, regValStep=regValStep,
                        addKWARGS=addKWARGS, usedClass=usedClass),
                    zip(randSampI, randSampJ))
    # Get the best updated model among all tested ones.
    # Here we care more about mse than loglike.
    bestVal = np.inf  # np.Inf was removed in NumPy 2.0
    bestPars = None
    for newVal, newPars in resultList:
        if newVal < bestVal:
            bestVal = newVal
            bestPars = newPars
    # Load pars into a fresh model.
    newasdGMM = aSymDynamicsGMM(parDict=bestPars)
    return newasdGMM