def zelda():  # sorry, had to do this real quick... :)
    # requires: import time, winsound
    for freq, dur in [(210, 200), (225, 200), (238, 200), (250, 200)] * 2:
        winsound.Beep(freq, dur)
    for freq, dur in [(225, 180), (238, 180), (250, 180), (262, 180)] * 2:
        winsound.Beep(freq, dur)
    for freq, dur in [(238, 150), (250, 150), (262, 150), (275, 150),
                      (238 * 2, 150), (250 * 2, 150), (262 * 2, 150), (275 * 2, 160)]:
        winsound.Beep(freq, dur)
    time.sleep(1)
    for freq, dur in [(210 * 4, 150), (225 * 4, 150), (238 * 4, 150), (250 * 4, 1100)]:
        winsound.Beep(freq, dur)
def _send(self, command, retries=5, timeout=100):
    fd = self._fd
    if len(command) != 33:
        raise ValueError("command must be 33 bytes long")
    handler = signal.signal(signal.SIGALRM, _TimeoutError.timeout)
    for attempt in range(retries):
        signal.setitimer(signal.ITIMER_REAL, timeout / 1000.0)
        try:
            if LOG.isEnabledFor(logging.DEBUG):
                LOG.debug("Write: {}", hexlify(command[1:]))
            fd.write(command)
            fd.flush()
            reply = bytearray(fd.read(32))
            if LOG.isEnabledFor(logging.DEBUG):
                LOG.debug("Recv: {}", hexlify(reply))
            signal.setitimer(signal.ITIMER_REAL, 0)
            if reply[0] != command[1]:
                msg = "Expected msg type {} but got {}"
                raise IOError(msg.format(command[1], reply[0]))
            return reply[1:]
        except _TimeoutError:
            print("IO timed out, try #%d." % attempt)
            time.sleep(0.000001)
        finally:
            signal.signal(signal.SIGALRM, handler)
    msg = "Giving up on PlasmaTrim {}"
    raise IOError(msg.format(self))
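# --- Added sketch (not from the original code) ---
# The SIGALRM dance in _send() can be factored into a reusable context
# manager. A minimal sketch, assuming a Unix main thread (setitimer does not
# exist on Windows, and signal handlers only fire in the main thread):
import signal
from contextlib import contextmanager

class _AlarmTimeout(Exception):
    pass

@contextmanager
def alarm_timeout(seconds):
    def _handler(signum, frame):
        raise _AlarmTimeout()
    old = signal.signal(signal.SIGALRM, _handler)
    signal.setitimer(signal.ITIMER_REAL, seconds)
    try:
        yield
    finally:
        signal.setitimer(signal.ITIMER_REAL, 0)   # cancel any pending alarm
        signal.signal(signal.SIGALRM, old)        # restore the previous handler

# usage sketch:
#   with alarm_timeout(0.1):
#       reply = fd.read(32)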
def save_table(self, code, date):
    TR_REQ_TIME_INTERVAL = 4
    time.sleep(TR_REQ_TIME_INTERVAL)
    data_81 = self.wrapper.get_data_opt10081(code, date)
    time.sleep(TR_REQ_TIME_INTERVAL)
    data_86 = self.wrapper.get_data_opt10086(code, date)
    col_86 = ['전일비', '등락률', '금액(백만)', '신용비', '개인', '기관', '외인수량',
              '외국계', '프로그램', '외인비', '체결강도', '외인보유', '외인비중',
              '외인순매수', '기관순매수', '개인순매수', '신용잔고율']
    data = pd.concat([data_81, data_86.loc[:, col_86]], axis=1)
    # con = sqlite3.connect("../data/stock.db")
    try:
        data = data.loc[data.index > int(self.kiwoom.start_date.strftime("%Y%m%d"))]
        # orig_data = pd.read_sql("SELECT * FROM '%s'" % code, con, index_col='일자').sort_index()
        orig_data = pd.read_hdf("../data/hdf/%s.hdf" % code, 'day').sort_index()
        end_date = orig_data.index[-1]
        orig_data = orig_data.loc[orig_data.index < end_date]
        data = data.loc[data.index >= end_date]
        data = pd.concat([orig_data, data], axis=0)
    except (FileNotFoundError, IndexError) as e:
        print(e)
    finally:
        data.index.name = '일자'
        if len(data) != 0:
            # data.to_sql(code, con, if_exists='replace')
            data.to_hdf('../data/hdf/%s.hdf' % code, 'day', mode='w')
def do_GET(self):
    parts = urlparse(self.path)
    query = parts[2]
    params = [param.split("=") for param in parts[4].split("&")]
    if query == "/book/":
        if params and params[0][0] == "id":
            id = int(params[0][1])
            if id == 1:
                title = "SICP"
            elif id == 2:
                title = "jQuery programming"
            else:
                title = "NA"
            self.wfile.write(simplejson.dumps({'id': id, 'title': title}))
        else:
            self.wfile.write("")
    elif query == "/timeout/":
        time.sleep(4)
        self.wfile.write("")
    else:
        self.wfile.write("")
def run(self):
    while 1:
        try:
            self.run_once()
        except:
            print now(), "Error - ", get_err()
        time.sleep(60 * self.interval)
def main_loop(self):
    running = False
    while running != True:
        print 'STAND-BY'
        stick = self.read_stick()
        print stick
        if (self.stick_rest - int(stick[0])) > self.stick_grens:
            running = True
        else:
            time.sleep(self.waiting_period_sensors)
    print 'LOOP'
    location = self.read_arduino()[0:2]
    print self.session_names
    new_table(self.current_session_name)
    self.session_names += [self.current_session_name]
    past_periods_pic = 0
    pic_name = None
    while running:
        # Check whether the stick has been moved in the right direction to stop tracking:
        stick = self.read_stick()
        if (int(stick[0]) - self.stick_rest) > self.stick_grens:
            self.upload_mode()
        # Check whether enough loop iterations have passed since the last photo, and take one if needed:
        if past_periods_pic >= self.number_of_periods_pic:
            pic_name = self.take_pic()
            past_periods_pic = 0
        else:
            pic_name = None
            past_periods_pic += 1
        # Run the tracker function and store the returned data in a temporary variable temp:
        temp = self.tracker(self.current_session_name, location, pic_name)
        self.current_session_name = temp[0]
        location = temp[1]
        time.sleep(self.waiting_period_sensors)
def test():
    colouring_file_test = True
    if colouring_file_test:
        n = 16
        G = ColorGraph()
        G.build_sudoku_graph()
        G.draw(save=True, ind=0)
        G.color_graph(save=True)
        print(G.get_chromatic_number())
        G = ColorGraph()
        G.revert = False
        G.build_rand_graph(nb_nodes=n)
        G.color_graph(save=True)
        print(G.get_chromatic_number())
        G.draw_calendar(save=True)
        # convert the saved plots to an animation
        import time
        time.sleep(1)  # delay for 1 second
        os.system("convert -delay 70 -loop 0 plots/*jpg animated.gif")
        print(G.colours)
def __init__(self, port, baudrate, wait_ready=True, timeout=True):
    # if the port is not a known Arduino port
    if port not in ARDUINO_PORTS:
        raise InvalidPort(port)
    # if the baud rate does not exist
    if baudrate not in Serial.BAUDRATES:
        raise InvalidBaudRate(baudrate)
    # call the parent constructor
    Serial.__init__(self, port, baudrate, timeout=timeout)
    # connection start: time at which the connection began
    self.connection_start = time.time()
    # sleep for a little while if a numeric delay was given
    if not isinstance(wait_ready, bool):
        time.sleep(wait_ready)
    # otherwise, keep waiting as long as there is no answer
    else:
        while not self.read():
            continue
    # time needed to get the first answer
    self.connection_first_answer = time.time() - self.connection_start
    print(self.connection_first_answer)
def __call__(self, user_name, cache={}):
    first_seen_metric = userstats.user.api.core.FirstSeen()
    first_seen = cache.get(str(first_seen_metric), first_seen_metric(user_name, cache))
    start = get_date(self.start, cache['reference_date'], first_seen, self.strict)
    end = get_date(self.end, cache['reference_date'], first_seen, self.strict)
    if not start or not end:
        return None
    contribs_metric = Contribs()
    contribs = cache.get(str(contribs_metric), contribs_metric(user_name, cache))
    if self.rq:
        q = rq.Queue('diffs', connection=redis.Redis(port=self.redis_port))
        results = [q.enqueue(process_contrib_bytes_added,
                             (contrib, start, end, self.ns, user_name, cache))
                   for contrib in contribs]
        while not all(map(lambda res: res.is_finished or res.is_failed, results)):
            time.sleep(.1)
        bytes_added_results = map(attrgetter('result'), results)
    else:
        bytes_added_results = map(process_contrib_bytes_added,
                                  zip(contribs, itertools.repeat(start),
                                      itertools.repeat(end), itertools.repeat(self.ns),
                                      itertools.repeat(user_name), itertools.repeat(cache)))
    # bytes_added = {}
    for b, contrib in zip(bytes_added_results, contribs):
        # bytes_added[contrib['revid']] = b
        contrib['bytes_added'] = b
    return sum(bytes_added_results)
def connect_db(db_name):
    """Connect to the database if open, and start the database if it is not running."""
    try:
        client = couchdb.Server()
    except:
        subprocess.call(['couchdb', '-b'])
        time.sleep(2)
        client = couchdb.Server()
    try:
        db = client[db_name]
    except:
        client.create(db_name)
        db = client[db_name]
        toc = {}
        toc['n_runs'] = 0
        toc['_id'] = 'toc'
        db.save(toc)
    # create a permanent view to all if one doesn't exist
    if '_design/all' not in db:
        view_def = ViewDefinition('all', 'all', '''
            function(doc) {
                if( doc.run_number ) emit(parseInt(doc.run_number), doc);
            }''')
        view_def.sync(db)
    return db
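# --- Added sketch (not from the original code) ---
# connect_db() follows a "connect, start the service on failure, reconnect"
# pattern. A generic sketch of that pattern with the bare excepts narrowed;
# the helper name and retry counts are illustrative:
import subprocess
import time

def connect_with_autostart(connect, start_cmd, retries=3, delay=2.0):
    for attempt in range(retries):
        try:
            return connect()
        except Exception:
            if attempt == 0:
                subprocess.call(start_cmd)  # first failure: try starting the service
            time.sleep(delay)
    return connect()  # final attempt; let the real error propagate

# usage sketch:
#   client = connect_with_autostart(couchdb.Server, ['couchdb', '-b'])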
def flush(self):
    '''release the modified and deleted keys queued for this table'''
    # remove deleted keys
    for key in self.deleted_keys:
        self.values.remove_key_data(key)
        self.dynamo_table.remove_range_obj(key)
        time.sleep(0.5)
        if settings.DEBUG:
            print 'removed'
    self.deleted_keys = []
    # store modified changes
    for key in self.modified_keys:
        # save data by assigning from the cache
        data = self.values.get_key_data(key)
        self.dynamo_table.set_range_obj(key, data)
        time.sleep(0.5)
        if settings.DEBUG:
            print 'updated ' + key
    self.modified_keys = []
    self.dynamo_table.remove_range_obj('drilldown-lock')
    self.is_locked = False
def wait_for_server_status(self, server_id, desired_status, interval_time=None, timeout=None):
    interval_time = int(interval_time or self.servers_api_config.server_status_interval)
    timeout = int(timeout or self.servers_api_config.server_build_timeout)
    end_time = time.time() + timeout
    time.sleep(interval_time)
    while time.time() < end_time:
        resp = self.nova_cli_client.show_server(server_id)
        server = resp.entity
        if server.status.lower() == ServerStates.ERROR.lower():
            raise BuildErrorException(
                'Build failed. Server with uuid "{0}" entered ERROR status.'.format(server.id))
        if server.status == desired_status:
            break
        time.sleep(interval_time)
    else:
        raise TimeoutException(
            "wait_for_server_status ran for {0} seconds and did not "
            "observe server {1} reach the {2} status.".format(
                timeout, server_id, desired_status))
    return resp
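# --- Added sketch (not from the original code) ---
# wait_for_server_status() is an instance of deadline polling: check a
# condition at an interval until a timeout expires. A generic sketch (the
# helper name is illustrative; Python 3's built-in TimeoutError assumed):
import time

def wait_until(predicate, timeout, interval=1.0):
    end_time = time.time() + timeout
    while time.time() < end_time:
        result = predicate()
        if result:
            return result
        time.sleep(interval)
    raise TimeoutError("condition not met within %s seconds" % timeout)

# usage sketch:
#   wait_until(lambda: get_status() == 'ACTIVE', timeout=120, interval=5)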
def test_fail_login(self):
    print "Starting Login and Logout Test"
    wait = WebDriverWait(self.driver, self.waitTime)
    self.driver.get(self.url)
    wait.until(EC.element_to_be_clickable((By.ID, 'login-button'))).click()
    wait.until(EC.element_to_be_clickable((By.ID, 'weebly-username'))).click()
    wait.until(EC.element_to_be_clickable((By.ID, 'weebly-password'))).click()
    Username = self.driver.find_element_by_id('weebly-username')
    Username.send_keys(self.email)
    Password = self.driver.find_element_by_id('weebly-password')
    Password.send_keys("wrong")
    self.driver.find_element_by_xpath("//input[@value='Log in']").click()
    for i in range(60):
        try:
            if "Wrong username or password" == self.driver.find_element_by_css_selector("div.popover-content").text:
                break
        except:
            pass
        time.sleep(1)
    else:
        self.fail("time out")
    try:
        self.assertEqual("Wrong username or password",
                         self.driver.find_element_by_css_selector("div.popover-content").text)
    except AssertionError as e:
        self.verificationErrors.append(str(e))
def main(self, name):
    if name != "__main__":
        return
    import optparse
    parser = optparse.OptionParser()
    options, args = parser.parse_args()
    if not 0 < len(args) < 3:
        parser.error("Must supply exactly one event type and"
                     " optionally one JSON data structure.")
    event_type = args[0]
    if len(args) > 1:
        json_data = args[1]
        try:
            data = json.loads(json_data)
        except Exception:
            parser.error("Could not parse JSON")
    else:
        data = {}
    self.record(event_type, **data)
    # print "Recorded event: %s (%s)" % (event_type, data)
    # This needs to be removed, but for now it is necessary to make sure
    # everything gets flushed
    import time
    time.sleep(1)
def crawl(self):
    _starttime = time.time()
    if self.restrict == None:
        self.restrict = "http://%s.*" % self.init_domain
    print "Deadlink-crawler version 1.1"
    print "Starting crawl from URL %s at %s with restriction %s\n" % (
        self.init_url, strftime("%Y-%m-%d %H:%M:%S", gmtime()),
        "http://%s.*" % self.init_domain)
    while len(self.frontier) > 0:
        time.sleep(self.wait_time)
        next_time, next_url = self.frontier.next()
        while time.time() < next_time:
            time.sleep(0.5)
        try:
            self.visit_url(next_url[0], next_url[1])
        except urllib2.URLError:
            continue
    self.print_deadlinks(self.deadlinks)
    _elapsed = time.time() - _starttime
    print "\nSummary:\n--------"
    print "Crawled %d pages and checked %d links in %s time." % (
        self._pages, self._links, strftime("%H:%M:%S", gmtime(_elapsed)))
    print "Found a total of %d deadlinks in %d different pages" % (self._dead, self._via)
    if len(self.deadlinks) == 0:
        exit(0)
    else:
        exit(2)
def push(fout, docker_version, image, tag):
    """
    Tags an image with the docker version and pushes it.
    Returns the sha and expected size.
    """
    clear_tuf()
    # tag the image with the docker version
    run_cmd("{0} tag -f alpine {1}:{2}".format(DOCKERS[docker_version], image, tag), fout)
    # push!
    output = run_cmd("{0} push {1}:{2}".format(DOCKERS[docker_version], image, tag), fout)
    sha = _DIGEST_REGEX.search(output).group(1)
    size = _SIZE_REGEX.search(output).group(1)
    # sleep for 1s after pushing, just to let things propagate :)
    time.sleep(1)
    # list
    targets = notary_list(fout, image)
    for target in targets:
        if target[0] == tag:
            assert_equality(target, [tag, sha, size, "targets"])
    return sha, size
def stop(self):
    if self._cur_trace:
        self.end_trace()
    if self._proxy_proc:
        self._stop_signaled = True
        os.kill(self._proxy_proc.pid, signal.SIGUSR1)
        time.sleep(0.5)
def bench_all(options):
    import time
    error = 0
    names = all()
    random.shuffle(names)
    for func in names:
        cmd = "%s %s %s --ignore-import-errors" % (sys.executable, __file__, func)
        print(cmd)
        sys.stdout.flush()
        time.sleep(0.01)
        if os.system(cmd):
            error = 1
            print("%s failed" % cmd)
        print("")
    for func in names:
        cmd = "%s %s --with-kwargs %s --ignore-import-errors" % (sys.executable, __file__, func)
        print(cmd)
        sys.stdout.flush()
        if os.system(cmd):
            error = 1
            print("%s failed" % cmd)
        print("")
    if error:
        sys.exit(1)
def location(request, loc, format=None):
    '''
    Will one day be a wrapper for all data models, searching over all
    locations and organizations, maybe even people too
    '''
    from campus.models import Building, Location
    try:
        location = Building.objects.get(pk=loc)
    except Building.DoesNotExist:
        try:
            location = Location.objects.get(pk=loc)
        except Location.DoesNotExist:
            raise Http404()
    html = location_html(location, request)
    location = location.json()
    location['info'] = html
    base_url = request.build_absolute_uri('/')[:-1]
    location['marker'] = base_url + settings.MEDIA_URL + 'images/markers/yellow.png'
    if format == 'bubble':
        # NOTE: template and context are not defined in this excerpt
        return render(request, template, context)
    if format == 'json':
        if settings.DEBUG:
            import time
            time.sleep(.5)
        response = HttpResponse(json.dumps(location))
        response['Content-type'] = 'application/json'
        return response
    return home(request, location=location)
def poll(self, timeout=None):
    rfds, wfds, xfds = self._read_fds.keys(), self._write_fds.keys(), self._exception_fds.keys()
    # FIXME Hack for Windows
    if not rfds and not wfds and not xfds:
        import platform
        if platform.system() == 'Windows':
            import time
            time.sleep(0.1)
            return
    (rfds, wfds, xfds) = select.select(rfds, wfds, xfds, timeout)
    ready_fds = {}
    for fd in rfds:
        ready_fds[fd] = self._read_fds[fd]
    for fd in wfds:
        if fd in ready_fds:
            ready_fds[fd] |= self._write_fds[fd]
        else:
            ready_fds[fd] = self._write_fds[fd]
    for fd in xfds:
        if fd in ready_fds:
            ready_fds[fd] |= self._exception_fds[fd]
        else:
            ready_fds[fd] = self._exception_fds[fd]
    return [fd_evt for fd_evt in ready_fds.iteritems()]
def reliable_put(self, path, key, value):
    pickled_value = pickle.dumps(value)
    if key == "meta" or key == "list_nodes":
        key = path + "&&" + key
        self.meta_hdl.put(Binary(key), Binary(pickled_value), 6000)
    else:
        update_checksum(self.meta_hdl, path, key, pickled_value)
        count = 0
        # contains handlers for failed puts
        failed = []
        key = path + "&&" + key
        check = False
        for url in self.data_urls:
            isConnected = False
            while isConnected == False:
                try:
                    # try to connect to the server
                    dh = xmlrpclib.Server(url)
                    dh.put(Binary(key), Binary(pickled_value), 6000)
                    isConnected = True
                except:
                    # print "appending fault servers list"
                    time.sleep(1)
                    print "Trying to reconnect to the server"
                    continue
def run():
    print "Starting iotdata script"
    # feed = api.feeds.get(FEED_ID)
    # datastream = get_datastream(feed)
    # datastream.max_value = None
    # datastream.min_value = None
    client_carriots = Client(API_KEY)
    while True:
        timestamp = int(mktime(datetime.utcnow().timetuple()))
        load_avg = read_loadavg()
        if DEBUG:
            print "Updating feed with value: %s" % load_avg
            print "Time : %s" % timestamp
        data = {"protocol": "v2", "device": FEED_ID, "at": timestamp, "data": load_avg}
        carriots_response = client_carriots.send(data)
        print carriots_response.read()
        # datastream.current_value = load_avg
        # datastream.at = datetime.datetime.utcnow()
        # try:
        #     datastream.update()
        # except requests.HTTPError as e:
        #     print "HTTPError({0}): {1}".format(e.errno, e.strerror)
        time.sleep(10)
def setup_backend(self):
    from pymongo import ASCENDING, DESCENDING
    from pymongo.connection import Connection, _parse_uri
    from pymongo.errors import AutoReconnect
    _connection = None
    uri = self.options.pop('uri', u'')
    _connection_attempts = 0
    hosts, database, user, password = _parse_uri(uri, Connection.PORT)
    # Handle auto reconnect signals properly
    while _connection_attempts < 5:
        try:
            if _connection is None:
                _connection = Connection(uri)
            database = _connection[database]
            break
        except AutoReconnect:
            _connection_attempts += 1
            time.sleep(0.1)
    self.database = database
    # set up the correct indexes
    database.tickets.ensure_index([('record_hash', ASCENDING)], unique=True)
    database.tickets.ensure_index([('solved', ASCENDING), ('level', ASCENDING)])
    database.occurrences.ensure_index([('time', DESCENDING)])
def echoServer(hostname='localhost', port=1972, timeout=5000):
    ftc = FieldTrip.Client()
    # Wait until the buffer connects correctly and returns a valid header
    hdr = None
    while hdr is None:
        print('Trying to connect to buffer on %s:%i ...' % (hostname, port))
        try:
            ftc.connect(hostname, port)
            print('\nConnected - trying to read header...')
            hdr = ftc.getHeader()
        except IOError:
            pass
        if hdr is None:
            print('Invalid Header... waiting')
            time.sleep(1)
        else:
            print(hdr)
            print(hdr.labels)
    # Now do the echo server
    nEvents = hdr.nEvents
    endExpt = None
    while endExpt is None:
        # Block until there are new events to process
        (curSamp, curEvents) = ftc.wait(-1, nEvents, timeout)
        if curEvents > nEvents:
            # get any new events
            evts = ftc.getEvents([nEvents, curEvents - 1])
            nEvents = curEvents  # update record of which events we've seen
            ftc.putEvents(evts)  # was putEvents(evt): evt is undefined in this excerpt
    ftc.disconnect()  # disconnect from buffer when done
def _read(self, url, params=None):
    from .helpers import retry
    if PYTHON_VERSION == 3:
        if params is not None:
            params = ast.literal_eval(params)
            # print(params)
            params = urllib.parse.urlencode(params)
            params = params.encode('utf-8')
            res = self.opener.open(url, params)
        else:
            res = self.opener.open(url)
    elif PYTHON_VERSION == 2:
        process = 1
        backofftime = 1
        trial = 1
        max_trials = 5
        while (process and trial < max_trials):
            try:
                res = self.opener.open(url, params)
            except urllib2.URLError as e:
                log.error('URL error while trying connect to %s' % url)
                time.sleep(backofftime)
                backofftime = backofftime * 2
                trial = trial + 1
            else:
                process = 0
def search(request, format=None):
    '''one day will search over all data available'''
    from campus.models import Building
    query_string = ''
    bldgs = None
    if ('q' in request.GET) and request.GET['q'].strip():
        query_string = request.GET['q']
        entry_query = get_query(query_string, ['name', ])
        bldgs = Building.objects.filter(entry_query).order_by('name')
    if format == 'list':
        '''used with the search ajax'''
        if settings.DEBUG:
            # otherwise too many/too fast, gives browser a sad
            import time
            time.sleep(.5)
        response = ''
        if len(bldgs) < 1:
            response = '<li><a data-pk="null">No results</a></li>'
            return HttpResponse(response)
        count = 0
        for item in bldgs:
            response += '<li>%s</li>' % (item.link)
            count += 1
            if (count > 9):
                response += '<li class="more"><a href="%s?q=%s" data-pk="more-results">More results …</a></li>' % (
                    reverse('search'), query_string)
                return HttpResponse(response)
        return HttpResponse(response)
    if format == 'json':
        def clean(item):
            return {
                'type': str(item.__class__.__name__),
                'name': item.name,
                'id': item.pk
            }
        search = {
            "query": query_string,
            "results": map(clean, bldgs)
        }
        response = HttpResponse(json.dumps(search))
        response['Content-type'] = 'application/json'
        return response
    from apps.views import phonebook_search
    phonebook = phonebook_search(query_string)
    found_entries = {
        'buildings': bldgs,
        'phonebook': phonebook['results']
    }
    context = {'search': True, 'query': query_string, 'results': found_entries}
    return render(request, 'campus/search.djt', context)
def statusThread():
    """Runs as a thread to keep the status information up to date"""
    global _statusInfo, _runStatus
    try:
        # Is status checking enabled
        enabled = config_getboolean("status", "enabled", True)
        if not enabled:
            log_info("Status checking disabled. Exiting status monitor thread")
            return
        _runStatus = True
        # Setup the thread information
        ct = threading.currentThread()
        ct.setName("CCSD Status Monitor")
        # What interval shall we check hosts at
        interval = config_get("status", "interval", DEFAULT_CHECK_INTERVAL)
        # Initialise the host status information
        hosts = getHostList(ADMIN_SESSION_ID)
        for host in getHostList(ADMIN_SESSION_ID):
            if not host["host_active"]:
                continue
            name = host["host_name"]
            _statusInfo[name] = ccs_host_status(ADMIN_SESSION_ID, host["host_id"], interval)
        # Loop forever reading status as appropriate
        while _runStatus:
            # wait a bit before checking
            time.sleep(2)
            # Does the queue have entries
            if len(ccs_host_status.update_queue) <= 0:
                continue
            # Is the first entry valid
            if len(ccs_host_status.update_queue[0]) != 2:
                log_error("Invalid entry in status update queue! - %s" %
                          ccs_host_status.update_queue[0])
                ccs_host_status.update_queue.pop(0)
                continue
            # Check if it's ready to run
            if ccs_host_status.update_queue[0][0] > time.time():
                continue
            # Ready to run
            check = ccs_host_status.update_queue.pop(0)
            try:
                check[1].update()
            except:
                log_error("Failed to update status of %s" % check[1]._hostname, sys.exc_info())
            # Regardless of what happened, check again sometime soon if it
            # is still in the list of hosts to check
            if check[1]._hostname in _statusInfo.keys():
                check[1].requeue()
    except:
        log_error("Exception in status monitor thread!", sys.exc_info())
    log_info("Exiting status monitor thread")
def close(self):
    try:
        self.send('quit\n'.encode())
        self.s.shutdown(_socket.SHUT_RDWR)
        time.sleep(0.01)
    except:
        pass
    return self.s.close()
def wait_qsub(job_id):
    """wait for qsub to finish"""
    import time
    cmd = "qstat | grep %s > /dev/null" % job_id
    while os.system(cmd) == 0:
        sys.stderr.write("qsub jobs still running. --%s \r" % time.ctime())
        time.sleep(60)
    sys.stderr.write("\n")
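# --- Added sketch (not from the original code) ---
# The shell pipeline in wait_qsub() can be expressed with subprocess instead
# of os.system, which avoids spawning a shell per poll. A sketch assuming
# qstat is on PATH (Python 3.7+ for capture_output):
import subprocess
import sys
import time

def wait_qsub_subprocess(job_id, poll_seconds=60):
    while True:
        out = subprocess.run(["qstat"], capture_output=True, text=True).stdout
        if str(job_id) not in out:
            break
        sys.stderr.write("qsub jobs still running. --%s \r" % time.ctime())
        time.sleep(poll_seconds)
    sys.stderr.write("\n")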
def calibrate_angle(self):
    eprint("Calibrating angle based on beacon.")  # original had a stray "% v" with no placeholder
    self.check_stop()
    ir = ev3.InfraredSensor()
    while True:
        print(ir.value(0), ir.value(1))
        if ir.value(1) == -128:
            print("Beacon lost, waiting")
            time.sleep(1)
        else:
            # Rotating to face the beacon
            # NOTE: v is undefined in this excerpt
            dir = sign(ir.value(0))
            self.left_motor.run_to_rel_pos(speed_sp=self.travel_speed,
                                           position_sp=v * self.scale * self.polarity,
                                           stop_action="hold")
            self.right_motor.run_to_rel_pos(speed_sp=self.travel_speed,
                                            position_sp=v * self.scale * self.polarity,
                                            stop_action="hold")
def devices_scanner(mgr):
    logger.info('Scanning for badges')
    mgr.pull_badges_list()
    while True:
        logger.info("Scanning for devices...")
        scanned_devices = scan_for_devices(mgr.badges.keys())
        with open(scans_file_name, "a") as fout:
            for device in scanned_devices:
                mac = device['mac']
                scan_date = device['device_info']['scan_date']
                rssi = device['device_info']['rssi']
                if device['device_info']['adv_payload']:
                    voltage = device['device_info']['adv_payload']['voltage']
                else:
                    voltage = 0.0
                logger.debug("{},{},{:.2f},{:.2f}".format(scan_date, mac, rssi, voltage))
                fout.write("{},{},{:.2f},{:.2f}\n".format(scan_date, mac, rssi, voltage))
        time.sleep(5)  # give time to Ctrl-C
def _login(self, version):
    log.debug('login() as %s', self.username)
    if (version == 'v4'):
        params = "{'username':'******','password':'******'}"
        process = 1
        backofftime = 1
        trial = 1
        max_trials = 5
        while (process and trial < max_trials):
            try:
                self.opener.open(self.url + 'api/login', params).read()
            except urllib2.URLError as e:
                log.error('URL error while trying connect to %s' % self.url)
                time.sleep(backofftime)
                backofftime = backofftime * 2
                trial = trial + 1
            else:
                process = 0
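# --- Added sketch (not from the original code) ---
# _login() and _read() above share the same doubling-backoff retry loop;
# here it is factored into a decorator. A sketch, all names illustrative:
import functools
import time

def retry_with_backoff(max_trials=5, initial_delay=1.0, exceptions=(IOError,)):
    def decorator(fn):
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            delay = initial_delay
            for trial in range(max_trials):
                try:
                    return fn(*args, **kwargs)
                except exceptions:
                    if trial == max_trials - 1:
                        raise           # out of retries: re-raise
                    time.sleep(delay)
                    delay *= 2          # exponential backoff
        return wrapper
    return decorator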
def run_one_iter():
    try:
        server = run_server('./t-rex-64-debug-gdb-bt -i -c 4 --iom 0')
        print "sleep 1 sec"
        time.sleep(1)
        crash = True
        if True:
            c = STLClient()
            print 'Connecting to server'
            c.connect()
            print 'Connected'
            print 'Mapping'
            print 'Map: %s' % stl_map_ports(c)
            c.disconnect()
            crash = False
    except Exception as e:
        print(e)
    finally:
        if crash:
            print "Crash seen, wait for the info"
            # wait for the process to write the core file
            loop = 0
            while True:
                if server.poll() is not None:  # server ended
                    print 'Server stopped.\nReturn code: %s\nStderr: %s\nStdout: %s' % (
                        server.returncode,
                        server.stdout.read().decode(errors='replace'),
                        server.stderr.read().decode(errors='replace'))
                    break
                time.sleep(1)
                loop = loop + 1
                if loop > 600:
                    print "Timeout on crash!!"
                    break
            return 1
        else:
            print "kill process ", server.pid
            term_all_trexes()
            kill_all_trexes()
            return 0
def echoServer(hostname='localhost', port=1972, timeout=5000):
    ftc = FieldTrip.Client()
    # Wait until the buffer connects correctly and returns a valid header
    hdr = None
    while hdr is None:
        print('Trying to connect to buffer on %s:%i ...' % (hostname, port))
        try:
            ftc.connect(hostname, port)
            print('\nConnected - trying to read header...')
            hdr = ftc.getHeader()
        except IOError:
            pass
        if hdr is None:
            print('Invalid Header... waiting')
            time.sleep(1)
        else:
            print(hdr)
            print(hdr.labels)
    # Now do the echo server
    nEvents = hdr.nEvents
    endExpt = None
    while endExpt is None:
        # Block until there are new events to process
        (curSamp, curEvents) = ftc.wait(-1, nEvents, timeout)
        if curEvents > nEvents:
            evts = ftc.getEvents([nEvents, curEvents - 1])
            nEvents = curEvents  # update record of which events we've seen
            for evt in evts:
                if evt.type == "exit":
                    endExpt = 1
                if not evt.type == "echo":
                    continue
                print(evt)
                # put the echo event
                evt.type = "ack"
                evt.sample = -1  # reset sample so it autofills later
                ftc.putEvents(evt)
        else:
            print("Wait timeout, waiting")
    ftc.disconnect()  # disconnect from buffer when done
def profiler(path, tag, name, container):
    influx_ip = os.environ.get('INFLUX')
    print(influx_ip)
    influxClient = influxdb.InfluxDBClient(influx_ip, 8086, 'root', 'root', 'cadvisor')
    result = []
    start = time.time()  # original mixed bare time() with time.sleep(); normalized to time.time()
    print(container.status)
    while container.status != "running":
        time.sleep(1)
        container.reload()
        print(container.status)
    while container.status == "running":
        time.sleep(1)
        container.reload()
        print(container.status)
    while not result:
        print("waiting for cadvisor to post to influx")
        time.sleep(1)
        response = influxClient.query(
            "SELECT max(value) FROM cpu_usage_user WHERE container_name='%s';" % name)
        meanMEM = influxClient.query(
            "SELECT mean(value) FROM memory_working_set WHERE container_name='%s';" % name)
        result = list(response.get_points())
    dur = time.time() - start
    print("It took %s seconds to finish" % dur)
    print("Wait 60s for cadvisor to update")
    time.sleep(60)
    cpuData = influxClient.query(
        "SELECT max(value) FROM cpu_usage_user WHERE container_name='%s';" % name)
    memData = influxClient.query(
        "SELECT mean(value) FROM memory_working_set WHERE container_name='%s';" % name)
    userCPU = math.ceil(list(cpuData.get_points())[0]["max"] / 1000000)
    meanMEM = math.ceil(list(meanMEM.get_points())[0]["mean"] / 1000000)
    print("cpu_usage_user[ms]: %s" % userCPU)
    print("memory_working_set[MB]: %s" % meanMEM)
    return userCPU, meanMEM
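# --- Added sketch (not from the original code) ---
# For durations like `dur` in profiler(), time.monotonic() is preferable to
# time.time() because it cannot jump backwards when the system clock is
# adjusted (Python 3.3+). A minimal sketch:
import time

start = time.monotonic()
time.sleep(0.5)                     # stand-in for the work being timed
dur = time.monotonic() - start
print("It took %.2f seconds to finish" % dur)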
def takeout_order():
    # Order submission page
    # if d(text='选择地址').exists:
    #     d(text='选择地址').click()
    #     time.sleep(1)
    try:
        d(resourceId="com.yiwosi.kbb:id/tViewPay").click()
        takeout_order_click_time = time.time()
        while not d(text="微信支付").exists:
            e_appear_time = time.time()
            e_cost_time = e_appear_time - takeout_order_click_time
            if e_cost_time > 10:
                logger.error('收银台页等待微信支付超10s')
        e_appear_time = time.time()
        e_cost_time = e_appear_time - takeout_order_click_time
        logger.info('跳转收银台页耗时{}'.format(e_cost_time))
        e = EexeclData(file=target_file, sheet_name='收银台页')
        e.write_cell(i + 1, 1, i)
        e.write_cell(i + 1, 2, e_cost_time)
        now_time = time.strftime("%Y-%m-%d_%H_%M_%S", time.localtime())
        e.write_cell(i + 1, 3, now_time)
        price = d(resourceId="com.yiwosi.kbb:id/text_price").get_text()
        e.write_cell(i + 1, 4, '价格{}'.format(price))
        logger.info('收银台页表格数据更新成功')
        # press back four times to return from the checkout page
        for _ in range(4):
            d.press('back')
            time.sleep(1)
    except BaseException as e:
        logger.error('收银台出错{}'.format(e))
        send_msg(takeout_webhook, '收银台出错')
        now_time = time.strftime("%Y-%m-%d_%H_%M_%S", time.localtime())
        d.screenshot(t_screenshot_path + '{}.jpg'.format(now_time))
        get_adb_all_process(now_time)
        time.sleep(1)
        d.app_stop(app_name)
def scan_for_devices(devices_whitelist):
    bd = BadgeDiscoverer(logger)
    try:
        all_devices = bd.discover(scan_duration=SCAN_DURATION)
    except Exception as e:  # catch *all* exceptions
        logger.error("Scan failed,{}".format(e))
        all_devices = {}
    scanned_devices = []
    for addr, device_info in all_devices.iteritems():
        if addr in devices_whitelist:
            logger.debug("\033[1;7m\033[1;32mFound {}, added. Device info: {}\033[0m".format(addr, device_info))
            scanned_devices.append({'mac': addr, 'device_info': device_info})
        else:
            # logger.debug("Found {}, but not on whitelist. Device info: {}".format(addr, device_info))
            pass
    time.sleep(2)  # sometimes required to prevent the connection from failing
    return scanned_devices
def test_endCurrentRound(self):
    payerAcct = adminAcct
    param_list = []
    param_list.append("endCurrentRound".encode())
    param_list1 = []
    param_list1.append(1000)
    param_list1.append(89)
    param_list2 = []
    param_list3 = []
    param_list3.append(payerAcct.get_address().to_array())
    param_list3.append(100)
    param_list2.append(param_list3)
    param_list1.append(param_list2)
    param_list.append(param_list1)
    # params = BuildParams.create_code_params_script(param_list)
    print("***** endCurrentRound", param_list)
    hash = self.test_invoke(payerAcct, param_list)
    print("hash === endCurrentRound", hash)
    time.sleep(6)
    self.test_handleEvent("endCurrentRound", hash)
    return True
def run(self):
    while True:
        time.sleep(5)
        # get all rp to treat (status todo)
        to_treat = self.memory.get_relative_path('status', 'todo')
        for rp in to_treat:
            rp['status'] = 'treating'
            self.memory.update_relative_path(rp)
            # for all movements in rp
            for movement in rp['movements']:
                try:
                    self.treat_movement(rp, movement)
                except Exception as e:
                    self.movement_error(movement, "unknown : %s" % (str(e)))
                    rp['status'] = 'error'
            if rp['status'] == 'treating':
                rp['status'] = 'done'
            self.memory.update_relative_path(rp)
def update_time_date():
    # Updating time/date/day labels in an infinite while loop
    # running every 0.1 seconds to update the clock/date/day
    while True:
        # Getting today's date
        today_date = date.today()
        # update time
        current_time = strftime('%I:%M:%S')
        lbl_time.configure(text=current_time)
        lbl_AMPM.configure(text=strftime('%p'))
        # update date
        current_date = today_date.strftime("%d %b %Y")
        lbl_date.configure(text=current_date)
        # update day
        current_day = today_date.strftime("%A")
        lbl_day.configure(text=current_day)
        time.sleep(0.1)
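# --- Added sketch (not from the original code) ---
# A while/sleep loop like update_time_date() blocks whichever thread runs it;
# in Tkinter the idiomatic alternative is widget.after(), which reschedules
# the callback on the event loop. A self-contained sketch (the label name
# mirrors the original, everything else is illustrative):
import tkinter as tk
from time import strftime

root = tk.Tk()
lbl_time = tk.Label(root)
lbl_time.pack()

def tick():
    lbl_time.configure(text=strftime('%I:%M:%S %p'))
    root.after(100, tick)  # run again in 100 ms, like the 0.1 s sleep

tick()
root.mainloop()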
def parse_art(url):
    global browser
    while True:
        try:
            browser.get(url)
            Element = WebDriverWait(browser, 100).until(
                EC.presence_of_element_located((By.XPATH, "//body")))
            break
        except:
            pass
    js = "var q=document.documentElement.scrollTop=10000"
    # execute the scroll script
    browser.execute_script(js)
    time.sleep(1.5 * random.uniform(1, 3))
    html = browser.page_source
    while "您的访问出现异常" in html:
        print("出现验证,休眠")  # verification page detected, sleeping
        time.sleep(5 * 60)
        browser.get(url)
        html = browser.page_source
    return html
def produce(self, topic, start_time, end_time):
    st = datetime.strptime(start_time, "%Y/%m/%d/%H/%M")
    ed = datetime.strptime(end_time, "%Y/%m/%d/%H/%M")
    dt = timedelta(minutes=1)
    while st < ed:
        num = 0
        producer_start = time.time()  # original mixed bare time() with time.sleep(); normalized
        with smart_open(self.s3_url + '/' + st.strftime('%Y/%m/%d/%H/%M.json'), 'r') as f:
            self.producer.flush()
            for message in f:
                self.producer.produce(topic, value=message)
                num += 1
                sleeptime = 0.01 * (1 + sin(time.time() * pi / 30.)) + random() * 0.002
                time.sleep(sleeptime)
        st += dt
        print("%f time passed" % (time.time() - producer_start,))
        print("%d messages sent" % (num,))
def test_PLCR_revealPeriodActive(self):
    payerAcct = ownerAcct
    param_list = []
    param_list.append("revealPeriodActive".encode())
    param_list1 = []
    param_list1.append(pollID)
    param_list.append(param_list1)
    print(param_list)
    times = 0
    while (1):
        time.sleep(1)
        res = self.test_invokeRead(payerAcct, param_list)
        times += 1
        print(times)
        if res[0] != "00":
            break
    print(res)
    return True
def load_data_auto():
    taskCompletedDate = None
    # Note: this task time must fall after Futu NiuNiu has finished updating
    # the day's local data
    taskTime = datetime.time(hour=17, minute=0)
    # enter the main loop
    while True:
        t = datetime.datetime.now()
        # once the daily download time has passed, run the data download
        if t.time() > taskTime and (taskCompletedDate is None or t.date() != taskCompletedDate):
            downloadAllMinuteBar()
            # update the date on which the task completed
            taskCompletedDate = t.date()
        else:
            print u'当前时间%s,任务定时%s' % (t, taskTime)
        time.sleep(60)
def main():
    seed = 9999
    runCount = 0
    dequeueCount = 0
    slam = RMHC_SLAM(MinesLaser(), MAP_SIZE_PIXELS, MAP_SIZE_METERS, random_seed=seed) \
        if seed \
        else Deterministic_SLAM(MinesLaser(), MAP_SIZE_PIXELS, MAP_SIZE_METERS)
    trajectory = []
    while dequeueCount < 1000:
        time.sleep(10)
        if q.empty() == False:
            while (q.empty() == False):
                slam.update(q.get())
                # print "%i" % dequeueCount
                dequeueCount = dequeueCount + 1
                x_mm, y_mm, theta_degrees = slam.getpos()
                trajectory.append((x_mm, y_mm))
    # Create a byte array to receive the computed maps
    mapbytes = bytearray(MAP_SIZE_PIXELS * MAP_SIZE_PIXELS)
    # Get final map
    slam.getmap(mapbytes)
    # Put trajectory into map as black pixels
    for coords in trajectory:
        x_mm, y_mm = coords
        x_pix = mm2pix(x_mm)
        y_pix = mm2pix(y_mm)
        mapbytes[y_pix * MAP_SIZE_PIXELS + x_pix] = 0
    # Save map and trajectory as a PNG file
    image = Image.frombuffer('L', (MAP_SIZE_PIXELS, MAP_SIZE_PIXELS), mapbytes, 'raw', 'L', 0, 1)
    # image.save('map%i.png' % runCount)
    # image.save("/home/card/webgui/images/" + "map" + str(dequeueCount) + ".png")
    image.save("/home/card/webgui/images/" + "map" + ".png")
def auto_ACAL_3458A():
    instruments["3458B"] = HP3458A(ip=vxi_ip, gpib_address=23, lock=gpiblock, title="3458B")
    instruments["3458B"].config_10DCV_9digit()
    # instruments["3458B"].config_10OHMF_9digit()
    # instruments["3458B"].config_10kOHMF_9digit()
    instruments["3458B"].config_NPLC100()
    instruments["3458B"].blank_display()
    instruments["3458B"].config_trigger_auto()
    HP3458B_temperature = HP3458A_temp(HP3458A=instruments["3458B"], title="HP3458B Int Temp Sensor")
    last_temp = instruments["temp_short"].get_read_val()
    while True:
        now = datetime.datetime.now()
        if not (now.minute % 10) and not now.second and instruments["3458B"].is_readable():
            MySeriesHelper(instrument_name=HP3458B_temperature.get_title(),
                           value=float(HP3458B_temperature.get_read_val()))
            time.sleep(1)
            temperature = instruments["temp_short"].get_read_val()
            logging.debug("Actual Temp = %s Last ACAL temp = %s" % (temperature, last_temp))
            if abs(last_temp - temperature) > 1:
                instruments["3458B"].acal_DCV()
                time.sleep(80)
                last_temp = instruments["temp_short"].get_read_val()
        for i in instruments.values():
            if i.is_readable():
                MySeriesHelper(instrument_name=i.get_title(), value=float(i.get_read_val()))
        time.sleep(1)
def process_data(save_path, load_path, triplet_artifacts, label_threshold, epsilon, n_tasks, task):
    logging.info("Running inference on triplet graphs")
    # Calculate edge scores from the best doublet model checkpoint
    edge_scores, graph_dataset, graph_names = get_edge_scores(load_path, triplet_artifacts, n_tasks, task)
    triplet_data = np.array([[gi.edge_index.numpy(), graph_name, oi.numpy()]
                             for gi, graph_name, oi in zip(graph_dataset, graph_names, edge_scores)])
    logging.info("Inference complete")
    # SAVE TRIPLET HITLIST
    temp_dir = os.path.join(save_path, "temp")
    if not os.path.exists(temp_dir):
        os.makedirs(temp_dir, exist_ok=True)
    with mp.Pool(processes=None) as pool:
        process_fn = partial(save_triplet_hitlist, threshold=label_threshold, output_dir=temp_dir)
        pool.map(process_fn, triplet_data)
    logging.info("All files saved")
    if task == 0:
        # IS THIS THE CORRECT LENGTH???
        triplet_data_length = len(triplet_data)
        # Wait until all files are saved
        while len(os.listdir(temp_dir)) < triplet_data_length:
            print("Waiting")
            time.sleep(10)
        # RELOAD FILELIST AND SPLIT
        filelist = os.listdir(temp_dir)
        split_names = np.array([[os.path.join(temp_dir, file[:-6]), file[-5:]] for file in filelist])
        event_names = np.unique(split_names[:, 0])
        with mp.Pool(processes=None) as pool:
            process_fn = partial(process_event, split_names=split_names, output_dir=save_path,
                                 label_cut=label_threshold, epsilon=epsilon)
            pool.map(process_fn, event_names)
        if os.path.exists(temp_dir):
            shutil.rmtree(temp_dir, ignore_errors=False)
def run(self):
    _bientang = 0
    print("Begin")
    global daikinconnect, dangtruyenmodbus_HC2, dangtuyenmodbus
    global co_tinhieu_HC2
    _reatime = 0
    while (daikinconnect == 0):
        daikinconnect = self.get_DTA_Ready()
        daikinconnect = 1
        time.sleep(1)
    time.sleep(1)
    self.get_status_all()
    global bientruyen
    print("Ready")
    while (1):
        try:
            _bientang = _bientang + 1
            time.sleep(0.1)
            if co_tinhieu_HC2 == 1:
                time.sleep(1)
                co_tinhieu_HC2 = 0
                # get_input()
                truyen_ve_HC()
                _bientang = 1
            if _bientang % 200 == 0:
                # while dangtuyenmodbus != 0:
                #     time.sleep(0.1)
                tam = 0
                while dangtuyenmodbus != 0:
                    tam = tam + 1
                    if tam > 100:
                        break
                    time.sleep(0.05)
                get_input()
            if _bientang % 600 == 0:
                truyen_ve_HC()
                _bientang = 1
        except:
            logging.warning('Loi Main')  # "Loi" = error (Vietnamese); original had typo 'Lpi Main'
            print("Loi Main")
def put(self, key, value):
    Qw = self.Qw
    data_servers = self.data_servers
    if key[-4:] == "meta" or key[-10:] == "list_nodes":
        print "entering meta put function"
        number = self.meta_server
        key = self.path + "&&" + key
        port = 'http://127.0.0.1:' + str(number)
        rpc = xmlrpclib.Server(port)
        putmetastatus = False
        while (putmetastatus is False):
            try:
                putmetastatus = rpc.put(Binary(key), Binary(pickle.dumps(value)), 6000)
                print "Write is successful at", number
            except:
                time.sleep(5)
                continue
    else:
        key = self.path + "&&" + key
        for number in data_servers:
            port = 'http://127.0.0.1:' + str(number)
            rpc = xmlrpclib.Server(port)
            putdatastatus = False
            putdatacount = 0
            while (putdatastatus is False and putdatacount < 5):
                try:
                    putdatastatus = rpc.put(Binary(key), Binary(pickle.dumps(value)), 6000)
                    # print "Write is successful at", number
                except:
                    putdatacount = putdatacount + 1
                    continue
            if putdatastatus is True:
                print "write is successful at ", number
            else:
                print "server is down at", number
def GetWall(owner_id, offset='0&', extended='0&', count='10&'):
    meth_name = 'wall.get?'
    params = 'owner_id=' + owner_id + 'extended=' + extended + 'offset=' + offset + 'count=' + count
    link = url + meth_name + params + token + version
    temp = None
    _count = 0
    while temp is None:
        ans = requests.get(link).text
        ans = loads(ans)
        temp = ans.get('response')
        if _count > 1:
            time.sleep(1)
        _count += 1
        if _count == 5:
            return None
    walls = []
    items = temp.get('items')
    for i in range(len(items)):
        walls.append(items[i].get('text'))
    return walls
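# --- Added sketch (not from the original code) ---
# GetWall() above and GetInfoGroup() below share the same "poll the API until
# the 'response' key appears, at most five times" loop; a compact sketch of it
# (the helper name is illustrative; requests/loads as in the originals):
import time
import requests
from json import loads

def fetch_response(link, max_tries=5, delay=1.0):
    for attempt in range(max_tries):
        ans = loads(requests.get(link).text)
        resp = ans.get('response')
        if resp is not None:
            return resp
        if attempt > 0:
            time.sleep(delay)
    return None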
def api_set_mode():
    global dangtuyenmodbus, dungmaserial
    dangtruyenmodbus_HC2 = 1
    # co_tinhieu_HC2 = 1
    try:
        tam = 0
        while dangtuyenmodbus != 0:
            dangtuyenmodbus = 2
            tam = tam + 1
            if tam > 100:
                break
            time.sleep(0.1)
        kien = json.loads(request.data)
        TCP_HC21.set_mode_filterreset_statusopera(int(kien["zone"]), int(kien["status"]), 503, 503, 1)
        logging.info('INFO: HC2 SET MODE {}'.format(request.data))
        if dungmaserial == 1:
            return "F**K YOU"
        else:
            return "OK"
    except:
        print "Loi API Mode"  # "Loi" = error (Vietnamese)
        return "Error"
def pi_capture():
    global train_labels, train_img, is_capture_running, key
    # init the train_label array
    print('start capture')
    is_capture_running = True
    with picamera.PiCamera(resolution=(160, 120), framerate=30) as camera:
        # flip the image vertically if your camera is mounted upside down
        # camera.vflip = True
        camera.start_preview()
        # Give the camera some warm-up time
        time.sleep(2)
        output = SplitFrames()
        start = time.time()  # original mixed bare time() with time.sleep(); normalized
        camera.start_recording(output, format='mjpeg')
        camera.wait_recording(120)
        camera.stop_recording()
        finish = time.time()
    print("Captured {} frames in {} seconds".format(output.frame_num, finish - start))
    print('quit capture')
    is_capture_running = False
def distribute_patches(allpatches, mpi_barrier=False, run_kwargs={}):
    try:
        from mpi4py import MPI
        have_mpi = True
        comm = MPI.COMM_WORLD
        rank = comm.Get_rank()
        size = comm.Get_size()
    except:
        have_mpi = False
        rank = 0
        size = 1
        assert not mpi_barrier
    import time
    if rank == 0:
        print("{} ranks handling {} patches".format(size, len(allpatches)))
        print(allpatches)
    for i in range(rank, len(allpatches), size):
        patch = allpatches[i]
        time.sleep(rank * 2)
        out = run_patch(patch, rank=rank, **run_kwargs)
def game_over(self):
    time.sleep(1)
    game_over = True
    pygame.mixer.music.play(-1)
    while game_over:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                pygame.quit()
                quit()
        keyPresses = pygame.key.get_pressed()
        if keyPresses[K_ESCAPE]:
            pygame.quit()
            quit()
        self.screen.fill(BLACK)
        self.message_to_screen("Game", YELLOW, -100, "large")
        self.message_to_screen("Over", YELLOW, 0, "large")
        self.button("Play", 150, 500, 100, 50, "small", GREEN, LIGHT_GREEN, "play")
        self.button("Quit", 550, 500, 100, 50, "small", RED, LIGHT_RED, "quit")
        pygame.display.update()
        self.clock.tick(50)
def GetInfoGroup(id):
    meth_name = 'groups.getById?'
    params = 'group_id=' + id
    link = url + meth_name + params + token + version
    temp = None
    _count = 0
    A = {}
    while temp is None:
        ans = requests.get(link).text
        ans = loads(ans)
        temp = ans.get('response')
        if _count > 1:
            time.sleep(1)
        _count += 1
        if _count == 5:
            return None
    name = temp[0].get('name')
    description = temp[0].get('description')
    A = {'name': name, 'description': description}
    return A
def burst(nshots):
    nshots = int(nshots)
    lcls_linac.set_fburst("Full")
    rate = lcls_linac.get_xraybeamrate()
    if not lcls_linac.isburstenabled():
        print "ERROR: Burst mode not enabled. Call MCC and enable it."
        return
    if rate == 0:
        print "!!! beam rate is 0. Cannot setup burst. !!!"
        return
    print "setting burst to %d shots" % nshots
    lcls_linac.set_nburst(nshots)
    # db = daqconfig.db.get_key("BEAM")
    print "taking shots"
    try:
        daq.connect()
        eventStart = daq.eventnum()
        print "start event: %d" % eventStart
        lcls_linac.get_burst()
        print "waiting for shots..."
        while True:
            if daq.eventnum() >= eventStart + nshots:
                print "Received %d shots!" % nshots
                break
            time.sleep(0.1)
    finally:
        print "stop daq"
        daq.stop()
def api_set_fanvolume():
    global dangtuyenmodbus, dangtruyenmodbus_HC2, dungmaserial
    try:
        dangtruyenmodbus_HC2 = 1
        # co_tinhieu_HC2 = 1
        tam = 0
        while dangtuyenmodbus != 0:
            dangtuyenmodbus = 2
            tam = tam + 1
            if tam > 100:
                break
            time.sleep(0.1)
        kien = json.loads(request.data)
        TCP_HC21.set_status_forcusstatus_fandir_fanvolume(int(kien["zone"]), 503, 503, 503, int(kien["status"]), 1)
        logging.info('INFO: HC2 SET FANVOLUME {}'.format(request.data))
        if dungmaserial == 1:
            return "F**K YOU"
        else:
            return "OK"
    except:
        print "Loi API Volume"  # "Loi" = error (Vietnamese)
        return "Error"
def run(self):
    while True:
        comando = input("Ingrese el comando: ")
        self.socket.send(bytes(comando, 'utf-8'))
        respuesta = str(self.socket.recv(2048), 'utf-8')
        print(respuesta)
        if respuesta == "desconectado":
            log = 1
            print("En 5 segundos se va cerrar la sesión: ...")
            time.sleep(5)
            self.socket.close()
            sys.exit()
        elif respuesta == "usuario ya existe":
            print("usuario ya existe -.-")
            self.socket.close()
            sys.exit()
def wait_element_view(img, wait_time=1, confidence=0.999):
    # NOTE: the original used range(1, wait_time), which never runs with the
    # default wait_time=1; range(wait_time) polls once per second as intended.
    for it in range(wait_time):
        if pyautogui.locateCenterOnScreen(img, confidence=confidence) is None:
            time.sleep(1)
            continue
        else:
            break
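# --- Added sketch (not from the original code) ---
# What wait_element_view() appears to intend, returning the location instead
# of discarding it. The return value is an addition; the pyautogui call
# follows the original:
import time
import pyautogui

def wait_element(img, wait_time=1, confidence=0.999):
    for _ in range(wait_time):
        loc = pyautogui.locateCenterOnScreen(img, confidence=confidence)
        if loc is not None:
            return loc
        time.sleep(1)
    return None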