def run_clock3(stripe):
    """Drive the clock-3 animation until the global stop flag is set.

    Seconds advance in 2.5 s steps around the dial; minute and hour
    pointers are drawn on top.  Clears the strip on exit.
    """
    LOGGER.debug("running...")
    from control import get_stop_flag
    while not get_stop_flag():
        try:
            now = set_brightness_depending_on_daytime(stripe)[0]
            # 24-LED dial: 2 LEDs per hour, one LED per 2.5 minutes/seconds
            led_for_hour = int(int(now.hour) % 12 * 2)
            led_for_minute = int(now.minute // 2.5)
            leds_per_2500ms = int(round(now.second / 2.5))
            _dial(stripe)
            _seconds(leds_per_2500ms, stripe)
            _minute(led_for_minute, led_for_hour, stripe)
            _hour(led_for_hour, stripe)
            stripe.show()
            time.sleep(0.2)
            if leds_per_2500ms == stripe.numPixels():
                # full circle of seconds completed: pause, then redraw clean
                time.sleep(1.3)
                clear(stripe)
        except KeyboardInterrupt:
            # Logger.warn is a deprecated alias of warning()
            LOGGER.warning("KeyboardInterrupt.")
            exit()
        except Exception as e:
            LOGGER.error(f"Any error occurs: {e}")
            exit()
    clear(stripe)
def updateIdentSet(self):
    """Safely update the identifier set of the traductor.

    Processes every pending lazzyUpdate: an empty id rebuilds the whole
    set from the sensor table, an empty newState registers a single id,
    anything else is forwarded as a state-change trame.
    """
    for anUpdate in lazzyUpdate.objects:
        LOGGER.warning("id : {} || state : {}".format(anUpdate.idToUpdate, anUpdate.newState))
        if anUpdate.idToUpdate == "":
            # Empty id: rebuild the identifier set from all known sensors.
            with self.lock:
                self.identSet = set()
                for lsensor in sensor.Sensor.objects:
                    self.identSet.add(lsensor.physic_id)
                    LOGGER.info(lsensor.physic_id)
                LOGGER.info("Traductor's set of captors updated")
        elif anUpdate.newState == "":
            with self.lock:
                # BUG FIX: the original tested a bare generator expression,
                # which is always truthy; any() performs the intended check
                # that the id belongs to a known sensor.
                if any(anUpdate.idToUpdate == things.physic_id
                       for things in sensor.Sensor.objects):
                    self.identSet.add(anUpdate.idToUpdate)
                    LOGGER.info("{} added".format(anUpdate.idToUpdate))
        else:
            # send a trame from a captor with a newState
            LOGGER.error("Sensor to update : {} ||new state : {}".format(anUpdate.idToUpdate, anUpdate.newState))
            self.sendTrame(anUpdate.idToUpdate, anUpdate.newState)
        anUpdate.delete()
        LOGGER.warning(" {} update GROS delete de : {} || {}".format(lazzyUpdate.objects.count(), anUpdate.idToUpdate, anUpdate.newState))
    return LOGGER.debug("nothing to update")
def run_theater(strip):
    """Cycle color wipes and theater-chase animations until stopped."""
    LOGGER.debug("running...")
    from control import get_stop_flag
    while not get_stop_flag():
        try:
            set_brightness_depending_on_daytime(strip)
            color_wipe_full(strip, Color(127, 0, 0))  # Red wipe
            if not get_stop_flag():
                color_wipe_full(strip, Color(0, 127, 0))  # Green wipe
            if not get_stop_flag():
                color_wipe_full(strip, Color(0, 0, 127))  # Blue wipe
            if not get_stop_flag():
                color_wipe_full(strip, Color(127, 127, 127))  # White wipe
            if not get_stop_flag():
                theater_chase(strip, Color(127, 127, 127))  # White theater chase
            if not get_stop_flag():
                theater_chase(strip, Color(0, 0, 127))  # Blue theater chase
            if not get_stop_flag():
                theater_chase(strip, Color(0, 127, 0))  # Green theater chase
            if not get_stop_flag():
                theater_chase(strip, Color(127, 0, 0))  # Red theater chase
        except KeyboardInterrupt:
            # Logger.warn is a deprecated alias of warning()
            LOGGER.warning("KeyboardInterrupt")
            exit()
        except Exception as e:
            LOGGER.error(f"Any error occurs: {e}")
            exit()
    clear(strip)
def run_rainbow(strip):
    """Animate a rotating rainbow over all pixels until stopped."""
    LOGGER.debug("running...")
    from control import get_stop_flag
    while not get_stop_flag():
        try:
            set_brightness_depending_on_daytime(strip)
            for j in range(256 * 5):
                if not get_stop_flag():
                    for i in range(strip.numPixels()):
                        if not get_stop_flag():
                            # distribute the color wheel across the strip,
                            # shifted by j to make the rainbow rotate
                            strip.setPixelColor(
                                i,
                                wheel((int(i * 256 / strip.numPixels()) + j) & 255))
                    if not get_stop_flag():
                        strip.show()
                        time.sleep(.02)
        except KeyboardInterrupt:
            # Logger.warn is a deprecated alias of warning()
            LOGGER.warning("KeyboardInterrupt")
            exit()
        except Exception as e:
            LOGGER.error(f"Any error occurs: {e}")
            exit()
    clear(strip)
def run_clock6(strip):
    """Render hour/minute pointers as faded color arcs until stopped."""
    LOGGER.debug("running...")
    from control import get_stop_flag
    while not get_stop_flag():
        try:
            hour_value, minute_value = _get_pointer(strip)[:2]
            # arc mode
            intensity = 100
            for i in range(strip.numPixels()):
                # calculates a faded arc from low to maximum brightness
                h = _get_color_value(i, hour_value, intensity=intensity)
                m = _get_color_value(i, minute_value, intensity=intensity)
                red, green, blue = 0, m, h
                color = Color(red, green, blue)
                strip.setPixelColor(i % 24, color)
            strip.show()
            time.sleep(0.1)
        except KeyboardInterrupt:
            # Logger.warn is a deprecated alias of warning()
            LOGGER.warning("KeyboardInterrupt.")
            exit()
        except Exception as e:
            LOGGER.error(f"Any error occurs: {e}")
            exit()
    clear(strip)
def play(self, valid_points, board):
    """Prompt the user for comma-separated coordinates until a valid move.

    Returns the chosen action as a tuple of ints contained in *valid_points*.
    """
    while True:
        action_str = input("action: >>>\n")
        action = tuple(int(e) for e in action_str.split(","))
        if action in valid_points:
            return action
        # Logger.warn is a deprecated alias of warning()
        LOGGER.warning("invalid action:{}".format(action))
def get_custom_field(self, field_key):
    """Return the value stored for *field_key*, or '' when absent.

    Missing keys are logged (at warning level) rather than raised.
    """
    result = ''
    try:
        result = self.custom_fields[field_key]
    except KeyError:
        # Logger.warn is a deprecated alias of warning()
        LOGGER.warning('Issue %s does not have value for field %s' % (self.key, field_key))
    return result
def gimmeTrame(self, daNewState):
    """Return the update trame to be sent for a toggle request.

    Only "toggle" is supported: current_state flips between 'on' and
    'off' (an unknown current state falls back to 'off').  Any other
    request returns '' after a warning.
    """
    if str(daNewState) == "toggle":
        if self.current_state == "off":
            # on met à on (switch on)
            rawTrame = "A55A6B0570000000" + "FF9F1E0" + "7" + "30"
            self.current_state = "on"
        elif self.current_state == "on":
            # on met à off (switch off)
            rawTrame = "A55A6B0550000000" + "FF9F1E0" + "7" + "30"
            self.current_state = "off"
        else:
            # unknown current state: force off
            rawTrame = "A55A6B0550000000" + "FF9F1E0" + "7" + "30"
            self.current_state = "off"
    else:
        # Logger.warn is a deprecated alias of warning()
        LOGGER.warning("Strange new state : {}. Trram not send".format(daNewState))
        return ""
    LOGGER.info("State after : {}".format(self.current_state))
    myTrame = Trame.trame(rawTrame)
    myTrame.calculateChecksum()
    LOGGER.info("Actuator trame generated, to be send : {}".format(myTrame.lessRawView()))
    self.save()
    return myTrame.rawView()
def format_date(date_string):
    """Parse a 'DD/Mon/YY' string into a datetime.date.

    Returns the input unchanged (and logs a warning) when it cannot be
    parsed, so callers can pass the value through safely.
    """
    result = date_string
    try:
        time_struct = strptime(date_string, "%d/%b/%y")
        result = date.fromtimestamp(mktime(time_struct))
    except (TypeError, ValueError):
        # Logger.warn is a deprecated alias of warning()
        LOGGER.warning('[%s] is not valid date' % date_string)
    return result
def run_clock5(strip):
    """Clock with a swinging 'pendulum' of seconds plus hour/minute pointers."""
    LOGGER.debug("running...")
    from control import get_stop_flag
    p_left = 0
    p_right = len(pendulum) - 1
    while not get_stop_flag():
        try:
            clear(strip)
            now = set_brightness_depending_on_daytime(strip)[0]
            hour = int(int(now.hour) % 12 * 2)
            minute = int(now.minute // 2.5)
            # pendulum of second
            for i in range(len(pendulum)):
                strip.setPixelColor(pendulum[i], COLOR_SECOND_DIMMED)
            if p_left >= len(pendulum) - 1:
                if p_right <= 0:
                    # both ends reached: restart the swing
                    p_right = len(pendulum) - 1
                    p_left = 0
                else:
                    strip.setPixelColor(pendulum[p_right], COLOR_SECOND)
                    p_right -= 1
            else:
                strip.setPixelColor(pendulum[p_left], COLOR_SECOND)
                p_left += 1
            # pointer
            # hour
            if 12 < minute <= 23:
                strip.setPixelColor(hour, COLOR_HOUR)
                strip.setPixelColor(hour + 1, COLOR_HOUR_DIMMED)
            else:
                strip.setPixelColor(hour, COLOR_HOUR)
            # minute (shift by one LED when it collides with the hour)
            if minute == hour:
                if 12 < minute < strip.numPixels():
                    if hour <= 23:
                        strip.setPixelColor(hour + 1, COLOR_HOUR)
                        strip.setPixelColor(minute, COLOR_MINUTE)
                    else:
                        strip.setPixelColor(0, COLOR_HOUR)
                        strip.setPixelColor(minute - 1, COLOR_MINUTE)
                else:
                    strip.setPixelColor(minute + 1, COLOR_MINUTE)
            else:
                strip.setPixelColor(minute, COLOR_MINUTE)
            strip.show()
            time.sleep(wait_ms)
        except KeyboardInterrupt:
            print()
            # Logger.warn is a deprecated alias of warning()
            LOGGER.warning("KeyboardInterrupt.")
            exit()
        except Exception as e:
            LOGGER.error(f"Any error occurs: {e}")
            exit()
    clear(strip)
def retro_browse_points(request):
    """Filter and render retro-board stickers by team, sprint, type and count."""
    selected_team_name = 'allteams'
    selected_sprint_number = 'allsprints'
    selected_sticker_type = 'alltypes'
    count = 15
    teams = Team.objects.all().order_by('name')
    sprints = Sprint.objects.all().order_by('-number')
    types = BoardSticker.TYPE_CHOICES
    stickers = BoardSticker.objects.all().order_by('-creation_date')
    try:
        selected_team_name = request.GET["team"]
        selected_sprint_number = request.GET["sprint"]
        selected_sticker_type = request.GET["type"]
        # BUG FIX: GET values are strings; slicing a QuerySet with a str
        # raises TypeError.  Convert the count parameter immediately.
        count = int(request.GET["count"])
    except KeyError:
        # Logger.warn is a deprecated alias of warning()
        LOGGER.warning('Request with incorrect parameters. Using defaults.')
    except ValueError:
        LOGGER.warning('Request with incorrect parameters. Using defaults.')
        count = 15
    if selected_team_name != 'allteams':
        selected_team = Team.objects.get(name=selected_team_name)
        board = RetroBoard.objects.filter(team=selected_team)
        stickers = stickers.filter(retroBoard__in=board)
    if selected_sprint_number != 'allsprints':
        selected_sprint = Sprint.objects.get(number=selected_sprint_number)
        boards = RetroBoard.objects.filter(sprint=selected_sprint)
        stickers = stickers.filter(retroBoard__in=boards)
    if selected_sticker_type != 'alltypes':
        stickers = stickers.filter(type=selected_sticker_type)
    stickers = stickers[:count]
    # map the type code to its human-readable label for the template
    types_dict = {}
    for item in BoardSticker.TYPE_CHOICES:
        types_dict[item[0]] = item[1]
    for point in stickers:
        point.type_str = types_dict[point.type]
    if selected_sprint_number != 'allsprints':
        selected_sprint_number = int(selected_sprint_number)
    return render_to_response(
        'retro/dpq_retro_action_points.html',
        RequestContext(request, {'stickers': stickers,
                                 'teams': teams,
                                 'sprints': sprints,
                                 'types': types,
                                 'selected_team': selected_team_name,
                                 'selected_sprint': selected_sprint_number,
                                 'selected_type': selected_sticker_type,
                                 'count': int(count),
                                 'count_options': [15, 30, 45, 60],
                                 'active_branches': get_active_branches()}))
def play(self, valid_points, board):
    """Pick a move from the search tree, or fall back to a fast strategy.

    When the current node is missing or untrained (leaf), delegates to
    self.fast_move_func; otherwise selects greedily (exploration c=0).
    """
    if not self.cur_node or self.cur_node.is_leaf():
        # Logger.warn is a deprecated alias of warning()
        LOGGER.warning("current status not trained, use random strategy")
        status = (board, self.piece)
        return self.fast_move_func(status=status, action_list=valid_points)
    action, _, _ = self.cur_node.select(mark_piece=self.mcst.mark_piece, c=0)
    return action
def create_datetime(timestamp):
    """Helper function create datetime for mongo load.

    Parses Twitter-style timestamps ("%a %b %d %H:%M:%S %z %Y") into an
    aware datetime; returns None (and logs a warning) on failure.
    """
    try:
        timestamp = datetime.datetime.strptime(timestamp, "%a %b %d %H:%M:%S %z %Y")
    except Exception as e:
        # Logger.warn is a deprecated alias of warning()
        l.warning("Could not convert created_at {}\n{}".format(timestamp, e))
        return None
    return timestamp
def fetch_cookies(cls):
    """Rotate through stored users in redis and return the first cookies found.

    Busy-loops until a user with stored account cookies exists; the user
    is popped and immediately re-added so the rotation is fair.
    """
    LOGGER.info('get cookies from reids')
    r = redis.Redis(connection_pool=cls.redis_pool)
    while True:
        # NOTE(review): spop returns None on an empty set, and sadd(None)
        # would raise -- confirm 'users' is always populated before this runs.
        user = r.spop('users')
        r.sadd('users', user)
        c = r.hget('account', user)
        if c:
            user_cookies = c.decode('utf-8')
            cookies_json = json.loads(user_cookies)
            LOGGER.info('cookies got-------')
            return cookies_json
        # Logger.warn is a deprecated alias of warning()
        LOGGER.warning('cookies not get')
def main():
    """Entry point: parse arguments and run the selected tool on a compile DB.

    Returns the tool's exit status (127 for an unknown tool, 0 otherwise
    on early exits).
    """
    args = argument_parser(TOOLS).parse_args()
    log.setLevel(LOG_LEVELS[args.loglevel])
    cdb = None
    ret = 0
    processcdb_config = ConfigParser()
    if args.dumpconfigs:
        # write default configuration for every known tool, then exit
        for tool_name in TOOLS:
            tool = TOOLS[tool_name](tool_name)
            processcdb_config[tool_name] = tool.default_config()
        config_file = args.config.absolute()
        config_file.parent.mkdir(parents=True, exist_ok=True)
        if config_file.exists():
            # logger.warn is a deprecated alias of warning()
            log.warning(
                f"Configuration file {config_file} already exists, overwriting"
            )
        with config_file.open("w") as output:
            processcdb_config.write(output)
        log.info(f"Configuration file written to {config_file}")
        sys.exit(0)
    processcdb_config.read(args.config)
    try:
        tool = TOOLS[args.tool](args.tool, processcdb_config)
    except KeyError:
        log.error(f"Unknown tool '{args.tool}' - cant initilize")
        return 127
    if args.cdb.is_file():
        cdb = json.loads(args.cdb.read_text())
        if cdb:
            #if args.commit_a is not None:
                #cdb = filterByChangelist(cdb, (args.commit_a, args.commit_b))
            if not args.allow_dupes:
                cdb = remove_dupes(cdb)
            try:
                ret = tool.execute(cdb, args)
                log.info(f"Return value from tool process: {ret}")
            except EnvironmentError as e:
                log.error(f"Cant process: {e}")
            except Exception as e:
                log.error(f"{e}")
                traceback.print_exc()
        else:
            log.error(f"File '{args.cdb}' is empty")
    else:
        log.error(f"File '{args.cdb}' does not exist")
    return ret  # TODO: Add proper return codes
def run_candles(strip):
    """Run the flickering-candle animation until the stop flag is set."""
    LOGGER.debug("running...")
    from control import get_stop_flag
    while not get_stop_flag():
        try:
            candle(strip, strip.numPixels())
        except KeyboardInterrupt:
            # Logger.warn is a deprecated alias of warning()
            LOGGER.warning("KeyboardInterrupt")
            exit()
        except Exception as e:
            LOGGER.error(f"Any error occurs: {e}")
            exit()
    clear(strip)
def translateTrame(self, inTrame):
    """Return 'close' if data0=='09', 'open' if data0=='08', else ''."""
    if inTrame.data0 == '09':
        LOGGER.info("Door sensor {} with state [close]".format(inTrame.ident))
        dataToRet = "close"
    elif inTrame.data0 == '08':
        LOGGER.info("Door sensor {} with state [open]".format(inTrame.ident))
        dataToRet = "open"
    else:
        # BUG FIX: the warning logged data2 although the decision above is
        # made on data0; log the field that was actually inspected.
        # (Logger.warn is also a deprecated alias of warning().)
        LOGGER.warning("Door sensor {}Strange state : {}".format(inTrame.ident, inTrame.data0))
        dataToRet = ''
    return dataToRet
def gimmeTrame(self, daNewState):
    """Return the update trame to be sent for the requested door state.

    Returns '' (after a warning) when *daNewState* is neither 'close'
    nor 'open'.
    """
    if daNewState == "close":
        data = "00000009"
    elif daNewState == "open":
        data = "00000008"
    else:
        # Logger.warn is a deprecated alias of warning()
        LOGGER.warning("Strange state : {}. Trame not sent".format(daNewState))
        return ""
    # BUG FIX: 'elf.trameStart' raised NameError; the attribute lives on self.
    strTrame = self.trameStart + data + self.physic_id + self.trameEnd
    myTrame = Trame.trame(strTrame)
    myTrame.calculateChecksum()
    LOGGER.debug("Trame returned : {}".format(myTrame.rawView()))
    return myTrame.rawView()
def run_clock1(strip):
    """Basic clock: hour/minute pointers plus a single moving second LED."""
    LOGGER.debug("running...")
    from control import get_stop_flag
    while not get_stop_flag():
        # noinspection PyBroadException
        try:
            now = set_brightness_depending_on_daytime(strip)[0]
            hour = int(int(now.hour) % 12 * 2)
            minute = int(now.minute // 2.5)
            second = int(now.second // 2.5)
            for i in range(0, strip.numPixels(), 1):
                # hour
                strip.setPixelColor(hour, COLOR_HOUR)
                # minute (shift by one LED when it collides with the hour)
                if minute == hour:
                    if 12 < minute < strip.numPixels():
                        if hour <= 23:
                            strip.setPixelColor(hour + 1, COLOR_HOUR)
                            strip.setPixelColor(minute, COLOR_MINUTE)
                        else:
                            strip.setPixelColor(0, COLOR_HOUR)
                            strip.setPixelColor(minute - 1, COLOR_MINUTE)
                    else:
                        strip.setPixelColor(minute + 1, COLOR_MINUTE)
                else:
                    strip.setPixelColor(minute, COLOR_MINUTE)
                # second
                if i == second:
                    strip.setPixelColor(i, COLOR_SECOND)
                else:
                    strip.setPixelColor(i, Color(0, 0, 0))
            strip.show()
            time.sleep(0.1)
        except KeyboardInterrupt:
            print()
            # Logger.warn is a deprecated alias of warning()
            LOGGER.warning("KeyboardInterrupt.")
            exit()
        except Exception as e:
            LOGGER.error(f"Any error occurs: {e}")
            exit()
    clear(strip)
def checkTrame(self):
    """Validate the pending trame and route its payload to the matching sensor."""
    if self.trameUsed:
        LOGGER.debug("Trame received : {}".format(self.trameUsed.lessRawView()))
        if "A55A" not in self.trameUsed.sep:
            # NOTE(review): logs "rejected" but processing continues anyway --
            # confirm whether an early return was intended here.
            LOGGER.warning("Wrong separator, rejected")
        if self.doChecksum(self.trameUsed) not in self.trameUsed.checkSum:
            # Mauvais checkSum (bad checksum) -- same caveat as above.
            LOGGER.warning("Wrong checksum, expected : {}, rejected".format(self.doChecksum(self.trameUsed)))
        with self.lock:
            if self.trameUsed.ident in self.identSet:
                # Fetch the sensor from the database
                sensorUsed = sensor.Sensor.objects(physic_id=self.trameUsed.ident)[0]
                newData = ''  # new data to store; its type depends on the sensor
                # The three handled sensor kinds share the exact same call;
                # collapsed from one identical branch per class name.
                if sensorUsed.__class__.__name__ in ("Switch", "Temperature", "Position"):
                    newData = sensorUsed.translateTrame(self.trameUsed)
                else:
                    # Logger.warn is a deprecated alias of warning()
                    LOGGER.warning("Other Captor (not handle (YET !) )")
                # Persist the update in the database
                if newData:
                    sensorUsed.update(newData)
                    LOGGER.info(" Sensor {} ||New data {}".format(sensorUsed.physic_id, sensorUsed.current_state))
        self.trameUsed = ''
def save_tweets(self):
    """Bulk-insert the buffered tweet list into the configured backend.

    MongoDB duplicates are logged and skipped; any other save failure is
    logged so the stream keeps running.
    """
    bulk_insert = []
    if self.backend == "mongodb":
        for tweet in self.tweet_list:
            tweet = mongo_preprocessor(tweet)
            bulk_insert.append(tweet)
        try:
            self.table.insert_many(bulk_insert)
            l.info("Batch complete. Saved {} tweets to db".format(
                self.counter))
        except DuplicateKeyError as e:
            l.info("{}".format(e))
        except Exception as e:
            # logger.warn is a deprecated alias of warning()
            l.warning("Unable to save to DB\n{}".format(e))
    elif self.backend == "sqlite":
        for tweet in self.tweet_list:
            try:
                tweet = sqlite_preprocessor(tweet)
            except Exception as e:
                l.warning("unable to map {}".format(tweet))
                continue
            bulk_insert.append(tweet)
        try:
            self.table.insert_many(bulk_insert)
            l.info("Batch complete. Saved {} tweets to db".format(
                len(bulk_insert)))
        except Exception as e:
            # Better to miss a few tweets and keep script running
            l.warning("Unable to save to DB {}".format(e))
def run_clock2(strip):
    """Clock that redraws pointers until the minute changes, then wipes."""
    LOGGER.debug("running...")
    from control import get_stop_flag
    while not get_stop_flag():
        try:
            hour, minute, next_minute = _get_pointer(strip)
            while not minute == next_minute:
                # hour
                if 12 < minute <= 23:
                    strip.setPixelColor(hour, COLOR_HOUR)
                    strip.setPixelColor(hour + 1, COLOR_HOUR_DIMMED)
                else:
                    strip.setPixelColor(hour, COLOR_HOUR)
                # minute (shift by one LED when it collides with the hour)
                if minute == hour:
                    if 12 < minute < strip.numPixels():
                        if hour <= 23:
                            strip.setPixelColor(hour + 1, COLOR_HOUR)
                            strip.setPixelColor(minute, COLOR_MINUTE)
                        else:
                            strip.setPixelColor(0, COLOR_HOUR)
                            strip.setPixelColor(minute - 1, COLOR_MINUTE)
                    else:
                        strip.setPixelColor(minute + 1, COLOR_MINUTE)
                else:
                    strip.setPixelColor(minute, COLOR_MINUTE)
                strip.show()
                time.sleep(0.2)
                minute = _get_pointer(strip)[1]
            _wipe_second(strip, COLOR_MINUTE_DIMMED, minute - 1, backward=True)
            clear(strip)
        except KeyboardInterrupt:
            print()
            # Logger.warn is a deprecated alias of warning()
            LOGGER.warning("KeyboardInterrupt.")
            exit()
        except Exception as e:
            LOGGER.error(f"Any error occurs: {e}")
            exit()
    clear(strip)
async def dashboard(hostname, sar_params, time_range, nested_elem):
    """POST a dashboard-creation request to Grafana; return its slug or None."""
    config.read(CFG_PATH)
    api_endpoint = config.get('Grafana', 'api_url')
    payload = {
        "ts_beg": time_range['grafana_range_begin'],
        "ts_end": time_range['grafana_range_end'],
        "nodename": hostname,
        "modes": sar_params,
        "nested_elem": nested_elem
    }
    LOGGER.debug(api_endpoint)
    LOGGER.debug(payload)
    try:
        res = requests.post(api_endpoint, json=payload)
        if res.status_code == 200:
            LOGGER.debug("status code: %s" % res.status_code)
            LOGGER.debug("content: \n%s" % res.content)
            LOGGER.debug("Dashboard created for -- %s" % hostname)
        else:
            # Logger.warn is a deprecated alias of warning()
            LOGGER.warning("status code: %s" % res.status_code)
            LOGGER.warning("content: \n%s" % res.content)
        # NOTE(review): a non-200 body without a 'slug' key falls through to
        # the generic handler below -- confirm that is the intended path.
        slug = json.loads(res.text)['slug']
        LOGGER.debug(json.loads(res.text))
        LOGGER.debug(slug)
    except ConnectionError:
        LOGGER.error("endpoint not active. Couldn't connect.")
        slug = None
    except Exception as e:
        LOGGER.error(str(e))
        LOGGER.error("unknown error. Couldn't trigger request.")
        slug = None
    return slug
def get_temperature(self):
    """Read the 1-wire sysfs file and return the temperature in Celsius.

    Retries until a CRC-valid reading (first line ending in 'YES')
    appears.  Raises ValueError when a valid reading has no 't=' field.
    """
    device_file = DEVICE_PATH.format(device_id=self.device_id)
    lines = None
    empty_reads = 0
    # wait until the kernel reports a CRC-checked reading
    while not lines or len(lines) == 0 or lines[0].strip()[-3:] != 'YES':
        try:
            with open(device_file, 'r') as f:
                lines = f.readlines()
        except FileNotFoundError:
            # Logger.warn is a deprecated alias of warning()
            LOGGER.warning(f'file {device_file} not found')
            time.sleep(3)
        if not lines:
            empty_reads += 1
            if empty_reads % 100 == 0:
                LOGGER.warning(f'file {device_file} is empty too long')
        time.sleep(0.2)
    # NOTE(review): assumes the file has at least two lines once the CRC
    # line reads 'YES' -- true for DS18B20-style sensors, confirm for others.
    temp_pos = lines[1].find('t=')
    if temp_pos != -1:
        temp_string = lines[1][temp_pos + 2:]
        temp_c = float(temp_string) / 1000.0
        return round(temp_c, 2)
    else:
        raise ValueError(f"Can't read temperature for {self.device_id}")
def fans(user_id):
    """Crawl the fan list of *user_id*, store relationships and queue follow-ups.

    Skips users already recorded in CrawlInfo; paginates through the fan
    pages and spawns an info task for every newly discovered fan.
    """
    if db_session.query(exists().where(CrawlInfo.user_id == user_id)).scalar():
        return
    sleep(3)
    pages = 10
    curr_page = 1
    result = []
    get_pages = False
    while curr_page <= pages:
        curr_page += 1
        page_url = 'http://weibo.com/p/100505%s/follow?pids=Pl_Official_HisRelation__60&relate=fans&page=%d' \
                   '#Pl_Official_HisRelation__60' % (user_id, curr_page)
        while True:
            # pick cookies from a different account than the crawled user
            cookies_json = RedisCookies.fetch_cookies()
            cookies = cookies_json['cookies']
            if cookies_json['unique_id'] != user_id:
                break
        LOGGER.info('login user: %s' % cookies_json['user_name'])
        LOGGER.info('fans task: %s' % page_url)
        resp_text = requests.get(page_url, cookies=cookies, headers=get_header2()).text
        view_json = find_fm_view_json(html=resp_text)
        for j in view_json:
            if 'Pl_Official_HisRelation__60' == j['domid']:
                fans_html_str = j['html']
                break
        else:
            # Logger.warn is a deprecated alias of warning()
            LOGGER.warning('fans tasks: not found ')
            continue
        fans_html = BeautifulSoup(fans_html_str, 'html.parser')
        follow_list = fans_html.find('ul', 'follow_list')
        # raw string: \d in a normal string is an invalid escape sequence
        user_id_pattern = re.compile(r'id=(\d*?)&refer_flag=(\d*?)_')
        if follow_list:
            txt_as = follow_list.find_all('a', 'S_txt1')
            for a in txt_as:
                r = {
                    'href': a.get('href'),
                    'name': a.text,
                    'usesrcard': a.get('usercard')
                }
                result.append(r)
                find_result = user_id_pattern.findall(a.get('usercard'))
                # LOGGER.info("fans info: %s" % str(r))
                if find_result:
                    fans_user_id = find_result[0][0]
                    if not db_session.query(exists().where(
                            Relationship.user_id == user_id and Relationship.fan_id == fans_user_id)).scalar():
                        relationship = Relationship()
                        relationship.user_id = user_id
                        relationship.fan_id = fans_user_id
                        db_session.add(relationship)
                        db_session.commit()
                    app.send_task('tasks.user.info', args=(fans_user_id, ))
            if not get_pages:
                # read the total page count once from the pagination links
                pages_as = fans_html.find_all('a', 'page')
                if pages_as:
                    get_pages = True
                    pages = int(pages_as[-2].text)
        else:
            break
    crawl_info = CrawlInfo()
    crawl_info.user_id = user_id
    crawl_info.last_crawl_date = datetime.datetime.now().strftime(
        '%Y-%m-%d %H:%M:%S')
    db_session.add(crawl_info)
    db_session.commit()
async def prepare(core, cache, userID, target, sa_filename, q):
    """Check/convert a sa file for the local sysstat version, then extract it.

    Stores the usable file path and sadf type in the cache, converts the
    file when versions differ, and puts the extraction result into *q*.
    """
    file_metadata = "file_metadata:%s:%s" % (userID, sa_filename)
    LOGGER.debug("file_metadata: %s" % file_metadata)
    SA_FILEPATH = os.path.join(target, sa_filename)
    res = oscode.determine_version(file_path=SA_FILEPATH)
    LOGGER.debug("SA_FILEPATH: %s" % SA_FILEPATH)
    # get local sysstat version
    CMD_GET_VERSION = ['scripts/bin/sadf', '-V']
    stdout_data, stderr_data = await core.run_async_shell_command(
        CMD_GET_VERSION)
    if stderr_data:
        LOGGER.error(stderr_data)
    if stdout_data:
        output = stdout_data.decode().replace('\n', ' ').split()
        localSysstatVersion = output[2]
    # BUG FIX: 'is' compared string identity, not value; versions read from
    # two different sources are rarely the same object, so the match branch
    # was effectively unreachable.
    if res[0] and res[1] == localSysstatVersion:
        sadf_type_res = res[1]
        cache.hset(file_metadata, "sa_file_path", SA_FILEPATH)
    else:
        LOGGER.warning(
            "sysstat version is unmatched between local sysstat version and safile"
        )
        SA_FILEPATH_CONV = "%s_conv" % SA_FILEPATH
        CMD_CONVERT = ['scripts/bin/sadf', '-c', SA_FILEPATH]
        stderr_data = await core.run_async_shell_command(CMD_CONVERT,
                                                         output=open(
                                                             SA_FILEPATH_CONV,
                                                             'w'))
        # BUG FIX: 'err' was unbound when the conversion produced no stderr
        err = ''
        if stderr_data is not None:
            err = stderr_data.decode()
            if "successfully" not in err and "up-to-date" not in err:
                LOGGER.error(err)
                LOGGER.error("SAR data extraction *failed*!")
                q[sa_filename] = (None, "Invalid", None)
                return
        if res[0]:
            sadf_type_res = res[1]
        else:
            sadf_type_res = "f23"
            # BUG FIX: removed reference to undefined 'p2' which raised a
            # guaranteed NameError on this path.
            LOGGER.info('sysstat version was incompatible but dealt with')
        if "up-to-date" in err:
            cache.hset(file_metadata, "sa_file_path", SA_FILEPATH)
        else:
            cache.hset(file_metadata, "sa_file_path_conv", SA_FILEPATH_CONV)
    cache.hset(file_metadata, "sadf_type_det", sadf_type_res)
    #FIXME: handle exceptons
    q[sa_filename] = await extract_sa.extract(core, cache, userID, target,
                                              sa_filename)
    LOGGER.debug(q)
    return
def _warn(self, msg, *args):
    """Log *msg* %-formatted with *args*; dump the stack at high debug levels."""
    # Logger.warn is a deprecated alias of warning()
    LOGGER.warning(msg % args)
    if _DEBUG > 9:
        LOGGER.warning(repr(self.stack))
def _bulk_upload(self):
    """Send the queued actions to Elasticsearch via helpers.bulk.

    Retries (with exponential back-off, up to 20 attempts) on read
    timeouts; tallies successes, duplicates, errors and exceptions on
    self.  The action queue is always emptied on exit.
    """
    if _DEBUG > 1:
        self._dump_actions()
    if len(self.actions) == 0:
        LOGGER.error('0 actions found..')
    # beg/end bracket the current attempt; start marks the whole call
    beg, end = time.time(), None
    start = beg
    if _DEBUG > 0:
        LOGGER.debug("\tbulk index (beg ts: %s) ..." % tstos(beg))
    delay = _read_timeout
    tries = 20
    try:
        while True:
            try:
                res = helpers.bulk(self.es, self.actions)
            except es_excs.ConnectionError as err:
                end = time.time()
                if isinstance(err.info, ul_excs.ReadTimeoutError):
                    # read timeout: back off exponentially and retry
                    tries -= 1
                    if tries > 0:
                        LOGGER.warn(
                            "\t\tWARNING (end ts: %s, duration: %.2fs):"
                            " read timeout, delaying %d seconds before"
                            " retrying (%d attempts remaining)..." %
                            (tstos(end), end - beg, delay, tries))
                        time.sleep(delay)
                        delay *= 2
                        beg, end = time.time(), None
                        LOGGER.warn(
                            "\t\tWARNING (beg ts: %s): retrying..." %
                            (tstos(beg)))
                        continue
                # NOTE(review): no break here -- a non-timeout connection
                # error loops back to retry immediately; confirm intended.
                LOGGER.error("\tERROR (end ts: %s, duration: %.2fs): %s" %
                             (tstos(end), end - start, err))
                self.exceptions += 1
            except Exception as err:
                end = time.time()
                # print("\tERROR (end ts: %s, duration: %.2fs): %s" %
                #       tstos(end), end - start, err))
                self.exceptions += 1
                LOGGER.error("\tERROR (end ts: %s, duration: %.2fs): %s" %
                             (tstos(end), end - start, err))
            else:
                end = time.time()
                # res[0] is the count of successful actions reported by
                # helpers.bulk; res[1] holds the per-item responses.
                lcl_successes = res[0]
                self.successes += lcl_successes
                lcl_duplicates = 0
                lcl_errors = 0
                len_res1 = len(res[1])
                for idx, ires in enumerate(res[1]):
                    sts = ires[_op_type]['status']
                    if sts not in (200, 201):
                        if _op_type == 'create' and sts == 409:
                            # 409 on create means the document already exists
                            self.duplicates += 1
                            lcl_duplicates += 1
                        else:
                            LOGGER.error(
                                "\t\tERRORS (%d of %d): %r" %
                                (idx, len_res1, ires[_op_type]['error']))
                            self.errors += 1
                            lcl_errors += 1
                    else:
                        # NOTE(review): successes were already counted via
                        # res[0] above; this may double-count -- confirm.
                        self.successes += 1
                        lcl_successes += 1
                if _DEBUG > 0 or lcl_errors > 0:
                    LOGGER.debug(
                        "\tdone (end ts: %s, duration: %.2fs,"
                        " success: %d, duplicates: %d, errors: %d)" %
                        (tstos(end), end - start, lcl_successes,
                         lcl_duplicates, lcl_errors))
                break
    finally:
        # always drain the queue, even on unrecoverable failure
        del self.actions[0:len(self.actions)]
def get_issue(key):
    """Return the issue stored under *key*, or None when it is not in the DB."""
    try:
        db_record = DbJiraIssues.get(DbJiraIssues.key == key)
        return DatabaseWrapper.__record_to_issue(db_record)
    except DoesNotExist:
        # Logger.warn is a deprecated alias of warning()
        LOGGER.warning('Issue %s does not exist in database' % key)
def on_error(self, status_code):
    """Log stream errors reported by the API with their status code."""
    # logger.warn is a deprecated alias of warning()
    l.warning("Error {}".format(status_code))
async def extract(core, cache, userID, target, sa_filename):
    """Convert a sa file to XML, index it, and create a Grafana dashboard.

    Returns (nodename, timestamps-or-False, sadf_type, slug-or-None).
    """
    TSTAMPS = {}
    CMD_CONVERT = ['-x', "--", "-A"]
    SAR_XML_FILEPATH = os.path.join(target, "%s.%s" % (sa_filename, "sar.xml"))
    file_metadata = "file_metadata:%s:%s" % (userID, sa_filename)
    sadf_type_det = cache.hget(file_metadata, "sadf_type_det").decode()
    LOGGER.debug('sysstat version found for: %s' % sadf_type_det)
    _SCRIPT = "%s" % ('scripts/bin/sadf')
    CMD_CONVERT.insert(0, _SCRIPT)
    # prefer the converted file when one was produced by prepare()
    conv_path = cache.hget(file_metadata, "sa_file_path_conv")
    if conv_path:
        target_file = conv_path.decode()
    else:
        target_file = cache.hget(file_metadata, "sa_file_path").decode()
    CMD_CONVERT.insert(-2, target_file)
    #FIXME: check if env in Popen is working fine
    LOGGER.debug("spawned CMD: %s" % " ".join(CMD_CONVERT))
    # close the XML output handle deterministically instead of leaking it
    with open(SAR_XML_FILEPATH, 'w') as xml_out:
        stderr_data = await core.run_async_shell_command(CMD_CONVERT,
                                                         output=xml_out)
    if stderr_data:
        LOGGER.error(stderr_data)
    CMD_GREP = ["scripts/detect_nodename", SAR_XML_FILEPATH]
    stdout_data, stderr_data = await core.run_async_shell_command(CMD_GREP)
    if stderr_data:
        LOGGER.error(stderr_data)
    if stdout_data:
        NODENAME = stdout_data.decode().replace("\n", "").lower()
        LOGGER.debug("Nodename is %s " % NODENAME)
    # NOTE(review): NODENAME is unbound if detect_nodename printed nothing --
    # confirm the script always emits a nodename for valid XML.
    #FIXME: check if call_indexer works everytime. And if it handles errors
    try:
        state, beg, end, nested_elem = await call_indexer(
            core, file_path=SAR_XML_FILEPATH, _nodename=NODENAME,
            cfg_name=CFG_PATH, run_unique_id=userID, run_md5=userID)
        if state:
            TSTAMPS['grafana_range_begin'] = beg
            TSTAMPS['grafana_range_end'] = end
    except Exception as E:
        # Logger.warn is a deprecated alias of warning()
        LOGGER.warning(E)
        LOGGER.error("Error in call_indexer")
    if TSTAMPS:
        LOGGER.debug("[ES data ingested] -- %s" % NODENAME)
        LOGGER.debug('beg: %s' % TSTAMPS['grafana_range_begin'])
        LOGGER.debug('end: %s' % TSTAMPS['grafana_range_end'])
        GRAPHING_OPTIONS = cache.hget("sar_args:%s" % userID, "fields").decode()
        slug = await creation.dashboard(NODENAME, GRAPHING_OPTIONS, TSTAMPS,
                                        nested_elem)
        LOGGER.debug('slug: %s' % slug)
        return (NODENAME, TSTAMPS, sadf_type_det, slug)
    else:
        return (NODENAME, False, sadf_type_det, None)