def process_exception(self, request, exception):
    """ Captures Django view exceptions as Datadog events """

    # ignore the Http404 exception
    if isinstance(exception, Http404):
        return

    # Get a formatted version of the traceback.
    exc = traceback.format_exc()

    # Make request.META json-serializable.
    szble = {}
    for k, v in request.META.items():
        if isinstance(v, string_types + integer_types + (list, bool, float)):
            # TODO: check within the list
            szble[k] = v
        else:
            szble[k] = str(v)

    title = 'Exception from {0}'.format(request.path)
    text = "Traceback:\n@@@\n{0}\n@@@\nMetadata:\n@@@\n{1}\n@@@" \
        .format(exc, json.dumps(szble, indent=2))

    # Submit the exception to Datadog
    create_event(title=title, text=text, tags=self.event_tags,
                 aggregation_key=request.path, alert_type='error')

    # Increment our errors metric
    tags = self._get_metric_tags(request)
    statsd.increment(self.error_metric, tags=tags)
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--api_key")
    parser.add_argument("-p", "--app_key")
    parser.add_argument("-e", "--events")
    args = parser.parse_args()

    # argparse returns a Namespace, so options are read as attributes,
    # not with dictionary-style lookups.
    options = {
        'api_key': args.api_key,
        'app_key': args.app_key
    }
    events_file = args.events

    initialize(**options)

    with open(events_file) as f:
        events_json = json.load(f)

    for e in events_json["events"]:
        zulip_event = zulip_objects.ZulipEvent(e)
        if zulip_event.is_message():
            if zulip_event.message.is_stream():
                stream = zulip_event.message.get_stream()
                metric_name = 'zulip.test.streams.' + stream + '.message_count'
                # api.Metric.send(metric=metric_name, points=1, host="test.example.com", tags=["environment:test"], type="counter")
                statsd.increment(metric_name, tags=["environment:test"])
def get_story_list(self, batch):
    """ get a list of stories corresponding to a list of hashes """
    req_str = self.nb_endpoint + '/reader/starred_stories?'
    for a_hash in batch:
        req_str += 'h=' + a_hash + '&'
    stories = {}
    stories_req = requests.Request('GET', req_str, cookies=self.cookies)
    try:
        stories = self.request_with_backoff(stories_req)
    except requests.exceptions.ConnectionError as e:
        rollbar.report_exc_info()
        msg = 'Failed to get stories'
        logger.error(msg)
        logger.debug('Request string: %s', req_str)
        logger.error(e)
        statsd.event(msg, e.message, alert_type='error')
        logger.debug(stories.text)
    statsd.increment('nb.http_requests.get')
    story_list = []
    try:
        story_list = json.loads(stories.text)['stories']
    except ValueError as e:
        rollbar.report_exc_info()
        msg = 'Failed to parse stories response'
        logger.error(msg)
        logger.error(e)
        statsd.event(msg, e.message, alert_type='error')
        logger.debug(stories.text)
    return story_list
def do_GET(self):
    self._set_headers()
    if self.path == "/controllers.js":
        # Return the JS
        with open("controllers.js") as f:
            self.wfile.write(f.read())
    elif self.path.startswith("/guestbook"):
        args = parse_qs(urlparse(self.path).query)
        cmd = args.get("cmd", [None])[0]
        key = args.get("key", [None])[0]
        val = args.get("value", [None])[0]
        r = redis.Redis(host=os.environ.get("REDIS_MASTER_SERVICE_HOST"), port=6379)
        if cmd == "set":
            r.set(key, val)
            self.wfile.write('{"message": "Updated"}')
            statsd.increment("meetup.guestbook.cmd", tags=["cmd:set"])
        else:
            val = r.get(key)
            self.wfile.write('{"data": "%s"}' % val)
            statsd.increment("meetup.guestbook.cmd", tags=["cmd:get"])
    else:
        # We return the HTML
        with open("index.html") as f:
            self.wfile.write(f.read())
def event_handler(name, **kwargs):
    if name == 'request_finished':
        statsd.increment('mod_wsgi.request.count')

        application_time = kwargs.get('application_time')
        statsd.histogram('mod_wsgi.request.application_time', application_time)

        statsd.gauge('mod_wsgi.request.input_reads', kwargs.get('input_reads'))
        statsd.gauge('mod_wsgi.request.input_length', kwargs.get('input_length'))
        statsd.gauge('mod_wsgi.request.input_time', kwargs.get('input_time'))

        statsd.gauge('mod_wsgi.request.output_writes', kwargs.get('output_writes'))
        statsd.gauge('mod_wsgi.request.output_length', kwargs.get('output_length'))
        statsd.gauge('mod_wsgi.request.output_time', kwargs.get('output_time'))

        cpu_user_time = kwargs.get('cpu_user_time')
        cpu_system_time = kwargs.get('cpu_system_time')
        statsd.gauge('mod_wsgi.request.cpu_user_time', cpu_user_time)
        statsd.gauge('mod_wsgi.request.cpu_system_time', cpu_system_time)

        if cpu_user_time is not None and application_time:
            cpu_burn = (cpu_user_time + cpu_system_time) / application_time
            statsd.gauge('mod_wsgi.request.cpu_burn', cpu_burn)
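# Hedged sketch (not part of the original snippet): registering the handler
# above so it is invoked for each 'request_finished' event. mod_wsgi 4.x
# exposes subscribe_events(); treat the exact wiring point as an assumption
# about the deployment.
import mod_wsgi

mod_wsgi.subscribe_events(event_handler)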
async def on_command(self, command, ctx):
    statsd.increment('bot.commands', tags=[*self.tags,
                                           'command_name:' + str(command),
                                           'cog_name:' + type(ctx.cog).__name__])
def check_if_starred(self, story_hash):
    starred_req = requests.Request('GET',
                                   self.nb_endpoint + '/reader/starred_story_hashes',
                                   cookies=self.cookies)
    hashes = self.request_with_backoff(starred_req)
    statsd.increment('nb.http_requests.get')
    hashlist = hashes.json()['starred_story_hashes']
    return story_hash in hashlist
def GET(self):
    start = time.time()
    delay = random.uniform(0.4, 0.9)
    sleep(delay)
    statsd.increment('web.get.count', tags=["support", "page:page2"])
    duration = time.time() - start
    statsd.histogram('web.get.latency', duration, tags=["support", "page:page2"])
    return "Messages Page"
def test_batched_buffer_autoflush(self):
    fake_socket = FakeSocket()
    with DogStatsd() as statsd:
        statsd.socket = fake_socket
        for i in range(51):
            statsd.increment('mycounter')
        t.assert_equal('\n'.join(['mycounter:1|c' for i in range(50)]),
                       fake_socket.recv())

    t.assert_equal('mycounter:1|c', fake_socket.recv())
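# Illustrative sketch, not part of the test suite above: how the buffering that
# the test exercises is typically used from application code. Exact flush
# thresholds and defaults vary by datadog package version, so treat the
# specifics here as assumptions.
from datadog.dogstatsd import DogStatsd

client = DogStatsd(host="localhost", port=8125)
with client:  # entering the context enables buffering of metrics
    for _ in range(100):
        client.increment("mycounter")
# anything still buffered is flushed when the block exits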
def main():
    """Render the main page."""
    # metric to count the web.page_view
    statsd.increment('web.page_views', tags=["page:home"])
    # metric to count the overall number of page views
    statsd.increment('web.page_views_total')
    return render_template('index.html')
def _send(self, *messages):
    for msg in messages:
        try:
            self.drain.emit_nowait(msg)
        except QueueFull:
            self.logger.debug('Drain full, waiting...')
            yield self.drain.emit(msg)
        else:
            self._throughput_tracker.num_emitted += 1
            statsd.increment('%s.queued' % self.metric_prefix)
def make_request(self, action, params=None, path='/', verb='GET'):
    http_request = self.build_base_http_request(verb, path, None,
                                                params, {}, '',
                                                self.host)
    if action:
        http_request.params['Action'] = action
    if self.APIVersion:
        http_request.params['Version'] = self.APIVersion
    # fall back to 'unknown' when no action is given
    statsd.increment('boto.request', tags=["action:%s" % (action or 'unknown')])
    return self._mexe(http_request)
def process_batch(story_list, config):
    db_client = client_factory.get_db_client()
    for story in story_list:
        if story['story_feed_id'] == int(config.get('NB_HN_FEED_ID')):
            hnurl = get_hn_url(story['story_content'])
            db_client.add_story(story['story_hash'], story['story_date'],
                                hnurl, story['story_permalink'])
            statsd.increment('nb.stories_added')
    statsd.increment('nb.stories.batches_processed')
    db_client.close_connection()
def __setitem__(self, key, value):
    try:
        self.set_value(key, value, self._expiretime)
        statsd.increment('session.set.success')
    except Exception, e:
        log.exception({
            'name': 'beaker_extensions.nosql',
            'description': traceback.format_exc()
        })
        statsd.increment('session.set.redis_exception')
def hello():
    statsd.increment('page.views.hello')
    name = request.forms.get('name')
    if name:
        db = Dbc()
        db.insert('insert into guests (name) values ("%s")' % name)
        txt = '''<b>Hello {{name}}</b>!<p> <hr><br> <a href="/">Back</a><br><a href="/list">List entries</a><p>'''
        return template(txt, name=name)
    else:
        return template('index')
def __getitem__(self, key):
    # make sure we don't try to pickle.loads(None)
    try:
        payload = self.db_conn.get(self._format_key(key))
        statsd.increment('session.get.success')
    except Exception, e:
        log.exception({
            'name': 'beaker_extensions.nosql',
            'description': traceback.format_exc()
        })
        statsd.increment('session.get.redis_exception')
        return {}
def showCommunity():
    # metric to count the web.page_views_community
    statsd.increment('web.page_views_community', tags=["page:community"])
    # metric to count the overall number of page views
    statsd.increment('web.page_views_total')

    # start timer
    start_time = time()
    print start_time

    # connection to the DB
    connection = MySQLdb.connect(host="localhost", user="******",
                                 passwd="cacapipi", db="bucketlist")
    # prepare a cursor object using cursor() method
    cursor = connection.cursor()
    # execute the SQL query using execute() method
    cursor.execute("select user_name, user_username, user_password from tbl_user ")
    # fetch all the rows from the query
    data = cursor.fetchall()

    # print the rows
    # THIS_DIR = os.path.dirname(os.path.abspath(__file__))
    # Create the jinja2 environment
    # Notice the use of trim_blocks, which greatly helps control whitespace.
    # j2_env = Environment(loader=FileSystemLoader(THIS_DIR), trim_blocks=True)
    # print j2_env.get_template('community.html').render(items=data)
    # env = Environment(loader=PackageLoader('app', 'template'))
    # template = env.get_template('community.html')
    # print template.render(items=data)
    for row in data:
        print row[0], row[1]
    cursor.close()

    # close the connection
    connection.close()

    # return timer
    duration = time() - start_time
    print duration
    statsd.histogram('database.query.time', duration, tags=["page:community"])
    statsd.gauge('test2', 200)

    # exit the program
    sys.exit()
def showSignUp():
    # metric to count the web.page_views_signup
    statsd.increment('web.page_views_signup', tags=["page:signup"])
    # metric to count the overall number of page views
    statsd.increment('web.page_views_total')
    # use of the agent check that samples a random value
    print(random.random())
    return render_template('signup.html')
def main(args):
    if len(args) < 2:
        print usage
    elif args[0] == 'i':
        statsd.increment(args[1])
    elif args[0] == 'd':
        statsd.decrement(args[1])
    elif args[0] == 'g':
        statsd.gauge(args[1], float(args[2]))
    elif args[0] == 'e':
        api.Event.create(title=args[1], text=args[2], tags=args[3])
    else:
        print usage
def incr(self, key, instance=None, tags=None, amount=1, sample_rate=1):
    if tags is None:
        tags = {}
    if self.tags:
        tags.update(self.tags)
    if instance:
        tags['instance'] = instance
    if tags:
        tags = [u'{}:{}'.format(*i) for i in tags.items()]
    statsd.increment(
        self._get_key(key),
        amount,
        sample_rate=sample_rate,
        tags=tags,
    )
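# Hypothetical usage of the incr() wrapper above ("client" and the key prefix
# are assumptions, not part of the original source). A tags dict is flattened
# into Datadog's "key:value" list form before statsd.increment is called:
client.incr('requests', instance='web01', tags={'status': '200'})
# roughly equivalent to:
#   statsd.increment('prefix.requests', 1, sample_rate=1,
#                    tags=['status:200', 'instance:web01'])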
def login():
    # metric to count the web.page_views_login
    statsd.increment('web.page_views_login', tags=["page:login"])
    # metric to count the overall number of page views
    statsd.increment('web.page_views_total')
    error = None
    if request.method == 'POST':
        if request.form['username'] != 'admin' or request.form['password'] != 'admin':
            error = 'Invalid Credentials. Please try again.'
        else:
            return redirect(url_for('main'))
    return render_template('login.html', error=error)
async def stream_worker_task(self, worker_id: int) -> None:
    if self._redis_stream is None or self._redis_cache is None:
        raise RuntimeError("redis clients are not ready")

    # NOTE(sileht): This task must never fail, we don't want to write code to
    # reap/clean/respawn them
    stream_processor = StreamProcessor(self._redis_stream, self._redis_cache)
    stream_selector = StreamSelector(self._redis_stream, worker_id, self.worker_count)

    while not self._stopping.is_set():
        try:
            stream_name = await stream_selector.next_stream()
            if stream_name:
                LOG.debug("worker %s take stream: %s", worker_id, stream_name)
                try:
                    with statsd.timed("engine.stream.consume.time"):
                        await stream_processor.consume(stream_name)
                finally:
                    LOG.debug(
                        "worker %s release stream: %s", worker_id, stream_name,
                    )
            else:
                LOG.debug("worker %s has nothing to do, sleeping a bit", worker_id)
                await self._sleep_or_stop()
        except asyncio.CancelledError:
            LOG.debug("worker %s killed", worker_id)
            return
        except aredis.exceptions.ConnectionError:
            statsd.increment("redis.client.connection.errors")
            LOG.warning("worker %s lost Redis connection", worker_id, exc_info=True)
            await self._sleep_or_stop()
        except Exception:
            LOG.error("worker %s fail, sleeping a bit", worker_id, exc_info=True)
            await self._sleep_or_stop()

    LOG.debug("worker %s exited", worker_id)
def dd_log_clans(self, message: discord.Message):
    """RACF specific. Log only clans."""
    server = message.server
    server_id = server.id
    server_name = server.name
    clans = [
        'alpha', 'bravo', 'charlie', 'delta',
        'echo', 'foxtrot', 'golf', 'hotel'
    ]
    for r in message.author.roles:
        if r.name.lower() in clans:
            statsd.increment('bot.msg.clan', tags=[
                *self.tags,
                'server_id:' + str(server_id),
                'server_name:' + str(server_name),
                'role:' + str(r.name)
            ])
def meter_event(event_type, data):
    tags = [f"event_type:{event_type}"]
    if "action" in data:
        tags.append(f"action:{data['action']}")
        if (
            event_type == "pull_request"
            and data["action"] == "closed"
            and data["pull_request"]["merged"]
        ):
            merged_by = data["pull_request"]["merged_by"]
            if merged_by and merged_by["login"] in ["mergify[bot]", "mergify-test[bot]"]:
                tags.append("by_mergify")

    statsd.increment("github.events", tags=tags)
def on_message_create(self, event):
    tags = {
        'channel_id': event.channel_id,
        'author_id': event.author.id,
    }

    if event.guild:
        tags['guild_id'] = event.guild.id

    if event.author.id == self.client.state.me.id:
        if event.nonce in self.nonces:
            statsd.timing('latency.message_send',
                          time.time() - self.nonces[event.nonce],
                          tags=to_tags(tags))
            del self.nonces[event.nonce]

    statsd.increment('guild.messages.create', tags=to_tags(tags))
def add_comment_counts():
    logger.info('Add comment counts to stories in DB')
    db_client = client_factory.get_db_client()
    rows = db_client.list_stories_without_comment_count()
    logger.debug('Found %s rows', len(rows))
    nb_client = client_factory.get_newsblur_client()
    nb_client.login()
    for row in rows:
        # url = row.hnurl
        url = row[0]
        count = nb_client.get_comment_count(url)
        logger.debug("Count for %s is %s", url, count)
        if count is not None:
            db_client.add_comment_count(url, count)
            statsd.increment('nb.add_comment_counts.comment_counts_added')
    logger.info('Finished adding comment counts')
def onInput(self):
    respawn = True
    try:
        msg = self.collector.readline()
        if msg:
            yield self.gate.put(msg)
            statsd.increment('%s.queued' % self.metric_prefix,
                             tags=[self.sender_tag])
        else:
            self.end_of_input.set()
            respawn = False
    except Exception as err:
        self.logger.exception(err)
        self.input_error.set()
        respawn = False
    finally:
        if respawn:
            self.loop.spawn_callback(self.onInput)
def submission_id_get(request, submission_id):
    statsd.increment('core.hits.get.submission_id')

    submission = get_object_or_404(Submission, pk=submission_id)
    assignment_type = get_assignment_type(submission.assignment)

    if assignment_type == HWCentralAssignmentType.STUDENT:
        return SubmissionIdGetStudent(request, submission).handle()
    elif assignment_type == HWCentralAssignmentType.INACTIVE:
        raise InvalidStateError("Submission %s for inactive assignment %s"
                                % (submission, submission.assignment))
    elif assignment_type == HWCentralAssignmentType.UNCORRECTED:
        return SubmissionIdGetUncorrected(request, submission).handle()
    elif assignment_type == HWCentralAssignmentType.CORRECTED:
        return SubmissionIdGetCorrected(request, submission).handle()
    else:
        raise InvalidHWCentralAssignmentTypeError(assignment_type)
def assignment_id_get(request, assignment_id):
    statsd.increment('core.hits.get.assignment_id')

    assignment = get_object_or_404(Assignment, pk=assignment_id)
    assignment_type = get_assignment_type(assignment)

    if assignment_type == HWCentralAssignmentType.STUDENT:
        # Student assignments should not be accessible from assignment_id
        raise Http404
    elif assignment_type == HWCentralAssignmentType.INACTIVE:
        return AssignmentIdGetInactive(request, assignment).handle()
    elif assignment_type == HWCentralAssignmentType.UNCORRECTED:
        return AssignmentIdGetUncorrected(request, assignment).handle()
    elif assignment_type == HWCentralAssignmentType.CORRECTED:
        # Only submissions are viewed after an assignment has been corrected
        raise Http404
    else:
        raise InvalidHWCentralAssignmentTypeError(assignment_type)
def update_comment_counts():
    logger.info('Update comment counts to stories in DB')
    db_client = client_factory.get_db_client()
    rows = db_client.list_comment_count_update_candidates()
    logger.debug('Found %s candidates for updating comment count', len(rows))
    nb_client = client_factory.get_newsblur_client()
    nb_client.login()
    for row in rows:
        url = row[0]
        count = nb_client.get_comment_count(url)
        logger.debug("Count for %s is %s", url, count)
        if count is not None:
            db_client.add_comment_count(url, count)
            statsd.increment('nb.add_comment_counts.comment_counts_added')
    logger.info('Finished updating comment counts')
async def beta(self, ctx):
    """Ooooo new stuff!"""
    data = discord.Embed(title="__**YouTube Beta**__",
                         colour=discord.Colour(value=11735575))
    data.add_field(name="Owo what's this?",
                   value="This is a beta version of the bot. New stuff has been added and should be stable, but may break.")
    data.add_field(name="Oh, cool! What is new?",
                   value="In this version, I've added some new commands (yt beta) and upgraded to [discord.py 1.0.0](http://discordpy.readthedocs.io/en/rewrite/)")
    data.add_field(name="What if it breaks?",
                   value="I'm always happy to give support, and look for feedback. You can find me on the [support server](https://discord.gg/yp8WpMh)!")
    data.set_footer(text="Made with \U00002665 by Francis#6565.")
    try:
        await ctx.send(embed=data)
        statsd.increment('bot.commands.run')
    except discord.HTTPException:
        logger.exception("Missing embed links perms")
        statsd.increment('bot.commands.errored', 1)
        await ctx.send("Looks like the bot doesn't have embed links perms... It kinda needs these, so I'd suggest adding them!")
def _reattach_the_job(dbitem):
    """
    :param dbitem:
    :type dbitem: Report | Jobs
    """
    log = get_logger()
    assert isinstance(dbitem, Report) or isinstance(dbitem, Jobs)
    # if not dbitem.aborting:
    try:
        p = Process(target=dbitem.waiter, args=(s, ))
        p.start()
        proc_lst.update({ dbitem.id: ProcItem(p, dbitem) })
        log.debug('%s%s : reattaching job.waiter in PID%s' % (dbitem.short_id + (p.pid,)))
        if statsd:
            statsd.increment('python.breeze.running_jobs')
    except Exception as e:
        log.exception('%s%s : unhandled exception : %s' % (dbitem.short_id + (e,)))
        return False
def DataDogCmd(cmd):
    args = shlex.split(cmd)
    print args
    if len(args) < 2:
        print usage
        return False
    elif args[0] == 'i':
        statsd.increment(args[1])
    elif args[0] == 'd':
        statsd.decrement(args[1])
    elif args[0] == 'g':
        statsd.gauge(args[1], float(args[2]))
    elif args[0] == 'e':
        api.Event.create(title=args[1], text=args[2], tags=args[3])
    else:
        print usage
        return False
    return True
def make_locations_table(header, locations_data):
    locations_table = collections.OrderedDict()
    dictionaries.append(locations_table)
    header["locations_offset"] = offset_count()  # Offset for the locations table.
    locations_number = 0
    for loc_coord in list(locations_data.values()):
        locations_table["location_%s_offset" % locations_number] = u32(0)  # Offset for the locations.
        locations_table["location_%s_coordinates" % locations_number] = loc_coord[0]  # Coordinates of the locations.
        locations_number += 1
    header["locations_number"] = u32(locations_number)  # Number of entries for the locations.
    if config["production"]:
        statsd.increment("news.total_locations", locations_number)
    return locations_table
def test_batched_buffer_autoflush(self):
    fake_socket = FakeSocket()
    bytes_sent = 0
    with DogStatsd(telemetry_min_flush_interval=0) as statsd:
        single_metric = 'mycounter:1|c'
        assert_equal(statsd._max_payload_size, UDP_OPTIMAL_PAYLOAD_LENGTH)
        metrics_per_packet = statsd._max_payload_size // (len(single_metric) + 1)
        statsd.socket = fake_socket
        for i in range(metrics_per_packet + 1):
            statsd.increment('mycounter')
        payload = '\n'.join([single_metric for i in range(metrics_per_packet)])

        telemetry = telemetry_metrics(metrics=metrics_per_packet + 1,
                                      bytes_sent=len(payload))
        bytes_sent += len(payload) + len(telemetry)
        assert_equal(payload, fake_socket.recv())
        assert_equal(telemetry, fake_socket.recv())

    assert_equal(single_metric, fake_socket.recv())
    telemetry = telemetry_metrics(metrics=0, packets_sent=2,
                                  bytes_sent=len(single_metric) + len(telemetry))
    assert_equal(telemetry, fake_socket.recv())
def youtube_request(self, groups, e):
    statsd.increment('as2.new_request')
    link, v, mode = groups
    channel = e.target
    user_quarry = execute(self.connection, self.cursor,
                          "SELECT id FROM users WHERE username = %s",
                          [channel.replace("#", "")])
    user_result = user_quarry.fetchone()
    if mode is None or mode == "":
        mode = 'not specified'
    yt = youtube_api(v)
    title = yt["items"][0]["snippet"]["title"]
    duration = yt["items"][0]["contentDetails"]["duration"]
    request_quarry = execute(
        self.connection, self.cursor,
        "INSERT INTO requests (user_id, requested_by, song_id, title, duration, mode, action) VALUES(%s, %s, %s, %s, %s, %s, %s)",
        [user_result["id"], e.source.nick, v, title, duration, mode, 0])
    msg = '{} is added.'.format(title)
    self.connection.privmsg(channel, msg)
def onInput(self):
    respawn = True
    try:
        try:
            msg = self.collector.get_nowait()
        except QueueEmpty:
            msg = yield self.collector.get()
        try:
            self.gate.put_nowait(msg)
        except QueueFull:
            yield self.gate.put(msg)
    except Exception as err:
        self.logger.exception(err)
        self.input_error.set()
        respawn = False
    finally:
        statsd.increment('%s.queued' % self.metric_prefix,
                         tags=[self.sender_tag])
        if respawn:
            self.loop.spawn_callback(self.onInput)
def make_articles_table(mode, locations_data, header, data):
    articles_table = collections.OrderedDict()
    dictionaries.append(articles_table)
    p_number = 0
    numbers = 0
    header["articles_offset"] = offset_count()
    for keys, article in list(data.items()):
        numbers += 1
        articles_table["article_%s_number" % numbers] = u32(numbers)  # Number for the article.
        articles_table["source_%s_number" % numbers] = u32(0)  # Number for the source.
        articles_table["location_%s_number" % numbers] = u32(4294967295)  # Number for the location.
        for locations in list(locations_data.keys()):
            for article_name in locations_data[locations][2]:
                if keys == article_name:
                    articles_table["location_%s_number" % numbers] = u32(list(locations_data.keys()).index(locations))  # Number for the location.
        if article[4] is not None:
            articles_table["term_timestamp_%s" % numbers] = get_timestamp(1)  # Timestamp for the term.
            articles_table["picture_%s_number" % numbers] = u32(p_number)  # Number for the picture.
            p_number += 1
        else:
            articles_table["term_timestamp_%s" % numbers] = u32(0)  # Timestamp for the term.
            articles_table["picture_%s_number" % numbers] = u32(4294967295)  # Number for the picture.
        articles_table["published_time_%s" % numbers] = article[0]  # Published time.
        articles_table["updated_time_%s" % numbers] = get_timestamp(1)  # Updated time.
        articles_table["headline_%s_size" % numbers] = u32(len(article[3].replace(b'\n', b'')))  # Size of the headline.
        articles_table["headline_%s_offset" % numbers] = u32(0)  # Offset for the headline.
        articles_table["article_%s_size" % numbers] = u32(len(article[2]))  # Size of the article.
        articles_table["article_%s_offset" % numbers] = u32(0)  # Offset for the article.
    header["articles_number"] = u32(numbers)  # Number of entries for the articles table.
    if config["production"]:
        statsd.increment("news.total_articles", numbers)
        statsd.increment("news.articles." + mode, numbers)
    return articles_table
def segment2datadog(source):
    # check signature
    signature = request.headers['x-signature']
    digest = hmac.new(SEGMENT_SHARED_SECRET.encode(), msg=request.data,
                      digestmod=hashlib.sha1).hexdigest()
    if digest != signature:
        abort(403, 'Signature not valid.')
    if not source:
        abort(404, 'Source parameter not present.')
    content = request.get_json(silent=True)
    # increment event counter in datadog
    if content['type'] == 'track':
        statsd.increment('segment.event', tags=[
            'source:' + source,
            'event:' + '-'.join(content['event'].split()),
            'type:' + content['type']
        ])
    return jsonify({'source': source, 'data': content})
def prune_starred():
    db_client = client_factory.get_db_client()
    config = db_client.read_config()
    rows = db_client.list_stories_with_comments_fewer_than(config.get('COMMENTS_THRESHOLD'))
    nb_client = client_factory.get_newsblur_client()
    nb_client.login()
    logger.info('Remove all stars on stories with fewer than %s comments',
                config.get('COMMENTS_THRESHOLD'))
    removed = 0
    candidates = 0
    for row in rows:
        candidates += 1
        if nb_client.remove_star_with_backoff(row[0]):
            db_client.unstar(row[0])
            statsd.increment('nb.prune.stars_removed')
            removed += 1
    logger.info('Successfully removed %s out of %s candidate stars', removed, candidates)
    db_client.close_connection()
    logger.info('Finished pruning stars')
def request_stop(metrics, response):
    metrics['Request-Timer'].stop()
    metrics['Request-Metric-Millis'] = metrics['Request-Timer'].ms
    statsd.incr(metrics['Request-Metric-ID'])
    statsd.incr("{}.{}".format(metrics['Request-Metric-ID'], response.status_code))
    if DATADOG_METRICS:
        datadog_statsd.increment(metrics['Request-Metric-ID'])
        datadog_statsd.increment("{}.{}".format(metrics['Request-Metric-ID'],
                                                response.status_code))
        datadog_statsd.timing(metrics['Request-Metric-ID'],
                              metrics['Request-Timer'].ms)
    metrics.pop('Request-Timer')
    for name, value in metrics.items():
        response._headers[name] = (name, str(value))
def test_batched_buffer_autoflush(self):
    fake_socket = FakeSocket()
    bytes_sent = 0
    with DogStatsd() as statsd:
        statsd.socket = fake_socket
        for i in range(51):
            statsd.increment('mycounter')
        payload = '\n'.join(['mycounter:1|c' for i in range(50)])

        telemetry = telemetry_metrics(metrics=50)
        bytes_sent += len(payload) + len(telemetry)
        assert_equal_telemetry(payload, fake_socket.recv(), telemetry=telemetry)

    assert_equal_telemetry('mycounter:1|c', fake_socket.recv(),
                           telemetry=telemetry_metrics(packets_sent=1,
                                                       bytes_sent=bytes_sent))
def run_twitter_searches(since_id: int, queries: List[str], job_mode: str) -> int:
    if not since_id:
        since_id = get_since_id_from_file()
    api = build_tweepy_api()

    total_returned_tweets = 0
    processed_id_tweets = set()
    max_processed_id = 0
    max_processed_time_stamp = 0

    for query in queries:
        logger.info(f"Querying: {query}")
        cursor = query_twitter(api, query, since_id)
        query_submissions = []
        for resp in cursor:
            statsd.increment("twitter.read_post", 1,
                             tags=[f"job_mode:{job_mode}", f"query:{query}"])
            total_returned_tweets += 1
            tweet = resp._json
            id_tweet = tweet["id"]
            if id_tweet > max_processed_id:
                max_processed_id = id_tweet
                max_processed_time_stamp = tweet["created_at"]
            # submissions is a list so we can handle single tweet with multiple media objects
            submissions, processed_id_tweets = convert_tweet(tweet, processed_id_tweets)
            if not submissions:
                continue
            query_submissions.extend(submissions)
        statsd.increment("twitter.query_processed", 1,
                         tags=[f"job_mode:{job_mode}", f"query:{query}"])
        if query_submissions:
            bulk_upload_submissions(query_submissions, TWITTER_LARAVEL_API_KEY, READER_MODE)

    logger.info(f"total_returned_tweets {total_returned_tweets}")
    if max_processed_id > 0:
        log_last_processed_id(max_processed_id, max_processed_time_stamp)
    return max_processed_id
async def info(self, ctx):
    """Information about the bot"""
    msg = await ctx.send('Getting statistics...')
    shards = self.bot.shard_count
    shard_id = ctx.message.guild.shard_id
    guilds = len(list(self.bot.guilds))
    users = str(len([m for m in set(self.bot.get_all_members())]))
    channels = str(len([m for m in set(self.bot.get_all_channels())]))
    # await msg.edit("Getting uptime...")
    up = abs(self.bot.uptime - int(time.perf_counter()))
    up = str(datetime.timedelta(seconds=up))
    data = discord.Embed(title="__**Information**__",
                         colour=discord.Colour(value=11735575))
    data.add_field(name="Version", value="2.5-beta", inline=False)
    data.add_field(name="Shard ID", value=ctx.message.guild.shard_id)
    data.add_field(name="Total Shards", value=shards)
    data.add_field(name="Total Servers", value=guilds)
    # data.add_field(name="Servers (total)", value=total_guilds)
    data.add_field(name="Users", value=users)
    data.add_field(name="Channels", value=channels)
    data.add_field(name="Uptime", value="{}".format(up))
    data.add_field(
        name="Support Development",
        value="Donate on [Patreon](https://www.patreon.com/franc_ist) or [PayPal](https://paypal.me/MLAutomod/5)")
    data.set_footer(
        text="Made with \U00002665 by Francis#6565. Support server: https://discord.gg/yp8WpMh")
    try:
        await msg.edit(content=None, embed=data)
        statsd.increment('bot.commands.run', 1)
    except discord.HTTPException:
        logger.exception("Missing embed links perms")
        statsd.increment('bot.commands.errored', 1)
        await ctx.send(
            "Looks like the bot doesn't have embed links perms... It kinda needs these, so I'd suggest adding them!")
def secure_static_get(request, b64_string):
    statsd.increment('core.hits.get.secure_static')

    # first we decode the signed id
    id_signed = urlsafe_base64_decode(b64_string)

    # then we unsign the id - make sure the url is not tampered
    try:
        id_unsigned = SIGNER.unsign(id_signed)
    except BadSignature:
        raise Http404

    # validation
    username = id_unsigned.split(ENCODING_SEPERATOR)[0]
    if request.user.username != username:
        raise Http404

    # validation passed - send request to static resource server and relay the response
    resource_url = id_unsigned[len(username) + 1:]
    return HttpResponse(cabinet_api.get_static_content(resource_url),
                        content_type='image/jpeg')
def _reattach_the_job(dbitem):
    """
    :param dbitem: Runnable
    :type dbitem: Report | Jobs
    """
    assert isinstance(dbitem, Report) or isinstance(dbitem, Jobs)
    try:
        if not dbitem.is_done:
            # p = Thread(target=dbitem.waiter, args=(s, ))
            p = Thread(target=dbitem.compute_if.busy_waiting, args=(False, ))
            p.start()
            # dbitem.waiter(s)
            proc_lst.update({ dbitem.id: ProcItem(p, dbitem) })
            dbitem.log.debug('reattaching job.waiter in tID%s' % p.ident)
            if statsd:
                statsd.increment('python.breeze.running_jobs')
    except Exception as e:
        dbitem.log.exception('unhandled exception : %s' % e)
        return False
async def on_command_error(ctx, error):
    statsd.increment('bot.commands.errored', 1)
    if isinstance(error, commands.MissingRequiredArgument):
        await send_cmd_help(ctx)
    elif isinstance(error, commands.BadArgument):
        await send_cmd_help(ctx)
    elif isinstance(error, commands.CommandOnCooldown):
        await ctx.send("Woah there, {}. That command is on cooldown!".format(ctx.message.author.mention))
    elif isinstance(error, commands.CommandInvokeError):
        logger.exception("Exception in command '{}'".format(ctx.command.qualified_name),
                         exc_info=error.original)
        oneliner = "Error in command '{}' - {}: {}".format(
            ctx.command.qualified_name, type(error.original).__name__, str(error.original))
        await ctx.send(oneliner)
    elif isinstance(error, commands.CommandNotFound):
        pass
    elif isinstance(error, commands.CheckFailure):
        pass
    else:
        logger.exception(type(error).__name__, exc_info=error)
def _check_rate_limit(response: httpx.Response) -> None:
    remaining = response.headers.get("X-RateLimit-Remaining")
    if remaining is None:
        return

    remaining = int(remaining)

    if remaining < RATE_LIMIT_THRESHOLD:
        reset = response.headers.get("X-RateLimit-Reset")
        if reset is None:
            delta = datetime.timedelta(minutes=5)
        else:
            delta = (datetime.datetime.utcfromtimestamp(int(reset))
                     - datetime.datetime.utcnow())
        if response.url is not None:
            statsd.increment(
                "http.client.rate_limited",
                tags=[f"hostname:{response.url.host}"],
            )
        raise exceptions.RateLimited(delta, remaining)
def process_message(message):
    trades_box.values = [datetime.datetime.now()]
    bids_box.values = ["QTY PRICE Broker Customer"]
    asks_box.values = ["QTY PRICE Broker Customer"]
    F.display()

    trades, resulting_orders = order_book.process_order(message, False, False)
    # print order_book
    # print resulting_orders
    # print trades
    statsd.increment('messageRate',
                     tags=['exchange:aex', 'application:matching_engine',
                           'symbol:' + os.environ['symbol'],
                           'mode:' + os.environ['mode']])

    book = {}
    book['bids'] = order_book.bids
    book['asks'] = order_book.asks
    response = client.put_record(DeliveryStreamName='matching-engine-' + os.environ['symbol'],
                                 Record={'Data': pickle.dumps(book)})

    tick_group = ('239.254.254.2', 5000)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.settimeout(0.2)
    ttl = struct.pack('b', 1)
    sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)
    for trade in trades:
        sock.sendto(json.dumps(trade), tick_group)
def _reattach_the_job(dbitem):
    """
    :param dbitem: Runnable
    :type dbitem: Report | Jobs
    """
    assert isinstance(dbitem, Report) or isinstance(dbitem, Jobs)
    try:
        if not dbitem.is_done:
            # p = Thread(target=dbitem.waiter, args=(s, ))
            p = Thread(target=dbitem.compute_if.busy_waiting, args=(False,))
            p.start()
            # dbitem.waiter(s)
            proc_lst.update({dbitem.id: ProcItem(p, dbitem)})
            dbitem.log.debug("reattaching job.waiter in tID%s" % p.ident)
            if statsd:
                statsd.increment("python.breeze.running_jobs")
    except Exception as e:
        dbitem.log.exception("unhandled exception : %s" % e)
        return False
def _reattach_the_job(dbitem):
    """
    :param dbitem:
    :type dbitem: Report | Jobs
    """
    log = get_logger()
    assert isinstance(dbitem, Report) or isinstance(dbitem, Jobs)
    # if not dbitem.aborting:
    try:
        p = Process(target=dbitem.waiter, args=(s, ))
        p.start()
        proc_lst.update({dbitem.id: ProcItem(p, dbitem)})
        log.debug('%s%s : reattaching job.waiter in PID%s' % (dbitem.short_id + (p.pid, )))
        if statsd:
            statsd.increment('python.breeze.running_jobs')
    except Exception as e:
        log.exception('%s%s : unhandled exception : %s' % (dbitem.short_id + (e, )))
        return False
def process_response(self, request, response):
    if not hasattr(request, self.DATADOG_TIMING_ATTRIBUTE):
        return response

    request_time = time.time() - getattr(request, self.DATADOG_TIMING_ATTRIBUTE)

    timing_metric = '{0}.request_time'.format(self.app_name)
    count_metric = '{0}.no_of_requests_metric'.format(self.app_name)
    success_metric = '{0}.no_of_successful_requests_metric'.format(self.app_name)
    unsuccess_metric = '{0}.no_of_unsuccessful_requests_metric'.format(self.app_name)

    tags = self._get_metric_tags(request)

    if 200 <= response.status_code < 400:
        statsd.increment(success_metric, tags=tags)
    else:
        statsd.increment(unsuccess_metric, tags=tags)

    statsd.increment(count_metric, tags=tags)
    statsd.histogram(timing_metric, request_time, tags=tags)

    return response
def _spawn_the_job(dbitem):
    """
    :param dbitem: Runnable
    :type dbitem: Report | Jobs
    """
    assert isinstance(dbitem, Report) or isinstance(dbitem, Jobs)
    if not dbitem.aborting:
        try:
            p = Thread(target=dbitem.compute_if.send_job)
            p.start()
            # dbitem.run()
            proc_lst.update({ dbitem.id: ProcItem(p, dbitem) })
            dbitem.log.debug('spawning job.run in tID%s' % p.ident)
            if statsd:
                statsd.increment('python.breeze.running_jobs')
        except Exception as e:
            dbitem.log.exception('unhandled exception : %s' % e)
            return False
    else:
        dbitem.breeze_stat = JobStat.ABORTED
def request_stop(metrics, response):
    metrics['Request-Timer'].stop()
    metrics['Request-Metric-Millis'] = metrics['Request-Timer'].ms
    statsd.incr(metrics['Request-Metric-ID'])
    statsd.incr(f"{metrics['Request-Metric-ID']}.{response.status_code}")
    if DATADOG_METRICS:
        datadog_statsd.increment(metrics['Request-Metric-ID'], tags=DATADOG_TAGS)
        datadog_statsd.increment(
            f"{metrics['Request-Metric-ID']}.{response.status_code}",
            tags=DATADOG_TAGS)
        datadog_statsd.timing(metrics['Request-Metric-ID'],
                              metrics['Request-Timer'].ms,
                              tags=DATADOG_TAGS)
    metrics.pop('Request-Timer')
    for name, value in metrics.items():
        response._headers[name] = (name, str(value))
def bulk_upload_submissions(raw_submissions: List[RawSubmission], api_key: str, job_mode: str) -> None:
    start_time = time.time()
    json_submissions = []
    for raw_submission in raw_submissions:
        json_submissions.append(raw_submission.to_dict())
    headers = {"Content-Type": "application/json", "Api-Token": api_key}
    data_dump = dumps({"data": json_submissions})
    resp = requests.post(url=f"{LARAVEL_HOST}/{LARAVEL_ENDPOINT}", data=data_dump, headers=headers)
    end_time = time.time()

    statsd.gauge(f"laravel.{LARAVEL_ENDPOINT}.duration", end_time - start_time,
                 tags=[f"job_mode:{job_mode}"])
    statsd.gauge(f"laravel.{LARAVEL_ENDPOINT}.request_size", len(pickle.dumps(data_dump)),
                 tags=[f"job_mode:{job_mode}"])
    statsd.increment(f"laravel.{LARAVEL_ENDPOINT}.success", 1, tags=[f"job_mode:{job_mode}"])
    statsd.increment(f"laravel.{LARAVEL_ENDPOINT}.num_rows", len(raw_submissions),
                     tags=[f"job_mode:{job_mode}"])

    if resp.status_code != 200:
        raise ValueError(f"Failed to upload to Laravel with resp: {resp.text}")
def _spawn_the_job(dbitem):
    """
    :param dbitem: Runnable
    :type dbitem: Report | Jobs
    """
    assert isinstance(dbitem, Report) or isinstance(dbitem, Jobs)
    if not dbitem.aborting:
        try:
            p = Thread(target=dbitem.compute_if.send_job)
            p.start()
            # dbitem.run()
            proc_lst.update({dbitem.id: ProcItem(p, dbitem)})
            dbitem.log.debug("spawning job.run in tID%s" % p.ident)
            if statsd:
                statsd.increment("python.breeze.running_jobs")
        except Exception as e:
            dbitem.log.exception("unhandled exception : %s" % e)
            return False
    else:
        dbitem.breeze_stat = JobStat.ABORTED
def request(self, method, url, *args, **kwargs):
    reply = None
    try:
        reply = super().request(method, url, *args, **kwargs)
    except http.HTTPClientSideError as e:
        if e.status_code == 403:
            _check_rate_limit(e.response)
        raise
    finally:
        if reply is None:
            status_code = "error"
        else:
            status_code = reply.status_code
        statsd.increment(
            "http.client.requests",
            tags=[f"hostname:{self.base_url.host}", f"status_code:{status_code}"],
        )
        self._requests.append((method, url))
    return reply
def _spawn_the_job(dbitem):
    """
    :param dbitem:
    :type dbitem: Report | Jobs
    """
    log = get_logger()
    assert isinstance(dbitem, Report) or isinstance(dbitem, Jobs)
    if not dbitem.aborting:
        try:
            p = Process(target=dbitem.run)
            p.start()
            # proc_lst.update({ dbitem.id: (p, dbitem) })
            proc_lst.update({ dbitem.id: ProcItem(p, dbitem) })
            log.debug('%s%s : spawning job.run in PID%s' % (dbitem.short_id + (p.pid,)))
            if statsd:
                statsd.increment('python.breeze.running_jobs')
        except Exception as e:
            log.exception('%s%s : unhandled exception : %s' % (dbitem.short_id + (e,)))
            return False
    else:
        # abort_sub(ProcItem(None, dbitem), s)
        dbitem.breeze_stat = JobStat.ABORTED
def process_message(message):
    # os.system('clear')
    trades, resulting_orders = order_book.process_order(message, False, False)
    # print order_book
    # print resulting_orders
    # print trades
    statsd.increment('messageRate',
                     tags=['exchange:aex', 'application:matching_engine',
                           'symbol:' + os.environ['symbol'],
                           'mode:' + os.environ['mode']])

    book = {}
    book['bids'] = order_book.bids
    book['asks'] = order_book.asks
    response = client.put_record(DeliveryStreamName='matching-engine-' + os.environ['symbol'],
                                 Record={'Data': pickle.dumps(book)})

    book = {}
    book['symbol'] = os.environ['symbol']
    book['data'] = order_book
    book['trades'] = trades
    # response2 = kin.put_record(StreamName='aex-matching-engine', Data=pickle.dumps(book), PartitionKey='YHOO')

    tick_group = ('239.254.254.2', 5000)
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock.settimeout(0.2)
    ttl = struct.pack('b', 1)
    sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)
    for trade in trades:
        sock.sendto(json.dumps(trade), tick_group)