    def process_response(self, request, response):
        if not hasattr(request, self.DATADOG_TIMING_ATTRIBUTE):
            return response

        request_time = time.time() - getattr(request,
                                             self.DATADOG_TIMING_ATTRIBUTE)

        timing_metric = '{0}.request_time'.format(self.app_name)
        count_metric = '{0}.no_of_requests_metric'.format(self.app_name)
        success_metric = '{0}.no_of_successful_requests_metric'.format(
            self.app_name)
        unsuccess_metric = '{0}.no_of_unsuccessful_requests_metric'.format(
            self.app_name)

        tags = self._get_metric_tags(request)

        if 200 <= response.status_code < 400:
            statsd.increment(success_metric, tags=tags)
        else:
            statsd.increment(unsuccess_metric, tags=tags)

        statsd.increment(count_metric, tags=tags)
        statsd.histogram(timing_metric, request_time, tags=tags)

        return response
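
The subtraction above only works if an earlier hook stamped the request with a start time. A minimal sketch of the process_request counterpart this middleware assumes (the class name and attribute value here are hypothetical, not the package's actual code):

import time


class DatadogTimingMiddleware(object):
    # Hypothetical constant; the real middleware defines its own attribute name.
    DATADOG_TIMING_ATTRIBUTE = '_datadog_request_start_time'

    def process_request(self, request):
        # Stamp the request so process_response() can compute request_time later.
        setattr(request, self.DATADOG_TIMING_ATTRIBUTE, time.time())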
Example #2
    async def _extract_pulls_from_stream(self, stream_name):
        messages = await self.redis.xrange(stream_name, count=config.STREAM_MAX_BATCH)
        LOG.debug("read stream", stream_name=stream_name, messages_count=len(messages))
        statsd.histogram("engine.streams.size", len(messages))

        # Groups stream by pull request
        pulls = collections.OrderedDict()
        for message_id, message in messages:
            data = msgpack.unpackb(message[b"event"], raw=False)
            owner = data["owner"]
            repo = data["repo"]
            source = data["source"]

            if data["pull_number"] is not None:
                key = (owner, repo, data["pull_number"])
                group = pulls.setdefault(key, ([], []))
                group[0].append(message_id)
                group[1].append(source)
            else:
                logger = daiquiri.getLogger(
                    __name__, gh_repo=repo, gh_owner=owner, source=source
                )
                logger.debug("unpacking event")
                try:
                    converted_messages = await self._convert_event_to_messages(
                        stream_name, owner, repo, source
                    )
                except IgnoredException:
                    converted_messages = []
                    logger.debug("ignored error", exc_info=True)
                except StreamRetry:
                    raise
                except StreamUnused:
                    raise
                except Exception:
                    # Ignore it, it will be retried later
                    logger.error("failed to process incomplete event", exc_info=True)
                    continue

                logger.debug("event unpacked into %s messages", len(converted_messages))
                messages.extend(converted_messages)
                deleted = await self.redis.xdel(stream_name, message_id)
                if deleted != 1:
                    # FIXME(sileht): During shutdown, heroku may have already started
                    # another worker that has already taken the lead of this stream_name.
                    # This can create duplicate events in the streams, but that should not
                    # be a big deal as the engine will not be run by the worker that is
                    # shutting down.
                    contents = await self.redis.xrange(
                        stream_name, start=message_id, end=message_id
                    )
                    if contents:
                        logger.error(
                            "message `%s` have not been deleted has expected, "
                            "(result: %s), content of current message id: %s",
                            message_id,
                            deleted,
                            contents,
                        )
        return pulls
Example #3
def event_handler(name, **kwargs):
    if name == 'request_finished':
        statsd.increment('mod_wsgi.request.count')

        application_time = kwargs.get('application_time')

        statsd.histogram('mod_wsgi.request.application_time', application_time)

        statsd.gauge('mod_wsgi.request.input_reads', kwargs.get('input_reads'))
        statsd.gauge('mod_wsgi.request.input_length', kwargs.get('input_length'))
        statsd.gauge('mod_wsgi.request.input_time', kwargs.get('input_time'))

        statsd.gauge('mod_wsgi.request.output_writes', kwargs.get('output_writes'))
        statsd.gauge('mod_wsgi.request.output_length', kwargs.get('output_length'))
        statsd.gauge('mod_wsgi.request.output_time', kwargs.get('output_time'))

        cpu_user_time = kwargs.get('cpu_user_time')
        cpu_system_time = kwargs.get('cpu_system_time')

        statsd.gauge('mod_wsgi.request.cpu_user_time', cpu_user_time)
        statsd.gauge('mod_wsgi.request.cpu_system_time', cpu_system_time)

        if cpu_user_time is not None and application_time:
            cpu_burn = (cpu_user_time + cpu_system_time) / application_time
            statsd.gauge('mod_wsgi.request.cpu_burn', cpu_burn)
Example #4
 def mates_metrics(sender: AlquilerMate, instance: AlquilerMate,
                   created: bool, **kwargs):
     if created:
         statsd.increment("ceitba.mates.count")
     elif instance.time_returned is not None:
         # Assuming these fields are datetimes, convert the resulting timedelta
         # to seconds before reporting it as a histogram value.
         time = (instance.time_returned - instance.time_taken).total_seconds()
         statsd.histogram("ceitba.mates.time", time)
Example #5
    async def _extract_pulls_from_stream(self, stream_name, installation):
        LOG.debug("read stream", stream_name=stream_name)
        messages = await self.redis.xrange(stream_name,
                                           count=config.STREAM_MAX_BATCH)
        statsd.histogram("engine.streams.size", len(messages))

        # Groups stream by pull request
        pulls = collections.OrderedDict()
        for message_id, message in messages:
            data = msgpack.unpackb(message[b"event"], raw=False)
            owner = data["owner"]
            repo = data["repo"]
            source = data["source"]
            if data["pull_number"] is not None:
                key = (owner, repo, data["pull_number"])
                group = pulls.setdefault(key, ([], []))
                group[0].append(message_id)
                group[1].append(source)
            else:
                logger = logs.getLogger(__name__, gh_repo=repo, gh_owner=owner)
                try:
                    messages.extend(await self._convert_event_to_messages(
                        stream_name, installation, owner, repo, source))
                except IgnoredException:
                    logger.debug("ignored error", exc_info=True)
                except StreamRetry:
                    raise
                except Exception:
                    # Ignore it, it will be retried later
                    logger.error("failed to process incomplete event",
                                 exc_info=True)
                    continue

                await self.redis.xdel(stream_name, message_id)
        return pulls
Example #6
def event_handler(name, **kwargs):
    if name == 'request_finished':
        statsd.increment('mod_wsgi.request.count')

        application_time = kwargs.get('application_time')

        statsd.histogram('mod_wsgi.request.application_time', application_time)

        statsd.gauge('mod_wsgi.request.input_reads', kwargs.get('input_reads'))
        statsd.gauge('mod_wsgi.request.input_length',
                     kwargs.get('input_length'))
        statsd.gauge('mod_wsgi.request.input_time', kwargs.get('input_time'))

        statsd.gauge('mod_wsgi.request.output_writes',
                     kwargs.get('output_writes'))
        statsd.gauge('mod_wsgi.request.output_length',
                     kwargs.get('output_length'))
        statsd.gauge('mod_wsgi.request.output_time', kwargs.get('output_time'))

        cpu_user_time = kwargs.get('cpu_user_time')
        cpu_system_time = kwargs.get('cpu_system_time')

        statsd.gauge('mod_wsgi.request.cpu_user_time', cpu_user_time)
        statsd.gauge('mod_wsgi.request.cpu_system_time', cpu_system_time)

        if cpu_user_time is not None and application_time:
            cpu_burn = (cpu_user_time + cpu_system_time) / application_time
            statsd.gauge('mod_wsgi.request.cpu_burn', cpu_burn)
Example #7
 def run(self):
     datadog_start_time = time.time()
     job_cfg = self.get_appserver_job_config()
     logging.info('Running rollup job...')
     appserver_jobsubmit_url = CmvLib.get_appserver_job_submit_url(
         self.appserver_host_port, self.appserver_app_name,
         self.appserver_app_type)
     rslt_json = CmvLib.submit_config_to_appserver(job_cfg,
                                                   appserver_jobsubmit_url)
     job_id = rslt_json['payload']['jobId']
     appserver_jobstatus_url = CmvLib.get_appserver_job_status_url(
         self.appserver_host_port, self.appserver_app_name, job_id)
     appserver_resp = CmvLib.poll_appserver_job_status(
         appserver_jobstatus_url)
     if appserver_resp['payload']['status'] != 'Finished':
         logging.error(
             "AppServer responded with an error. AppServer Response: %s",
             appserver_resp['payload']['result'])
         raise Exception('Error in Appserver Response.')
     else:
         logging.info("Rollup job completed successfully.")
     self.output().touch()
     statsd.histogram(self.metric_name,
                      time.time() - datadog_start_time,
                      tags=self.tag_name)
Example #8
    async def _consume_pulls(self, stream_name, pulls):
        LOG.debug("stream contains %d pulls",
                  len(pulls),
                  stream_name=stream_name)
        for (owner, repo, pull_number), (message_ids,
                                         sources) in pulls.items():

            statsd.histogram("engine.streams.batch-size", len(sources))
            for source in sources:
                if "timestamp" in source:
                    statsd.histogram(
                        "engine.streams.events.latency",
                        (datetime.datetime.utcnow() -
                         datetime.datetime.fromisoformat(
                             source["timestamp"])).total_seconds(),
                    )

            logger = daiquiri.getLogger(__name__,
                                        gh_repo=repo,
                                        gh_owner=owner,
                                        gh_pull=pull_number)

            try:
                logger.debug("engine start with %s sources", len(sources))
                start = time.monotonic()
                await self._run_engine_and_translate_exception_to_retries(
                    stream_name, owner, repo, pull_number, sources)
                await self.redis.execute_command("XDEL", stream_name,
                                                 *message_ids)
                end = time.monotonic()
                logger.debug("engine finished in %s sec", end - start)
            except IgnoredException:
                await self.redis.execute_command("XDEL", stream_name,
                                                 *message_ids)
                logger.debug("failed to process pull request, ignoring",
                             exc_info=True)
            except MaxPullRetry as e:
                await self.redis.execute_command("XDEL", stream_name,
                                                 *message_ids)
                logger.error(
                    "failed to process pull request, abandoning",
                    attempts=e.attempts,
                    exc_info=True,
                )
            except PullRetry as e:
                logger.info(
                    "failed to process pull request, retrying",
                    attempts=e.attempts,
                    exc_info=True,
                )
            except StreamRetry:
                raise
            except StreamUnused:
                raise
            except vcr_errors_CannotOverwriteExistingCassetteException:
                raise
            except Exception:
                # Ignore it, it will be retried later
                logger.error("failed to process pull request", exc_info=True)
Example #9
 def GET(self):
     start = time.time()
     delay = random.uniform(0.4, 0.9)
     sleep(delay)
     statsd.increment('web.get.count', tags=["support", "page:page2"])
     duration = time.time() - start
     statsd.histogram('web.get.latency', duration, tags=["support", "page:page2"])
     return "Messages Page"
Example #10
def showFriends():

    #metric to count the web.page_views_friends
    statsd.increment('web.page_views_friends', tags=["page:friends"])

    #metric to count the overall number of page views
    statsd.increment('web.page_views_total')

    #start timer
    start_time = time()
    print start_time
    #connection to the DB
    connection = MySQLdb.connect(host="localhost",
                                 user="******",
                                 passwd="cacapipi",
                                 db="bucketlist")

    #prepare a cursor object using cursor() method
    cursor = connection.cursor()

    #execute the SQL query using execute() method
    cursor.execute(
        "select user_name, user_username, user_password from tbl_user_friends "
    )

    #fetch all the rows from the query
    data = cursor.fetchall()

    #print the rows

    #THIS_DIR = os.path.dirname(os.path.abspath(__file__))
    # Create the jinja2 environment
    # Notice the use of trim_blocks, which greatly helps control whitespace.
    #j2_env = Environment(loader=FileSystemLoader(THIS_DIR), trim_blocks=True)
    #print j2_env.get_template('community.html').render(items=data)

    #env = Environment(loader=PackageLoader('app', 'template'))
    #template = env.get_template('community.html')
    #print template.render(items=data)

    for row in data:
        print row[0], row[1]

    cursor.close()

    #close the connection
    connection.close()

    #return timer
    duration = time() - start_time
    print duration
    statsd.histogram('databaseFriends.query.time',
                     duration,
                     tags=["page:friends"])
    statsd.gauge('test3', 300)
    #exit the program
    sys.exit()
Example #11
def log_dawg(sbs):
    statsd.increment("adsb.message", '1')
    if sbs.groundSpeed is not None:
        statsd.gauge('adsb.airspeed', sbs.groundSpeed)
    if sbs.altitude is not None: statsd.gauge('adsb.altitude', sbs.altitude)
    if sbs.track is not None: statsd.histogram('adsb.heading', sbs.track)
    if sbs.verticalRate is not None:
        if sbs.verticalRate > 0:
            statsd.gauge('adsb.ascentrate', sbs.verticalRate)
        else:
            statsd.gauge('adsb.decentrate', sbs.verticalRate)
Example #12
def gen_custom_metric():
    print(timer)
    print(ran_numb)
    # api.Metric.send(
    #     metric='custom.metrics.series',
    #     points=ran_numb,
    #     host="python.datadog_2.com",
    #     tags=["food:pancakes"]
    # )
    statsd.histogram('example_metric.histogram',
                     ran_numb,
                     tags=["environment:doghouse", "food:hotdogs"])
Example #13
def showCommunity():

    #metric to count the web.page_views_community
    statsd.increment('web.page_views_community', tags=["page:community"])

    #metric to count the overall number of page views
    statsd.increment('web.page_views_total')

    #start timer
    start_time = time()
    print start_time
    #connection to the DB
    connection = MySQLdb.connect(host="localhost", user="******", passwd="cacapipi", db="bucketlist")

    #prepare a cursor object using cursor() method
    cursor = connection.cursor()

    #execute the SQL query using execute() method
    cursor.execute("select user_name, user_username, user_password from tbl_user ")

    #fetch all the rows from the query
    data = cursor.fetchall()

    #print the rows

    #THIS_DIR = os.path.dirname(os.path.abspath(__file__))
    # Create the jinja2 environment
    # Notice the use of trim_blocks, which greatly helps control whitespace.
    #j2_env = Environment(loader=FileSystemLoader(THIS_DIR), trim_blocks=True)
    #print j2_env.get_template('community.html').render(items=data)

    #env = Environment(loader=PackageLoader('app', 'template'))
    #template = env.get_template('community.html')
    #print template.render(items=data)

    for row in data:
        print row[0], row[1]

    cursor.close()

    #close the connection
    connection.close()

    #return timer
    duration = time() - start_time
    print duration
    statsd.histogram('database.query.time', duration, tags=["page:community"])
    statsd.gauge('test2', 200)
    #exit the program
    sys.exit()
Example #14
    def _record_success_count(self, api_name):
        if settings.DEBUG:
            print('Record Success - %s' % api_name)

        metric = 'apiserver.success.%s' % api_name
        statsd.histogram(metric, 1)
        service_name = self._get_service_name(api_name)
        metric_success_all = 'apiserver.success.all'
        statsd.histogram(
            metric_success_all,
            1,
            tags=["api_name:%s" % api_name,
                  "service_name:%s" % service_name])
Example #15
    def _record_api_error(self, api_name, error_name):
        if settings.DEBUG:
            print('Record Error - %s:%s' % (api_name, error_name))

        metric = 'apiserver.errors.%s.%s' % (error_name, api_name)
        statsd.histogram(metric, 1)
        service_name = self._get_service_name(api_name)
        metric_all_errors = 'apiserver.errors.%s.all' % error_name
        statsd.histogram(
            metric_all_errors,
            1,
            tags=["api_name:%s" % api_name,
                  "service_name:%s" % service_name])
Example #16
 def _generate_metrics(self):
     nb_requests = len(self._requests)
     statsd.histogram(
         "http.client.session",
         nb_requests,
         tags=[f"hostname:{self.base_url.host}"],
     )
     if nb_requests >= LOGGING_REQUESTS_THRESHOLD:
         LOG.warning(
             "number of GitHub requests for this session crossed the threshold (%s): %s",
             LOGGING_REQUESTS_THRESHOLD,
             nb_requests,
             gh_owner=self.auth.owner,
             requests=self._requests,
         )
     self._requests = []
Example #17
 async def aclose(self):
     await super().aclose()
     nb_requests = len(self._requests)
     statsd.histogram(
         "http.client.session", nb_requests, tags=[f"hostname:{self.base_url.host}"],
     )
     if nb_requests >= LOGGING_REQUESTS_THRESHOLD:
         LOG.warning(
             "number of GitHub requests for this session crossed the threshold (%s): %s",
             LOGGING_REQUESTS_THRESHOLD,
             nb_requests,
             gh_owner=self.auth.owner,
             gh_repo=self.auth.repo,
             requests=self._requests,
         )
     self._requests = []
Example #18
def api_entry():
    start_time = time.time()

    app.logger.info('getting root endpoint')
#    return 'Entrypoint to the Application'
    name = flask_request.args.get('name', str)
    tracer.set_tags({'name': name})
    mycursor.execute("SELECT Name, UUID, Number FROM kikeyama_table where name='%s'" % name)
    myresult = mycursor.fetchall()

    # Record the latency before returning; placed after the return these
    # statsd calls would never be reached.
    duration = time.time() - start_time
    statsd.distribution('kikeyama.dogstatsd.distribution.latency', duration)
    statsd.histogram('kikeyama.dogstatsd.histogram.latency', duration)

    for x in myresult:
        result = json.dumps(x)
        return result
Example #19
    async def _consume_pulls(self, stream_name, installation, pulls):
        LOG.debug("stream contains %d pulls",
                  len(pulls),
                  stream_name=stream_name)
        for (owner, repo, pull_number), (message_ids,
                                         sources) in pulls.items():
            statsd.histogram("engine.streams.batch-size", len(sources))
            logger = logs.getLogger(__name__,
                                    gh_repo=repo,
                                    gh_owner=owner,
                                    gh_pull=pull_number)

            try:
                logger.debug("engine start with %s sources", len(sources))
                start = time.monotonic()
                await self._run_engine_and_translate_exception_to_retries(
                    installation, owner, repo, pull_number, sources)
                await self.redis.execute_command("XDEL", stream_name,
                                                 *message_ids)
                end = time.monotonic()
                logger.debug("engine finished in %s sec", end - start)
            except IgnoredException:
                await self.redis.execute_command("XDEL", stream_name,
                                                 *message_ids)
                logger.debug("failed to process pull request, ignoring",
                             exc_info=True)
            except MaxPullRetry as e:
                await self.redis.execute_command("XDEL", stream_name,
                                                 *message_ids)
                logger.error(
                    "failed to process pull request, abandoning",
                    attempts=e.attempts,
                    exc_info=True,
                )
            except PullRetry as e:
                logger.info(
                    "failed to process pull request, retrying",
                    attempts=e.attempts,
                    exc_info=True,
                )
            except StreamRetry:
                raise
            except Exception:
                # Ignore it, it will be retried later
                logger.error("failed to process pull request", exc_info=True)
Example #20
 def _generate_metrics(self) -> None:
     nb_requests = len(self._requests)
     statsd.histogram(
         "http.client.session",
         nb_requests,
         tags=[f"hostname:{self.base_url.host}"],
     )
     if ((nb_requests / self._requests_ratio) >= LOGGING_REQUESTS_THRESHOLD
             or nb_requests >= LOGGING_REQUESTS_THRESHOLD_ABSOLUTE):
         LOG.warning(
             "number of GitHub requests for this session crossed the threshold (%s/%s): %s/%s",
             LOGGING_REQUESTS_THRESHOLD,
             LOGGING_REQUESTS_THRESHOLD_ABSOLUTE,
             nb_requests / self._requests_ratio,
             nb_requests,
             gh_owner=http.extract_organization_login(self),
             requests=self._requests,
             requests_ratio=self._requests_ratio,
         )
     self._requests = []
Example #21
    def tick(self, candlestick):

        self.current_price = float(candlestick.typical_price)
        self.prices.append(self.current_price)

        # Highs
        self.currentHigh = float(candlestick.high)
        self.highs.append(self.currentHigh)

        # Lows
        self.currentLow = float(candlestick.low)
        self.lows.append(self.currentLow)

        # Closes
        self.currentClose = float(candlestick.close)
        self.closes.append(self.currentClose)

        if len(self.prices) > self.hist_period:
            # Price action
            self.output.log("\n{color}Typical Price: {price}".format(
                color=Cyan, price=str(candlestick.typical_price)))

            # MACD
            m, s, h = self.indicators.MACD(self.prices, self.macd_slow,
                                           self.macd_fast, self.macd_signal)
            self.output.log(
                "Last/Current indicator values:\tMACD {}\tSignal: {}\t Hist: {}"
                .format(str(m[-2:]), str(s[-2:]), str(h[-2:])))

            statsd.histogram('macd.macd',
                             m[-1],
                             tags=[
                                 'macd.macd_line',
                                 'bot_name:{}.bot_id:{}'.format(
                                     BOT_NAME, BOT_ID)
                             ])
            statsd.histogram('macd.signal',
                             s[-1],
                             tags=[
                                 'macd.signal_line',
                                 'bot_name:{}.bot_id:{}'.format(
                                     BOT_NAME, BOT_ID)
                             ])
            statsd.histogram('macd.histogram',
                             h[-1],
                             tags=[
                                 'macd.histogram',
                                 'bot_name:{}.bot_id:{}'.format(
                                     BOT_NAME, BOT_ID)
                             ])

            self.output.log(White)

        self.evaluate_positions()
        self.update_open_trades()
        self.show_positions()
Example #22
    def report_profiling_to_statsd(self, profiling, tag):
        """
        Report status to datadog
        :param profiling: profile values
        :param tag: tags for the profile
        :return:
        """
        metric = 'human-ai.request.duration'
        path = flask.request.path[1:].replace('/', '_')
        tags = ['type:' + (path or 'index'), 'tag:' + tag]

        # datadog prints warnings when there is no agent running,
        # suppress them here with this ugly check
        debug_or_testing = self.config.get(
            'TESTING', False) or self.config.get('DEBUG', False)

        if debug_or_testing:
            return

        try:
            for k, v in profiling.items():
                statsd.histogram(metric, v, tags + [tag + k])
        except:
            pass
Example #23
    def tick(self, price, **kwargs):
        self.current = float(price)

        # If it's a brand new price and the open price hasn't been set yet,
        # then the current price is the open price.
        if self.open is None:
            self.open = self.current

        # If it's a brand new price and the high price hasn't been set yet,
        # or if the current price is greater than the current high,
        # then set this current price as the high.
        if (self.high is None) or (self.current > self.high):
            self.high = self.current

        # If it's a brand new price and the low price hasn't been set yet,
        # or if the current price is less than the current low,
        # then set this current price as the low.
        if (self.low is None) or (self.current < self.low):
            self.low = self.current

        # If the current time is at or after the start time plus the period
        # (i.e. this will be the last price that goes into this candlestick before
        # it is added to the list of past candlesticks) then set this current price
        # as the closing price.
        if time.time() >= (self.startTime + (self.period * 60)):
            self.close = self.current
            # Determine the typical price over entire period of the candlestick.
            self.typical_price = (self.high + self.low + self.close) / float(3)

        # Show OHLC data on each tick
        self.output.log(" Open: " + str(self.open) +
                        " Close: " + str(self.close) +
                        " High: " + str(self.high) +
                        " Low: " + str(self.low) +
                        " Current: " + str(self.current))

        statsd.histogram('candlestick.price.close', self.close, tags=['candlestick.price.close', 'bot_name:{}.bot_id:{}'.format(BOT_NAME, BOT_ID)])
        statsd.histogram('candlestick.price.high', self.high, tags=['candlestick.price.high', 'bot_name:{}.bot_id:{}'.format(BOT_NAME, BOT_ID)])
        statsd.histogram('candlestick.price.low', self.low, tags=['candlestick.price.low', 'bot_name:{}.bot_id:{}'.format(BOT_NAME, BOT_ID)])
        statsd.histogram('candlestick.price.typical_price', self.typical_price, tags=['candlestick.price.typical_price', 'bot_name:{}.bot_id:{}'.format(BOT_NAME, BOT_ID)])
Example #24
 def purchaseitem_metrics(sender: PurchaseItem, instance: PurchaseItem,
                          created: bool, **kwargs):
     metric = "ceitba.purchaseitem.%s.count" % instance.product.name
     count = instance.quantity
     statsd.histogram(metric, count)
Example #25
def _record_errors(service_name):
    print("Error {0}".format(service_name))
    metric = 'availability.errors.%s' % service_name
    statsd.histogram(metric, 1)
Example #26
    async def _consume_pulls(
        self,
        installation: context.Installation,
        pulls: PullsToConsume,
    ) -> None:
        LOG.debug(
            "stream contains %d pulls", len(pulls), stream_name=installation.stream_name
        )
        for (repo, pull_number), (message_ids, sources) in pulls.items():

            statsd.histogram("engine.streams.batch-size", len(sources))
            for source in sources:
                if "timestamp" in source:
                    statsd.histogram(
                        "engine.streams.events.latency",
                        (
                            datetime.datetime.utcnow()
                            - datetime.datetime.fromisoformat(source["timestamp"])
                        ).total_seconds(),
                    )

            logger = daiquiri.getLogger(
                __name__,
                gh_repo=repo,
                gh_owner=installation.owner_login,
                gh_pull=pull_number,
            )

            attempts_key = f"pull~{installation.owner_login}~{repo}~{pull_number}"
            try:
                async with self._translate_exception_to_retries(
                    installation.stream_name, attempts_key
                ):
                    await run_engine(installation, repo, pull_number, sources)
                await self.redis_stream.hdel("attempts", attempts_key)
                await self.redis_stream.execute_command(
                    "XDEL", installation.stream_name, *message_ids
                )
            except IgnoredException:
                await self.redis_stream.execute_command(
                    "XDEL", installation.stream_name, *message_ids
                )
                logger.debug("failed to process pull request, ignoring", exc_info=True)
            except MaxPullRetry as e:
                await self.redis_stream.execute_command(
                    "XDEL", installation.stream_name, *message_ids
                )
                logger.error(
                    "failed to process pull request, abandoning",
                    attempts=e.attempts,
                    exc_info=True,
                )
            except PullRetry as e:
                logger.info(
                    "failed to process pull request, retrying",
                    attempts=e.attempts,
                    exc_info=True,
                )
            except StreamRetry:
                raise
            except StreamUnused:
                raise
            except vcr_errors_CannotOverwriteExistingCassetteException:
                raise
            except Exception:
                # Ignore it, it will be retried later
                logger.error("failed to process pull request", exc_info=True)
Example #27
    def tick(self, candlestick):
        if self.cool_down_period:
            self.cool_down_period -= 1
        # self.output.log("cool down: {}".format(self.cool_down_period))

        self.current_price = float(candlestick.typical_price)
        self.prices.append(self.current_price)

        # Highs
        self.currentHigh = float(candlestick.high)
        self.highs.append(self.currentHigh)

        # Lows
        self.currentLow = float(candlestick.low)
        self.lows.append(self.currentLow)

        # Closes
        self.currentClose = float(candlestick.close)
        self.closes.append(self.currentClose)

        # Resistance levels - highest highs
        self.highest_high = float(max(self.highs))
        # self.resistance.append(self.highest_high)
        statsd.histogram('stochastic.resistance', self.highest_high, tags=['stochastic.resistance', 'bot_name:{}.bot_id:{}'.format(BOT_NAME, BOT_ID)])

        # Support levels - lowest lows
        self.lowest_low = float(min(self.lows))
        # self.support.append(self.lowest_low)
        statsd.histogram('stochastic.support', self.lowest_low, tags=['stochastic.support', 'bot_name:{}.bot_id:{}'.format(BOT_NAME, BOT_ID)])

        if len(self.prices) > self.hist_period:
            # Price action
            self.output.log("\n{color}Typical Price: {price}".format(color=Red, price=str(candlestick.typical_price)))

            # Moving average
            m_a = self.indicators.moving_average(self.closes, self.ma_period_fast)
            self.moving_avgs.append(m_a)
            self.output.log("Current Moving Average Value: " + str(m_a))
            # self.output.log("Last {} Moving Average Values: {}".format(self.ma_period_fast, str(self.moving_avgs[-self.ma_period_fast:])))
            statsd.histogram('stochastic.moving_average', m_a, tags=['stochastic.moving_average', 'bot_name:{}.bot_id:{}'.format(BOT_NAME, BOT_ID)])

            # Slope of moving averages trend line
            slope = self.indicators.slope(self.moving_avgs, lookback=self.ma_slope_lookback)
            self.ma_slopes.append(slope)
            self.output.log("Slope over last {} periods: {}".format(str(self.ma_slope_lookback), str(self.ma_slopes[-1])))
            # self.output.log("Slope values: {}".format(str(self.ma_slopes)))
            # Create a new list of change in value between values in an old list. e.g. [1,1,2,3,5,8] == [0, 1, 1, 2, 3]
            # Simple approximation for rate of change.
            self.slope_difference = [j-i for i, j in zip(self.ma_slopes[:-1], self.ma_slopes[1:])]
            # self.output.log("Slope differences: {}".format(self.slope_difference))
            statsd.histogram('stochastic.slope', slope, tags=['stochastic.slope', 'bot_name:{}.bot_id:{}'.format(BOT_NAME, BOT_ID)])

            # Trend direction
            trend_dir = self.price_to_ma_trend(candlestick.typical_price, m_a)
            self.output.log("Price to MA trend direction: {} (price to moving average) ".format(str(trend_dir)))
            statsd.histogram('stochastic.trend', trend_dir, tags=['stochastic.trend', 'bot_name:{}.bot_id:{}'.format(BOT_NAME, BOT_ID)])

            # Stochastic
            k, d = self.indicators.STOCH(self.highs, self.lows, self.closes, fastk_period=self.stoch_period)
            self.output.log("\n{color}%K: {k}\t%D: {d}".format(color=Yellow, k=str(k[-1:]), d=str(d[-1:])))

            # Stochastic Ready to Buy - True
            if (k[-1] < self.mid_line and d[-1] < self.mid_line) and k[-1] > d[-1]:
                self.stoch_ready_to_buy = True
            # Stochastic Ready to Buy - False
            if (k[-1] > self.mid_line and d[-1] > self.mid_line) and k[-1] < d[-1]:
                self.stoch_ready_to_buy = False
            self.output.log("Stochastic ready to buy: {}".format(self.stoch_ready_to_buy))

            statsd.histogram('stochastic.percent_k', k[-1], tags=['stochastic.percent_k', 'bot_name:{}.bot_id:{}'.format(BOT_NAME, BOT_ID)])
            statsd.histogram('stochastic.percent_d', d[-1], tags=['stochastic.percent_d', 'bot_name:{}.bot_id:{}'.format(BOT_NAME, BOT_ID)])

            # Support and Resistance
            self.sup, self.res = self.indicators.support_resistance(self.closes, n=self.sr_n)
            self.output.log("\n{color}n={n} - {prices} available prices".format(color=Cyan, n=self.sr_n, prices=len(self.closes)))
            self.output.log("Support: {sup}\nResistance: {res}{color}".format(sup=self.sup, res=self.res, color=White))
            statsd.histogram('sr.support_major', min(self.sup), tags=['sr.support_major', 'bot_name:{}.bot_id:{}'.format(BOT_NAME, BOT_ID)])
            statsd.histogram('sr.resistance_major', max(self.res), tags=['sr.resistance_major', 'bot_name:{}.bot_id:{}'.format(BOT_NAME, BOT_ID)])
            statsd.histogram('sr.support_minor', max(self.sup), tags=['sr.support_minor', 'bot_name:{}.bot_id:{}'.format(BOT_NAME, BOT_ID)])
            statsd.histogram('sr.resistance_minor', min(self.res), tags=['sr.resistance_minor', 'bot_name:{}.bot_id:{}'.format(BOT_NAME, BOT_ID)])

        self.evaluate_positions()
        self.update_open_trades()
        self.show_positions()
Example #28
    async def _extract_pulls_from_stream(
        self, installation: context.Installation
    ) -> PullsToConsume:
        messages: typing.List[
            typing.Tuple[T_MessageID, T_MessagePayload]
        ] = await self.redis_stream.xrange(
            installation.stream_name, count=config.STREAM_MAX_BATCH
        )
        LOG.debug(
            "read stream",
            stream_name=installation.stream_name,
            messages_count=len(messages),
        )
        statsd.histogram("engine.streams.size", len(messages))
        statsd.gauge("engine.streams.max_size", config.STREAM_MAX_BATCH)

        # TODO(sileht): Put this cache in Repository context
        opened_pulls_by_repo: typing.Dict[
            github_types.GitHubRepositoryName,
            typing.List[github_types.GitHubPullRequest],
        ] = {}

        # Groups stream by pull request
        pulls: PullsToConsume = PullsToConsume(collections.OrderedDict())
        for message_id, message in messages:
            data = msgpack.unpackb(message[b"event"], raw=False)
            repo = github_types.GitHubRepositoryName(data["repo"])
            source = typing.cast(context.T_PayloadEventSource, data["source"])
            if data["pull_number"] is not None:
                key = (repo, github_types.GitHubPullRequestNumber(data["pull_number"]))
                group = pulls.setdefault(key, ([], []))
                group[0].append(message_id)
                group[1].append(source)
            else:
                logger = daiquiri.getLogger(
                    __name__,
                    gh_repo=repo,
                    gh_owner=installation.owner_login,
                    source=source,
                )
                if repo not in opened_pulls_by_repo:
                    try:
                        opened_pulls_by_repo[repo] = [
                            p
                            async for p in installation.client.items(
                                f"/repos/{installation.owner_login}/{repo}/pulls"
                            )
                        ]
                    except Exception as e:
                        if exceptions.should_be_ignored(e):
                            opened_pulls_by_repo[repo] = []
                        else:
                            raise

                converted_messages = await self._convert_event_to_messages(
                    installation,
                    repo,
                    source,
                    opened_pulls_by_repo[repo],
                )

                logger.debug("event unpacked into %s messages", len(converted_messages))
                messages.extend(converted_messages)
                deleted = await self.redis_stream.xdel(
                    installation.stream_name, message_id
                )
                if deleted != 1:
                    # FIXME(sileht): During shutdown, heroku may have already started
                    # another worker that has already taken the lead of this stream_name.
                    # This can create duplicate events in the streams, but that should not
                    # be a big deal as the engine will not be run by the worker that is
                    # shutting down.
                    contents = await self.redis_stream.xrange(
                        installation.stream_name, start=message_id, end=message_id
                    )
                    if contents:
                        logger.error(
                            "message `%s` have not been deleted has expected, "
                            "(result: %s), content of current message id: %s",
                            message_id,
                            deleted,
                            contents,
                        )
        return pulls
Example #29
def event_handler(name, **kwargs):
    if name == 'request_finished':
        statsd.increment('mod_wsgi.request.count')
        application_time = kwargs.get('application_time')
        statsd.histogram('mod_wsgi.request.application_time', application_time)
Example #30
def dogstats(pagetag):
    start = time.time()
    statsd.increment('web.get.count', tags=["support", pagetag])
    duration = time.time() - start
    statsd.histogram('web.get.latency', duration, tags=["support", pagetag])
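
A hypothetical caller for the dogstats() helper above, e.g. from a Flask view; the app object, route, and tag value are illustrative assumptions rather than part of the original example:

@app.route('/page2')
def page2():
    # Emits web.get.count and web.get.latency tagged with the page name.
    dogstats("page:page2")
    return "Messages Page"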
Example #31
                    err,
                    tags=['bot_name:{}.bot_id:{}'.format(BOT_NAME, BOT_ID)])
                continue
            except urllib2.ssl.SSLError, err:
                # For read timeouts
                output.log("{}... Continuing".format(err[0]))
                statsd.histogram(
                    'main_loop.urllib2.ssl.SSLError',
                    err,
                    tags=['bot_name:{}.bot_id:{}'.format(BOT_NAME, BOT_ID)])
                continue
            except Exception as err:
                # Something else happened but we still want to log it, send it to DD, and keep going.
                output.log("Unexpected error: {}".format(sys.exc_info()[0]))
                statsd.histogram(
                    'main_loop.unknown_exception',
                    err,
                    tags=['bot_name:{}.bot_id:{}'.format(BOT_NAME, BOT_ID)])
                time.sleep(30)
                continue

            #  When close price is present
            if developing_candlestick.isClosed():
                # Add the closed candlestick to the list
                candlesticks.append(developing_candlestick)
                # Enact the strategy
                strategy.tick(developing_candlestick)
                # Create a new candlestick
                developing_candlestick = BotCandlestick(period)

            time.sleep(float(poll_time))
Example #32
    async def _extract_pulls_from_stream(self,
                                         stream_name: str) -> PullsToConsume:
        messages = await self.redis.xrange(stream_name,
                                           count=config.STREAM_MAX_BATCH)
        LOG.debug("read stream",
                  stream_name=stream_name,
                  messages_count=len(messages))
        statsd.histogram("engine.streams.size", len(messages))
        statsd.gauge("engine.streams.max_size", config.STREAM_MAX_BATCH)

        opened_pulls_by_repo: typing.Dict[typing.Tuple[
            str, str], typing.List[github_types.GitHubPullRequest]] = {}

        # Groups stream by pull request
        pulls: PullsToConsume = PullsToConsume(collections.OrderedDict())
        for message_id, message in messages:
            data = msgpack.unpackb(message[b"event"], raw=False)
            owner = data["owner"]
            repo = data["repo"]
            source = data["source"]

            if data["pull_number"] is not None:
                key = (owner, repo, data["pull_number"])
                group = pulls.setdefault(key, ([], []))
                group[0].append(message_id)
                group[1].append(source)
            else:
                logger = daiquiri.getLogger(__name__,
                                            gh_repo=repo,
                                            gh_owner=owner,
                                            source=source)
                if (owner, repo) not in opened_pulls_by_repo:
                    try:
                        opened_pulls_by_repo[(
                            owner, repo)] = await self._get_pulls_for(
                                stream_name, owner, repo)
                    except IgnoredException:
                        opened_pulls_by_repo[(owner, repo)] = []

                converted_messages = await self._convert_event_to_messages(
                    owner,
                    repo,
                    source,
                    opened_pulls_by_repo[(owner, repo)],
                )

                logger.debug("event unpacked into %s messages",
                             len(converted_messages))
                messages.extend(converted_messages)
                deleted = await self.redis.xdel(stream_name, message_id)
                if deleted != 1:
                    # FIXME(sileht): During shutdown, heroku may have already started
                    # another worker that has already taken the lead of this stream_name.
                    # This can create duplicate events in the streams, but that should not
                    # be a big deal as the engine will not be run by the worker that is
                    # shutting down.
                    contents = await self.redis.xrange(stream_name,
                                                       start=message_id,
                                                       end=message_id)
                    if contents:
                        logger.error(
                            "message `%s` have not been deleted has expected, "
                            "(result: %s), content of current message id: %s",
                            message_id,
                            deleted,
                            contents,
                        )
        return pulls
Example #33
from datadog import initialize, statsd
import time
import random

options = {
    'statsd_host': '127.0.0.1',
    'statsd_port': 8125
}

initialize(**options)

statsd.increment('example_metric.increment', tags=["environment:dev"])
statsd.decrement('example_metric.decrement', tags=["environment:dev"])
statsd.gauge('example_metric.gauge', 40, tags=["environment:dev"])
statsd.set('example_metric.set', 40, tags=["environment:dev"])
statsd.histogram('example_metric.histogram', random.randint(0, 20), tags=["environment:dev"])

with statsd.timed('example_metric.timer', tags=["environment:dev"]):
    # do something to be measured
    time.sleep(random.randint(0, 10))

statsd.distribution('example_metric.distribution', random.randint(0, 20), tags=["environment:dev"])
Example #34
def main(argv):
    """
    Main entry point
    """

    # Logging
    output = BotLog()

    supported_exchanges = ['kraken']
    exchange = 'kraken'
    pair = "XXBTZUSD"  # Bitcoin/USD pair on Kraken

    period = 5  # Time frame interval in minutes, e.g. width of candlestick.
    poll_time = 1  # How often an API query is made when using real time data.

    script_help = '\n\t-c --currency <currency pair>\n\t-x --exchange <name of the exchange {exchanges}>\n\t-t --poll <poll period length in minutes>\n\nHistorical Mode\n\t-p --period <period of frame>\n\t-s --start <start time in unix timestamp>\n\t-e --end <end time in unix timestamp>\n'.format(
        exchanges=supported_exchanges)

    start_time = False
    end_time = False

    try:
        opts, args = getopt.getopt(
            argv, "h:x:p:c:t:s:e:y:",
            ["exchange=", "period=", "currency=", "poll=", "start=", "end="])
    except getopt.GetoptError:
        output.log(sys.argv[0] + script_help)
        sys.exit(2)

    for opt, arg in opts:
        if opt in ("-h", "--help"):
            output.log(sys.argv[0] + script_help)
            sys.exit()
        elif opt in ("-s", "--start"):
            start_time = arg
        elif opt in ("-e", "--end"):
            end_time = arg
        elif opt in ("-x", "--exchange"):
            if arg in supported_exchanges:
                exchange = arg
            else:
                output.log(
                    'Supported exchanges are {}'.format(supported_exchanges))
                sys.exit(2)
        elif opt in ("-p", "--period"):
            if exchange.lower() == 'kraken':
                # Kraken uses minutes for getting historical data.
                mins = [1, 5, 15, 30, 60, 240, 1440, 10080, 21600]
                if (int(arg) in mins):
                    period = int(arg)
                else:
                    output.log(
                        'Kraken requires intervals 1, 5, 15, 30, 60, 240, 1440, 10080, 21600 minute intervals'
                    )
                    sys.exit(2)
            else:
                period = int(arg)
        elif opt in ("-c", "--currency"):
            pair = arg
        elif opt in ("-t", "--poll"):
            poll_time = arg

    ################ Strategy in use ################
    strategy = MACDStrategy(pair, period)
    strategy_name = strategy.get_name()
    #################################################

    # Log bot startup event to DataDog
    statsd.event(title='Bot started',
                 text='{}:{} started on {} trading {} using {}'.format(
                     BOT_ID, BOT_NAME, exchange, pair, strategy_name),
                 alert_type='success',
                 tags=['bot_name:{}.bot_id:{}'.format(BOT_NAME, BOT_ID)])

    trade_session_details = "{bg}Trading {pair} on {exchange} with {strat}" \
                            " @ {period} minute period{White}".format(pair=pair,
                                                                      exchange=exchange.upper(),
                                                                      strat=strategy_name,
                                                                      period=period,
                                                                      bg=On_Cyan,
                                                                      White=White)

    if start_time:
        # Backtesting
        chart = BotChart(exchange, pair, period)
        for candlestick in chart.get_points():
            strategy.tick(candlestick)

        output.log(trade_session_details)

    else:
        # Live Trading
        output.log(trade_session_details)

        chart = BotChart(exchange, pair, period, backtest=False)

        candlesticks = []
        developing_candlestick = BotCandlestick(period)

        progress_counter = 0
        while True:
            # Log trade details every so often
            if progress_counter == 50:
                output.log(trade_session_details)
                progress_counter = 0
            progress_counter += 1

            try:
                developing_candlestick.tick(
                    chart.get_current_price_and_vol()[0])
            except urllib2.URLError, err:
                # If network or site is down
                output.log("{}... Continuing".format(err[0]))
                # TODO: These calls to statsd should be Rollbar. Set up Rollbar
                statsd.histogram(
                    'main_loop.urllib2.URLError',
                    err,
                    tags=['bot_name:{}.bot_id:{}'.format(BOT_NAME, BOT_ID)])
                continue
            except ValueError, err:
                # For screwy JSON
                output.log('{}... Continuing'.format(err[0]))
                statsd.histogram(
                    'main_loop.ValueError',
                    err,
                    tags=['bot_name:{}.bot_id:{}'.format(BOT_NAME, BOT_ID)])
                continue
            except urllib2.ssl.SSLError, err:
                # For read timeouts
                output.log("{}... Continuing".format(err[0]))
                statsd.histogram(
                    'main_loop.urllib2.ssl.SSLError',
                    err,
                    tags=['bot_name:{}.bot_id:{}'.format(BOT_NAME, BOT_ID)])
                continue
Example #35
 def histogram(self, metric, value, tags=[]):
     statsd.histogram(metric=metric, value=value, tags=tags)
     if self.debug:
         print "{0} = {1} :: type={3} :: tags={2}".format(metric, value, tags, 'histogram')
Example #36
def generateHistogram():
    print("Generate histogram")
    statsd.histogram('demo.python_statsd.histogram',
                     random.randint(0, 20),
                     tags=["env:" + ENV])