def get_stathat(self):
    """Return a StatHat client when enabled in config, else None.

    Reads the 'enabled' key of the 'stathat' config section; anything
    other than the literal string 'yes' disables integration.
    """
    stathat_enabled = (self.config.get('stathat', 'enabled') == 'yes')
    if not stathat_enabled:
        log.debug('Running without stathat ...')
        return None
    # Fixed typo in log message ("ingeration" -> "integration").
    log.debug('StatHat integration enabled ...')
    return StatHat()
def stat_log(statistic):
    """Log *statistic* and push a count of 1 for it to StatHat.

    The StatHat push is best-effort: any failure is logged and swallowed
    so it can never break the caller.
    """
    stats = StatHat(MY_STATHAT_USER)
    app.logger.info(statistic)
    try:
        stats.count(statistic, 1)
    except Exception:
        # Removed redundant `pass` that followed this handled branch.
        app.logger.info('push to stathat failed')
def trans_c(text, lang='zh-CN', detect=1):
    """Translate *text* into *lang* and return the cleaned result.

    When STAT is truthy, also posts a usage counter to StatHat
    (best-effort; failures are reported but never raised).
    NOTE(review): *detect* is accepted for interface compatibility but
    unused here — confirm against callers before removing.
    """
    translated_cleaned = output(trans(text, lang))
    if STAT:
        try:
            stathat = StatHat()
            stathat.ez_post_count(STAT_ACCOUNT, STAT_INSTANCE, 1)
        except Exception as e:
            # BUG FIX: `'...' + e` raised TypeError (str + Exception)
            # inside the handler; wrap with str(e) as translate_text does.
            cprint('Request succeeded but stat failed!' + str(e),
                   'white', 'on_red')
    return translated_cleaned
def main():
    # Continuously index 'air' documents from MongoDB into Elasticsearch,
    # marking each as published, optionally reporting counters to StatHat.
    # (Python 2 script: uses print statements.)
    parser = argparse.ArgumentParser()
    parser.add_argument("--last_id", type=int, default=1)         # resume after this _id
    parser.add_argument("--index", type=str, default='sunrise3')  # target ES index name
    parser.add_argument("--force", action="store_true")           # re-index even published docs
    parser.add_argument("--cached", action="store_true")          # let Factory use its cache
    parser.add_argument("--stathat", action="store_true")         # report counters to StatHat
    args = parser.parse_args()
    print args
    db = MongoClient()['test']
    es = Elasticsearch()
    stats = StatHat('hq08Ng2ujA8o3VPe')
    lastfm_url = "http://ws.audioscrobbler.com/2.0/?api_key=048cd62229f507d6d577393a6d7ac972&format=json"
    factory = Factory(db, lastfm_url)
    factory.cached = args.cached
    last_id = args.last_id
    # Poll loop: process up to 100 finished airings newer than last_id per pass.
    while True:
        # end_ts > 0: only airings that have finished.
        where = {'_id': {'$gt': last_id}, 'end_ts': {'$gt': 0}}
        #where = {'_id': {'$gt': last_id}}
        if not args.force:
            # Skip documents that were already published (pub_ts stamped below).
            where['pub_ts'] = 0
        print where
        oid = last_id
        for air in db.air.find(where).sort('ts').limit(100):
            oid = air['_id']
            audio = factory.build_audio_from_air(air)
            es.index(index=args.index, doc_type='audio', id=oid, body=audio)
            if not args.force:
                # Stamp the source doc so the next pass skips it.
                db.air.update({'_id': oid}, {'$set': {'pub_ts': int(time.time())}})
            print '---' * 10, oid
            pp(audio)
            if args.stathat:
                stats.count('index.audio', 1)
                if audio.get('is_track'):
                    stats.count('index.track', 1)
        if oid == last_id:
            # No new documents were seen this pass.
            if args.force:
                continue
            else:
                print 'wait for new tracks...'
                time.sleep(10)
        else:
            # Advance the cursor to the last document we indexed.
            last_id = oid
def stathat_count(stat, count=1):
    """Post *count* for *stat* to StatHat via the EZ API.

    Returns the API call's result on success, False when the HTTP call
    fails (after mailing the admins), or None when STATHAT_EMAIL is not
    configured in settings.
    """
    if not hasattr(settings, 'STATHAT_EMAIL'):
        # Stats not configured; silently do nothing.
        return None
    stathat = StatHat()
    try:
        result = stathat.ez_post_count(settings.STATHAT_EMAIL, stat, count)
    except HTTPError as e:
        # BUG FIX: Exception.message was removed in Python 3; str(e) is portable.
        mail_admins("StatHat API Error", str(e))
        return False
    else:
        return result
def translate_text(text, lang='zh-CN', detect=1, type=0):
    """Translate *text* and return the cleaned result.

    type == 1 uses auto language detection (trans_auto); any other value
    (0 = specific language, default "to Chinese" fallback) translates to
    *lang*.  When STAT is truthy a usage counter is posted to StatHat,
    best-effort.  NOTE(review): *type* shadows the builtin but is kept to
    preserve the keyword interface; *detect* appears unused here.
    """
    if type == 1:
        # Auto translation: let the backend detect the source language.
        translated_cleaned = output(trans_auto(text))
    else:
        # The original type==0 and else branches were byte-identical;
        # merged without behavior change.
        translated_cleaned = output(trans(text, lang))
    if STAT:
        try:
            stathat = StatHat()
            stathat.ez_post_count(STAT_ACCOUNT, STAT_INSTANCE, 1)
        except Exception as e:
            # Fixed message typo ("susceed" -> "succeeded").
            cprint('Request succeeded but stat failed!' + str(e),
                   'white', 'on_red')
            capture_message('Request succeeded but stat failed!')
    return translated_cleaned
def main(opts, bootstrap_dir, tmpdir):
    """Run the bootstrap test suite, print a summary, post run stats, exit.

    Exits the process with the number of failed tests as the status code.
    """
    walkstart = run_test.walltime()
    tests = collect_all_tests(bootstrap_dir)
    if should_run_tests_in_parallel(opts):
        run_all_tests_fast(tests, bootstrap_dir, tmpdir)
    else:
        run_all_tests_slow(tests, bootstrap_dir, tmpdir)
    walkend = run_test.walltime()

    # Idiom fix: `"x" not in xs` instead of `not "x" in xs`.
    if "--typecheck-only" not in opts.meargs:
        run_test.print_result_table(run_test.aggregate_results(all_results))
    print("Total (wall-clock) time: %d ms" % run_test.elapsed(walkstart, walkend))
    print(len(run_test.tests_passed), " tests passed")
    print(len(run_test.tests_failed), " tests failed")
    if len(run_test.tests_failed) > 0:
        for test in run_test.tests_failed:
            try:
                size = os.path.getsize(test)
                print(test + " (" + str(size) + " bytes)")
            except OSError:
                # File may have been removed; fall back to the bare name.
                # (Narrowed from a bare except.)
                print(test)
    num_tests_attempted = len(run_test.tests_passed) + len(
        run_test.tests_failed)
    num_tests_not_attempted = len(tests) - num_tests_attempted
    if num_tests_not_attempted > 0:
        print(num_tests_not_attempted, " tests not reached")

    try:
        from stathat import StatHat
        sh = StatHat()
        # tests run - counter
        sh.post_count('MjQ2IBSJUNLO7SpS4kttBQFHp2w~',
                      '3TW60dh1mJQIqFql3VSaQSBqYlVJ',
                      len(run_test.tests_passed))
        # time taken - ms
        sh.post_value('MjQ2IBSJUNLO7SpS4kttBQFHp2w~',
                      'OIy1N3KRYp84fRyXl-GljSA1enpW',
                      run_test.elapsed(walkstart, walkend))
    except Exception:
        # Stats posting is best-effort; never fail the run over it.
        # (Narrowed from a bare except so KeyboardInterrupt still propagates.)
        pass
    sys.exit(len(run_test.tests_failed))
from app import db
import datetime
from peewee import CharField, DateTimeField, IntegerField, DecimalField,\
    TextField
from playhouse.signals import Model as SignalledModel
from playhouse.signals import post_save
from stathat import StatHat

# Shared StatHat client for post_save signal reporting (account redacted).
stats = StatHat('*****@*****.**')


class Caller(db.Model, SignalledModel):
    # An inbound caller: phone number plus timestamp of the call.
    number = CharField()
    called_at = DateTimeField(default=datetime.datetime.now)


class Subscriber(db.Model, SignalledModel):
    # A subscribed phone number with its self-reported location.
    number = CharField()
    location = CharField()
    created_at = DateTimeField(default=datetime.datetime.now)


class Town(db.Model):
    # Gazetteer record; every field is optional.
    # NOTE(review): latitude/longitude and grid coordinates are stored as
    # CharField — presumably imported as raw strings; confirm before doing
    # any numeric/range queries on them.
    country = CharField(null=True)
    county = CharField(null=True)
    easting = CharField(null=True)
    grid_reference = CharField(null=True)
    latitude = CharField(null=True)
    longitude = CharField(null=True)
    northing = CharField(null=True)
    place_name = CharField(null=True)
import gzip import StringIO import itertools import datetime import calendar import urllib2 import urllib import xml.etree.cElementTree as ElementTree import os import json stathat = None try: from stathat import StatHat stathat = StatHat() except: pass class OscHandler(): def __init__(self): self.changes = {} self.nodes = {} self.ways = {} self.relations = {} self.action = "" self.primitive = {} self.missingNds = set() def startElement(self, name, attributes): if name in ('modify', 'delete', 'create'):
def stathat_count(stat, count=1):
    """Post *count* occurrences of *stat* to the StatHat EZ API.

    Does nothing (implicitly returns None) unless settings defines
    STATHAT_EMAIL; otherwise returns the API call's result.
    """
    if not hasattr(settings, 'STATHAT_EMAIL'):
        return None
    client = StatHat()
    return client.ez_post_count(settings.STATHAT_EMAIL, stat, count)
import argparse import time import math import torch from stathat import StatHat import torch.nn as nn from torch.autograd import Variable import data import model stats = StatHat('*****@*****.**') parser = argparse.ArgumentParser( description='PyTorch PennTreeBank RNN/LSTM Language Model') parser.add_argument('--data', type=str, default='./data/penn', help='location of the data corpus') parser.add_argument( '--model', type=str, default='LSTM', help='type of recurrent net (RNN_TANH, RNN_RELU, LSTM, GRU)') parser.add_argument('--emsize', type=int, default=200, help='size of word embeddings') parser.add_argument('--nhid', type=int, default=200,