Example #1
File: hafas.py Project: xneby/kolstat
	def connFromId(id, hafas = None):
		if hafas is None:
			hafas = Hafas
		if cache.in_cache(id):
			c = cache.load(id)
			return (c.sections[0].train, c)
		else:
			sid = "{}".format(int(id, 16))
			sourceId = sid[1:10]
			destinationId = sid[10:19]
			date = datetime.strptime(sid[19:], '%Y%m%d%H%M')
			source = HafasStation('dummy', sourceId, [])
			destination = HafasStation('dummy', destinationId, [])
			cl = hafas.searchConnections(source, destination, date,1)
	
			I = 10

			if datetime.combine(cl[-1].date, cl[-1].departure.time) != date:
				while datetime.combine(cl[-1].date, cl[-1].departure.time) < date:
		
					next(cl)
	
					if I == 0:
						break
					I -= 1
	
			c = cl[-1]
			c.queryRelation(hafas)

			cache.save(c, id)

			return (c.sections[0].train, c)
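
Note: the snippet above assumes a project-local cache module exposing in_cache(id), load(id) and save(obj, id). That module is not part of the excerpt; a minimal pickle-on-disk sketch of such an interface (the directory name and layout are assumptions, not the kolstat implementation) could look like this:

import os
import pickle

CACHE_DIR = 'cache'  # hypothetical location, not taken from the project

def in_cache(key):
    # True if an object was previously saved under this key
    return os.path.exists(os.path.join(CACHE_DIR, str(key)))

def load(key):
    # Return the object saved under this key
    with open(os.path.join(CACHE_DIR, str(key)), 'rb') as f:
        return pickle.load(f)

def save(obj, key):
    # Persist obj under key (argument order mirrors cache.save(c, id) above)
    os.makedirs(CACHE_DIR, exist_ok=True)
    with open(os.path.join(CACHE_DIR, str(key)), 'wb') as f:
        pickle.dump(obj, f)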
Example #2
def fetch_data(data_file):
    with open('data/{}'.format(data_file)) as file:
        data = json.load(file)

    # TODO: term must be configurable here (0 is hardcoded right now)
    d, m, y = data['terms'][0]['from']
    from_date = datetime.date(y, m, d)

    for handle in data['handles']:
        print('Processing "{}" handle'.format(handle))
        existing_submissions = cache.get('{}.submissions'.format(handle))
        if existing_submissions is None:
            existing_submissions = {}

        new_submissions = api.fetch_submissions(handle)
        # TODO: error handling

        for submission_id, submission in new_submissions.items():
            submission_id = str(submission_id)
            if submission_id not in existing_submissions:
                startTimestamp = time.mktime(from_date.timetuple())
                if submission['creationTimeSeconds'] >= startTimestamp:
                    print('trying to get source code for submission {}'.format(
                        submission_id))
                    code = fetch_submission_source_code(submission)
                else:
                    code = None
                submission['source_code'] = code

                # add new submission into existing
                existing_submissions[submission_id] = submission
                time.sleep(3)

        cache.save('{}.submissions'.format(handle), existing_submissions)
        time.sleep(5)
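
The function above is an incremental sync: load the cached dict for a handle, add only unseen submissions (optionally fetching their source code), then write the whole dict back. Stripped of the Codeforces specifics, the core pattern might be summarised as in the sketch below; the cache.get/cache.save string-keyed interface is taken from the snippet, while the helper itself is purely illustrative.

def merge_new_items(cache_key, new_items, enrich):
    # Load previously cached items (None on first run), add unseen ones,
    # then persist the merged dict back under the same key.
    existing = cache.get(cache_key) or {}
    for item_id, item in new_items.items():
        item_id = str(item_id)
        if item_id not in existing:
            existing[item_id] = enrich(item)  # e.g. attach the source code
    cache.save(cache_key, existing)
    return existing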
Example #3
def query(type=QUERY_ANIME, aid=None, **kwargs):
    """
    Query AniDB for information about the anime identified by *aid* or the
    complete list of categories.

    :param type: Either QUERY_CATEGORIES or QUERY_ANIME
    :param aid: If *type* is QUERY_ANIME, the aid of the anime
    :param kwargs: Any kwargs you want to pass to :func:`requests.get`
    :raises: ValueError if `anidb.CLIENT` or `anidb.CLIENTVERSION` are not set
    :rtype: :class:`anidb.model.Anime` or a list of
            :class:`anidb.model.Category`
    """
    if CLIENT is None or CLIENTVERSION is None:
        raise ValueError(
            "You need to assign values to both CLIENT and CLIENTVERSION")
    if type == QUERY_ANIME:
        if aid is None:
            raise TypeError("aid can't be None")
        else:
            cacheresult = cache.get(aid)
            if cacheresult is not None:
                return cacheresult

            #print ANIDB_URL % (CLIENT, CLIENTVERSION, "anime") + "&aid=%i" % aid

            response = \
                requests.get(ANIDB_URL % (CLIENT, CLIENTVERSION, "anime")
                        + "&aid=%i" % aid, **kwargs)
            result = _handle_response(response.content)
            cache.save(aid, result)
            return result
    elif type == QUERY_CATEGORIES:
        response = requests.get(
            ANIDB_URL % (CLIENT, CLIENTVERSION, "categorylist"), **kwargs)
        return _handle_response(response.content)
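
Usage is a plain read-through cache: the first query(aid=...) call hits the AniDB HTTP API and stores the parsed result under the aid, and later calls return the cached object without a request. A hedged usage sketch follows; the module path and client registration values are placeholders, not taken from the project.

import anidb

anidb.CLIENT = 'myclient'   # placeholder AniDB client registration
anidb.CLIENTVERSION = 1

anime = anidb.query(aid=1)        # fetched over HTTP, then cached via cache.save(1, ...)
anime_again = anidb.query(aid=1)  # served from cache.get(1), no request made
categories = anidb.query(type=anidb.QUERY_CATEGORIES, timeout=10)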
Example #4
    def get_lat_lng(self):
        self.tooShort = 0
        for location in self.locations:
            if len(location) > 3:
                if location not in self.latlng and location not in self.notGeolocated:
                    try:
                        locLatLng = self.geolocator.geocode(location.encode(
                            'ascii', errors='ignore'),
                                                            timeout=30)

                        if locLatLng:
                            latitude = locLatLng.latitude
                            longitude = locLatLng.longitude
                            self.latlng[location] = str(
                                locLatLng.latitude) + " " + str(
                                    locLatLng.longitude)
                            cache.save(self.latlng, 'locationLatLng')
                        else:
                            self.notGeolocated.append(location)

                    except Exception:
                        print('Error: ' + str(sys.exc_info()[0]) + '\n' +
                              str(location) + '\n\n')
            else:
                self.tooShort += 1

        cache.save(self.notGeolocated, 'failedToGeolocate')
Example #5
File: query.py Project: Fursje/Anorak
def query(type=QUERY_ANIME, aid=None, **kwargs):
    """
    Query AniDB for information about the anime identified by *aid* or the
    complete list of categories.

    :param type: Either QUERY_CATEGORIES or QUERY_ANIME
    :param aid: If *type* is QUERY_ANIME, the aid of the anime
    :param kwargs: Any kwargs you want to pass to :func:`requests.get`
    :raises: ValueError if `anidb.CLIENT` or `anidb.CLIENTVERSION` are not set
    :rtype: :class:`anidb.model.Anime` or a list of
            :class:`anidb.model.Category`
    """
    if CLIENT is None or CLIENTVERSION is None:
        raise ValueError(
                "You need to assign values to both CLIENT and CLIENTVERSION")
    if type == QUERY_ANIME:
        if aid is None:
            raise TypeError("aid can't be None")
        else:
            cacheresult = cache.get(aid)
            if cacheresult is not None:
                return cacheresult
                
            #print ANIDB_URL % (CLIENT, CLIENTVERSION, "anime") + "&aid=%i" % aid

            response = \
                requests.get(ANIDB_URL % (CLIENT, CLIENTVERSION, "anime")
                        + "&aid=%i" % aid, **kwargs)
            result = _handle_response(response.content)
            cache.save(aid, result)
            return result
    elif type == QUERY_CATEGORIES:
        response = requests.get(ANIDB_URL % (CLIENT, CLIENTVERSION,
                                "categorylist"), **kwargs)
        return _handle_response(response.content)
Example #6
def getStandings(teams):
	key = "standings_" + "_".join(teams)
	standings = load(key)
	now = datetime.now()

	if standings is None:
		data = urllib2.urlopen("http://mlb.mlb.com/lookup/json/named.standings_schedule_date.bam?season=%d&schedule_game_date.game_date='%s'&sit_code='h0'&league_id=103&league_id=104&all_star_sw='N'&version=2" % (now.year, now.strftime("%Y/%m/%d")))
		data = json.load(data)["standings_schedule_date"]["standings_all_date_rptr"]["standings_all_date"]

		rows = data[0]["queryResults"]["row"] + data[1]["queryResults"]["row"]

		standings = []

		for row in rows:
			found = getTeam(row["team_abbrev"])

			if found is not None and row["team_abbrev"] in teams:
				standings.append({
					"name": found.name,
					"abbrev": found.code,
					"subreddit": found.subreddit,
					"wins": int(row["w"]),
					"losses": int(row["l"]),
					"percent": row["pct"],
					"games_back": row["gb"]
				})

		standings.sort(lambda a, b: -1 if a["percent"] > b["percent"] else 1 if a["percent"] < b["percent"] else -1 if a["wins"] > b["wins"] else 1 if a["wins"] < b["wins"] else 0)

		save(key, standings, 30)

	return standings
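
Note that standings.sort(...) relies on the Python 2 comparison-function form of list.sort (the snippet also uses urllib2). Under Python 3 the same ordering, highest win percentage first and most wins first on ties, would be written with a key function, for example:

# Python 3 equivalent of the cmp-style sort above.
standings.sort(key=lambda s: (s["percent"], s["wins"]), reverse=True)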
Example #7
def get_job_bugzilla_suggestions_json(args,
                                      repo,
                                      job_id,
                                      include_related_bugs=False,
                                      update_cache=False):
    """get_job_bugzilla_suggestions_json

    Retrieve job_bugzilla_suggestions given args, and job_id

    """
    cache_attributes = ['treeherder', repo, 'bugzilla_suggestions']

    suggestions_data = cache.load(cache_attributes, job_id)
    if suggestions_data and not update_cache:
        suggestions = json.loads(suggestions_data)
    else:
        bugzilla_suggestions_url = '%s/api/project/%s/jobs/%s/bug_suggestions/' % (
            (URL, repo, job_id))

        suggestions = utils.get_remote_json(bugzilla_suggestions_url)
        cache.save(cache_attributes, job_id, json.dumps(suggestions, indent=2))

    if args.test_failure_pattern:
        bugzilla_suggestions = [
            suggestion for suggestion in suggestions
            if args.test_failure_pattern.search(suggestion['search'])
        ]
    else:
        bugzilla_suggestions = suggestions

    if not include_related_bugs:
        for bug_data in bugzilla_suggestions:
            del bug_data['bugs']

    return bugzilla_suggestions
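
This and the other Treeherder helpers in this listing share one cache calling convention: cache.load(cache_attributes, name) returns a previously saved JSON string (falsy on a miss) and cache.save(cache_attributes, name, data) stores one, with cache_attributes acting as a directory-like path. A file-backed sketch with that shape, offered purely as an assumption about the unshown module:

import os

CACHE_ROOT = os.path.expanduser('~/.cache/example')  # assumed location

def load(attributes, name):
    # Return the cached text stored under attributes/name, or None on a miss.
    path = os.path.join(CACHE_ROOT, *attributes, str(name))
    if not os.path.isfile(path):
        return None
    with open(path) as f:
        return f.read()

def save(attributes, name, data):
    # Write text under attributes/name, creating the directory tree as needed.
    directory = os.path.join(CACHE_ROOT, *attributes)
    os.makedirs(directory, exist_ok=True)
    with open(os.path.join(directory, str(name)), 'w') as f:
        f.write(data)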
Example #8
def blankTicker(startTime):
    matches = []

    matches.append({
        'tourny': '?',
        'team1': {
            'name': 'No matches retrieved.',
            'cc': ''
        },
        'team2': {
            'name': '',
            'cc': ''
        },
        'time': '0',
        'url': 'http://bit.ly/1xGEuiJ#button#slim',
        'is_live': False
    })

    matchtickerMd = ''
    matchMdTemplate = ('>>>\n'
                       '[~~__TOURNY__~~\n'
                       '~~__TIME__~~\n'
                       '~~__TEAM1__~~\n'
                       '~~__TEAM2__~~](__URL__#info)\n'
                       '[ ](#lang-__LANG1__)\n'
                       '[ ](#lang-__LANG2__)\n\n'
                       '>>[](#separator)\n\n')
    matchtickerMd = '[*Match Ticker*](#heading)\n\n'

    i = 0
    for match in matches:
        matchMd = matchMdTemplate
        matchMd = (matchMd.replace('__TOURNY__', match['tourny']).replace(
            '__TIME__',
            match['time']).replace('__TEAM1__',
                                   match['team1']['name']).replace(
                                       '__TEAM2__',
                                       match['team2']['name']).replace(
                                           '__LANG1__',
                                           match['team1']['cc']).replace(
                                               '__LANG2__',
                                               match['team2']['cc']).replace(
                                                   '__URL__', match['url']))
        matchtickerMd += matchMd
        i += 1
    matchtickerMd += '>>**[See all](http://bit.ly/1xGEuiJ#button#slim)**'

    cache.save('matchticker.txt', matchtickerMd)

    elapsedTime = '\BLUE(%s s)' % str(round(time() - startTime, 3))
    log.log('\t...done! (%d characters) %s \n' %
            (len(matchtickerMd), elapsedTime))

    return matchtickerMd
Example #9
def check_page():
    page = crawl(configuration['targetURL'])  # .decode("utf8")
    page_hash = md5(page)
    c = load()
    if not c['hash'] == page_hash:
        print("HASH CHANGED! (" + page_hash + ")")

        # Run a background thread to archive the page in the web archive
        start_new_thread(crawl, ("https://web.archive.org/save/" +
                                 configuration['targetURL'], False))

        # Check whether the file is online and we didn't already send the mail (if so, send it)
        match = parse(page.decode('utf8'))
        if match is not None and not c['mailSent']:
            print(
                "FILE IS ONLINE! Sending mails ... (and we didn't send them already)"
            )
            docx = crawl(match)
            for person_details in configuration['details']:
                variables = {
                    "name": person_details['name'],
                    "year": person_details['targetYear'],
                    "quarter": person_details['quarter'],
                    "mail": person_details['mail'],
                    "streetAndCity": person_details['streetAndCity'],
                    "phone": person_details['phone'],
                    "matrikelnr": person_details['matrikelnr']
                }
                res = parser.update_document_contents(docx, person_details)
                res_filename = "Antrag Wohnheimzimmer " + variables[
                    'quarter'] + " " + variables['year'] + ".docx"
                mail.send(configuration['mail'], variables, res, res_filename)
            c['mailSent'] = True

        # Send a mail regardless of the above that there is a change
        notification_conf = {
            "body":
            "Something changed! Go and visit " + configuration['targetURL'],
            "subject":
            "IMPORTANT | The watched website has changed! Go check it immediately!",
            "recipient": configuration['mail']['notificationRecipient'],
            "server": configuration['mail']['server']
        }
        if c['mailSent']:
            notification_conf[
                'body'] += "\n\n Oh and btw I already sent your reservation request ;)\n\n Have a good one!\n - AccommodationBot"
        mail.send(notification_conf)

        c['hash'] = page_hash
    else:
        print("Boring old same page...")

    save(c)
Example #10
def harvest(thesaurus):
    """Harvest information about a given Thesaurus and save cache"""
    LOG.info("Harvesting: %s", thesaurus)
    p = page.Page(WIKTIONNAIRE, thesaurusPrefix + thesaurus + fr)
    if p.exists and thesaurus not in CACHE["thesaurus"]:
        LOG.debug(thesaurus.upper())
        CACHE["thesaurus"][thesaurus] = []
        text = p.text  # reuse the page already fetched above instead of fetching it again
        wikiWords = [x[2:len(x) - 2] for x in link.findall(text)]
        for wikiWord in wikiWords:
            if sources(wikiWord):
                CACHE["thesaurus"][thesaurus].append(wikiWord)
    cache.save(CACHE)
Example #11
def save_cache(number, variant, _ = None, hafas = None):
	if _ is None:
		_ = acquire(number)
	result = _['ids']
	roz = _['roz']

	h = {}

	for day in result:
		if roz[day] in h:
			cache.link(result[day], h[roz[day]])
		else:
			_, conn = HafasTrain.connFromId(result[day], hafas)
			h[roz[day]] = result[day]
			cache.save(conn, result[day])
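
Besides save, this snippet uses cache.link(new_id, existing_id) to avoid storing the same connection once per timetable day. The semantics implied by the code are simply "make new_id resolve to what is already cached under existing_id"; a naive sketch on top of the load/save interface sketched after Example #1 (the real implementation might use aliases or hard links instead):

def link(new_key, existing_key):
    # Make new_key return the object already cached under existing_key.
    save(load(existing_key), new_key)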
Example #12
def load_matrix(path):
    '''
    path: `str`
        Path to .mtx file.

    Returns:
    matrix: `numpy.ndarray(np.float32)`, (N,N)
        Number of people traveling from zone `i` to zone `j` in `matrix[i,j]`.
    zones: `numpy.ndarray(str)`, (N)
        Name of zone `i` in `zones[i]`.
    '''
    cachename = path
    r = cache.load(cachename)
    if r is not None: return r

    m = ReadPTVMatrix(filename=path)
    matrix = m['matrix'].astype(np.float32)
    ids = [int(z.coords['zone_no'].data) for z in m['zone_name']]

    origins = [int(v.data) for v in matrix['origins']]
    destinations = [int(v.data) for v in matrix['destinations']]
    assert origins == ids, \
            "different order in matrix['origins'] and zone_name"
    assert destinations == ids, \
            "different order in matrix['destinations'] and zone_name"

    zonenames = np.array([str(z.data) for z in m['zone_name']])

    r = matrix.data, zonenames
    return cache.save(cachename, r)
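
Two conventions of the unshown cache module matter here: load returns None on a miss, and save returns the value it just stored, which is what makes return cache.save(cachename, r) work. A minimal in-memory sketch honouring both conventions (an assumption, not the project's code):

_store = {}

def load(key):
    # None signals a cache miss to callers such as load_matrix().
    return _store.get(key)

def save(key, value):
    # Returning the value allows the "return cache.save(key, value)" idiom.
    _store[key] = value
    return value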
Example #13
def load_dumps(regex=""):
    """
    >>> D = load_dumps()         #doctest: +ELLIPSIS
    Input files:
    ...
    >>> D
    [Dump of test-0xff000000.bin]
    """
    bins, loadaddrs, idcs = GetInputFiles(regex)
    D = {}
    for b,a in loadaddrs.iteritems():
        D[b] = Dump(bin=b, RAWASM = disasm_dump(b,a))
        D[b].loadaddr = a

    for b in bins:
        D[b].FUNCS = {}
        D[b].FUNCENDS = {}
        D[b].WHICHFUNC = {}
        D[b].A2N = {}
        D[b].N2A = {}

    for b,i in idcs.iteritems(): # this needs cleanup
        D[b].A2N, D[b].N2A, D[b].FUNCS = cache.access(i, idc.parse)
        D[b].update_func_indexes()
        D[b].idc = i


    for b,a in loadaddrs.iteritems():
        D[b]._loadednames = {}
        D[b]._loadednames.update(D[b].N2A)
        D[b].ROM, D[b].MNEF, D[b].ARGS, refs, D[b].DISASM = cache.access(b, lambda b: parse_disasm(D[b]))
        D[b].minaddr = min(D[b].ROM)
        D[b].maxaddr = max(D[b].ROM)
        D[b].REFLIST = list(refs.iteritems())
        D[b].A2REFS, D[b].REF2AS = cache.access(b, lambda b: index_refs(refs, D[b].ROM))
        
    for b,a in loadaddrs.iteritems():
        D[b].STRINGS # compute them
        remove_autogen_string_names(D[b])

    cache.save()
    
    if len(D) == 1:
        print "Auto-selecting dump %s" % D[bins[0]].bin
        idapy.select_dump(D[bins[0]])

    return sorted(D.values(), key=lambda x: x.bin)
Example #14
def get_pushes_jobs_json(args, repo, update_cache=False):
    """get_pushes_jobs_json

    Retrieve nested pushes, jobs matching args set via push_args
    parser and job_args parser.

    """
    if hasattr(args, 'update_cache'):
        update_cache = args.update_cache

    cache_attributes_push_jobs = ['treeherder', repo, 'push_jobs']

    pushes = get_pushes_json(args, repo, update_cache=update_cache)

    for push in pushes:
        push_jobs_data = cache.load(cache_attributes_push_jobs, push['id'])
        if push_jobs_data and not update_cache:
            jobs = json.loads(push_jobs_data)
        else:
            jobs = retry_client_request(CLIENT.get_jobs,
                                        3,
                                        repo,
                                        push_id=push['id'],
                                        count=None)
            cache.save(cache_attributes_push_jobs, push['id'],
                       json.dumps(jobs, indent=2))

        if not args.job_filters:
            push['jobs'] = jobs
        else:
            push['jobs'] = []
            for job in jobs:
                include = True
                for filter_name in args.job_filters:
                    include &= args.job_filters[filter_name].search(
                        job[filter_name]) is not None
                if include:
                    push['jobs'].append(job)
        if args.add_bugzilla_suggestions:
            for job in push['jobs']:
                if job['result'] != 'testfailed':
                    job['bugzilla_suggestions'] = []
                    continue
                job['bugzilla_suggestions'] = get_job_bugzilla_suggestions_json(
                    args, repo, job['id'], update_cache=update_cache)
    return pushes
Example #15
def buildMarkdown():
    log.log('\n> Retrieving community metrics...')
    startTime = time()

    settings = getSettings()
    markdown = ''

    if settings['dev_mode'] == True:
        log.log('\t... done! (using a cached copy)')
        return cache.read('community_metrics.txt')
    

    if settings['sidebar']['social']['ts_enabled']:
        teamspeakMd = getTeamspeakUsersMarkdown(settings)
    else:
        log.log('\t\REDTeamspeak metrics disabled.')
        teamspeakMd = None
    
    if settings['sidebar']['social']['irc_enabled']:
        ircMd = getIrcUsersMarkdown(settings)
    else:
        log.log('\t\REDIRC metrics disabled.')
        ircMd = None

    if settings['sidebar']['social']['discord_enabled']:
        disMd = getDiscordUsersMarkdown(settings)
    else:
        log.log('\t\REDDiscord metrics disabled.')
        disMd = None


    if teamspeakMd is not None:
        markdown += teamspeakMd + '\n'
    if ircMd is not None:
        markdown += ircMd + '\n'
    if disMd is not None:
        markdown += disMd

    # markdown = teamspeakMd + '\n' + ircMd + '\n' + disMd
    # markdown = teamspeakMd + '\n' + ircMd + '\n' + ' '
    cache.save('community_metrics.txt', markdown)

    elapsedTime = '\BLUE(%s s)' % str(round(time() - startTime, 3))
    log.log('\GREENDone retrieving community metrics. %s \n' % elapsedTime)

    return markdown
Example #16
def send_smtp_test():
    c = load()
    if not c['smtpTestSent']:
        c['smtpTestSent'] = True
        save(c)
        notification_conf = {
            "body":
            "This is a test of your smtp settings.\nYour final mail will be sent to "
            + ", ".join(configuration["mail"]["recipient"]) +
            ".\n\n- Accommodation Bot",
            "subject":
            "SMTP Settings Test!",
            "recipient":
            configuration['mail']['notificationRecipient'],
            "server":
            configuration['mail']['server']
        }
        mail.send(notification_conf)
Example #17
def get_job_by_repo_job_id_json(args, repo, job_id, update_cache=False):
    """get_job_by_repo_job_id_json

    Retrieve job given args, repo and job_id

    """
    cache_attributes = ['treeherder', repo, 'jobs']

    job_data = cache.load(cache_attributes, job_id)
    if job_data and not update_cache:
        jobs = [json.loads(job_data)]
    else:
        jobs = retry_client_request(CLIENT.get_jobs, 3, repo, id=job_id)
        if jobs:
            for job in jobs:
                cache.save(cache_attributes, job['id'],
                           json.dumps(job, indent=2))

    return jobs[0]
Example #18
    def add_default_keys(self):
        # dict = {}
        dict = cache.load('newReversedGroupedLocations')

        self.prev_validated = []  # cache.load('previously_validated')

        for key in self.grouped.keys():
            newKey = self.generateKey(key)
            if newKey is not None:
                if newKey not in dict.keys():
                    newKey = newKey.strip()
                    dict[newKey] = {}
                    dict[newKey]['latlng'] = key
                    dict[newKey]['locations'] = self.grouped[key]
                else:
                    # Append contents of conflicting key to existing key
                    dict[newKey]['locations'] = dict[newKey][
                        'locations'] + self.grouped[key]
                cache.save(dict, 'redo_dict_grouped_locations')
                cache.save(self.prev_validated, 'redo_previously_validated')
Example #19
def get_bug_job_map_json(args, repo, job_id, update_cache=False):
    """get_bug_job_map_json

    Retrieve bug_job_map given args, repo and job_id

    """
    cache_attributes = ['treeherder', repo, 'bug-job-map']

    bug_job_map_url = '%s/api/project/%s/bug-job-map/?job_id=%s' % (
        (URL, repo, job_id))

    bug_job_map_data = cache.load(cache_attributes, job_id)
    if bug_job_map_data and not update_cache:
        bug_job_map = json.loads(bug_job_map_data)
        bug_job_map_data = None
    else:
        bug_job_map = utils.get_remote_json(bug_job_map_url)
        cache.save(cache_attributes, job_id, json.dumps(bug_job_map, indent=2))

    return bug_job_map
Example #20
  def save_search(self):
    """
    Saves a currently displayed search for future loading.
    """

    # Retrieve query which generated this view
    query = self.app.searchHandler.lastQueryExecuted

    # Create list containing desired identifier (from text box), and the query
    subList = [self.app.searchForm.infoWindowWidgets['saveEdit'].text(), query]

    # Append to internal list of saved searches and save to backing store
    self.app.savedSearches.append(subList)
    cache.save(self.app.savedSearches, 'savedSearches')

    # Re-load the history / saved searches table to display our newly saved search
    self.app.initialize_history_table()
    #self.app.debugDialog.add_line('{0}: saved new search under name {1}'.format(sys._getframe().f_code.co_name),
    # self.app.searchForm.infoWindowWidgets['saveEdit'].text())

    return
Example #21
def get_pushes_json(args, repo, update_cache=False):
    """get_pushes_json

    Retrieve pushes matching args set via the pushes_parser.
    """
    cache_attributes = ['treeherder', repo, 'push']

    push_params = get_treeherder_push_params(args)

    all_pushes = []
    # CLIENT.MAX_COUNT is 2000 but for pushes, the maximum is 1000.
    # We need to fudge this.
    max_count = CLIENT.MAX_COUNT
    CLIENT.MAX_COUNT = 1000
    try:
        all_pushes = retry_client_request(CLIENT.get_pushes, 3, repo,
                                          **push_params)
    finally:
        CLIENT.MAX_COUNT = max_count

    if all_pushes is None:
        logger.warning("get_pushes_json({}, {}) is None".format(args, repo))
        return []

    for push in all_pushes:
        cache.save(cache_attributes, push['id'], json.dumps(push, indent=2))

    if not args.push_filters or 'comments' not in args.push_filters:
        pushes = all_pushes
    else:
        pushes = []
        for push in all_pushes:
            include = True
            for filter_name in args.push_filters:
                for revision in push['revisions']:
                    include &= args.push_filters[filter_name].search(
                        revision[filter_name]) is not None
            if include:
                pushes.append(push)
    return pushes
Example #22
def load_zones(path):
    '''
    path: str
        Path to .gpkg file.

    Returns:
    zone_to_canton: `dict`
        Mapping from zone name to canton code (e.g. 'Dietlikon' -> 'ZH')
    '''
    cachename = path
    r = cache.load(cachename)
    if r is not None: return r

    gdf = gpd.read_file(path)
    zonenames = list(map(str, gdf.N_Gem))
    zonecantons = list(map(str, gdf.N_KT))

    zone_to_canton = {}

    for name, canton in zip(zonenames, zonecantons):
        zone_to_canton[name] = canton

    r = zone_to_canton
    return cache.save(cachename, r)
Example #23
def get_test_isolation_bugzilla_data(args):
    """Query Bugzilla for bugs marked with [test isolation] in the
    whiteboard.  Return a dictionary keyed by revision url containing
    the bug id and summary.

    """
    cache_attributes = ['test-isolation']

    bugzilla_data = cache.load(cache_attributes, 'bugzilla.json')
    if bugzilla_data and not args.update_cache:
        return json.loads(bugzilla_data)

    now = datetime.datetime.now()

    data = {}

    re_logview = re.compile(
        r'https://treeherder.mozilla.org/logviewer.html#\?job_id=([0-9]+)&repo=([a-z-]+)'
    )
    re_pushlog_url = re.compile(r'(https://.*)$\n', re.MULTILINE)

    query = BUGZILLA_URL + 'bug?'
    query_terms = {
        'include_fields': 'id,creation_time,whiteboard',
        'creation_time': args.bug_creation_time,
        'whiteboard': args.whiteboard,
        'limit': 100,
        'offset': 0,
    }
    if args.bugs:
        query_terms['id'] = ','.join([str(id) for id in args.bugs])
    else:
        query_terms['creation_time'] = args.bug_creation_time

    while True:
        response = utils.get_remote_json(query, params=query_terms)
        if 'error' in response:
            logger.error('Bugzilla({}, {}): {}'.format(query, query_terms,
                                                       response))
            return

        if len(response['bugs']) == 0:
            break

        # update query terms for next iteration of the loop.
        query_terms['offset'] += query_terms['limit']

        for bug in response['bugs']:
            #https://bugzilla.mozilla.org/rest/bug/1559260/comment

            if args.bugs_after and bug['id'] <= args.bugs_after:
                continue

            if args.whiteboard not in bug['whiteboard']:
                # The Bugzilla query matches all words rather than the exact
                # substring, so restrict results to the substring here.
                continue

            if args.bugs and bug['id'] not in args.bugs:
                continue

            query2 = BUGZILLA_URL + 'bug/%s' % bug['id']
            response2 = utils.get_remote_json(query2)
            if 'error' in response2:
                logger.error('Bugzilla({}): {}'.format(query2, response2))
                return

            bug_summary = response2['bugs'][0]['summary']
            munged_bug_summary = bugzilla_summary_munge_failure(bug_summary)

            query3 = BUGZILLA_URL + 'bug/%s/comment' % bug['id']
            response3 = utils.get_remote_json(query3)
            if 'error' in response3:
                logger.error('Bugzilla({}): {}'.format(query, response3))
                return

            raw_text = response3['bugs'][str(
                bug['id'])]['comments'][0]['raw_text']

            match = re_logview.search(raw_text)
            if match:
                # Get push associated with this failed job.
                job_id = int(match.group(1))
                repo = match.group(2)
                job = get_job_by_repo_job_id_json(
                    args, repo, job_id, update_cache=args.update_cache)
                push_id = job['push_id']
                push = get_push_json(args,
                                     repo,
                                     push_id,
                                     update_cache=args.update_cache)
                repository = get_repository_by_id(
                    push['revisions'][0]['repository_id'])
                revision = push['revisions'][0]['revision']
                revision_url = '%s/rev/%s' % (repository['url'], revision)

                new_args = copy.deepcopy(args)
                new_args.revision_url = revision_url
                (new_args.repo, _,
                 new_args.revision) = new_args.revision_url.split('/')[-3:]
                new_args.add_bugzilla_suggestions = True
                new_args.state = 'completed'
                new_args.result = 'success|testfailed'
                #new_args.job_type_name = '^test-'
                new_args.job_type_name = job['job_type_name']
                new_args.test_failure_pattern = TEST_FAILURE_PATTERN
                pushes_args.compile_filters(new_args)
                jobs_args.compile_filters(new_args)

                if revision_url not in data:
                    data[revision_url] = []

                mozharness_failure = match_bug_summary_to_mozharness_failure(
                    bug_summary, raw_text)

                test = None
                if mozharness_failure:
                    test = get_test(mozharness_failure)
                    pattern = convert_failure_to_pattern(mozharness_failure)
                if not test:
                    test = get_test(munged_bug_summary)
                    pattern = convert_failure_to_pattern(munged_bug_summary)
                if not test:
                    logger.warning('Unable to obtain test for '
                                   'bug {} {} failure {}'.format(
                                       bug['id'], bug_summary,
                                       mozharness_failure))

                bug_data = {
                    'bug_id':
                    bug['id'],
                    'bug_summary':
                    bug_summary,
                    'munged_bug_summary':
                    munged_bug_summary,
                    'job_type_name':
                    job['job_type_name'],
                    'test':
                    test,
                    'mozharness_failure':
                    mozharness_failure,
                    'job_id':
                    job_id,
                    'push_id':
                    push_id,
                    'repository':
                    repository['name'],
                    'revision_url':
                    revision_url,
                    'bugzilla_suggestions':
                    get_job_bugzilla_suggestions_json(
                        new_args,
                        new_args.repo,
                        job_id,
                        update_cache=args.update_cache),
                    'bug_job_map':
                    get_bug_job_map_json(new_args,
                                         new_args.repo,
                                         job_id,
                                         update_cache=args.update_cache),
                    'pattern':
                    pattern,
                }

                data[revision_url].append(bug_data)

                # Get failure counts for trunk for this bug for the two weeks following
                # the creation of the bug. Ignore failure counts for bugs that are less
                # than 2 weeks old.
                # TODO: Allow in place updating of bugzilla.json so that we can reprocess
                # the failure counts without having to query the full set of bugs.
                start_date = datetime.datetime.strptime(
                    bug['creation_time'].rstrip('Z'),
                    '%Y-%m-%dT%H:%M:%S') - datetime.timedelta(days=1)
                end_date = start_date + datetime.timedelta(days=15)
                failure_count_json = get_failure_count_json(
                    args, 'trunk', bug['id'], start_date, end_date)
                if now - start_date < datetime.timedelta(days=15):
                    failure_count = None
                else:
                    failure_count = 0
                    for failures in failure_count_json:
                        failure_count += failures['failure_count']
                bug_data['failure_count'] = failure_count

            elif args.whiteboard and False:  # Disabled as it is buggy.
                # Either this run has specified the test, or this is a bug
                # that was not filed from Treeherder. If it was marked
                # via the whiteboard then we are interested in the
                # pushes for this bug. Since we can't really tell
                # which is which, we can include all of the pushes,
                # since only those with test isolation jobs will
                # matter. The problem is that this bug does not
                # necessarily have a bug_summary referencing a test
                # failure...
                test = None  # We don't have a failure in this case.
                comments = response3['bugs'][str(bug['id'])]['comments']
                for comment in comments:
                    if not comment['raw_text'].startswith('Pushed by'):
                        continue
                    # Get the last revision in the comment as the head of the push.
                    revision_url = None
                    pushlog_url_match = re_pushlog_url.search(
                        comment['raw_text'])
                    while pushlog_url_match:
                        revision_url = pushlog_url_match.group(1)
                        pushlog_url_match = re_pushlog_url.search(
                            comment['raw_text'], pushlog_url_match.end(1))
                    if revision_url:
                        # revision_url from Bugzilla has the 12 character revision.
                        new_args = copy.deepcopy(args)
                        new_args.revision_url = revision_url
                        (new_args.repo, _, new_args.revision
                         ) = new_args.revision_url.split('/')[-3:]
                        new_args.add_bugzilla_suggestions = True
                        new_args.state = 'completed'
                        new_args.job_type_name = '^test-'
                        new_args.test_failure_pattern = TEST_FAILURE_PATTERN
                        pushes_args.compile_filters(new_args)
                        jobs_args.compile_filters(new_args)

                        pushes = get_pushes_jobs_json(
                            new_args,
                            new_args.repo,
                            update_cache=args.update_cache)
                        if len(pushes):
                            # Convert the revision url to 40 characters.
                            push = pushes[0]
                            repository = get_repository_by_id(
                                push['revisions'][0]['repository_id'])
                            revision = push['revisions'][0]['revision']
                            revision_url = '%s/rev/%s' % (repository['url'],
                                                          revision)
                            new_args.revision_url = revision_url
                            (new_args.repo, _, new_args.revision
                             ) = new_args.revision_url.split('/')[-3:]

                            if revision_url not in data:
                                data[revision_url] = []

                            push_id = push['id']
                            repository = get_repository_by_id(
                                push['revisions'][0]['repository_id'])
                            # Only the original job is of interest for collecting the bugzilla data.
                            # The others are the retriggers.
                            #  There shouldn't be a bug_job_map or bugzilla_suggestions for non-classified bugs.
                            job_id = push['jobs'][0]

                            bug_data = {
                                'bug_id': bug['id'],
                                'bug_summary': bug_summary,
                                'test': test,
                                'job_id': job_id,
                                'push_id': push_id,
                                'repository': repository['name'],
                                'revision_url': revision_url,
                                'bugzilla_suggestions': [],
                                'bug_job_map': [],
                                'pattern':
                                convert_failure_to_pattern(bug_summary),
                            }
                            data[revision_url].append(bug_data)

                            # Get failure counts for trunk for this bug for the two weeks following
                            # the creation of the bug. Ignore failure counts for bugs that are less
                            # than 2 weeks old. Use the previous day for the start date and 15 days
                            # to account for timezone issues.
                            # TODO: Allow in place updating of bugzilla.json so that we can reprocess
                            # the failure counts without having to query the full set of bugs.
                            start_date = datetime.datetime.strptime(
                                bug['creation_time'].rstrip('Z'),
                                '%Y-%m-%dT%H:%M:%S') - datetime.timedelta(
                                    days=1)
                            end_date = start_date + datetime.timedelta(days=15)
                            failure_count_json = get_failure_count_json(
                                args, 'trunk', bug['id'], start_date, end_date)
                            if now - start_date < datetime.timedelta(days=15):
                                failure_count = None
                            else:
                                failure_count = 0
                                for failures in failure_count_json:
                                    failure_count += failures['failure_count']
                            bug_data['failure_count'] = failure_count

    cache.save(cache_attributes, 'bugzilla.json', json.dumps(data, indent=2))

    return data
Example #24
def get_pushes_jobs_job_details_json(args, repo, update_cache=False):
    """get_pushes_jobs_job_details_json

    Retrieve nested pushes, jobs, job details matching args set via
    push_args parser and job_args parser.

    """
    if hasattr(args, 'update_cache'):
        update_cache = args.update_cache

    cache_attributes = ['treeherder', repo, 'job_details']

    pushes = get_pushes_jobs_json(args, repo, update_cache=update_cache)

    for push in pushes:
        for job in push['jobs']:
            # job['job_guid'] contains a slash followed by the run number.
            # Convert this into a value which can be used as a file name
            # by replacing / with _.
            job_guid_path = job['job_guid'].replace('/', '_')
            job_details_data = cache.load(cache_attributes, job_guid_path)
            if job_details_data and not update_cache:
                job['job_details'] = json.loads(job_details_data)
            else:
                job['job_details'] = []
                # We can get all of the job details from CLIENT.get_job_details while
                # get_job_log_url only gives us live_backing.log and live.log.
                job['job_details'] = retry_client_request(
                    CLIENT.get_job_details, 3, job_guid=job['job_guid'])
                if job['job_details'] is None:
                    logger.warning("Unable to get job_details for job_guid %s",
                                   job['job_guid'])
                    continue
                cache.save(cache_attributes, job_guid_path,
                           json.dumps(job['job_details'], indent=2))

            if hasattr(args, 'add_resource_usage') and args.add_resource_usage:
                for attempt in range(3):
                    try:
                        for job_detail in job['job_details']:
                            if job_detail['value'] == 'resource-usage.json':
                                resource_usage_name = job_guid_path + '-' + job_detail[
                                    'value']
                                job_detail_resource_usage_data = cache.load(
                                    cache_attributes, resource_usage_name)
                                if job_detail_resource_usage_data and not update_cache:
                                    job['resource_usage'] = json.loads(
                                        job_detail_resource_usage_data)
                                    job_detail_resource_usage_data = None
                                else:
                                    job['resource_usage'] = utils.get_remote_json(
                                        job_detail['url'])
                                    cache.save(
                                        cache_attributes, resource_usage_name,
                                        json.dumps(job['resource_usage'],
                                                   indent=2))
                                break
                        break
                    except requests.HTTPError as e:
                        if '503 Server Error' not in str(e):
                            raise
                        logger.exception(
                            'get_job_details resource-usage.json attempt %s',
                            attempt)
                    except requests.ConnectionError:
                        logger.exception(
                            'get_job_details resource-usage.json attempt %s',
                            attempt)
                    if attempt != 2:
                        time.sleep(30)
                if attempt == 2:
                    logger.warning("Unable to get job_details for job_guid %s",
                                   job['job_guid'])
                    continue
    return pushes
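
The inner for attempt in range(3) loop above is a hand-rolled retry with a 30-second pause between attempts on 503s and connection errors. The same idea as a small standalone helper, purely illustrative and not the project's retry_client_request:

import time

def retry(func, attempts=3, delay=30, retriable=(Exception,)):
    # Call func(), retrying on the given exception types with a fixed delay.
    for attempt in range(attempts):
        try:
            return func()
        except retriable:
            if attempt == attempts - 1:
                raise
            time.sleep(delay)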
Example #25
def buildMarkdown():
    log.log('> Beginning to build the matchticker...')
    startTime = time()

    settings = getSettings()

    if settings['dev_mode'] == True:
        log.log('\t...done! (using a cached copy)')
        return cache.read('matchticker.txt')

    if 'api_key' not in settings or 'gosugamers' not in settings['api_key']:
        log.error('No GosuGamers API key -- cannot build matchticker.')
        return ''

    # Get the stream information
    try:
        api_url = ''
        req = requests.get(api_url % settings['api_key']['gosugamers'])
    except requests.exceptions.RequestException as e:
        elapsedTime = '\BLUE(%s s)' % str(round(time() - startTime, 3))
        log.error('From GosuGamers API: %s %s' % (str(e), elapsedTime), 1)
        return ''
    if req.status_code == 403 or not req.ok or 'IP Address Not Allowed' in str(
            req.content):
        elapsedTime = '\BLUE(%s s)' % str(round(time() - startTime, 3))
        log.error('GosuGamers rejected our IP ' + elapsedTime, 1)
        return blankTicker(startTime)
    try:
        upcomingMatches = req.json()['matches']
    except Exception as e:
        elapsedTime = '\BLUE(%s s)' % str(round(time() - startTime, 3))
        log.error(
            'Issue with GosuGamers API JSON: %s %s' % (str(e), elapsedTime), 1)
        return ''

    # Matches to display
    matches = []
    gamesToGrab = 0

    if len(upcomingMatches) == 0:
        return blankTicker(startTime)

    if len(upcomingMatches) < settings['sidebar']['matchticker']['max_shown']:
        gamesToGrab = len(upcomingMatches)
    else:
        gamesToGrab = settings['sidebar']['matchticker']['max_shown']
    for i in range(0, gamesToGrab):
        matches.append({
            'tourny':
            prepareTournyTitle(upcomingMatches[i]['tournament']['name']),
            'team1': {
                'name':
                str(upcomingMatches[i]['firstOpponent']['shortName']),
                'cc':
                str(upcomingMatches[i]['firstOpponent']['country']
                    ['countryCode']).lower()
            },
            'team2': {
                'name':
                str(upcomingMatches[i]['secondOpponent']['shortName']),
                'cc':
                str(upcomingMatches[i]['secondOpponent']['country']
                    ['countryCode']).lower()
            },
            'time':
            getMatchTime(upcomingMatches[i]['datetime']),
            'url':
            upcomingMatches[i]['pageUrl'],
            'is_live':
            bool(upcomingMatches[i]["isLive"])
        })
    # Build the markdown
    matchtickerMd = ''
    matchMdTemplate = ('>>>\n'
                       '[~~__TOURNY__~~\n'
                       '~~__TIME__~~\n'
                       '~~__TEAM1__~~\n'
                       '~~__TEAM2__~~](__URL__#info)\n'
                       '[ ](#lang-__LANG1__)\n'
                       '[ ](#lang-__LANG2__)\n\n'
                       '>>[](#separator)\n\n')
    matchtickerMd = '[*Match Ticker*](#heading)\n\n'
    i = 0
    for match in matches:
        matchMd = matchMdTemplate
        matchMd = (matchMd.replace('__TOURNY__', match['tourny']).replace(
            '__TIME__',
            match['time']).replace('__TEAM1__',
                                   match['team1']['name']).replace(
                                       '__TEAM2__',
                                       match['team2']['name']).replace(
                                           '__LANG1__',
                                           match['team1']['cc']).replace(
                                               '__LANG2__',
                                               match['team2']['cc']).replace(
                                                   '__URL__', match['url']))
        matchtickerMd += matchMd
        i += 1
    matchtickerMd += '>>**[See all](http://bit.ly/1xGEuiJ#button#slim)**'

    cache.save('matchticker.txt', matchtickerMd)

    characters = '\YELLOW(%d characters)' % len(matchtickerMd)
    elapsedTime = '\BLUE(%s s)' % str(round(time() - startTime, 3))
    log.log('\t\GREEN...done! %s %s \n' % (characters, elapsedTime))

    return matchtickerMd
Example #26
            settings['sidebar']['livestreams']['spritesheet_name'],
            livestreams['spritesheet_path'])
    except praw.exceptions.APIException as e:
        print(e)
    log.log('\t\GREEN...done! \BLUE(%s s)\n' %
            str(round(time() - startTime, 3)))

# Get the PRAW subreddit object
subreddit = r.subreddit(settings['subreddit'])

# Upload the new sidebar markdown if it's any different
if cache.read('sidebar_markdown.txt') != sidebar:
    startTime = time()
    log.log('> Uploading sidebar markdown...')
    subreddit.mod.update(description=sidebar)
    cache.save('sidebar_markdown.txt', sidebar)
    log.log('\t\GREEN...done! \BLUE(%s s)\n' %
            str(round(time() - startTime, 3)))
else:
    log.log('Not uploading sidebar -- it hasn\'t changed!')

# Upload the new stylesheet
# (ALWAYS! Any image changes rely on this being uploaded)
if stylesheet != None:
    startTime = time()
    log.log('> Uploading stylesheet...')
    subreddit.stylesheet.update(stylesheet=stylesheet)
    cache.save('stylesheet.txt', stylesheet)
    log.log('\t\GREEN...done! \BLUE(%s s) \n\n' %
            str(round(time() - startTime, 3)))
Example #27
def summarize_isolation_pushes_jobs_json(args):

    pushes = []

    test_isolation_bugzilla_data = get_test_isolation_bugzilla_data(args)
    for revision_url in test_isolation_bugzilla_data:
        revision_data = test_isolation_bugzilla_data[revision_url]
        new_args = copy.deepcopy(args)
        new_args.revision_url = revision_url
        (new_args.repo, _,
         new_args.revision) = new_args.revision_url.split('/')[-3:]
        new_args.add_bugzilla_suggestions = True
        new_args.state = 'completed'
        new_args.result = 'success|testfailed'
        new_args.job_type_name = '^test-'
        new_args.test_failure_pattern = TEST_FAILURE_PATTERN
        jobs_args.compile_filters(new_args)

        # Load the pushes/jobs data from cache if it exists.
        cache_attributes = ['test-isolation', new_args.repo]
        pushes_jobs_data = cache.load(cache_attributes, new_args.revision)
        if pushes_jobs_data and not args.update_cache:
            new_pushes = json.loads(pushes_jobs_data)
        else:
            new_pushes = get_pushes_jobs_json(new_args,
                                              new_args.repo,
                                              update_cache=args.update_cache)
            cache.save(cache_attributes, new_args.revision,
                       json.dumps(new_pushes, indent=2))

        pushes.extend(new_pushes)

        for revision_bug_data in revision_data:
            if args.bugs and revision_bug_data['bug_id'] not in args.bugs:
                # Skip if we requested a specific bug and this is not it.
                continue
            if args.bugs and args.override_bug_summary:
                revision_bug_data[
                    'bug_summary'] = bugzilla_summary_munge_failure(
                        args.override_bug_summary)

    pushes_jobs_data = None
    data = convert_pushes_to_test_isolation_bugzilla_data(args, pushes)

    #logger.info('convert_pushes_to_test_isolation_bugzilla_data\n{}'.format(
    #    json.dumps(data, indent=2)))

    summary = {}

    for revision_url in data:

        (repo, _, revision) = revision_url.split('/')[-3:]

        if revision_url not in summary:
            summary[revision_url] = {}
        summary_revision = summary[revision_url]

        job_type_names = sorted(data[revision_url].keys())

        for job_type_name in job_type_names:
            if job_type_name not in summary_revision:
                summary_revision[job_type_name] = dict(
                    notes=[],
                    isolation_job=
                    "{}/#/jobs?repo={}&tier=1%2C2%2C3&revision={}&searchStr={}"
                    .format(args.treeherder_url, repo, revision,
                            job_type_name),
                )
            summary_revision_job_type = summary_revision[job_type_name]

            job_type = data[revision_url][job_type_name]

            if 'bugzilla_data' not in summary_revision_job_type:
                summary_revision_job_type['bugzilla_data'] = copy.deepcopy(
                    test_isolation_bugzilla_data[revision_url])
                for bug_data in summary_revision_job_type['bugzilla_data']:
                    # bug_data['failure_reproduced'][section_name] counts the
                    # number of times the original bug_summary failure
                    # was seen in that section of jobs.
                    bug_data['failure_reproduced'] = dict(
                        original=0,
                        repeated=0,
                        id=0,
                        it=0,
                    )
                    # bug_data['test_reproduced'][section_name] counts the
                    # number of times the original bug_summary test
                    # was seen in that section of jobs.
                    bug_data['test_reproduced'] = dict(
                        original=0,
                        repeated=0,
                        id=0,
                        it=0,
                    )

            for section_name in (ORIGINAL_SECTIONS + ISOLATION_SECTIONS):
                if section_name not in summary_revision_job_type:
                    summary_revision_job_type[section_name] = dict(
                        failures={},
                        tests={},
                        failure_reproduced=0,
                        test_reproduced=0,
                    )
                    if section_name == 'original':
                        summary_revision_job_type[section_name][
                            'bug_job_map'] = []

                summary_revision_job_type_section = summary_revision_job_type[
                    section_name]

                job_type_section = job_type[section_name]

                run_time = 0
                jobs_testfailed_count = 0
                bugzilla_suggestions_count = 0

                for job in job_type_section:
                    if section_name == 'original':
                        summary_revision_job_type_section[
                            'bug_job_map'].extend(job['bug_job_map'])
                    run_time += job['end_timestamp'] - job['start_timestamp']
                    jobs_testfailed_count += 1 if job[
                        'result'] == 'testfailed' else 0
                    bugzilla_suggestions_count += len(
                        job['bugzilla_suggestions'])

                    for bugzilla_suggestion in job['bugzilla_suggestions']:

                        #failure = bugzilla_summary_munge_failure(bugzilla_suggestion['search'])
                        failure = bugzilla_suggestion['search']
                        if failure not in summary_revision_job_type_section[
                                'failures']:
                            summary_revision_job_type_section['failures'][
                                failure] = dict(
                                    count=0,
                                    failure_reproduced=0,
                                )

                        summary_revision_job_type_section['failures'][failure][
                            'count'] += 1
                        for bug_data in summary_revision_job_type[
                                'bugzilla_data']:
                            if args.bugs and args.override_bug_summary:
                                #pattern = convert_failure_to_pattern(bugzilla_summary_munge_failure(args.override_bug_summary))
                                pattern = convert_failure_to_pattern(
                                    args.override_bug_summary)
                            else:
                                pattern = bug_data['pattern']
                            if re.compile(pattern).search(failure):
                                bug_data['failure_reproduced'][
                                    section_name] += 1
                                summary_revision_job_type_section['failures'][
                                    failure]['failure_reproduced'] += 1
                                summary_revision_job_type_section[
                                    'failure_reproduced'] += 1

                            test = get_test(failure)
                            if test:
                                if test not in summary_revision_job_type_section[
                                        'tests']:
                                    summary_revision_job_type_section['tests'][
                                        test] = dict(
                                            count=0,
                                            test_reproduced=0,
                                        )

                                summary_revision_job_type_section['tests'][
                                    test]['count'] += 1
                                if args.bugs and args.override_bug_summary:
                                    bug_data_test = get_test(
                                        args.override_bug_summary)
                                else:
                                    bug_data_test = bug_data['test']
                                if bug_data_test and test in bug_data_test:
                                    bug_data['test_reproduced'][
                                        section_name] += 1
                                    summary_revision_job_type_section['tests'][
                                        test]['test_reproduced'] += 1
                                    summary_revision_job_type_section[
                                        'test_reproduced'] += 1

                summary_revision_job_type_section['run_time'] = run_time
                summary_revision_job_type_section[
                    'jobs_testfailed'] = jobs_testfailed_count
                summary_revision_job_type_section['jobs_total'] = len(
                    job_type_section)
                summary_revision_job_type_section[
                    'bugzilla_suggestions_count'] = bugzilla_suggestions_count

    return summary
Example #28
    def on_exit(self, _, __):
        save(self.cache)
        quit()
Example #29
    def __init__(self):
        self.locations = cache.load('newReversedGroupedLocations')

        countries = []
        for key in self.locations.keys():
            countries.append(key.split(',')[-1].rstrip())
        countries = list(set(countries))

        translation = {
            'Slovaka': 'Slovakia',
            'Trinidad and Tobao': 'Trinidad and Tobago',
            'Luxemboug': 'Luxembourg',
            'Icelad': 'Iceland',
            'Cua': 'Cuba',
            'Brazl': 'Brazil',
            'Belgim': 'Belgium',
            'Portugl': 'Portugal',
            'Pakistn': 'Pakistan',
            'Moroco': 'Morocco',
            'Swedn': 'Sweden',
            'Costa Ria': 'Costa Rica',
            'Ecuadr': 'Ecuador',
            'Canaa': 'Canada',
            'Greee': 'Greece',
            #' K' : 'UK',
            'Austra': 'Austria',
            'Australa': 'Australia',
            'Czechna': 'Czechia',
            'Iceld': 'Iceland',
            'Peu': 'Peru',
            'Itay': 'Italy',
            'The Bahams': 'The Bahamas',
            'Netherlans': 'Netherlands',
            'Span': 'Spain',
            'Denmak': 'Denmark',
            'Hong Kog': 'Hong Kong',
            'Isral': 'Israel',
            'Lithuana': 'Lithuania',
            'Germay': 'Germany',
            'Norwy': 'Norway',
            'Jamaia': 'Jamaica',
            'Polad': 'Poland',
            'Nicaraga': 'Nicaragua',
            'Frane': 'France',
            'Serba': 'Serbia',
            'UA': 'USA',
            'Hungay': 'Hungary',
            'Switzerlad': 'Switzerland',
            'Austriala': 'Australia',
            'SSolomon Islans': 'Solomon Islands',
            'Boliva': 'Bolivia'
        }

        new_dict = {}
        for key in self.locations.keys():
            oldCountry = key[key.rfind(',') + 2:]
            newCountry = oldCountry
            if newCountry == 'K':
                newCountry = 'UK'
            for country_key in translation.keys():
                newCountry = newCountry.replace(
                    country_key, translation[country_key]).rstrip()

            newKey = key[:key.rfind(',') + 2] + newCountry
            new_dict[newKey] = self.locations[key]

        cache.save(new_dict, 'newReversedGroupedLocations')
Example #30
    def update(self):
        buf = []
        # create an empty list
        for i in range(32):
            buf.append(0)
        if True:
            self.is_online = True
            try:
                # read the data
                rsp_ = self.client.execute(self.address,
                                           cst.READ_HOLDING_REGISTERS, 150, 16)
                # convert the tuple that was read into a list, 2 bytes per element
                temp_list = list(tuple(rsp_))
                # split the list of 2-byte values into a list of single bytes
                for i in range(8):
                    buf[i * 4 + 1] = temp_list[i * 2 + 1].to_bytes(
                        2, 'little')[0]
                    buf[i * 4 + 0] = temp_list[i * 2 + 1].to_bytes(
                        2, 'little')[1]
                    buf[i * 4 + 3] = temp_list[i * 2].to_bytes(2, 'little')[0]
                    buf[i * 4 + 2] = temp_list[i * 2].to_bytes(2, 'little')[1]
                # convert the byte list into a bytes object
                temp_bytes = bytes(buf)
                # convert the bytes to floats

                self.Flow[0] = struct.unpack_from('>f', temp_bytes, 0)[0]
                self.sumFlow[0] = struct.unpack_from('>f', temp_bytes, 4)[0]
                self.CA[0] = struct.unpack_from('>f', temp_bytes, 8)[0]
                self.sumCA[0] = struct.unpack_from('>f', temp_bytes, 12)[0]

                self.Flow[1] = struct.unpack_from('>f', temp_bytes, 16)[0]
                self.sumFlow[1] = struct.unpack_from('>f', temp_bytes, 20)[0]
                self.CA[1] = struct.unpack_from('>f', temp_bytes, 24)[0]
                self.sumCA[1] = struct.unpack_from('>f', temp_bytes, 28)[0]
                logging.info("Flow" + str(self.Flow))
                logging.info("sumFlow" + str(self.sumFlow))
                logging.info("CA" + str(self.CA))
                logging.info("sumCA" + str(self.sumCA))

                cache.save("cat:%s:Flow0" % self.name, self.Flow[0], None)
                cache.save("cat:%s:Flow1" % self.name, self.Flow[1], None)
                cache.save("cat:%s:sumFlow0" % self.name, self.sumFlow[0],
                           None)
                cache.save("cat:%s:sumFlow1" % self.name, self.sumFlow[1],
                           None)
                cache.save("cat:%s:CA0" % self.name, self.CA[0], None)
                cache.save("cat:%s:CA1" % self.name, self.CA[1], None)
                cache.save("cat:%s:sumCA0" % self.name, self.sumCA[0], None)
                cache.save("cat:%s:sumCA1" % self.name, self.sumCA[1], None)
                if self.Flow[0] > 0:
                    db.write_db(self.name, self.sumFlow[0], self.sumCA[0])
            except Exception:
                logging.error('dev : %s read reg error' % self.name)
        t = Timer(self.update_period, self.update)
        t.start()
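
Here cache.save(key, value, expire) behaves like a thin wrapper over a key-value store with an optional expiry (the third argument is None, i.e. keep forever). A hedged sketch of such a wrapper on top of redis-py; the Redis backing and the JSON encoding are assumptions, not taken from the project:

import json
import redis

_client = redis.Redis()  # assumed local Redis instance

def save(key, value, expire=None):
    # Store a JSON-serialisable value, optionally expiring after `expire` seconds.
    _client.set(key, json.dumps(value), ex=expire)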