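# Standard-library imports used by the classes below.  The mlbviewer-specific
# names (MLBHttp, MLBLog, MLBListWin, the MLB*Error exceptions, padstr, and the
# TEAMCODES / STATS_TEAMS / STREAM_SPEEDS / STANDINGS_* constants, LOGFILE,
# USERAGENT) are assumed to come from the project's own modules and are not
# imported here.
import calendar
import datetime
import gzip
import json
import re
import time
import urllib2
import StringIO
import curses
from HTMLParser import HTMLParser
from xml.dom.minidom import parseString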
class MLBStats:

    def __init__(self, cfg=None):
        self.data = []
        self.mycfg = cfg
        self.last_update = ""
        self.date = datetime.datetime.now()
        self.season = self.date.year
        self.http = MLBHttp(accept_gzip=True)
        if self.mycfg is None:
            self.type = 'pitching'
            self.sort = 'era'
            self.league = 'MLB'
            self.sort_order = 'default'
            self.team = 0
            self.season = self.date.year
            self.player_pool = 'QUALIFIER'

    def getBirthdate(self, player_id):
        bUrl = 'http://mlb.mlb.com/lookup/json/named.player_info.bam?sport_code=%27mlb%27&player_id=' + str(player_id)
        try:
            rsp = self.http.getUrl(bUrl)
        except urllib2.URLError:
            self.error_str = "UrlError: Could not retrieve statistics"
            raise MLBUrlError, bUrl
        try:
            tmp = json.loads(rsp)
        except Exception, error:
            raise MLBUrlError, bUrl
        bdate_str = tmp['player_info']['queryResults']['row']['birth_date']
        ddate_str = tmp['player_info']['queryResults']['row']['death_date']
        out = []
        ts = time.strptime(bdate_str, '%Y-%m-%dT00:00:00')
        out.append((ts.tm_year, ts.tm_mon, ts.tm_mday))
        if ddate_str != "":
            ts = time.strptime(ddate_str, '%Y-%m-%dT00:00:00')
            out.append((ts.tm_year, ts.tm_mon, ts.tm_mday))
        else:
            out.append(None)
        return out
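# Minimal usage sketch for MLBStats.getBirthdate(), assuming the module-level
# imports above and the project's MLBHttp/MLBUrlError helpers are in scope.
# The player_id is only a placeholder.
def _mlbstats_birthdate_example(player_id=110001):
    stats = MLBStats()
    try:
        # returns [(byear, bmonth, bday), (dyear, dmonth, dday) or None]
        born, died = stats.getBirthdate(player_id)
    except MLBUrlError:
        return None
    return born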
class MLBMasterScoreboard:

    def __init__(self, gameid):
        self.gameid = gameid
        self.gameid = self.gameid.replace('/', '_')
        self.gameid = self.gameid.replace('-', '_')
        (year, month, day) = self.gameid.split('_')[:3]
        league = self.gameid.split('_')[4][-3:]
        self.error_str = "Could not retrieve master_scoreboard.xml file"
        self.http = MLBHttp(accept_gzip=True)

    def getScoreboardData(self, gameid):
        self.scoreboard = []
        self.gameid = gameid
        self.gameid = self.gameid.replace('/', '_')
        self.gameid = self.gameid.replace('-', '_')
        (year, month, day) = self.gameid.split('_')[:3]
        league = self.gameid.split('_')[4][-3:]
        self.sbUrl = 'http://gdx.mlb.com/components/game/%s/year_%s/month_%s/day_%s/master_scoreboard.xml' % (
            league, year, month, day)
        try:
            rsp = self.http.getUrl(self.sbUrl)
        except urllib2.URLError:
            self.error_str = "Could not retrieve master_scoreboard.xml file"
            raise MLBUrlError, self.error_str
        try:
            xp = parseString(rsp)
        except:
            self.error_str = "Could not parse master_scoreboard.xml file"
            raise MLBXmlError, self.error_str
        # if we got this far, initialize the data structure
        for game in xp.getElementsByTagName('game'):
            tmp = dict()
            gid = game.getAttribute('id')
            tmp[gid] = dict()
            tmp[gid] = self.parseGameData(game)
            try:
                for media in game.getElementsByTagName('media'):
                    type = media.getAttribute('type')
                    if type == "game":
                        free = media.getAttribute('free')
                        tmp[gid]['free'] = (False, True)[free == "ALL"]
                if not tmp[gid].has_key('free'):
                    tmp[gid]['free'] = False
            except:
                tmp[gid]['free'] = False
            try:
                tmp[gid]['totals'] = self.parseLineScore(game)
            except:
                tmp['totals'] = None
            status = tmp[gid]['status']
            if status in ('Final', 'Game Over', 'Completed Early'):
                tmp[gid]['pitchers'] = self.parseWinLossPitchers(game)
            elif status in ('In Progress', 'Delayed', 'Suspended',
                            'Manager Challenge', 'Replay'):
                tmp[gid]['pitchers'] = self.parseCurrentPitchers(game)
            else:
                tmp[gid]['pitchers'] = self.parseProbablePitchers(game)
            if tmp[gid]['status'] in ('In Progress', 'Delayed', 'Suspended',
                                      'Replay', 'Manager Challenge',
                                      'Completed Early', 'Game Over', 'Final'):
                tmp[gid]['hr'] = dict()
                tmp[gid]['hr'] = self.parseHrData(game)
            if tmp[gid]['status'] in ('In Progress', 'Delayed', 'Replay',
                                      'Manager Challenge', 'Suspended'):
                tmp[gid]['in_game'] = dict()
                tmp[gid]['in_game'] = self.parseInGameData(game)
            self.scoreboard.append(tmp)
        return self.scoreboard

    def parseInGameData(self, game):
        out = dict()
        for tag in ('pbp', 'batter', 'pitcher', 'opposing_pitcher',
                    'ondeck', 'inhole', 'runners_on_base'):
            out[tag] = dict()
            for node in game.getElementsByTagName(tag):
                for attr in node.attributes.keys():
                    out[tag][attr] = node.getAttribute(attr)
        return out

    def parseHrData(self, game):
        out = dict()
        # codes are not the same in this file so translate
        teamcodes = dict()
        (home_code, away_code) = (game.getAttribute('home_code'),
                                  game.getAttribute('away_code'))
        (home_fcode, away_fcode) = (game.getAttribute('home_file_code'),
                                    game.getAttribute('away_file_code'))
        teamcodes[home_code] = home_fcode
        teamcodes[away_code] = away_fcode
        for node in game.getElementsByTagName('home_runs'):
            for player in node.getElementsByTagName('player'):
                # mlb.com lists each homerun separately so track game and
                # season totals
                tmp = dict()
                for attr in player.attributes.keys():
                    tmp[attr] = player.getAttribute(attr)
                # if we already have the player, this is more than one hr
                # this game
                team = teamcodes[tmp['team_code']].upper()
                if not out.has_key(team):
                    out[team] = dict()
                if out[team].has_key(tmp['id']):
                    game_hr += 1
                else:
                    game_hr = 1
                out[team][tmp['id']] = dict()
                out[team][tmp['id']][game_hr] = (tmp['id'],
                                                 tmp['name_display_roster'],
                                                 teamcodes[tmp['team_code']],
                                                 game_hr, tmp['std_hr'],
                                                 tmp['inning'], tmp['runners'])
        return out

    def parseGameData(self, node):
        out = dict()
        for attr in node.attributes.keys():
            out[attr] = node.getAttribute(attr)
        for sptr in node.getElementsByTagName('status'):
            for attr in sptr.attributes.keys():
                out[attr] = sptr.getAttribute(attr)
        return out

    def parseLineScore(self, xp):
        out = dict()
        for tag in ('r', 'h', 'e'):
            out[tag] = dict()
            for team in ('away', 'home'):
                out[tag][team] = dict()
                for tptr in xp.getElementsByTagName(tag):
                    out[tag][team] = tptr.getAttribute(team)
        return out

    def parseWinLossPitchers(self, xp):
        out = dict()
        for pitcher in ('winning_pitcher', 'losing_pitcher', 'save_pitcher'):
            for p in xp.getElementsByTagName(pitcher):
                tmp = dict()
                for attr in p.attributes.keys():
                    tmp[attr] = p.getAttribute(attr)
                if pitcher == 'save_pitcher':
                    out[pitcher] = (tmp['id'], tmp['name_display_roster'],
                                    tmp['wins'], tmp['losses'], tmp['era'],
                                    tmp['saves'])
                else:
                    out[pitcher] = (tmp['id'], tmp['name_display_roster'],
                                    tmp['wins'], tmp['losses'], tmp['era'])
        return out

    def parseProbablePitchers(self, xp):
        out = dict()
        for pitcher in ('home_probable_pitcher', 'away_probable_pitcher'):
            for p in xp.getElementsByTagName(pitcher):
                tmp = dict()
                for attr in p.attributes.keys():
                    tmp[attr] = p.getAttribute(attr)
                out[pitcher] = (tmp['id'], tmp['name_display_roster'],
                                tmp['wins'], tmp['losses'], tmp['era'])
        return out

    def parseCurrentPitchers(self, xp):
        out = dict()
        for pitcher in ('pitcher', 'opposing_pitcher'):
            for p in xp.getElementsByTagName(pitcher):
                tmp = dict()
                for attr in p.attributes.keys():
                    tmp[attr] = p.getAttribute(attr)
                out[pitcher] = (tmp['id'], tmp['name_display_roster'],
                                tmp['wins'], tmp['losses'], tmp['era'])
        for b in xp.getElementsByTagName('batter'):
            tmp = dict()
            for attr in b.attributes.keys():
                tmp[attr] = b.getAttribute(attr)
            out['batter'] = (tmp['id'], tmp['name_display_roster'], tmp['avg'])
        return out
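# Usage sketch: getScoreboardData() takes a gid-style game id and returns a
# list of per-game dicts keyed by game id.  The id below is only a placeholder
# in the YYYY/MM/DD/awaymlb-homemlb-N format the class's split logic expects.
def _master_scoreboard_example(gameid='2013/06/12/atlmlb-sdnmlb-1'):
    sb = MLBMasterScoreboard(gameid)
    try:
        return sb.getScoreboardData(gameid)
    except (MLBUrlError, MLBXmlError):
        return []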
class MLBCalendar:

    def __init__(self):
        self.games = []
        self.calendar = []
        self.http = MLBHttp(accept_gzip=True)

    def getData(self, teamid, year=None, month=None):
        self.teamid = teamid
        self.url = 'http://mlb.com/gen/schedule/'
        self.url += STATS_TEAMS[self.teamid] + '/'
        if year is not None and month is not None:
            self.year = year
            self.month = month
        else:
            self.now = datetime.datetime.now()
            self.year = self.now.year
            self.month = self.now.month
        self.url += "%s_%s.json" % (self.year, self.month)
        try:
            rsp = self.http.getUrl(self.url)
        except urllib2.URLError:
            self.error_str = "UrlError: Could not retrieve calendar."
            raise MLBUrlError, self.url
        try:
            jp = json.loads(rsp)
        except:
            self.error_str = "JsonError: Could not parse calendar."
            raise MLBJsonError
        # if we got this far, initialize the data structure
        self.collectCalendar(jp)
        return self.games

    def collectCalendar(self, jp):
        self.games = []
        for game in jp:
            if game.has_key('game_id'):
                self.games.append(game)

    def calendarMonth(self):
        self.calendar = []
        # TODO: Parse game data
        # Step 1: step through all entries in self.cal and create searchable
        # indices
        tmp = dict()
        for game in self.games:
            # index based on gid in order to capture double-headers
            gid = game['game_id']
            (year, month, day) = gid.split('/')[:3]
            key = "%s-%02d-%02d" % (year, int(month), int(day))
            if not tmp.has_key(key):
                tmp[key] = []
            tmp[key].append(game)
        # Step 2: fill in any off days with None so we have no gaps
        (firstday, daysinmonth) = calendar.monthrange(self.year, self.month)
        for d in range(daysinmonth):
            key = '%s-%02d-%02d' % (self.year, self.month, d + 1)
            if not tmp.has_key(key):
                tmp[key] = None
        # Step 3: front-fill any days before start of month if month starts
        # after Sunday
        # convert firstday from week begins with monday to week begins with
        # sunday
        firstDate = datetime.datetime(self.year, self.month, 1)
        days = (firstday + 1) % 7
        while days > 0:
            dif = datetime.timedelta(days)
            thisDate = firstDate - dif
            # For simplicity, fill in days from prior month with None.
            # In reality, those days may have games/scores but assume user
            # will scroll back for prior month games.
            self.calendar.append((thisDate, None))
            days -= 1
        # Step 4: fill in the rest with the days of the month
        for d in range(daysinmonth):
            thisDate = datetime.datetime(self.year, self.month, d + 1)
            key = '%s-%02d-%02d' % (self.year, self.month, d + 1)
            self.calendar.append((thisDate, tmp[key]))
        return self.calendar
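# Usage sketch: getData() expects a key into the project's STATS_TEAMS mapping;
# the teamid below is only a placeholder.  calendarMonth() then lays the
# retrieved games out as (datetime, games-or-None) tuples for a month view.
def _calendar_example(teamid=119, year=2013, month=6):
    cal = MLBCalendar()
    try:
        cal.getData(teamid, year, month)
    except (MLBUrlError, MLBJsonError):
        return []
    return cal.calendarMonth()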
class MLBSchedule:

    def __init__(self, *args, **kwargs):
        # maybe the answer for nexdef for basic subscribers
        self.use_wired_web = kwargs.get('use_wired_web')
        ymd_tuple = kwargs.get('ymd_tuple')
        time_shift = kwargs.get('time_shift')
        self.international = kwargs.get('international')
        self.cfg = kwargs.get('cfg')
        # Default to today
        if not ymd_tuple:
            now = datetime.datetime.now()
            dif = datetime.timedelta(1)
            # Now, we want the day to go until, say, 9 am the next
            # morning. This needs to be worked out, still...
            if now.hour < 9:
                now = now - dif
            ymd_tuple = (now.year, now.month, now.day)
        self.year = ymd_tuple[0]
        self.month = ymd_tuple[1]
        self.day = ymd_tuple[2]
        self.shift = time_shift
        self.http = MLBHttp(accept_gzip=True)
        self.grid = "http://gdx.mlb.com/components/game/mlb/year_"\
            + padstr(self.year)\
            + "/month_" + padstr(self.month)\
            + "/day_" + padstr(self.day) + "/grid.xml"
        self.multiangle = "http://gdx.mlb.com/components/game/mlb/year_"\
            + padstr(self.year)\
            + "/month_" + padstr(self.month)\
            + "/day_" + padstr(self.day) + "/multi_angle_epg.xml"
        self.log = MLBLog(LOGFILE)
        self.data = []
        self.error_str = "Something went wrong. A more descriptive error should be here."

    def __getSchedule(self):
        try:
            fp = self.http.getUrl(self.grid)
            return fp
        except urllib2.HTTPError:
            self.error_str = "UrlError: Could not retrieve listings."
            raise MLBUrlError, self.grid

    def getMultiAngleFromXml(self, event_id):
        out = []
        camerainfo = dict()
        txheaders = {'User-agent': USERAGENT}
        data = None
        self.multiangle = self.grid.replace('grid.xml', 'multi_angle_epg.xml')
        try:
            fp = self.http.getUrl(self.multiangle)
        except urllib2.HTTPError:
            raise MLBUrlError
        xp = parseString(fp)
        for node in xp.getElementsByTagName('game'):
            id = node.getAttribute('calendar_event_id')
            if id != event_id:
                continue
            home = node.getAttribute('home_file_code')
            away = node.getAttribute('away_file_code')
            title = ' '.join(TEAMCODES[away][1:]).strip() + ' at '
            title += ' '.join(TEAMCODES[home][1:]).strip()
            camerainfo[id] = dict()
            camerainfo[id]['angles'] = []
            for attr in node.attributes.keys():
                camerainfo[id][attr] = node.getAttribute(attr)
            for angle in node.getElementsByTagName('angle'):
                cdict = dict()
                for attr in angle.attributes.keys():
                    cdict[attr] = angle.getAttribute(attr)
                media = angle.getElementsByTagName('media')[0]
                platform = media.getAttribute('platform')
                if platform != 'WEB_MEDIAPLAYER':
                    continue
                cdict['content_id'] = media.getAttribute('content_id')
                if cdict['name'] == '':
                    cdict['name'] = 'Unknown Camera Angle'
                camerainfo[id]['angles'].append(cdict)
            out.append(camerainfo[id])
        #raise Exception,repr((out,event_id,self.multiangle))
        return out

    def getMultiAngleListing(self, event_id):
        out = []
        teams = dict()
        angles = []
        null = []
        raw = self.getMultiAngleFromXml(event_id)[0]
        id = raw['id']
        desc = raw['description']
        teams['home'] = raw['home_file_code']
        teams['away'] = raw['away_file_code']
        for angle in raw['angles']:
            out.append((teams, 0,
                        (angle['name'], 0, angle['content_id'], event_id),
                        null, null, 'NB', event_id, 0))
        #raise Exception,repr(out)
        return out

    def __scheduleFromXml(self):
        out = []
        gameinfo = dict()
        fp = parseString(self.__getSchedule())
        for node in fp.getElementsByTagName('game'):
            id = node.getAttribute('id')
            gameinfo[id] = dict()
            for attr in node.attributes.keys():
                gameinfo[id][attr] = node.getAttribute(attr)
            media = node.getElementsByTagName('game_media')[0]
            try:
                media_detail = media.getElementsByTagName('media')[0]
                gameinfo[id]['state'] = media_detail.getAttribute('media_state')
            except:
                gameinfo[id]['media_state'] = 'media_dead'
            try:
                gameinfo[id]['time']
            except:
                gameinfo[id]['time'] = gameinfo[id]['event_time'].split()[0]
                gameinfo[id]['ampm'] = gameinfo[id]['event_time'].split()[1]
            home = node.getAttribute('home_team_id')
            away = node.getAttribute('away_team_id')
            gameinfo[id]['content'] = self.parseMediaGrid(node, away, home)
            #raise Exception,repr(gameinfo[id]['content'])
            # time to add unknown teamcodes dynamically rather than maintaining
            # them in mlbConstants
            for team in ('home', 'away'):
                teamcode = str(gameinfo[id]['%s_code' % team])
                teamfilecode = str(gameinfo[id]['%s_file_code' % team])
                if not TEAMCODES.has_key(teamfilecode):
                    TEAMCODES[teamfilecode] = \
                        (str(gameinfo[id]['%s_team_id' % team]),
                         str(gameinfo[id]['%s_team_name' % team]))
            out.append(gameinfo[id])
        #raise Exception,repr(out)
        return out

    def parseMediaGrid(self, xp, away, home):
        content = {}
        content['audio'] = []
        content['alt_audio'] = []
        content['video'] = {}
        for s in STREAM_SPEEDS:
            content['video'][s] = []
        content['video']['swarm'] = []
        content['condensed'] = []
        event_id = str(xp.getAttribute('calendar_event_id'))
        content['free'] = False
        for media in xp.getElementsByTagName('media'):
            tmp = {}
            for attr in media.attributes.keys():
                tmp[attr] = str(media.getAttribute(attr))
            out = []
            # skip TBS-NAT for international postseason
            if self.international:
                if tmp.get('tbs_auth_required') == "Y":
                    continue
                if tmp.get('mlbn_auth_required') == "Y":
                    continue
            try:
                tmp['playback_scenario'] = tmp['playback_scenario'].strip()
            except:
                continue
                raise Exception, repr(tmp)
            if tmp['type'] in ('home_audio', 'away_audio'):
                if tmp['playback_scenario'] == 'AUDIO_FMS_32K':
                    if tmp['type'] == 'away_audio':
                        coverage = away
                    elif tmp['type'] == 'home_audio':
                        coverage = home
                    out = (tmp['display'], coverage, tmp['id'], event_id)
                    content['audio'].append(out)
            elif tmp['type'] in ('alt_home_audio', 'alt_away_audio'):
                if tmp['playback_scenario'] == 'AUDIO_FMS_32K':
                    if tmp['type'] == 'alt_away_audio':
                        coverage = away
                    elif tmp['type'] == 'alt_home_audio':
                        coverage = home
                    out = (tmp['display'], coverage, tmp['id'], event_id)
                    content['alt_audio'].append(out)
            elif tmp['type'] in ('mlbtv_national', 'mlbtv_home',
                                 'mlbtv_away', 'mlbtv_enhanced'):
                if tmp['playback_scenario'] in \
                        ('HTTP_CLOUD_WIRED', 'HTTP_CLOUD_WIRED_WEB', 'FMS_CLOUD'):
                    # candidate for new procedure: determine whether game is
                    # national blackout
                    try:
                        tmp['blackout']
                    except:
                        tmp['blackout'] = ""
                    nb_pat = re.compile(r'MLB_NATIONAL_BLACKOUT')
                    if re.search(nb_pat, tmp['blackout']) is not None:
                        content['blackout'] = 'MLB_NATIONAL_BLACKOUT'
                    else:
                        content['blackout'] = None
                    # candidate for new procedure: determine the coverage
                    if tmp['type'] == 'mlbtv_national':
                        coverage = '0'
                    elif tmp['type'] == 'mlbtv_enhanced':
                        coverage = '+'
                    elif tmp['type'] == 'mlbtv_away':
                        coverage = away
                    else:
                        coverage = home
                    # free game of the day
                    try:
                        if tmp['free'] == 'ALL':
                            content['free'] = True
                    except:
                        pass
                    # each listing is a tuple of display, coverage, content id
                    # and event-id
                    out = (tmp['display'], coverage, tmp['id'], event_id)
                    # determine where to store this tuple - trimList will
                    # return only the listings for a given speed/stream type
                    if tmp['playback_scenario'] == 'HTTP_CLOUD_WIRED':
                        if not self.use_wired_web:
                            content['video']['swarm'].append(out)
                    elif tmp['playback_scenario'] == 'HTTP_CLOUD_WIRED_WEB':
                        if self.use_wired_web:
                            content['video']['swarm'].append(out)
                    elif tmp['playback_scenario'] == 'FMS_CLOUD':
                        for s in STREAM_SPEEDS:
                            content['video'][s].append(out)
                    else:
                        continue
            elif tmp['type'] == 'condensed_game':
                out = ('CG', 0, tmp['id'], event_id)
                content['condensed'].append(out)
        return content

    def __xmlToPython(self):
        return self.__scheduleFromXml()

    def getData(self):
        # This is the public method that puts together the private
        # steps above. Fills it up with data.
        try:
            self.data = self.__xmlToPython()
        except ValueError, detail:
            raise MLBXmlError, detail
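# Usage sketch: MLBSchedule defaults to today's grid when no ymd_tuple is
# given.  Assuming the project constants (LOGFILE, TEAMCODES, STREAM_SPEEDS)
# and helpers used above are in scope, getData() fills self.data with one dict
# per game, including the parsed 'content' media listings.
def _schedule_example(ymd_tuple=None):
    sched = MLBSchedule(ymd_tuple=ymd_tuple)
    try:
        sched.getData()
    except (MLBUrlError, MLBXmlError):
        return []
    return sched.data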
class MLBLineScore:

    def __init__(self, gameid):
        self.gameid = gameid
        self.gameid = self.gameid.replace('/', '_')
        self.gameid = self.gameid.replace('-', '_')
        (year, month, day) = self.gameid.split('_')[:3]
        self.league = self.gameid.split('_')[4][-3:]
        self.boxUrl = 'http://gdx.mlb.com/components/game/%s/year_%s/month_%s/day_%s/gid_%s/linescore.xml' % (
            self.league, year, month, day, self.gameid)
        self.hrUrl = self.boxUrl.replace('linescore.xml', 'miniscoreboard.xml')
        self.linescore = None
        self.http = MLBHttp(accept_gzip=True)

    def getLineData(self, gameid):
        self.gameid = gameid
        self.gameid = self.gameid.replace('/', '_')
        self.gameid = self.gameid.replace('-', '_')
        (year, month, day) = self.gameid.split('_')[:3]
        self.league = self.gameid.split('_')[4][-3:]
        self.boxUrl = 'http://gdx.mlb.com/components/game/%s/year_%s/month_%s/day_%s/gid_%s/linescore.xml' % (
            self.league, year, month, day, self.gameid)
        self.hrUrl = self.boxUrl.replace('linescore.xml', 'miniscoreboard.xml')
        self.linescore = None
        try:
            rsp = self.http.getUrl(self.boxUrl)
        except urllib2.URLError:
            self.error_str = "UrlError: Could not retrieve linescore."
            raise MLBUrlError
        try:
            xp = parseString(rsp)
        except:
            self.error_str = "XmlError: Could not parse linescore."
            raise MLBXmlError
        # if we got this far, initialize the data structure
        self.linescore = dict()
        self.linescore['game'] = dict()
        self.linescore['innings'] = dict()
        self.linescore['pitchers'] = dict()
        self.linescore['game'] = self.parseGameData(xp)
        try:
            self.linescore['innings'] = self.parseLineScore(xp)
        except:
            self.linescore['innings'] = None
        status = self.linescore['game']['status']
        if status in ('Final', 'Game Over', 'Completed Early'):
            self.linescore['pitchers'] = self.parseWinLossPitchers(xp)
        elif status in ('In Progress', 'Delayed'):
            self.linescore['pitchers'] = self.parseCurrentPitchers(xp)
        else:
            self.linescore['pitchers'] = self.parseProbablePitchers(xp)
        if self.linescore['game']['status'] in ('In Progress', 'Delayed',
                                                'Suspended', 'Completed Early',
                                                'Game Over', 'Final'):
            hrptr = self.getHrData()
            self.linescore['hr'] = dict()
            self.linescore['hr'] = self.parseHrData(hrptr)
        if self.linescore['game']['status'] in ('In Progress', 'Delayed',
                                                'Suspended'):
            self.linescore['in_game'] = dict()
            self.linescore['in_game'] = self.parseInGameData(hrptr)
        return self.linescore

    def getHrData(self):
        try:
            rsp = self.http.getUrl(self.hrUrl)
        except:
            self.error_str = "UrlError: Could not retrieve home run data."
            raise MLBUrlError
        try:
            xp = parseString(rsp)
        except:
            self.error_str = "XmlError: Could not parse home run data."
            raise MLBXmlError
        # initialize the structure
        return xp

    def parseInGameData(self, xp):
        out = dict()
        for ingame in xp.getElementsByTagName('in_game'):
            out['last_pbp'] = ingame.getAttribute('last_pbp')
            for tag in ('batter', 'pitcher', 'opposing_pitcher', 'ondeck',
                        'inhole', 'runner_on_1b', 'runner_on_2b',
                        'runner_on_3b'):
                out[tag] = dict()
                for node in ingame.getElementsByTagName(tag):
                    for attr in node.attributes.keys():
                        out[tag][attr] = node.getAttribute(attr)
        return out

    def parseHrData(self, xp):
        out = dict()
        # codes are not the same in this file so translate
        for game in xp.getElementsByTagName('game'):
            teamcodes = dict()
            (home_code, away_code) = (game.getAttribute('home_code'),
                                      game.getAttribute('away_code'))
            (home_fcode, away_fcode) = (game.getAttribute('home_file_code'),
                                        game.getAttribute('away_file_code'))
            teamcodes[home_code] = home_fcode
            teamcodes[away_code] = away_fcode
        for node in xp.getElementsByTagName('home_runs'):
            for player in node.getElementsByTagName('player'):
                # mlb.com lists each homerun separately so track game and
                # season totals
                tmp = dict()
                for attr in player.attributes.keys():
                    tmp[attr] = player.getAttribute(attr)
                # if we already have the player, this is more than one hr
                # this game
                if self.league != 'mlb':
                    team = tmp['team_code'].upper()
                else:
                    team = teamcodes[tmp['team_code']].upper()
                if not out.has_key(team):
                    out[team] = dict()
                if out[team].has_key(tmp['id']):
                    # game_hr is local to this loop so look it up each time
                    game_hr = out[team][tmp['id']].keys()[-1]
                    game_hr += 1
                else:
                    game_hr = 1
                out[team][tmp['id']] = dict()
                out[team][tmp['id']][game_hr] = (tmp['id'],
                                                 tmp['name_display_roster'],
                                                 teamcodes[tmp['team_code']],
                                                 game_hr, tmp['std_hr'],
                                                 tmp['inning'], tmp['runners'])
        return out

    def parseGameData(self, xp):
        out = dict()
        for node in xp.getElementsByTagName('game'):
            for attr in node.attributes.keys():
                out[attr] = node.getAttribute(attr)
        return out

    def parseLineScore(self, xp):
        out = dict()
        for iptr in xp.getElementsByTagName('linescore'):
            inning = iptr.getAttribute('inning')
            out[inning] = dict()
            for team in ('home', 'away'):
                out[inning][team] = iptr.getAttribute("%s_inning_runs" % team)
        return out

    def parseWinLossPitchers(self, xp):
        out = dict()
        for pitcher in ('winning_pitcher', 'losing_pitcher', 'save_pitcher'):
            for p in xp.getElementsByTagName(pitcher):
                tmp = dict()
                for attr in p.attributes.keys():
                    tmp[attr] = p.getAttribute(attr)
                if pitcher == 'save_pitcher':
                    out[pitcher] = (tmp['id'], tmp['last_name'], tmp['wins'],
                                    tmp['losses'], tmp['era'], tmp['saves'])
                else:
                    out[pitcher] = (tmp['id'], tmp['last_name'], tmp['wins'],
                                    tmp['losses'], tmp['era'])
        return out

    def parseProbablePitchers(self, xp):
        out = dict()
        for pitcher in ('home_probable_pitcher', 'away_probable_pitcher'):
            for p in xp.getElementsByTagName(pitcher):
                tmp = dict()
                for attr in p.attributes.keys():
                    tmp[attr] = p.getAttribute(attr)
                out[pitcher] = (tmp['id'], tmp['last_name'], tmp['wins'],
                                tmp['losses'], tmp['era'])
        return out

    def parseCurrentPitchers(self, xp):
        out = dict()
        for pitcher in ('current_pitcher', 'opposing_pitcher'):
            for p in xp.getElementsByTagName(pitcher):
                tmp = dict()
                for attr in p.attributes.keys():
                    tmp[attr] = p.getAttribute(attr)
                out[pitcher] = (tmp['id'], tmp['last_name'], tmp['wins'],
                                tmp['losses'], tmp['era'])
        for b in xp.getElementsByTagName('current_batter'):
            tmp = dict()
            for attr in b.attributes.keys():
                tmp[attr] = b.getAttribute(attr)
            out['current_batter'] = (tmp['id'], tmp['last_name'], tmp['avg'])
        return out
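# Usage sketch: the returned linescore dict bundles 'game', 'innings',
# 'pitchers' and, for games in progress or finished, 'hr' / 'in_game'
# sections.  The gameid below is only a placeholder in the same format as the
# other examples.
def _linescore_example(gameid='2013/06/12/atlmlb-sdnmlb-1'):
    ls = MLBLineScore(gameid)
    try:
        return ls.getLineData(gameid)
    except (MLBUrlError, MLBXmlError):
        return None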
class MLBBoxScore:

    def __init__(self, gameid):
        self.gameid = gameid
        self.gameid = self.gameid.replace('/', '_')
        self.gameid = self.gameid.replace('-', '_')
        (year, month, day) = self.gameid.split('_')[:3]
        league = self.gameid.split('_')[4][-3:]
        self.boxUrl = 'http://gdx.mlb.com/components/game/%s/year_%s/month_%s/day_%s/gid_%s/boxscore.xml' % (
            league, year, month, day, self.gameid)
        self.boxscore = None
        self.http = MLBHttp(accept_gzip=True)

    def getBoxData(self, gameid):
        self.gameid = gameid
        self.gameid = self.gameid.replace('/', '_')
        self.gameid = self.gameid.replace('-', '_')
        (year, month, day) = self.gameid.split('_')[:3]
        league = self.gameid.split('_')[4][-3:]
        self.boxUrl = 'http://gdx.mlb.com/components/game/%s/year_%s/month_%s/day_%s/gid_%s/boxscore.xml' % (
            league, year, month, day, self.gameid)
        self.boxscore = None
        try:
            rsp = self.http.getUrl(self.boxUrl)
        except urllib2.URLError:
            self.error_str = "UrlError: Could not retrieve box score."
            raise MLBUrlError
        try:
            xp = parseString(rsp)
        except:
            raise
        # if we got this far, initialize the data structure
        self.boxscore = dict()
        self.boxscore['game'] = self.parseGameData(xp)
        self.boxscore['batting'] = self.parseBattingData(xp)
        self.boxscore['pitching'] = self.parsePitchingData(xp)
        self.boxscore['game_info'] = self.parseGameInfo(xp)
        return self.boxscore

    def parseGameData(self, xp):
        out = dict()
        for node in xp.getElementsByTagName('boxscore'):
            for attr in node.attributes.keys():
                out[attr] = node.getAttribute(attr)
        return out

    def parseBattingData(self, xp):
        out = dict()
        for node in xp.getElementsByTagName('batting'):
            team = node.getAttribute('team_flag')
            out[team] = dict()
            for attr in node.attributes.keys():
                out[team][attr] = node.getAttribute(attr)
            out[team]['batters'] = dict()
            for b in node.getElementsByTagName('batter'):
                b_id = b.getAttribute('id')
                out[team]['batters'][b_id] = dict()
                for a in b.attributes.keys():
                    out[team]['batters'][b_id][a] = b.getAttribute(a)
            # <note> tag contains substitution notes
            out[team]['batting-note'] = []
            for span in node.getElementsByTagName('note'):
                # encapsulate span data in foo tag and then parse it as
                # well-behaved XML
                new = '<foo>' + span.childNodes[0].data + '</foo>'
                tmp = parseString(new)
                for text in tmp.getElementsByTagName('span'):
                    # wait! really? span inside span???
                    out[team]['batting-note'].append(text.childNodes[0].data)
            # text_data is used for BATTING / FIELDING notes
            out[team]['batting-data'] = []
            # deal with culturing the messy blob later
            for blob in node.getElementsByTagName('text_data'):
                out[team]['batting-data'].append(blob)
        # good enough for here - do more parsing elsewhere
        return out

    def parsePitchingData(self, xp):
        out = dict()
        for node in xp.getElementsByTagName('pitching'):
            team = node.getAttribute('team_flag')
            out[team] = dict()
            for attr in node.attributes.keys():
                out[team][attr] = node.getAttribute(attr)
            out[team]['pitchers'] = dict()
            out[team]['pitchers']['pitching-order'] = list()
            for p in node.getElementsByTagName('pitcher'):
                p_id = p.getAttribute('id')
                out[team]['pitchers']['pitching-order'].append(p_id)
                out[team]['pitchers'][p_id] = dict()
                for a in p.attributes.keys():
                    out[team]['pitchers'][p_id][a] = p.getAttribute(a)
            # <note> tag contains substitution notes
            out[team]['pitching-note'] = []
            for span in node.getElementsByTagName('note'):
                tmp = parseString(span.childNodes[0].data)
                for text in tmp.getElementsByTagName('span'):
                    out[team]['pitching-note'].append(text.childNodes[0].data)
            # text_data is used for additional notes
            out[team]['pitching-data'] = []
            for blob in node.getElementsByTagName('text_data'):
                out[team]['pitching-data'].append(blob)
        # good enough for here - do more parsing elsewhere
        return out

    # probably don't need this anymore since line score is another class
    def parseLineScore(self, xp):
        out = dict()
        for node in xp.getElementsByTagName('linescore'):
            out['totals'] = dict()
            for attr in node.attributes.keys():
                out['totals'][attr] = node.getAttribute(attr)
            out['innings'] = dict()
            for iptr in node.getElementsByTagName('inning_line_score'):
                inning = iptr.getAttribute('inning')
                out['innings'][inning] = dict()
                for team in ('home', 'away'):
                    out['innings'][inning][team] = iptr.getAttribute(team)
        return out

    def parseGameInfo(self, xp):
        for node in xp.getElementsByTagName('game_info'):
            # there should only be one
            return node

    def parseDataBlob(self, blob):
        data = '<data>' + blob.childNodes[0].nodeValue + '</data>'
        dptr = parseString(data)
        out = []
        tmp_str = ''
        #print "dptr.childNodes[0].childNodes:"
        #print dptr.childNodes[0].childNodes
        for elem in dptr.childNodes[0].childNodes:
            self.blobNode(elem)

    def blobNode(self, node):
        if node.nodeName == 'b':
            print node.childNodes[0].nodeValue
        elif node.nodeName == 'span':
            for child in node.childNodes:
                self.blobNode(child)
        elif node.nodeType == node.TEXT_NODE:
            self.blobTextNode(node)
        elif node.nodeName == 'br':
            pass

    def blobTextNode(self, node):
        if not node.nodeValue.isspace():
            print node.nodeValue
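# Usage sketch: getBoxData() returns a dict with 'game', 'batting', 'pitching'
# and 'game_info' sections keyed the way the parse* methods above build them.
# The gameid below is only a placeholder.
def _boxscore_example(gameid='2013/06/12/atlmlb-sdnmlb-1'):
    box = MLBBoxScore(gameid)
    try:
        return box.getBoxData(gameid)
    except MLBUrlError:
        return None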
class MLBRssWin(MLBListWin):

    def __init__(self, myscr, mycfg):
        self.myscr = myscr
        self.mycfg = mycfg
        self.statuswin = curses.newwin(1, curses.COLS - 1, curses.LINES - 1, 0)
        self.titlewin = curses.newwin(2, curses.COLS - 1, 0, 0)
        self.rssUrl = 'http://mlb.mlb.com/partnerxml/gen/news/rss/mlb.xml'
        self.milbRssUrl = 'http://www.milb.com/partnerxml/gen/news/rss/milb.xml'
        self.data = []
        self.records = []
        self.current_cursor = 0
        self.record_cursor = 0
        self.game_cursor = 0
        self.htmlParser = HTMLParser()
        self.http = MLBHttp(accept_gzip=True)

    def getFeedFromUser(self):
        feed = self.prompter(self.statuswin, 'Enter teamcode of feed:')
        feed = feed.strip()
        if self.mycfg.get('milbtv') and feed == "" or feed == "milb":
            feed = "milb"
        elif feed == "" or feed == "mlb":
            feed = 'mlb'
        elif feed not in TEAMCODES.keys():
            self.statusWrite('Invalid teamcode: ' + feed, wait=2)
            return
        self.statusWrite('Retrieving feed for %s...' % feed, wait=1)
        # in this case, overwrite rather than aggregate
        self.data = []
        self.getRssData(team=feed)

    def getRssData(self, team='mlb'):
        if self.mycfg.get('milbtv'):
            try:
                team = TEAMCODES[team][2]
            except:
                pass
            rssUrl = self.milbRssUrl.replace('milb.xml', '%s.xml' % team)
        else:
            rssUrl = self.rssUrl.replace('mlb.xml', '%s.xml' % team)
        try:
            rsp = self.http.getUrl(rssUrl)
        except:
            self.error_str = "UrlError: Could not retrieve RSS."
            self.statusWrite(self.error_str, wait=2)
            return
            #raise MLBUrlError
        try:
            xp = parseString(rsp)
        except:
            self.error_str = "XmlError: Could not parse RSS."
            raise MLBXmlError
        # append rather than overwrite to allow multiple feeds to be aggregated
        #self.data = []
        self.parseRssData(xp)
        # this is all just initialization ; setCursors should be called to
        # align with listings position
        self.game_cursor = 0
        self.current_cursor = 0
        self.record_cursor = 0
        viewable = curses.LINES - 4
        if viewable % 2 > 0:
            viewable -= 1
        self.records = self.data[:viewable]

    def setCursors(self, current_cursor, record_cursor):
        self.game_cursor = current_cursor + record_cursor
        # scoreboard scrolls two lines at a time
        absolute_cursor = self.game_cursor * 2
        viewable = curses.LINES - 4
        if viewable % 2 > 0:
            viewable -= 1
        # integer division will give us the correct top record position
        try:
            self.record_cursor = (absolute_cursor / viewable) * viewable
        except:
            raise MLBCursesError, "Screen too small."
        # and find the current position in the viewable screen
        self.current_cursor = absolute_cursor - self.record_cursor
        # and finally collect the viewable records
        self.records = self.data[self.record_cursor:self.record_cursor + viewable]

    def parseRssData(self, xptr):
        for item in xptr.getElementsByTagName('item'):
            title = item.getElementsByTagName('title')[0].childNodes[0].data
            link = item.getElementsByTagName('link')[0].childNodes[0].data
            try:
                link = self.htmlParser.unescape(link)
            except:
                raise Exception, repr(link)
            try:
                desc = item.getElementsByTagName('description')[0].childNodes[0].data
            except IndexError:
                desc = ""
            self.data.append((title, link, desc))

    def Up(self):
        if self.current_cursor - 2 < 0 and self.record_cursor - 2 >= 0:
            viewable = curses.LINES - 4
            if viewable % 2 > 0:
                viewable -= 1
            self.current_cursor = viewable - 2
            #if self.current_cursor % 2 > 0:
            #    self.current_cursor -= 1
            if self.record_cursor - viewable < 0:
                self.record_cursor = 0
            else:
                self.record_cursor -= viewable
            #if self.record_cursor % 2 > 0:
            #    self.record_cursor -= 1
            self.records = self.data[self.record_cursor:self.record_cursor + viewable]
        elif self.current_cursor > 0:
            self.current_cursor -= 2

    def Down(self):
        viewable = curses.LINES - 4
        if self.current_cursor + 2 >= len(self.records) and\
           (self.record_cursor + self.current_cursor + 2) < len(self.data):
            self.record_cursor += self.current_cursor + 2
            self.current_cursor = 0
            if (self.record_cursor + viewable) % 2 > 0:
                self.records = self.data[self.record_cursor:self.record_cursor + curses.LINES - 5]
            else:
                self.records = self.data[self.record_cursor:self.record_cursor + curses.LINES - 4]
        # Elif not at bottom of window
        elif self.current_cursor + 2 < self.records and\
             self.current_cursor + 2 < curses.LINES - 4:
            if (self.current_cursor + 2 + self.record_cursor) < len(self.data):
                self.current_cursor += 2
        # Silent else do nothing at bottom of window and bottom of records

    def Refresh(self):
        self.myscr.clear()
        # display even number of lines since games will be two lines
        wlen = curses.LINES - 4
        if wlen % 2 > 0:
            wlen -= 1
        for n in range(wlen):
            if n < len(self.records):
                cursesflags = 0
                game_cursor = (n + self.record_cursor) / 2
                (title, link, desc) = self.data[game_cursor]
                if n % 2 > 0:
                    # second line of the feed item, underline it for division
                    # between items
                    if len(desc) > curses.COLS - 2:
                        s = desc[:curses.COLS - 5]
                        s += '...'
                    else:
                        s = desc
                    pad = curses.COLS - 2 - len(s)
                    s += ' ' * pad
                    if n - 1 == self.current_cursor:
                        cursesflags |= curses.A_UNDERLINE | curses.A_REVERSE
                    else:
                        cursesflags = curses.A_UNDERLINE
                    self.myscr.addnstr(n + 2, 0, s, curses.COLS - 2, cursesflags)
                else:
                    s = title
                    pad = curses.COLS - 2 - len(s)
                    if n == self.current_cursor:
                        cursesflags |= curses.A_REVERSE | curses.A_BOLD
                    else:
                        cursesflags = curses.A_BOLD
                    self.myscr.addstr(n + 2, 0, s, cursesflags)
                    # don't bold the pad or it results in an uneven looking
                    # highlight
                    cursesflags ^= curses.A_BOLD
                    self.myscr.addstr(n + 2, len(s), ' ' * pad, cursesflags)
            else:
                s = ' ' * (curses.COLS - 1)
                self.myscr.addnstr(n + 2, 0, s, curses.COLS - 2)
        self.myscr.refresh()

    def titleRefresh(self, mysched):
        self.titlewin.clear()
        # RSS is always today - there are no archives
        now = datetime.datetime.now()
        titlestr = "RSS FEED FOR " +\
            str(now.month) + '/' +\
            str(now.day) + '/' +\
            str(now.year)
        # TODO: '(Use arrow keys to change days)'
        padding = curses.COLS - (len(titlestr) + 6)
        titlestr += ' ' * padding
        pos = curses.COLS - 6
        self.titlewin.addstr(0, 0, titlestr)
        self.titlewin.addstr(0, pos, 'H', curses.A_BOLD)
        self.titlewin.addstr(0, pos + 1, 'elp')
        self.titlewin.hline(1, 0, curses.ACS_HLINE, curses.COLS - 1)
        self.titlewin.refresh()

    def statusRefresh(self):
        game_cursor = (self.current_cursor + self.record_cursor) / 2
        # BEGIN curses debug code
        if self.mycfg.get('curses_debug'):
            wlen = curses.LINES - 4
            if wlen % 2 > 0:
                wlen -= 1
            status_str = "game_cursor=%s, wlen=%s, current_cursor=%s, record_cursor=%s, len(records)=%s" %\
                (game_cursor, wlen, self.current_cursor, self.record_cursor,
                 len(self.records))
            self.statuswin.clear()
            self.statuswin.addnstr(0, 0, status_str, curses.COLS - 2, curses.A_BOLD)
            self.statuswin.refresh()
            return
        # END curses debug code
        # use the url for status now
        status_str = self.data[game_cursor][1][:curses.COLS - 2]
        padding = curses.COLS - len(status_str)
        # shrink the status string to fit if it is too many chars wide for
        # screen
        if padding < 0:
            status_str = status_str[:padding]
        status_str += ' ' * padding
        self.statuswin.addnstr(0, 0, status_str, curses.COLS - 2, curses.A_BOLD)
        self.statuswin.refresh()
class MLBStandings: def __init__(self): self.data = [] self.last_update = "" self.xml = "" self.date = datetime.datetime.now() self.url = 'https://erikberg.com/mlb/standings.xml' self.jUrl = 'http://mlb.mlb.com/lookup/json/named.standings_schedule_date.bam?&sit_code=%27h0%27&league_id=103&league_id=104&all_star_sw=%27N%27&version=2' self.http = MLBHttp(accept_gzip=True) #def getStandingsData(self,offline=False,datetime=None,format='json'): # if format == 'xml': # self.getStandingsXmlData(offline) # else: # self.getStandingsJsonData(offline) def getStandingsData(self,ymd_tuple=None,offline=False): # this part needs to be added dynamically #schedule_game_date.game_date=%272013/06/12%27&season=2013 # if not given a datetime, calculate it self.data = [] if ymd_tuple is not None: now = datetime.datetime(ymd_tuple[0],ymd_tuple[1],ymd_tuple[2]) else: now=datetime.datetime.now() self.jUrl = 'http://mlb.mlb.com/lookup/json/named.standings_schedule_date.bam?&sit_code=%27h0%27&league_id=103&league_id=104&all_star_sw=%27N%27&version=2' self.jUrl += '&season=%s&schedule_game_date.game_date=%%27%s%%27' % \ ( now.year, now.strftime('%Y/%m/%d') ) try: rsp = self.http.getUrl(self.jUrl) except urllib2.URLError: self.error_str = "UrlError: Could not retrieve standings." raise MLBUrlError try: self.json = json.loads(rsp) except ValueError: if re.search(r'Check back soon',rsp) is not None: #raise Exception,MLBJsonError return raise Exception,rsp raise Exception,self.jUrl raise Exception,MLBJsonError self.parseStandingsJson() def getDataFromFile(self): # For development purposes, let's parse from a file (activate web # code later) f = open('standings.xml') self.xml = f.read() f.close() def getStandingsXmlData(self,offline=False): # To limit test requests until permission has been obtained # from data provider if offline: try: self.getDataFromFile() self.parseStandingsXml() except: pass return request = urllib2.Request(self.url) request.add_header('Accept-encoding', 'gzip') request.add_header('User-agent', 'mlbviewer/2013sf3 https://sourceforge.net/projects/mlbviewer/ ([email protected])') opener = urllib2.build_opener() try: f = opener.open(request) except urllib2.URLError: self.error_str = "UrlError: Could not retrieve standings." 
            raise MLBUrlError
        compressedData = f.read()
        compressedStream = StringIO.StringIO(compressedData)
        gzipper = gzip.GzipFile(fileobj=compressedStream)
        self.xml = gzipper.read()
        self.parseStandingsXml()

    def parseStandingsJson(self):
        tmp = dict()
        self.last_update = self.json['standings_schedule_date']['standings_all_date_rptr']['standings_all_date'][0]['queryResults']['created'] + '-04:00'
        for league in self.json['standings_schedule_date']['standings_all_date_rptr']['standings_all_date']:
            if int(league['queryResults']['totalSize']) == 0:
                #raise Exception,self.jUrl
                return
            for div in STANDINGS_JSON_DIVISIONS.keys():
                if not tmp.has_key(div):
                    tmp[div] = []
                for team in league['queryResults']['row']:
                    if team['division_id'] == div:
                        tmp[div].append(team)
        for div in ( '201', '202', '200', '204', '205', '203' ):
            if len(tmp[div]) > 0:
                self.data.append( (STANDINGS_JSON_DIVISIONS[div],
                                   self.parseDivisionJsonData(tmp[div])) )

    def parseStandingsXml(self):
        xp = parseString(self.xml)
        for metadata in xp.getElementsByTagName('sports-metadata'):
            self.last_update = metadata.getAttribute('date-time')
        for standing in xp.getElementsByTagName('standing'):
            for div in standing.getElementsByTagName('sports-content-code'):
                type=div.getAttribute('code-type')
                if type == "division":
                    key = div.getAttribute('code-key')
                    division = STANDINGS_DIVISIONS[key]
                    self.data.append((division,self.parseDivisionData(standing)))

    def parseDivisionData(self,xp):
        out = []
        for tptr in xp.getElementsByTagName('team'):
            out.append(self.parseTeamData(tptr))
        return out

    def parseDivisionJsonData(self,division):
        out = []
        for team in division:
            out.append(self.parseTeamJsonData(team))
        return out

    def parseTeamJsonData(self,team):
        tmp = dict()
        tmp['first'] = team['team_short']
        tmp['file_code'] = team['file_code']
        tmp['G'] = int(team['w']) + int(team['l'])
        tmp['W'] = team['w']
        tmp['L'] = team['l']
        tmp['GB'] = team['gb']
        tmp['E'] = team['elim']
        tmp['WCGB'] = team['gb_wildcard']
        if tmp['WCGB'] == '':
            tmp['WCGB'] = '-'
        tmp['WP'] = team['pct']
        tmp['STRK'] = team['streak']
        tmp['RS'] = team['runs']
        tmp['RA'] = team['opp_runs']
        ( tmp['HW'], tmp['HL'] ) = team['home'].split('-')
        ( tmp['AW'], tmp['AL'] ) = team['away'].split('-')
        ( tmp['L10_W'], tmp['L10_L'] ) = team['last_ten'].split('-')
        return tmp

    def parseTeamData(self,tptr):
        tmp = dict()
        for name in tptr.getElementsByTagName('name'):
            tmp['first'] = name.getAttribute('first')
            tmp['last'] = name.getAttribute('last')
        for teamStats in tptr.getElementsByTagName('team-stats'):
            tmp['G'] = teamStats.getAttribute('events-played')
            tmp['GB'] = teamStats.getAttribute('games-back')
            for totals in teamStats.getElementsByTagName('outcome-totals'):
                scope = totals.getAttribute('alignment-scope')
                if scope == "events-all":
                    tmp['W'] = totals.getAttribute('wins')
                    tmp['L'] = totals.getAttribute('losses')
                    tmp['WP'] = totals.getAttribute('winning-percentage')
                    streak = totals.getAttribute('streak-type')
                    if streak == 'win':
                        tmp['STRK'] = 'W'
                    else:
                        tmp['STRK'] = 'L'
                    tmp['STRK'] += str(totals.getAttribute('streak-total'))
                    tmp['RS'] = totals.getAttribute('points-scored-for')
                    tmp['RA'] = totals.getAttribute('points-scored-against')
                elif scope == "events-home":
                    tmp['HW'] = totals.getAttribute('wins')
                    tmp['HL'] = totals.getAttribute('losses')
                elif scope == "events-away":
                    tmp['AW'] = totals.getAttribute('wins')
                    tmp['AL'] = totals.getAttribute('losses')
                elif scope == "":
                    scope = totals.getAttribute('duration-scope')
                    if scope == 'events-most-recent-5':
                        tmp['L5_W'] = totals.getAttribute('wins')
                        tmp['L5_L'] = totals.getAttribute('losses')
                    elif scope == 'events-most-recent-10':
                        tmp['L10_W'] = totals.getAttribute('wins')
                        tmp['L10_L'] = totals.getAttribute('losses')
        return tmp
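# Illustrative only: a minimal sketch of the row dicts that parseTeamData()
# and parseTeamJsonData() above return.  The keys shown are the ones the
# parsers populate; the sample values and the formatting helper are made up
# for illustration and are not part of the mlbviewer API.
def _example_format_standings_row(row):
    # Stats arrive as strings from the feed, so they are formatted as-is.
    return '%-15s %3s %3s %5s %5s' % (
        row.get('first', ''),    # short team name
        row.get('W', '0'),       # wins
        row.get('L', '0'),       # losses
        row.get('GB', '-'),      # games back
        row.get('WP', '.000'))   # winning percentage

_example_row = {
    'first': 'Example Team', 'file_code': 'exa',
    'W': '10', 'L': '5', 'G': 15, 'GB': '-', 'WP': '.667',
    'STRK': 'W3', 'RS': '70', 'RA': '55',
    'HW': '6', 'HL': '2', 'AW': '4', 'AL': '3',
    'L10_W': '7', 'L10_L': '3', 'E': '-', 'WCGB': '-',
}
# _example_format_standings_row(_example_row) yields one fixed-width
# standings line suitable for a curses listing.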
class MLBStandings:

    def __init__(self):
        self.data = []
        self.last_update = ""
        self.xml = ""
        self.date = datetime.datetime.now()
        self.url = 'https://erikberg.com/mlb/standings.xml'
        self.jUrl = 'http://mlb.mlb.com/lookup/json/named.standings_schedule_date.bam?&sit_code=%27h0%27&league_id=103&league_id=104&all_star_sw=%27N%27&version=2'
        self.http = MLBHttp(accept_gzip=True)

    #def getStandingsData(self,offline=False,datetime=None,format='json'):
    #    if format == 'xml':
    #        self.getStandingsXmlData(offline)
    #    else:
    #        self.getStandingsJsonData(offline)

    def getStandingsData(self, ymd_tuple=None, offline=False):
        # the game_date portion of the query string is appended dynamically
        # below, e.g. schedule_game_date.game_date=%272013/06/12%27&season=2013
        # If not given a date tuple, default to today.
        self.data = []
        if ymd_tuple is not None:
            now = datetime.datetime(ymd_tuple[0], ymd_tuple[1], ymd_tuple[2])
        else:
            now = datetime.datetime.now()
        self.jUrl = 'http://mlb.mlb.com/lookup/json/named.standings_schedule_date.bam?&sit_code=%27h0%27&league_id=103&league_id=104&all_star_sw=%27N%27&version=2'
        self.jUrl += '&season=%s&schedule_game_date.game_date=%%27%s%%27' % \
            ( now.year, now.strftime('%Y/%m/%d') )
        try:
            rsp = self.http.getUrl(self.jUrl)
        except urllib2.URLError:
            self.error_str = "UrlError: Could not retrieve standings."
            raise MLBUrlError
        try:
            self.json = json.loads(rsp)
        except ValueError:
            # a "Check back soon" maintenance page is not worth raising over;
            # any other unparseable response is
            if re.search(r'Check back soon', rsp) is not None:
                return
            raise Exception, rsp
        self.parseStandingsJson()

    def getDataFromFile(self):
        # For development purposes, let's parse from a file (activate web
        # code later)
        f = open('standings.xml')
        self.xml = f.read()
        f.close()

    def getStandingsXmlData(self, offline=False):
        # To limit test requests until permission has been obtained
        # from data provider
        if offline:
            try:
                self.getDataFromFile()
                self.parseStandingsXml()
            except:
                pass
            return
        request = urllib2.Request(self.url)
        request.add_header('Accept-encoding', 'gzip')
        request.add_header(
            'User-agent',
            'mlbviewer/2013sf3 https://sourceforge.net/projects/mlbviewer/ ([email protected])')
        opener = urllib2.build_opener()
        try:
            f = opener.open(request)
        except urllib2.URLError:
            self.error_str = "UrlError: Could not retrieve standings."
            raise MLBUrlError
        compressedData = f.read()
        compressedStream = StringIO.StringIO(compressedData)
        gzipper = gzip.GzipFile(fileobj=compressedStream)
        self.xml = gzipper.read()
        self.parseStandingsXml()

    def parseStandingsJson(self):
        tmp = dict()
        self.last_update = self.json['standings_schedule_date'][
            'standings_all_date_rptr']['standings_all_date'][0][
            'queryResults']['created'] + '-04:00'
        for league in self.json['standings_schedule_date'][
                'standings_all_date_rptr']['standings_all_date']:
            if int(league['queryResults']['totalSize']) == 0:
                #raise Exception,self.jUrl
                return
            for div in STANDINGS_JSON_DIVISIONS.keys():
                if not tmp.has_key(div):
                    tmp[div] = []
                for team in league['queryResults']['row']:
                    if team['division_id'] == div:
                        tmp[div].append(team)
        for div in ('201', '202', '200', '204', '205', '203'):
            if len(tmp[div]) > 0:
                self.data.append((STANDINGS_JSON_DIVISIONS[div],
                                  self.parseDivisionJsonData(tmp[div])))

    def parseStandingsXml(self):
        xp = parseString(self.xml)
        for metadata in xp.getElementsByTagName('sports-metadata'):
            self.last_update = metadata.getAttribute('date-time')
        for standing in xp.getElementsByTagName('standing'):
            for div in standing.getElementsByTagName('sports-content-code'):
                type = div.getAttribute('code-type')
                if type == "division":
                    key = div.getAttribute('code-key')
                    division = STANDINGS_DIVISIONS[key]
                    self.data.append((division, self.parseDivisionData(standing)))

    def parseDivisionData(self, xp):
        out = []
        for tptr in xp.getElementsByTagName('team'):
            out.append(self.parseTeamData(tptr))
        return out

    def parseDivisionJsonData(self, division):
        out = []
        for team in division:
            out.append(self.parseTeamJsonData(team))
        return out

    def parseTeamJsonData(self, team):
        tmp = dict()
        tmp['first'] = team['team_short']
        tmp['file_code'] = team['file_code']
        tmp['G'] = int(team['w']) + int(team['l'])
        tmp['W'] = team['w']
        tmp['L'] = team['l']
        tmp['GB'] = team['gb']
        tmp['E'] = team['elim']
        tmp['WCGB'] = team['gb_wildcard']
        if tmp['WCGB'] == '':
            tmp['WCGB'] = '-'
        tmp['WP'] = team['pct']
        tmp['STRK'] = team['streak']
        tmp['RS'] = team['runs']
        tmp['RA'] = team['opp_runs']
        (tmp['HW'], tmp['HL']) = team['home'].split('-')
        (tmp['AW'], tmp['AL']) = team['away'].split('-')
        (tmp['L10_W'], tmp['L10_L']) = team['last_ten'].split('-')
        return tmp

    def parseTeamData(self, tptr):
        tmp = dict()
        for name in tptr.getElementsByTagName('name'):
            tmp['first'] = name.getAttribute('first')
            tmp['last'] = name.getAttribute('last')
        for teamStats in tptr.getElementsByTagName('team-stats'):
            tmp['G'] = teamStats.getAttribute('events-played')
            tmp['GB'] = teamStats.getAttribute('games-back')
            for totals in teamStats.getElementsByTagName('outcome-totals'):
                scope = totals.getAttribute('alignment-scope')
                if scope == "events-all":
                    tmp['W'] = totals.getAttribute('wins')
                    tmp['L'] = totals.getAttribute('losses')
                    tmp['WP'] = totals.getAttribute('winning-percentage')
                    streak = totals.getAttribute('streak-type')
                    if streak == 'win':
                        tmp['STRK'] = 'W'
                    else:
                        tmp['STRK'] = 'L'
                    tmp['STRK'] += str(totals.getAttribute('streak-total'))
                    tmp['RS'] = totals.getAttribute('points-scored-for')
                    tmp['RA'] = totals.getAttribute('points-scored-against')
                elif scope == "events-home":
                    tmp['HW'] = totals.getAttribute('wins')
                    tmp['HL'] = totals.getAttribute('losses')
                elif scope == "events-away":
                    tmp['AW'] = totals.getAttribute('wins')
                    tmp['AL'] = totals.getAttribute('losses')
                elif scope == "":
                    scope = totals.getAttribute('duration-scope')
                    if scope == 'events-most-recent-5':
                        tmp['L5_W'] = totals.getAttribute('wins')
                        tmp['L5_L'] = totals.getAttribute('losses')
                    elif scope == 'events-most-recent-10':
                        tmp['L10_W'] = totals.getAttribute('wins')
                        tmp['L10_L'] = totals.getAttribute('losses')
        return tmp
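# Illustrative only: a rough sketch of how MLBStandings above is typically
# driven.  getStandingsData() fills self.data with (division_name, rows)
# tuples, where each row is a dict like the one built by parseTeamJsonData().
# The sample date and the column selection are arbitrary; network access and
# the STANDINGS_JSON_DIVISIONS table are assumed to be available.
def _example_print_standings(ymd_tuple=None):
    standings = MLBStandings()
    try:
        standings.getStandingsData(ymd_tuple)   # e.g. (2013, 6, 12) or None for today
    except MLBUrlError:
        return ''
    lines = []
    for (division, rows) in standings.data:
        lines.append(division)
        for row in rows:
            lines.append('  %-15s %3s-%3s  GB: %s' %
                         (row['first'], row['W'], row['L'], row['GB']))
    return '\n'.join(lines)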
class MLBRssWin(MLBListWin):

    def __init__(self,myscr,mycfg):
        self.myscr = myscr
        self.mycfg = mycfg
        self.statuswin = curses.newwin(1,curses.COLS-1,curses.LINES-1,0)
        self.titlewin = curses.newwin(2,curses.COLS-1,0,0)
        self.rssUrl = 'http://mlb.mlb.com/partnerxml/gen/news/rss/mlb.xml'
        self.milbRssUrl = 'http://www.milb.com/partnerxml/gen/news/rss/milb.xml'
        self.data = []
        self.records = []
        self.current_cursor = 0
        self.record_cursor = 0
        self.game_cursor = 0
        self.htmlParser = HTMLParser()
        self.http = MLBHttp(accept_gzip=True)

    def getFeedFromUser(self):
        feed = self.prompter(self.statuswin,'Enter teamcode of feed:')
        feed = feed.strip()
        if self.mycfg.get('milbtv') and feed == "" or feed == "milb":
            feed = "milb"
        elif feed == "" or feed == "mlb":
            feed = 'mlb'
        elif feed not in TEAMCODES.keys():
            self.statusWrite('Invalid teamcode: '+feed,wait=2)
            return
        self.statusWrite('Retrieving feed for %s...'%feed,wait=1)
        # in this case, overwrite rather than aggregate
        self.data = []
        self.getRssData(team=feed)

    def getRssData(self,team='mlb'):
        if self.mycfg.get('milbtv'):
            try:
                team = TEAMCODES[team][2]
            except:
                pass
            rssUrl = self.milbRssUrl.replace('milb.xml','%s.xml'%team)
        else:
            rssUrl = self.rssUrl.replace('mlb.xml','%s.xml'%team)
        try:
            rsp = self.http.getUrl(rssUrl)
        except:
            self.error_str = "UrlError: Could not retrieve RSS."
            self.statusWrite(self.error_str,wait=2)
            return
            #raise MLBUrlError
        try:
            xp = parseString(rsp)
        except:
            self.error_str = "XmlError: Could not parse RSS."
            raise MLBXmlError
        # append rather than overwrite to allow multiple feeds to be aggregated
        #self.data = []
        self.parseRssData(xp)
        # this is all just initialization; setCursors should be called to
        # align with listings position
        self.game_cursor = 0
        self.current_cursor = 0
        self.record_cursor = 0
        viewable = curses.LINES-4
        if viewable % 2 > 0:
            viewable -= 1
        self.records = self.data[:viewable]

    def setCursors(self,current_cursor,record_cursor):
        self.game_cursor = current_cursor + record_cursor
        # scoreboard scrolls two lines at a time
        absolute_cursor = self.game_cursor * 2
        viewable = curses.LINES-4
        if viewable % 2 > 0:
            viewable -= 1
        # integer division will give us the correct top record position
        try:
            self.record_cursor = ( absolute_cursor / viewable ) * viewable
        except:
            raise MLBCursesError,"Screen too small."
        # and find the current position in the viewable screen
        self.current_cursor = absolute_cursor - self.record_cursor
        # and finally collect the viewable records
        self.records = self.data[self.record_cursor:self.record_cursor+viewable]

    def parseRssData(self,xptr):
        for item in xptr.getElementsByTagName('item'):
            title = item.getElementsByTagName('title')[0].childNodes[0].data
            link = item.getElementsByTagName('link')[0].childNodes[0].data
            try:
                link = self.htmlParser.unescape(link)
            except:
                raise Exception,repr(link)
            try:
                desc = item.getElementsByTagName('description')[0].childNodes[0].data
            except IndexError:
                desc = ""
            self.data.append((title,link,desc))

    def Up(self):
        if self.current_cursor - 2 < 0 and self.record_cursor - 2 >= 0:
            viewable = curses.LINES-4
            if viewable % 2 > 0:
                viewable -= 1
            self.current_cursor = viewable-2
            #if self.current_cursor % 2 > 0:
            #    self.current_cursor -= 1
            if self.record_cursor - viewable < 0:
                self.record_cursor = 0
            else:
                self.record_cursor -= viewable
            #if self.record_cursor % 2 > 0:
            #    self.record_cursor -= 1
            self.records = self.data[self.record_cursor:self.record_cursor+viewable]
        elif self.current_cursor > 0:
            self.current_cursor -= 2

    def Down(self):
        viewable = curses.LINES-4
        if self.current_cursor + 2 >= len(self.records) and\
           ( self.record_cursor + self.current_cursor + 2 ) < len(self.data):
            self.record_cursor += self.current_cursor + 2
            self.current_cursor = 0
            if ( self.record_cursor + viewable ) % 2 > 0:
                self.records = self.data[self.record_cursor:self.record_cursor+curses.LINES-5]
            else:
                self.records = self.data[self.record_cursor:self.record_cursor+curses.LINES-4]
        # elif not at bottom of window
        elif self.current_cursor + 2 < len(self.records) and\
             self.current_cursor + 2 < curses.LINES-4:
            if (self.current_cursor + 2 + self.record_cursor) < len(self.data):
                self.current_cursor += 2
        # silent else: do nothing at bottom of window and bottom of records

    def Refresh(self):
        self.myscr.clear()
        # display even number of lines since games will be two lines
        wlen = curses.LINES-4
        if wlen % 2 > 0:
            wlen -= 1
        for n in range(wlen):
            if n < len(self.records):
                cursesflags = 0
                game_cursor = ( n + self.record_cursor ) / 2
                ( title, link, desc ) = self.data[game_cursor]
                if n % 2 > 0:
                    # second line of the feed item, underline it for division
                    # between items
                    if len(desc) > curses.COLS-2:
                        s = desc[:curses.COLS-5]
                        s += '...'
                    else:
                        s = desc
                    pad = curses.COLS-2 - len(s)
                    s += ' '*pad
                    if n - 1 == self.current_cursor:
                        cursesflags |= curses.A_UNDERLINE|curses.A_REVERSE
                    else:
                        cursesflags = curses.A_UNDERLINE
                    self.myscr.addnstr(n+2,0,s,curses.COLS-2,cursesflags)
                else:
                    s = title
                    pad = curses.COLS - 2 - len(s)
                    if n == self.current_cursor:
                        cursesflags |= curses.A_REVERSE|curses.A_BOLD
                    else:
                        cursesflags = curses.A_BOLD
                    self.myscr.addstr(n+2,0,s,cursesflags)
                    # don't bold the pad or it results in an uneven looking
                    # highlight
                    cursesflags ^= curses.A_BOLD
                    self.myscr.addstr(n+2,len(s),' '*pad,cursesflags)
            else:
                s = ' '*(curses.COLS-1)
                self.myscr.addnstr(n+2,0,s,curses.COLS-2)
        self.myscr.refresh()

    def titleRefresh(self,mysched):
        self.titlewin.clear()
        # RSS is always today - there are no archives
        now = datetime.datetime.now()
        titlestr = "RSS FEED FOR " +\
            str(now.month) + '/' +\
            str(now.day) + '/' +\
            str(now.year)
        # TODO: '(Use arrow keys to change days)'
        padding = curses.COLS - (len(titlestr) + 6)
        titlestr += ' '*padding
        pos = curses.COLS - 6
        self.titlewin.addstr(0,0,titlestr)
        self.titlewin.addstr(0,pos,'H', curses.A_BOLD)
        self.titlewin.addstr(0,pos+1, 'elp')
        self.titlewin.hline(1, 0, curses.ACS_HLINE, curses.COLS-1)
        self.titlewin.refresh()

    def statusRefresh(self):
        game_cursor = ( self.current_cursor + self.record_cursor ) / 2
        # BEGIN curses debug code
        if self.mycfg.get('curses_debug'):
            wlen = curses.LINES-4
            if wlen % 2 > 0:
                wlen -= 1
            status_str = "game_cursor=%s, wlen=%s, current_cursor=%s, record_cursor=%s, len(records)=%s" %\
                ( game_cursor, wlen, self.current_cursor,
                  self.record_cursor, len(self.records) )
            self.statuswin.clear()
            self.statuswin.addnstr(0,0,status_str,curses.COLS-2,curses.A_BOLD)
            self.statuswin.refresh()
            return
        # END curses debug code
        # use the url for status now
        status_str = self.data[game_cursor][1][:curses.COLS-2]
        padding = curses.COLS - len(status_str)
        # shrink the status string to fit if it is too many chars wide for
        # screen
        if padding < 0:
            status_str = status_str[:padding]
        status_str += ' '*padding
        self.statuswin.addnstr(0,0,status_str,curses.COLS-2,curses.A_BOLD)
        self.statuswin.refresh()
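# Illustrative only: a minimal curses driver for MLBRssWin above, assuming a
# terminal session and a successful feed fetch.  A plain dict stands in for
# the mlbviewer config object (MLBRssWin only calls mycfg.get() here), and the
# key bindings are an arbitrary choice for this sketch, not the bindings
# mlbviewer itself installs.
def _example_rss_browser(stdscr):
    mycfg = {'milbtv': False, 'curses_debug': False}
    rsswin = MLBRssWin(stdscr, mycfg)
    rsswin.getRssData(team='mlb')     # fetch and parse the MLB.com feed
    rsswin.setCursors(0, 0)
    while True:
        rsswin.titleRefresh(None)     # the mysched argument is unused by titleRefresh
        rsswin.Refresh()
        rsswin.statusRefresh()
        key = stdscr.getch()
        if key == curses.KEY_UP:
            rsswin.Up()
        elif key == curses.KEY_DOWN:
            rsswin.Down()
        elif key in (ord('q'), ord('Q')):
            break

# To try the sketch: curses.wrapper(_example_rss_browser)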