def fetchStartList(self, startListId=None):
    """Get startlist data from API. Add to db if available."""
    self._checkStartListId(startListId)
    startList, startListItems = getStartlist(startListId=startListId)
    # Nothing to record unless both tables came back non-empty
    if not (_notnull(startList) and _notnull(startListItems)):
        return
    self.startList = startList
    self.startListItems = startListItems
    self.dbfy([('startList', ['startListId']),
               ('startListItems', ['startListItemId'])])
    # If the caller didn't supply an id, derive it from the fetched data
    if not startListId:
        startListId = startList['startListId'].iloc[0]
    # Cache the items per startListId, first fetch wins
    if startListId not in self.startLists:
        self.startLists[startListId] = startListItems
def getStartlistId(stage='', startListId=None, legs=None, stages=None): """Get a generic startListId.""" # We essentially hack the precedence ordering # TO DO - we should warn from this # If passed something as first parameter (stage) that is actually # a startListId and we have no startListId, use _stage as startListId _stage = _jsInt(stage) if startListId is None and _stage and legs and _stage in legs[ 'startListId']: startListId = _stage # If we don't have a valid startListId, try to finesse one from stage if _isnull(_jsInt(startListId)) or not (legs and _jsInt(startListId) and _jsInt(startListId) in legs['startListId']): # If the startListId is a str, is it a stage designator? if isinstance(startListId, str) and startListId.startswith('SS'): stage = startListId if stage and isinstance(stage, str) and stage.lower().startswith('current'): startListId = getCurrentLeg(legs=legs)['startListId'] elif stage: stage_details = getStageDetails(startListId, stages=stages) if _notnull(stage_details): startListId = stage_details['startListId'] if not startListId: startListId = getCurrentLeg(legs=legs)['startListId'] return startListId
def _parseItinerary(r):
    """Parse itinerary response.

    :param r: HTTP response whose ``.json()`` is an itinerary payload with
        nested ``itineraryLegs`` / ``itinerarySections`` / ``controls`` /
        ``stages`` records.
    :return: ``(itinerary, legs, sections, controls, stages)`` dataframes;
        all but ``itinerary`` are ``None`` when the payload has no legs.
    """
    # Decode the JSON payload once instead of re-parsing it five times
    data = r.json()
    itinerary = json_normalize(data).drop(columns='itineraryLegs')
    legs = json_normalize(data, 'itineraryLegs')
    if _notnull(legs):
        legs = legs.drop(columns='itinerarySections')
        sections = json_normalize(
            data, ['itineraryLegs', 'itinerarySections']).drop(
                columns=['controls', 'stages'])
        # Carry the leg ids down onto each flattened child record
        controls = json_normalize(
            data, ['itineraryLegs', 'itinerarySections', 'controls'],
            meta=[['itineraryLegs', 'itineraryLegId'],
                  ['itineraryLegs', 'startListId']])
        controls.rename(columns={
            'itineraryLegs.itineraryLegId': 'itineraryLegId',
            'itineraryLegs.startListId': 'startListId'
        }, inplace=True)
        stages = json_normalize(
            data, ['itineraryLegs', 'itinerarySections', 'stages'],
            meta=[['itineraryLegs', 'itineraryLegId'],
                  ['itineraryLegs', 'startListId']])
        stages.rename(columns={
            'itineraryLegs.itineraryLegId': 'itineraryLegId',
            'itineraryLegs.startListId': 'startListId'
        }, inplace=True)
    else:
        legs = sections = controls = stages = None
    return (itinerary, legs, sections, controls, stages)
def _parseSplitTimes(r):
    """Parser for raw splittimes response.

    :param r: HTTP response whose ``.json()`` carries ``splitPoints`` and
        nested ``entrySplitPointTimes`` / ``splitPointTimes`` records.
    :return: ``(splitPoints, entrySplitPointTimes, splitPointTimes)``
        dataframes.
    """
    # Decode the JSON payload once instead of re-parsing it three times
    data = r.json()
    splitPoints = json_normalize(data, 'splitPoints')
    entrySplitPointTimes = json_normalize(data, 'entrySplitPointTimes',
                                          meta='stageId')
    splitPointTimes = json_normalize(
        data, ['entrySplitPointTimes', 'splitPointTimes'], meta='stageId')
    # The nested column has been flattened out; drop it from the parent
    # table (only when something was actually flattened)
    if _notnull(splitPointTimes):
        entrySplitPointTimes.drop(columns='splitPointTimes', inplace=True)
    return (splitPoints, entrySplitPointTimes, splitPointTimes)
def upsert(self, table, data=None, pk=None):
    """
    Upsert data to database.

    If we forget to create a database, this will create one.

    Accepts several calling conventions:

    - ``upsert('table', data, pk)`` — one table with explicit data;
    - ``upsert('attr', pk=pk)`` — one table, data from ``self.attr``;
    - ``upsert(('attr', pk))`` — as above, packed in a tuple;
    - ``upsert([('attr', pk), ...])`` — several tables from attributes.

    ``data`` may be a ``pd.DataFrame`` or a ``dict``; empty / null data
    is skipped. Failures are reported via ``warning`` rather than raised.
    """
    #print(f'upserting into {table}')
    _upserts = []
    if isinstance(table, str):
        if _notnull(data) and pk is not None:
            # One table
            _upserts.append((table, data, pk))
        # Check data is None. It may be {}, which would be data...
        elif hasattr(self, table) and data is None and pk is not None:
            # One table, data from self attribute
            data = getattr(self, table)
            _upserts.append((table, data, pk))
    elif isinstance(table, tuple) and len(table) == 2 and _isnull(data):
        (_table, _pk) = table
        if isinstance(_table, str) and hasattr(self,
                                               _table) and _pk is not None:
            _data = getattr(self, _table)
            _upserts.append((_table, _data, _pk))
    elif isinstance(table, list) and _isnull(data):
        # Several tables from self attributes: [(table, pk), ..]
        for _t in table:
            if isinstance(_t, tuple) and len(_t) == 2:
                (_table, _pk) = _t
                if isinstance(_table, str) and hasattr(
                        self, _table) and _pk is not None:
                    _data = getattr(self, _table)
                    _upserts.append((_table, _data, _pk))
    # TO DO - if the pk is none, and data is a df, use the df index?
    if not _checkattr(self, 'dbname') and not _checkattr(self, 'db'):
        self.db_connect()

    def _write(_table, records, _pk):
        """Upsert records; fall back to insert-with-replace, else warn."""
        # TO DO - the try is to cope with this issue:
        # https://github.com/simonw/sqlite-utils/issues/73#issuecomment-571138093
        # The ignore does not result in a dupe: the original row is left
        # in place.
        # NB: `except Exception` (not bare `except:`) so Ctrl-C and
        # SystemExit still propagate.
        try:
            self.db[_table].upsert_all(records, pk=_pk, alter=True)
        except Exception:
            try:
                self.db[_table].insert_all(records, pk=_pk, alter=True,
                                           replace=True)
            except Exception:
                warning(f"Couldn't add data to {_table} with PK {_pk}")

    # The alter=True allows the table to be modified if we have extra columns
    for (_table, _data, _pk) in _upserts:
        if isinstance(_data,
                      pd.DataFrame) and _notnull(_data) and _pk is not None:
            _write(_table, _data.to_dict(orient='records'), _pk)
        elif isinstance(_data, dict) and _data and _pk is not None:
            _write(_table, _data, _pk)