def make_record_updates(
    comparison_map: Dict[str, Dict[str, ATRecord]],
    assume_newer: bool = False,
    delete_unmatched_except: Optional[Tuple] = None,
):
    """Update Airtable from newer source records"""
    record_type = MC.get()
    api_key, base, table, typecast = MC.at_connect_info()
    airtable = Airtable(base, table, api_key=api_key)

    announced = False  # tracks whether the "Doing updates" banner was printed

    # Phase 1: records that exist only on the source side become new rows.
    new_records = comparison_map["an_only"]
    if new_records:
        announced = True
        prinl(f"Doing updates for {record_type} records...")
        prinl(f"Uploading {len(new_records)} new record(s)...")
        payload = [rec.all_fields() for rec in new_records.values()]
        airtable.batch_insert(payload, typecast=typecast)

    # Phase 2: matched records whose source side is newer get field updates.
    newer: Dict[str, ATRecord] = comparison_map["an_newer"]
    if newer:
        pending: Dict[str, Dict[str, Any]] = {}
        for rec in newer.values():
            changes = rec.find_at_field_updates(assume_newer=assume_newer)
            if changes:
                pending[rec.at_match.record_id] = changes
        if pending:
            if not announced:
                prinl(f"Doing updates for {record_type} records...")
                announced = True
            prinl(f"Updating {len(pending)} existing record(s)...")
            for count, (rec_id, changes) in enumerate(pending.items(), start=1):
                airtable.update(rec_id, changes, typecast=typecast)
                # Periodic progress marker for long update runs.
                if count % 25 == 0:
                    prinlv(f"Processed {count}/{len(pending)}...")

    if not announced:
        prinlv(f"No updates required for {record_type} records.")

    # Phase 3: optionally delete Airtable-only rows, sparing any whose
    # designated field holds the designated exception value.
    at_only = comparison_map["at_only"]
    if at_only and delete_unmatched_except:
        field_name = delete_unmatched_except[0]
        field_val = delete_unmatched_except[1]
        doomed = [
            rec.record_id
            for rec in at_only.values()
            if rec.custom_fields.get(field_name) != field_val
        ]
        if doomed:
            prinl(
                f"Deleting {len(doomed)} unmatched Airtable record(s)...")
            airtable.batch_delete(doomed)
def airtable_export(json_file, table, base_key='appaTgY78Ycqm3PkP'):
    """Batch-insert a list of JSON records into an Airtable table.

    Each item is flattened with ``pandas.json_normalize`` (nested keys
    become dotted column names), then round-tripped through JSON so the
    uploaded record is a plain dict of JSON-native values.

    Args:
        json_file: iterable of dict records to upload.
        table: name of the destination Airtable table.
        base_key: Airtable base id; defaults to the previously
            hard-coded base for backward compatibility.

    Requires the ``AIRTABLE_KEY`` environment variable to be set.
    """
    airtable = Airtable(base_key, table, api_key=os.environ['AIRTABLE_KEY'])
    records = []
    for item in json_file:
        # json_normalize already returns a DataFrame; one row per item
        # is expected here — TODO confirm items are single records.
        flat = pd.json_normalize(item)
        records.append(json.loads(flat.to_json(orient='records', lines=True)))
    # typecast=True lets Airtable coerce string values to column types.
    airtable.batch_insert(records, typecast=True)
def puncher(y):
    """Batch-insert the records in *y*, snapshot the whole table to
    make_api.pickle, and return the insert results."""
    airtable = Airtable('xxx', 'xxx', api_key='xxx')
    inserted = airtable.batch_insert(y)
    # Re-read the full table after the insert and persist it locally.
    snapshot = airtable.get_all()
    with open('make_api.pickle', 'wb') as handle:
        pickle.dump(snapshot, handle)
    return inserted
def upload_attendence_file(form, id):
    """Read temp_attendence.csv, remap its columns to Airtable field
    names, enrich with event metadata, and batch-insert the rows into
    the Event Attendance table."""
    event_date = get_event_date(id)
    airtable_connection = Airtable(
        'app1TMHqTJRUWnrvB',
        'Event Attendance',
        api_key=settings.EVENT_ATTENDANCE_API_KEY,
    )
    frame = pd.read_csv('temp_attendence.csv')
    # Two-step column mapping: CSV header -> form field -> Airtable field.
    frame = rename_columns(
        frame, make_csv_to_form_dict(form), make_form_to_airtable_dict()
    )
    frame = update_columns(frame, id)
    frame = frame.fillna("")
    if event_date:
        frame["Meeting Date"] = event_date
    frame["Event Type"] = form["event_type"]
    airtable_connection.batch_insert(frame.to_dict('records'))
# Poll a serial port forever, logging moisture readings and water-flow
# events to Airtable.
# NOTE(review): assumes `ser` (serial port), `now` (last-upload time), and
# `flow` (flow-reading state flag) are initialized before this loop — confirm.
while True:
    if (ser.in_waiting > 0):
        line = (str(ser.readline()))
        # Strip the bytes-literal prefix and trailing "\r\n'" residue.
        strline = line.replace("b'", " ")[:-5]
        if '!' in strline:
            # Keep only the part before the '!' delimiter.
            strline = strline.split('!')[0]
        print(strline)
        # Throttle moisture uploads to at most one every 10 seconds.
        if (time.time() - now > 10):
            airtable = Airtable('app42vXuBNsAVyqTG', 'Test', 'keyVVJGhlDj5vfPCd')
            if 'Moisture Percentage =' in strline:
                Value = strline.replace('Moisture Percentage = ', ' ')
                Time = datetime.now()  # NOTE(review): unused afterwards
                current_time = datetime.now().strftime("%H:%M:%S")
                records = [{'Value': Value, 'Time': current_time}]
                airtable.batch_insert(records)
                now = time.time()
        # Water-valve events: 'ON' starts a flow measurement window.
        if 'ON' in strline:
            flow_time = time.time()
            print("water")
            airtable = Airtable('app42vXuBNsAVyqTG', 'Water', 'keyVVJGhlDj5vfPCd')
            water_time = datetime.now().strftime("%H:%M:%S")
            flow = True
        elif flow == True:
            # While in the flow window, capture the reported flow value.
            if 'Flow= ' in strline:
                print("flow")
                Flow = strline.replace("Flow= ", " ")
                flow = False
        elif 'OFF' in strline:
            print('off')
# Exercise the basic Airtable CRUD surface end to end.

# Create one record, then read it back by id.
rec = airtable.insert({"text": "A", "number": 1, "boolean": True})
assert airtable.get(rec["id"])

# Partial update: only the "text" field changes.
rv = airtable.update(rec["id"], {"text": "B"})
assert rv["fields"]["text"] == "B"

# Replace swaps out the entire field set.
rv = airtable.replace(rec["id"], {"text": "C"})
assert rv["fields"]["text"] == "C"

# Full-table listing, then single-record delete.
assert airtable.get_all()
assert airtable.delete(rec["id"])

# Round-trip 100 records through batch insert and batch delete.
records = airtable.batch_insert(
    [{"text": "A", "number": 1, "boolean": True} for _ in range(100)]
)
records = airtable.batch_delete([r["id"] for r in records])
assert len(records) == 100
def copy_to_airtable(self):
    """ copies the DB to airtables

    Mirrors the Pilots, Characters, Attributes and AttributeGroups
    tables from the local DB session into the Airtable base, guarded
    by a 'ready' flag in the State table so readers never see a
    half-populated copy. Errors are logged, never raised.
    """
    try:
        logger.debug('Starting Airtable copy...')

        # set the ready state to false to indicate that it's not ready
        self._set_airtable_ready_state('false')

        pilot_table = Airtable(
            self.airtable_basekey, 'Pilots', self.airtable_apikey)
        character_table = Airtable(
            self.airtable_basekey, 'Characters', self.airtable_apikey)
        attribute_table = Airtable(
            self.airtable_basekey, 'Attributes', self.airtable_apikey)
        attribute_groups_table = Airtable(
            self.airtable_basekey, 'AttributeGroups', self.airtable_apikey)

        # delete previous table entries so the copy is a clean mirror
        logger.debug("Deleting previous table entries...")
        for air_table in (pilot_table, character_table,
                          attribute_table, attribute_groups_table):
            air_table.batch_delete(
                [entry['id'] for entry in air_table.get_all()])
        logger.debug("Previous table entries deleted!")

        # copy pilots table
        logger.debug("Copying Pilots table...")
        pilot_table.batch_insert([
            {
                'id': pilot.id,
                'discord_id': pilot.discord_id,
                'discord_name': pilot.discord_name,
                'discord_discriminator': pilot.discord_discriminator
            }
            for pilot in session.query(Pilot)
        ])
        logger.debug("Pilots table copied!")

        # copy characters table
        logger.debug("Copying Characters table...")
        character_table.batch_insert([
            {
                'id': character.id,
                'pilot_id': character.pilot_id,
                'name': character.name
            }
            for character in session.query(Character)
        ])
        logger.debug("Characters table copied!")

        # copy attributes
        logger.debug("Copying Attribute table...")
        attribute_table.batch_insert([
            {
                'id': attribute.id,
                'attribute_group_id': attribute.attribute_group_id,
                'key': attribute.key,
                'value': attribute.value,
                'friendly_name': attribute.friendly_name
            }
            for attribute in session.query(Attribute)
        ])
        logger.debug("Attribute table copied!")

        # copy attributegroups
        logger.debug("Copying AttributeGroup table...")
        attribute_groups_table.batch_insert([
            {
                'id': attribute_group.id,
                'pilot_id': attribute_group.pilot_id,
                'name': attribute_group.name,
                'description': attribute_group.description
            }
            for attribute_group in session.query(AttributeGroup)
        ])
        logger.debug("AttributeGroup table copied!")

        # set the ready state to true to indicate that it's ready
        self._set_airtable_ready_state('true')
        logger.debug('Airtable copy complete!')
    # was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; narrowed to Exception
    except Exception:
        logger.error(
            f"Failed to copy to airtable:\n{traceback.format_exc()}")

def _set_airtable_ready_state(self, value):
    """Write the 'ready' flag row in the State table to *value*."""
    state_table = Airtable(
        self.airtable_basekey, 'State', self.airtable_apikey)
    ready_id = state_table.match('key', 'ready')['id']
    state_table.replace(ready_id, {'key': 'ready', 'value': value})
class Airtable(object):
    """
    `Args:`
        base_key: str
            The key of the Airtable base that you will interact with.
        table_name: str
            The name of the table in the base. The table name is the
            equivilant of the sheet name in Excel or GoogleDocs.
        api_key: str
            The Airtable provided api key. Not required if ``AIRTABLE_API_KEY``
            env variable set.
    """

    def __init__(self, base_key, table_name, api_key=None):
        self.api_key = check_env.check('AIRTABLE_API_KEY', api_key)
        self.at = AT(base_key, table_name, self.api_key)

    def get_record(self, record_id):
        """
        Returns a single record.

        `Args:`
            record_id: str
                The Airtable record id
        `Returns:`
            A dictionary of the record
        """
        return self.at.get(record_id)

    def get_records(self, fields=None, max_records=None, view=None,
                    formula=None, sort=None):
        """
        `Args:`
            fields: str or lst
                Only return specified column or list of columns. The column
                name is case sensitive
            max_records: int
                The maximum total number of records that will be returned.
            view: str
                If set, only the records in that view will be returned. The
                records will be sorted according to the order of the view.
            formula: str
                The formula will be evaluated for each record, and if the result
                is not 0, false, "", NaN, [], or #Error! the record will be included
                in the response.

                If combined with view, only records in that view which satisfy the
                formula will be returned. For example, to only include records where
                ``COLUMN_A`` isn't empty, pass in: ``"NOT({COLUMN_A}='')"``

                For more information see
                `Airtable Docs on formulas. <https://airtable.com/api>`_

                Usage - Text Column is not empty:

                ``airtable.get_all(formula="NOT({COLUMN_A}='')")``

                Usage - Text Column contains:

                ``airtable.get_all(formula="FIND('SomeSubText', {COLUMN_STR})=1")``

            sort: str or lst
                Specifies how the records will be ordered. If you set the view
                parameter, the returned records in that view will be sorted by these
                fields. If sorting by multiple columns, column names can be passed as
                a list. Sorting Direction is ascending by default, but can be reversed
                by prefixing the column name with a minus sign -. Example usage:
                ``airtable.get(sort=['ColumnA', '-ColumnB'])``
        `Returns:`
            Parsons Table
                See :ref:`parsons-table` for output options.
        """

        # Raises an error if sort is None type. Thus, only adding if populated.
        # Bug fix: `view` was previously accepted but never passed through,
        # so it was silently ignored.
        kwargs = {'fields': fields, 'max_records': max_records,
                  'view': view, 'formula': formula}
        if sort:
            kwargs['sort'] = sort

        tbl = Table(self.at.get_all(**kwargs))

        # Unpack the 'fields' dict so each Airtable column is its own column.
        return tbl.unpack_dict(column='fields', prepend=False)

    def insert_record(self, row, typecast=False):
        """
        Insert a single record into an Airtable.

        `Args:`
            row: dict
                Fields to insert. Must be dictionary with Column names as Key.
            typecast: boolean
                Automatic data conversion from string values.
        `Returns:`
            Dictionary of inserted row
        """
        # Bug fix: `typecast` was documented but the signature did not
        # accept it; added backward-compatibly with default False.
        resp = self.at.insert(row, typecast=typecast)
        logger.info('Record inserted')
        return resp

    def insert_records(self, table, typecast=False):
        """
        Insert multiple records into an Airtable. The columns in your Parsons table must
        exist in the Airtable. The method will attempt to map based on column name, so the
        order of the columns is irrelevant.

        `Args:`
            table: A Parsons Table
                Insert a Parsons table
            typecast: boolean
                Automatic data conversion from string values.
        `Returns:`
            List of dictionaries of inserted rows
        """
        # Bug fix: `typecast` was documented but the signature did not
        # accept it; added backward-compatibly with default False.
        resp = self.at.batch_insert(table, typecast=typecast)
        logger.info(f'{table.num_rows} records inserted.')
        return resp

    def update_record(self, record_id, fields, typecast=False):
        """
        Updates a record by its record id. Only Fields passed are updated, the rest are left as
        is.

        `Args:`
            record_id: str
                The Airtable record id
            fields: dict
                Fields to insert. Must be dictionary with Column names as Key.
            typecast: boolean
                Automatic data conversion from string values.
        `Returns:`
            ``None``
        """
        resp = self.at.update(record_id, fields, typecast=typecast)
        logger.info(f'{record_id} updated')
        return resp
# Sync local `methods` entries into Airtable: update rows whose fields
# differ, and queue rows with no Airtable id for insertion.
# NOTE(review): reconstructed block structure — the placement of the
# batch_insert/prints relative to the loop was inferred; confirm.
inserts = []
updated = []
for entry in methods:
    # Drop the Airtable id and NaN-valued keys before comparing/writing.
    fields = strip_dict_nans({k: v for k, v in entry.items() if k != "id"})
    dst_fields = raw_map.get(entry['method'], {})
    if record_needs_update(fields, dst_fields):
        updated.append(fields)
        air_methods.update_by_field("method", entry["method"], fields=fields)
    # NOTE(review): an entry with a NaN id that also "needs update" gets
    # both an update_by_field call and an insert — possibly a double
    # write; confirm intended.
    if pd.isna(entry["id"]):
        inserts.append(fields)
if inserts:
    air_methods.batch_insert(inserts)
print("Updated methods: ", ", ".join([d['method'] for d in updated]))
print("Inserted methods: ", ", ".join([d['method'] for d in inserts]))
#air_methods.batch_update(list(map(strip_dict_nans, method_inserts)))
#air_backends.batch_insert(backend_inserts)
# -
# ## Link method records to backends
# Index existing backend rows by their 'backend_method' field.
raw_backend = {
    x['fields']['backend_method']: x['fields']
    for x in air_backends.get_all()
}