Example #1
0
 def sources(self):
     """Yield a Source (name, url) for each row of the resource-locations table."""
     locations = airtable.Airtable(self.base, RESOURCE_LOCATIONS, self.key)
     for record in locations.get_all():
         fields = record['fields']
         yield Source(name=fields.get('Name'), url=fields.get('URL'))
Example #2
0
def getTable(tablename):
    """Return an Airtable client bound to *tablename* in the fellows base."""
    # NOTE(review): base id and api key are hard-coded; the key should be
    # rotated and loaded from the environment instead of source control.
    return airtable.Airtable('appepRGO7wI0cmN4q', tablename,
                             'keyi9WRicMVH9g2ui')
Example #3
0
import os
import pathlib
import urllib

from shutil import copyfile

import airtable
import bleach
import dotenv
import pypandoc
import requests_html
import wayback

# get the airtable credentials
dotenv.load_dotenv()  # pulls AIRTABLE_* settings from a local .env file
# NOTE(review): `os` is used here but no `import os` is visible in the
# import block above — confirm it is imported earlier in the file.
key = os.environ.get('AIRTABLE_API_KEY')
base_id = os.environ.get('AIRTABLE_RESEARCH_BASE_ID')
# Client bound to the 'Projects' table of the research base.
projects = airtable.Airtable(base_id, 'Projects', key)

# a web client (requests_html session, reused for all fetches)
http = requests_html.HTMLSession()

# Wayback Machine client, used by wayback_search() below.

wb = wayback.WaybackClient()


def wayback_search(url):
    try:
        memento = next(wb.search(url))
        resp = http.get(memento.raw_url)
        return resp
    except StopIteration:
Example #4
0
 def setUp(self):
     """Bind fake credentials and construct the Airtable client under test."""
     self.api_key = FAKE_API_KEY
     self.base_id = FAKE_BASE_ID
     self.airtable = airtable.Airtable(self.base_id, self.api_key)
import os

from apscheduler.schedulers.background import BackgroundScheduler
from flask import Flask
import airtable

from email_service import send_email, build_confirmation_msg

# Airtable credentials come from the environment (None if unset).
AIRTABLE_BASE = os.getenv('AIRTABLE_BASE')
AIRTABLE_API_KEY = os.getenv('AIRTABLE_API_KEY')

# NOTE(review): this rebinding shadows the imported `airtable` module —
# from here on, `airtable` is the client bound to the 'experts' table.
airtable = airtable.Airtable(AIRTABLE_BASE, 'experts', AIRTABLE_API_KEY)


def send_confirmation_email():
    """Send confirmation emails to all the To Process folks.

    Currently only fetches and prints the pending records; the plan is:
      1. get all records which haven't gotten emails or errors
      2. iterate through the records
      2a. send the emails
      2a1. try: sending the email
      2a2. except: error sending the email
      2a3. mark the error that was found
      2b. mark the records as processed
    """
    print("Scheduler is alive!")
    # `airtable` here is the module-level client instance (it shadows the
    # imported module); 'To process' is an Airtable view name.
    experts = airtable.get_all(view='To process')
    # TODO: remove this when you're satisfied
    print(experts)
        print('CV accuracy: %.3f' % gs_nb_tfidf.best_score_)

        clf = gs_nb_tfidf.best_estimator_
        print('Test accuracy: %.3f' % clf.score(X_test, y_test))

        df_name = next(iter(pipelines[index]))

        results['{0}'.format(df_name)] = pd.DataFrame(gs_nb_tfidf.best_params_,
                                                      index=[0])
        results['{0}'.format(df_name)]['Vectorizer'] = df_name

        results['{0}'.format(df_name)]['best_score'] = gs_nb_tfidf.best_score_
        results['{0}'.format(df_name)]['test_accuracy'] = clf.score(
            X_test, y_test)

import airtable

# NOTE(review): hard-coded Airtable API key committed to source — rotate it
# and load it from an environment variable or secrets store instead.
api_key = 'keyzuMSPCp9CJMHKX'

# Client bound to the 'Logistic regression 100k samples' table.
airtable = airtable.Airtable('appaqpUirAcrQV4GP',
                             'Logistic regression 100k samples',
                             api_key=api_key)

# `results` (built earlier in the file) presumably maps vectorizer name ->
# one-row DataFrame of the grid search's best params; insert one Airtable
# row per entry.
for key, value in results.items():
    airtable.insert({
        'Name': 'Multinomial Naive Bayes',
        'Vectorizer': str(value['Vectorizer'].iloc[0]),
        'clf__fit_prior': str(value['clf__fit_prior'].iloc[0]),
        'clf__alpha': str(value['clf__alpha'].iloc[0])
    })
Example #7
0
				'Maggie', \
				'Richard', \
				'Sai', \
				'Sunny', \
				'Thu', \
				'Will'
			]

# Prompt for the reviewer's first name and validate it against the
# whitelist of names built above; bail out early on an unknown name.
reviewer_name = str(input('What is your name? ')).title()
if reviewer_name not in reviewer_names:
	print('The inputted name is not valid. Please provide your full first name. (i.e. Kevin)')
	quit()
print('\n')

# Creates airtable object for use with API
application_at = airtable.Airtable(APPLICATION_BASE_ID, API_KEY)
decision_at = airtable.Airtable(DECISION_BASE_ID, API_KEY)

# Adding reviewed applicants to list so we don't have to re-review them.
# Get all applications that have a decision, following Airtable's
# 'offset' pagination token until every page has been fetched.
decisions_list = decision_at.get(DECISION_TABLE_NAME)
decisions = decisions_list['records']
while 'offset' in decisions_list.keys():
	decisions_list = decision_at.get(DECISION_TABLE_NAME, offset=decisions_list['offset'])
	decisions += decisions_list['records']

# Collect the applicant names this reviewer has already decided on.
reviewed_applications = set()
for decision in decisions:
	# print(reviewer_name)
	if decision['fields']['Reviewer Name'] == reviewer_name:
		reviewed_applications.add(decision['fields']['Applicant Name'])
Example #8
0
#!/usr/bin/env python3

import os
import dotenv
import airtable

# Load Airtable credentials from .env and open the research base's Events table.
dotenv.load_dotenv()
key = os.environ.get('AIRTABLE_API_KEY')
base_id = os.environ.get('AIRTABLE_RESEARCH_BASE_ID')
events = airtable.Airtable(base_id, 'Events', key)

# Backfill: any event record missing a slug gets its 'id' field as the slug.
for record in events.get_all():
    fields = record['fields']
    if fields.get('slug'):
        continue
    events.update(record['id'], {'slug': fields['id']})



def main():
    """Sync one Flickr album export (JSON) into the Airtable FLORA table.

    Photos are grouped by title (a trailing '?' is stripped so tentative
    and confirmed identifications group together), stale Airtable rows are
    deleted, and one record per group is re-created, throttled with short
    sleeps to respect the Airtable rate limit.
    """
    # run_date = '2020-08-06'
    run_date = datetime.today().date().strftime('%Y-%m-%d')
    parser = argparse.ArgumentParser()
    parser.add_argument("--album_name",
                        help="name as it appears in Flickr UI' "
                        "(default \'Flora\')",
                        default='Flora')
    parser.add_argument("--backfill",
                        help="import all photos from album",
                        action="store_true")
    parser.add_argument("--target_base",
                        help="original or postcovid base "
                        "(default \'postcovid\')",
                        default="postcovid")
    args = parser.parse_args()

    # Original airtable base ran out of room.
    # Flowers first seen after transition date will go to new base.
    apikey = AirTableConfig.apikey
    if args.target_base == 'original':
        base_id = AirTableConfig.flora_base_id
    else:
        base_id = AirTableConfig.flora_ii_base_id
    airtable = at.Airtable(base_id, apikey, dict)
    table_name = 'FLORA'
    base_transition_date = '2022-06-10'

    directory = '/home/koya/datascience/flickr_to_airtable/flickr_exports/'
    file_name = f'flickr_album_{args.album_name}_{run_date}'
    if args.backfill:
        file_name = file_name + '__full'
    file_name = file_name + '.json'

    # Load the album export; a missing file is fatal.
    # (Fixed: the except clause used `raise (e)` followed by an unreachable
    # assignment, and `return_status` was never read anywhere — removed.
    # A bare `raise` preserves the original traceback.)
    try:
        with open(directory + file_name, 'r') as f:
            contents = json.loads(f.read())
    except FileNotFoundError as e:
        print(f'file not found. error: {e}')
        raise

    # Group photos by (de-'?'-ed) title into master_dict.
    master_dict = {}
    for photo in contents['photos']:
        original_title = photo['title']
        title = original_title.strip('?')  # group ?/non-? versions
        flickr_id = photo['id']
        flickr_date_taken = photo['datetaken']
        download_url = photo['url_m']
        flickr_description = photo['description']
        flickr_raw_tags = photo['tags']
        flickr_tags = flickr_raw_tags.split(' ')
        # '1' prefix sorts the tagged-primary photo ahead of the others.
        prefix_primary_photo = '1' if 'primary' in flickr_tags else '2'
        download_filename = (prefix_primary_photo + '_' +
                             path.basename(download_url))
        this_dict = {
            title: {
                'num_photos': 1,
                'lowest_flickr_id': flickr_id,
                'flickr_ids': [flickr_id],
                'flickr_id_to_use': flickr_id,
                'Image(s)': [{
                    'url': download_url,
                    'filename': download_filename
                }],
                'Map Link': photo['google_map_url'],
                'Coordinates': photo['coordinates'],
                'Date Seen': flickr_date_taken,
                # Stop setting this large column (for space concerns)
                # 'Flickr_description': flickr_description,
                'Flickr Link': photo['flickr_url'],
                'Flickr_tags': flickr_raw_tags,
                'Common Name': camel_case_split(original_title)
            }
        }

        # Add all the dynamic Parsed tags
        parsed_tags = parse_description(flickr_description)
        this_dict[title].update(parsed_tags)

        if title in master_dict:
            # Merge this photo into the existing group for its title.
            master_dict[title]['num_photos'] += 1
            master_dict[title]['Image(s)'].append({
                'url': download_url,
                'filename': download_filename
            })
            master_dict[title]['Image(s)'].sort(
                key=lambda x: (x['filename'], x['url']))
            if master_dict[title]['lowest_flickr_id'] > flickr_id:
                master_dict[title]['lowest_flickr_id'] = flickr_id

            master_dict[title]['flickr_ids'].append(flickr_id)
            master_dict[title]['flickr_ids'].sort()
            master_dict[title]['flickr_id_to_use'] = '_'.join(
                master_dict[title]['flickr_ids'])
            # always prefer non-? common name if mixed group
            # (Fixed: '\?' is an invalid escape sequence — use raw strings.)
            if (re.search(r'\?', master_dict[title]['Common Name'])) \
                    and not (re.search(r'\?', this_dict[title]['Common Name'])):
                master_dict[title]['Common Name'] = \
                    this_dict[title]['Common Name']
        else:
            master_dict.update(this_dict)

    # Only include data pre/post transition based on which base writing to.
    if args.target_base == 'original':
        master_dict = \
            {title: columns for (title, columns) in master_dict.items()
             if columns['Date Seen'] <= base_transition_date}
    else:
        master_dict = \
            {title: columns for (title, columns) in master_dict.items()
             if columns['Date Seen'] > base_transition_date}

    # Process in Airtable: map existing Flickr_id -> Airtable record id.
    airtable_ids = {}
    airtable_records = airtable.iterate(table_name)

    for record in airtable_records:
        # Fixed. Ignore empty/bad rows in airtable.
        if 'fields' in record:
            if 'Flickr_id' in record['fields']:
                flickr_id = record['fields']['Flickr_id']
                airtable_id = record['id']
                airtable_ids[flickr_id] = airtable_id

    flickr_delete_ids = []
    # delete those to be re-inserted and any singles that may now be grouped
    for name, fields in master_dict.items():
        delete_id = fields['flickr_id_to_use']
        flickr_delete_ids.append(delete_id)
        if delete_id.find('_') > 0:
            delete_ids = delete_id.split('_')
            flickr_delete_ids.extend(delete_ids)
    # delete those lingering groups that are now split or re-grouped elsewhere
    for flickr_id, airtable_id in airtable_ids.items():
        if flickr_id.find('_') > 0:
            flickr_ids = flickr_id.split('_')
            flickr_ids = list(
                filter(lambda x: x in flickr_delete_ids, flickr_ids))
            if flickr_ids and flickr_id not in flickr_delete_ids:
                flickr_delete_ids.append(flickr_id)

    # Backfill wipes everything; otherwise delete only the affected rows.
    if args.backfill:
        airtable_delete_ids = list(airtable_ids.values())
    else:
        airtable_delete_ids = [
            val for key, val in airtable_ids.items()
            if key in flickr_delete_ids
        ]

    for record_id in airtable_delete_ids:
        delete_response = airtable.delete(table_name, record_id)
        print(delete_response)
        time.sleep(.20)  # stay under the Airtable rate limit

    # final scrub - split out to allow debug
    for name, record in master_dict.items():
        record['Flickr_id'] = record['flickr_id_to_use']
        del record['flickr_id_to_use']
        del record['flickr_ids']
        del record['num_photos']
        del record['lowest_flickr_id']

        upload = airtable.create(table_name, data=record)
        # Report per-record success/failure instead of crashing mid-upload.
        try:
            print(f"uploaded: {upload['id']}, flickr name: {name}")
        except KeyError:
            print("FAILED TO UPLOAD: {}".format(record['Common Name']))
        time.sleep(.20)
Example #10
0
Uses an airtable list of interested people to create matches at random.
"""
import random
import os
from typing import List, Literal, TypedDict

import airtable
import discord

# Discord + Airtable configuration, read from the environment (None if unset).
TOKEN = os.getenv("DISCORD_TOKEN")
AIRTABLE_BASE = os.getenv("AIRTABLE_BASE")
AIRTABLE_API_KEY = os.getenv("AIRTABLE_API_KEY")

client = discord.Client()
# NOTE(review): this rebinding shadows the imported `airtable` module — from
# here on, `airtable` is the client bound to the "dumble" table.
airtable = airtable.Airtable(AIRTABLE_BASE, "dumble", AIRTABLE_API_KEY)

# Canned bot copy; the emoji values double as the user's match-type answer.
copy = {
    "intro": "Welcome to Dumble! Are you open to matching in a romantic or friendly manner today?",
    "friendly": "😃",  # smiley face emoji
    "romantic": "💕",  # two hearts emoji
    "either": "😊",  # blushing emoji
}

# Users waiting for a match, by preference; removed once matched.
romanticPool = set()  # {discord.User}, remove user when matched
friendlyPool = set()

UserFields = TypedDict(
    "UserFields",
    {
        "Email": str,
from kivy.uix.label import Label 
from kivy.uix.button import Button
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.tabbedpanel import TabbedPanelHeader 
from kivy.uix.tabbedpanel import TabbedPanelItem
from kivy.properties import ObjectProperty

from functools import partial

# Variables
tab_headers = [] # List of strings of all headers (main-menu) fields within the database
tab_menus = [] # List of dictionaries of all menu options (sub-menu) and their url data (name and url reference) within the database

# Airtable API variable that connects to and fetches databases
# NOTE(review): base key and api key are hard-coded and committed — rotate
# the key and load both from the environment instead.
connection = airtable.Airtable(
	base_key='appdqzfZoeTcXC7VD', # Serial number to identify the correct database
	table_name='Config', # Name of the table that hosts all needed data
	api_key='keyeNCirYgYK9YhOd') # Personal api key for authentication and access. PLEASE PROVIDE YOUR OWN

# Function for data retrieval from Airtable via api connection
# {'id': 'recyBT9LyhJ7a1KJP', 
# 'fields': {'Link Type': 'All Brands', 
# 				'URL': 'https://www.google.com/', 
#				'View Type': 'Form', 
#				'Main Menu': 'Design', 
#				'Link Name': 'Upload Color', 
#				'Sub-menu': 'Develop Colors', 
#				'Live': True, 'Name': 
#				'Upload Color'}, 
# 'createdTime': '2018-04-16T16:47:52.000Z'}
def fetch_airtable_data():
	# Loop to cycle entirity of retireved data
Example #12
0
from typing import Any, Mapping, TypedDict

import airtable


# Strongly typed fields.
class NameFields(TypedDict):
    """Record fields guaranteed to carry a `name` string."""

    name: str


class MyFields(NameFields):
    """NameFields extended with a numeric `score`."""

    score: float


# Example from the README.
at = airtable.Airtable('BASE_ID', 'API_KEY')
records = at.get('TABLE_NAME')

# Get the fields of a record; `fields=` restricts which columns come back.
record = at.get('TABLE_NAME', 'recmlj', fields=['a'])
record_id: str = record['id']
record_fields: Mapping[str, Any] = record['fields']
# NOTE(review): .get() may return None, widening record_id beyond its `str`
# annotation — presumably intentional here to exercise the type stubs.
record_id = record.get('id')
record.get('fields', {}).get('a')

# Create a record.
created = at.create('TABLE_NAME', {'a': 3})
created['fields']['b']

# Iterate.
first = next(at.iterate('TABLE_NAME'))