Example #1
import datetime

import bs4 as bs
import requests

import helper  # project-local utilities shared by all of these scrapers


def scrape_qual():

    url = "https://www.yaliscafe.com"
    source = requests.get(url)
    soup = bs.BeautifulSoup(source.content, features='html.parser')
    mydivs = soup.find_all("div", {"class": "textwidget"})
    cur = mydivs[1].p.getText()
    # The widget text presumably reads like "...: 7am–6pm ...: 8am–5pm";
    # grab the seven characters after each colon and split on the en dash.
    x1 = cur.split(":")[1][1:8]
    x2 = cur.split(":")[2][1:8]
    x11, x12 = x1.split("–")
    x21, x22 = x2.split("–")

    times = [x11, x12, x21, x22]
    yali_times = []
    c, d, a, b = [helper.standarize_timestring(t) for t in times]
    last_sunday = helper.get_last_sunday()

    # Monday through Friday use the first interval.
    for i in range(1, 6):
        day = last_sunday + datetime.timedelta(days=i)
        yali_times.append(helper.build_time_interval(c, d, day))

    # Saturday and Sunday use the second interval.
    for i in (6, 0):
        day = last_sunday + datetime.timedelta(days=i)
        yali_times.append(helper.build_time_interval(a, b, day))

    return yali_times
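
Every example on this page calls into the same project-local helper module, whose source is not shown here. The following is a minimal sketch of what the three most widely used helpers might look like, inferred only from the call sites in these examples; the names (including the project's spelling of standarize) are real, but every body below is an assumption:

import datetime
import re


def standarize_timestring(raw):
    # Assumed: normalizes strings such as "7am" or "7:30 pm" to "h:mm am/pm".
    m = re.match(r"(\d{1,2})(?::(\d{2}))?\s*(am|pm)", str(raw).strip().lower())
    if not m:
        return raw  # pass sentinel values such as "0" through untouched
    hour, minute, meridiem = m.group(1), m.group(2) or "00", m.group(3)
    return "{}:{} {}".format(hour, minute, meridiem)


def get_last_sunday():
    # Assumed: the date of the most recent Sunday (today, if today is Sunday).
    today = datetime.date.today()
    return today - datetime.timedelta(days=(today.weekday() + 1) % 7)


def build_time_interval(open, close, date):
    # Assumed: packs one open/close pair plus its date into the dictionary
    # shape the scrapers append. The "open" parameter deliberately shadows
    # the builtin so the keyword call sites (open=..., close=...) still work.
    return {"open": open, "close": close, "date": date}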
Example #2
import requests

import helper

# get_library_url and parse_hours are assumed to be defined elsewhere in
# this module.


def scrape_library_hours(libraries, library_names):
    '''
    Scrapes library hours from each library's URL and stores them in the
    libraries dictionary.
    '''
    for library_name in library_names:
        library_dict = libraries[library_name]
        library_id = library_dict["id"]
        url = get_library_url(library_id)
        try:
            response = requests.get(url)
            response_page_json = response.json()
            response_json = response_page_json[0]
            hours_dict = response_json["hours"]
            for days in hours_dict:
                day_hours, notes = parse_hours(days["day"])
                for item in day_hours:
                    # Attach the day's notes to each interval before storing it.
                    interval = helper.build_time_interval(*item)
                    interval["notes"] = notes
                    library_dict["open_close_array"].append(interval)

        except Exception as e:
            # Edge cases: the library has no start/end time (just "closed" or
            # other free text), or the JSON API URL returns multiple
            # dictionaries, which json.loads() rejects.
            print(str(e))
            print("Exception occurred for library with id: {0}.".format(
                library_id))
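
A hedged usage sketch; the dictionary shape is inferred from the attribute accesses above, and the id value is hypothetical:

libraries = {
    "Doe Library": {"id": 174, "open_close_array": []},  # hypothetical id
}
scrape_library_hours(libraries, ["Doe Library"])
print(libraries["Doe Library"]["open_close_array"])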
Example #3
import re
from datetime import datetime

import requests
from bs4 import BeautifulSoup

import helper


def secondary_scrapper(facility, url):
    ## This is the secondary scraper, used when MULTIPLE facilities are
    ## loaded in one table per page.

    open_close_array = []
    page = requests.get(url)
    soup = BeautifulSoup(page.content, "html.parser")
    content = soup.find(class_="entry-content")
    list_of_days = content.find_all(class_="fancy_header")
    content = soup.find_all(class_="minimal_table")

    for table, header in zip(content, list_of_days):
        # Headers read like "Monday, January 6"; strptime defaults the year
        # to 1900, so patch in the current year.
        date = header.find(class_="slategrey").get_text()
        revised_date = datetime.strptime(date, "%A, %B %d").date()
        revised_date = revised_date.replace(year=datetime.now().year)

        for row in table.find_all("tr"):
            cells = row.find_all("td")
            if not cells:
                continue  # skip the header row
            time = cells[0].get_text().split("-")
            # Insert a space between the digits and the am/pm suffix,
            # e.g. "6am" -> "6 am".
            start_time = re.sub(r"([0-9:]+(\.[0-9]+)?)", r" \1 ", time[0]).strip()
            end_time = re.sub(r"([0-9:]+(\.[0-9]+)?)", r" \1 ", time[1]).strip()
            open_close_time = helper.build_time_interval(start_time, end_time,
                                                         revised_date)
            location = cells[1].get_text()
            event = cells[2].get_text()
            if location == facility and event_helper(event):
                open_close_array.append(open_close_time)

    return open_close_array
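
event_helper is not shown on this page. A minimal sketch of the assumed behavior, filtering out events that should not count toward open hours (the event names are hypothetical):

def event_helper(event):
    # Assumed: True only for events that count as open recreation time.
    open_events = ("Open Recreation", "Lap Swim")  # hypothetical event names
    return any(name in event for name in open_events)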
Example #4
from datetime import datetime

import requests
from bs4 import BeautifulSoup

import helper


def primary_scrapper(url):
    ## This is the primary scraper for athletic facilities. It is used
    ## when only a SINGLE facility is loaded per webpage/table.

    open_close_array = []
    page = requests.get(url)
    soup = BeautifulSoup(page.content, "html.parser")
    content = soup.find(class_="minimal_table")

    for row in content.find_all("tr"):
        cells = row.find_all("td")
        # Dates read like "Mon, Jan 6"; strptime defaults the year to 1900,
        # so patch in the current year.
        raw_day = cells[0].get_text().replace(",", "")
        day_converted = datetime.strptime(raw_day, "%a %b %d").date().replace(
            year=datetime.now().year)
        if cells[1].get_text() == "CLOSED":
            # A closed facility contributes no time interval.
            continue
        # Hours read like "6AM - 10PM"; drop any leading zero from the hour.
        raw_open, raw_close = cells[1].get_text().split("-")
        start_time = datetime.strptime(raw_open.strip(),
                                       "%I%p").strftime("%I:%M %p").lstrip("0")
        end_time = datetime.strptime(raw_close.strip(),
                                     "%I%p").strftime("%I:%M %p").lstrip("0")
        open_close_time = helper.build_time_interval(start_time.lower(),
                                                     end_time.lower(),
                                                     day_converted)
        open_close_array.append(open_close_time)

    return open_close_array
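
A hedged usage sketch; the URL is hypothetical and would normally point at a single facility's hours page:

hours = primary_scrapper("https://recwell.berkeley.edu/example-facility-hours/")  # hypothetical URL
for interval in hours:
    print(interval)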
Example #5
import datetime
import pickle
import re

from googleapiclient.discovery import build

import helper


def scrape():
    # 'Z' indicates UTC time; RFC 3339 needs two-digit fields, hence "07:00:00".
    startTime = helper.get_last_sunday().isoformat() + 'T' + "07:00:00" + 'Z'
    endTime = helper.get_next_monday().isoformat() + 'T' + "07:00:00" + 'Z'

    class_types = {
        "Barre-Fit":"STRENGTH",
        "Barre-Fit Express":"STRENGTH",
        "Barre-Pilates Fusion":"STRENGTH",
        "BollyX":"DANCE",
        "Cardio & Core":"CARDIO",
        "Cardio & Core Express":"CARDIO",
        "Dance Jam":"DANCE",
        "Early Bird Yoga":"MIND/BODY",
        "Legs & Glutes":"CARDIO",
        "Mat Pilates":"CARDIO",
        "PiYo":"MIND/BODY",
        "Power Yoga":"MIND/BODY",
        "Simple Strong | TBC":"STRENGTH",
        "Upper Body Blast":"HIIT",
        "UrbanKick":"HIIT",
        "Yoga":"MIND/BODY",
        "Yoga & Meditation":"MIND/BODY",
        "Yoga Stretch":"MIND/BODY",
        "Zumba":"DANCE",
    }

    # Output list = level 0
    output = []

    # Date dictionary = level 1
    date_dictionary = {}

    with open('keys/bm-calendar-token-pickle.pickle', 'rb') as token:
        creds = pickle.load(token)

    service = build('calendar', 'v3', credentials=creds)

    events_result = service.events().list(
        calendarId='*****@*****.**',
        timeMin=startTime, timeMax=endTime,  # bound the query to this week
        singleEvents=True, orderBy='startTime').execute()
    events = events_result.get('items', [])

    if not events:
        print('No upcoming events found.')
    for event in events:
        class_dictionary = {}

        # Strip HTML tags from the event description before slicing it up.
        bcalDesc = re.sub(re.compile('<.*?>'), ' ', event["description"])

        class_trainer = bcalDesc.split("How to Participate:")[0][len("Instructor: "):]
        zoom_link = event["description"].split('<a href="')[1].split('"')[0]
        class_name = event["summary"]
        class_description = bcalDesc.split("Class Description:")[-1]

        # dateTime looks like "2021-02-01T07:30:00-08:00"; the [:-9] slice
        # drops the seconds and UTC offset, leaving "HH:MM".
        date = datetime.datetime.strptime(event["start"]["dateTime"].split("T")[0], '%Y-%m-%d')
        start_time = event["start"]["dateTime"].split("T")[1][:-9]
        end_time = event["end"]["dateTime"].split("T")[1][:-9]
        open_close = helper.build_time_interval(start_time, end_time, date)

        class_dictionary["trainer"] = class_trainer
        class_dictionary['class'] = class_name
        class_dictionary['location'] = "\nSee recsports.berkeley.edu/online"
        class_dictionary['link'] = zoom_link
        class_dictionary['class type'] = class_types.get(class_name, "ALL-AROUND WORKOUT")
        class_dictionary['date'] = event["start"]["dateTime"].split("T")[0]
        class_dictionary['description'] = class_description
        class_dictionary["open_close_array"] = open_close
        output.append(class_dictionary)


    
    # Data processing: group classes by date to match the requested schema.
    for class_dictionary in output:
        date_key = class_dictionary.get("date")
        date_dictionary.setdefault(
            date_key, {})[class_dictionary.get('class')] = class_dictionary

    return date_dictionary
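
The pickled credentials loaded above have to be minted ahead of time. A hedged sketch of one way to do that with google-auth-oauthlib; the client-secrets path and scope are assumptions, only the pickle path comes from the code above:

import pickle

from google_auth_oauthlib.flow import InstalledAppFlow

SCOPES = ['https://www.googleapis.com/auth/calendar.readonly']  # assumed scope

# "keys/credentials.json" is a hypothetical path to an OAuth client secret.
flow = InstalledAppFlow.from_client_secrets_file('keys/credentials.json', SCOPES)
creds = flow.run_local_server(port=0)
with open('keys/bm-calendar-token-pickle.pickle', 'wb') as token:
    pickle.dump(creds, token)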
Example #6
import os
import re
from os.path import abspath

import pandas as pd

import helper


def scrape():
    '''
        Reads the data from the resources CSV and returns it as a dictionary.
    '''
    # Read data from file "resources.csv".
    parent_working_directory = os.path.dirname(abspath(__file__))

    data = pd.read_csv(os.path.join(parent_working_directory, "csv_data",
                                    "resources.csv"),
                       engine='python')
    data = data.fillna("Not Available")

    #Extract column names
    fields = data.columns.values

    # Group resources by category; each resource is a dictionary keyed by
    # column name. Loop through each row in the CSV.
    all_resources = {}
    for category in data["category"].unique():
        if len(category.strip()) == 0:
            continue

        category_resources = {}
        for row in data[data["category"] == category].iterrows():
            new_resource = {}

            # Copy the first 21 columns into the dictionary.
            for i in range(21):
                new_resource[fields[i]] = row[1][i]

            # Collapsing monday_hours .... sunday_hours and converting to interval
            intervals = []
            dates = helper.get_this_week_dates()
            hours_df_key = [
                "sunday", "monday", "tuesday", "wednesday", "thursday",
                "friday", "saturday"
            ]

            for day, date in zip(hours_df_key, dates):
                csv_read = new_resource["{}_hours".format(day)]
                if str(csv_read) != "nan":
                    for time in csv_read.split(","):
                        if not re.search(r'\d', time):
                            # No digits means a "Closed" style entry.
                            intervals.append(
                                helper.build_time_interval(open="Closed",
                                                           close="Closed",
                                                           date=date))
                        else:
                            parts = time.split("-")
                            intervals.append(
                                helper.build_time_interval(
                                    open=helper.standarize_timestring(parts[0]),
                                    close=helper.standarize_timestring(parts[1]),
                                    date=date))
                del new_resource["{}_hours".format(day)]

            # Normalize the Yes/No columns to booleans (one pass is enough).
            for flag in ("by_appointment", "on_campus", "Cal1Card_Accepted",
                         "EatWell_Accepted"):
                if isinstance(new_resource[flag], str):
                    new_resource[flag] = "Yes" in new_resource[flag]

            # Turn bare addresses into mailto: links.
            if ("@" in new_resource["email"]
                    and "mailto:" not in new_resource["email"]):
                new_resource["email"] = "mailto:" + new_resource["email"]

            new_resource["open_close_array"] = intervals
            category_resources[new_resource['name']] = new_resource

        all_resources[category] = category_resources

    return all_resources
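
A hedged, abbreviated sketch of the CSV shape this reader assumes; the column names are inferred from the lookups in the code, the values are made up, and the real file has more columns (21 fixed fields plus one hours column per weekday):

category,name,email,by_appointment,on_campus,Cal1Card_Accepted,EatWell_Accepted,monday_hours,sunday_hours
Food,Example Pantry,pantry@example.edu,Yes,Yes,No,Yes,9:00am-5:00pm,Closed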
Example #7
import json
import urllib.request

import helper

# get_library_url and Date are assumed to be defined elsewhere in this module.


def scrape():
    ## Moffitt scraper function, extracts data from the library JSON API.

    url = get_library_url(179)
    data = json.load(urllib.request.urlopen(url))

    # Holds all the scraped open/close times.
    output_array = []

    # Holds all the data that will be exported to JSON format.
    JSON_array = {}

    # Scraped master list of open/close times.
    master_list = data[0]["hours"]

    # Loop through the master list of hours.
    for i in master_list:

        # Default start and end values.
        start_time = i["day"]["start"]
        end_time = i["day"]["end"]
        date = i["day"]["day"]
        date_converted = Date(date)

        # Edge case 1: start time is None, e.g. '9am - '.
        if start_time is None:
            start_time = i["day"]["note"]

        # Edge case 2: end time is None, e.g. 24 hours.
        if end_time is None and start_time != i["day"]["note"]:
            end_time = i["day"]["note"]

        # Edge case 3: start time is "24 hours".
        if start_time == "24 hours" and end_time is None:
            start_time = "12:00 am"
            end_time = "11:59 pm"

        if end_time is None:
            end_time = "11:59 pm"

        # Edge case 4A: library is closed, but opens the next day.
        if start_time is None and end_time == '11:59 pm':
            start_time = "11:59 pm"

        # Edge case 4B: library is closed, and closed the next day too.
        if start_time is None and end_time is None:
            start_time = "11:59 pm"
            end_time = "11:59 pm"

        # Edge case 5: Fridays, "Closes at X" style text.
        if "Close" in start_time:
            time_parsing = start_time.split("at ")
            start_time = "12:00 am"
            end_time = time_parsing[1]

        output = helper.build_time_interval(start_time, end_time,
                                            date_converted)
        output_array.append(output)

    JSON_array.update({
        "name": "Moffitt Library",
        "latitude": 37.87277,
        "longitude": -122.260244,
        "phone": "510-642-5072",
        "picture":
        "https://www.lib.berkeley.edu/sites/default/files/moffitt_library_0_0.jpg",
        "description":
        "Moffitt Library, located next to Memorial Glade, is one of the busiest campus libraries with undergraduate course reserves, computer lab, makerspace, media center, copy center, campus classrooms, and convenient access to the research collections in the Main (Gardner) Stacks.  Moffitt floors 4 & 5, accessed through the east entrance are open 24 hours during the fall and spring semester and are snack and drink friendly. Reserved for UC Berkeley students and faculty, Moffitt serves students of all majors and is open the longest hours.  Campus visitors are welcome at the Free Speech Movement (FSM) Café and popular Newspaper Display Wall near the 3rd floor south entrance.",
        "address": "350 Moffitt Library, Berkeley, CA 94720",
        "open_close_array": output_array,
    })
    return JSON_array
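
Date is a project-local constructor not shown on this page. A minimal sketch of the assumed behavior, parsing the API's "YYYY-MM-DD" day strings:

import datetime


def Date(day_string):
    # Assumed: converts a "YYYY-MM-DD" string into a datetime.date.
    return datetime.datetime.strptime(day_string, "%Y-%m-%d").date()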
Example #8
import json

import requests
import unidecode
from bs4 import BeautifulSoup

import helper


def scrape():
    ## Gym classes scraper function, extracts data from the Mindbody widget.
    api_call = "https://widgets.mindbodyonline.com/widgets/schedules/8d3262c705.json?mobile=false&version=0.1"
    page = requests.get(api_call)
    page.encoding = 'utf-8'
    soup = BeautifulSoup(json.loads(page.content)['contents'], 'html.parser')
    data = soup.find_all(class_="DropIn")

    # Output dictionary = level 0
    output = []

    # Date dictionary = level 1
    date_dictionary = {}

    for i in data:

        # Class dictionary = level 2
        class_dictionary = {}

        class_content = i.find(class_="mbo_class")
        class_time = i.find(class_="hc_time")
        day = class_time.find(
            class_="hc_starttime").get("data-datetime").split("T")[0].replace(
                '"', "")

        ##Class Type
        class_type = unidecode.unidecode(
            i.find(class_="visit_type").get_text())
        class_type = class_type.replace("\n", "")

        ##Trainer's name
        class_trainer = unidecode.unidecode(
            i.find(class_="trainer").get_text())
        class_trainer = class_trainer.replace("\n", "")

        ## Class name and location; the location, when present, follows the
        ## name in parentheses.
        class_name_and_location = class_content.find(
            class_="classname").get_text().split("(")
        class_name = class_name_and_location[0].rstrip('\n')
        class_location = ""
        if len(class_name_and_location) > 1:
            class_location = class_name_and_location[1].split(")")[0]

        ## Second API call to get class content details (descriptions)
        class_description_url = class_content.find(
            "a", {
                'data-hc-open-modal': 'modal-iframe'
            }).get("data-url")
        content_detail = requests.get(class_description_url)
        soup_content = BeautifulSoup(content_detail.content, 'html.parser')

        ##Class description
        class_description = unidecode.unidecode(
            soup_content.find(
                class_="class_description").get_text().rstrip('\n'))

        ##Class start time, multiple cases needed due to inconsistent formatting.
        class_time_frame = class_time.get_text()
        class_time_frame = class_time_frame.split("-")
        start_time = class_time_frame[0].lower().strip()
        end_time = class_time_frame[1].lower().strip()

        #Build time interval
        open_close = helper.build_time_interval(start_time, end_time,
                                                Date(day))

        class_dictionary["trainer"] = class_trainer
        class_dictionary['class'] = class_name
        class_dictionary['location'] = class_location
        class_dictionary['class type'] = class_type
        class_dictionary['date'] = day
        class_dictionary['description'] = class_description
        class_dictionary["open_close_array"] = open_close
        output.append(class_dictionary)

    # Data processing: group classes by date to match the requested schema.
    for class_dictionary in output:
        date_key = class_dictionary.get("date")
        date_dictionary.setdefault(
            date_key, {})[class_dictionary.get('class')] = class_dictionary

    return date_dictionary
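
A hedged sketch of consuming the returned schema (dates map to classes, classes map to their detail dictionaries):

schedule = scrape()
for date, classes in schedule.items():
    for class_name, info in classes.items():
        print(date, class_name, info["trainer"], info["class type"])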
Example #9
import datetime

import bs4 as bs
import requests

import helper


def scrape_others():
    '''Objective: Scrape the data on convenience stores and campus restaurants.
       Format: Two dictionaries whose arrays hold (Open, Close, DatetimeObject)
       entries built with the helper functions.
       How-to: Find all tables and go through their rows; for each tr, find
       the restaurant, match it with the key in the corresponding dictionary,
       and append its opening/closing time.
       Contributed by Varun A'''
    conv_stores = {
        "Bear Market (Café 3)": [],
        "CKCub\n\t\t\t(Clark Kerr)": [],
        "Cub Market (Foothill)": [],
        "The Den, featuring Peet's Coffee & Tea (Crossroads)": []
    }
    campus_rests = {
        "The Golden Bear Café": [],
        "Brown's": [],
        "Terrace Café": [],
        "Common Grounds": [],
        "The Pro Shop": []
    }
    months = {
        1: "jan",
        2: "feb",
        3: "mar",
        4: "apr",
        5: "may",
        6: "jun",
        7: "jul",
        8: "aug",
        9: "nov",
        10: "oct",
        11: "nov",
        12: "dec"
    }
    day = str(helper.get_last_sunday().day)
    monthnum = helper.get_last_sunday().month
    month = months[monthnum]
    url = "https://caldining.berkeley.edu/locations/hours-operation/week-of-" + month + day
    source = requests.get(url)
    soup = bs.BeautifulSoup(source.content, features='html.parser')
    tables = soup.find_all('table', {'class': 'spacefortablecells'})
    # Table index 4 holds the convenience stores; index 5 the campus restaurants.
    for i in range(4, 6):
        tab = tables[i]
        table_body = tab.find('tbody')
        rows = table_body.find_all('tr')
        rows = rows[1:]  # skip the header row
        for row in rows:
            cols = row.find_all('td')
            cols = [ele.text.strip() for ele in cols]
            if len(cols) == 0:
                continue
            key = cols[0]

            for j in range(1, len(cols)):
                if i == 4:  # convenience stores
                    if cols[j] == "Closed":
                        a, b = '0', '0'
                    else:
                        times = cols[j].split(';')
                        if len(times) == 1:
                            interval = times[0].split('–')
                            # Trim the site-specific prefix from the closing time.
                            interval[1] = interval[1][4:]
                            a = interval[0].replace(" ", "").replace(".", "")
                            b = interval[1].replace(" ", "").replace(".", "")
                        else:
                            # Two intervals in one cell: append the first here;
                            # the second falls through to the shared append below.
                            first = times[0].split('–')
                            second = times[1].split('–')
                            c = helper.standarize_timestring(
                                first[0].replace(" ", "").replace(".", ""))
                            d = helper.standarize_timestring(
                                first[1].replace(" ", "").replace(".", ""))
                            a = helper.standarize_timestring(
                                second[0].replace(" ", "").replace(".", ""))
                            b = helper.standarize_timestring(
                                second[1].replace(" ", "").replace(".", ""))
                            x = helper.get_last_sunday() + datetime.timedelta(days=j)
                            conv_stores[key].append(
                                helper.build_time_interval(c, d, x))
                    x = helper.get_last_sunday() + datetime.timedelta(days=j)
                    conv_stores[key].append(helper.build_time_interval(a, b, x))
                if i == 5:  # campus restaurants
                    if cols[j] in ("Closed", "Closed."):
                        a, b = '0', '0'
                    else:
                        times = cols[j].split(',')
                        if len(times) == 1:
                            interval = times[0].split('–')
                            # Trim the site-specific prefix from the closing time.
                            interval[1] = interval[1][4:]
                            a = interval[0].replace(" ", "").replace(".", "")
                            b = interval[1].replace(" ", "").replace(".", "")
                        else:
                            first = times[0].split('–')
                            second = times[1].split('–')
                            c = helper.standarize_timestring(second[0])
                            d = helper.standarize_timestring(second[1])
                            a = helper.standarize_timestring(first[0])
                            b = helper.standarize_timestring(first[1])
                            x = helper.get_last_sunday() + datetime.timedelta(days=j)
                            campus_rests[key].append(
                                helper.build_time_interval(c, d, x))
                    x = helper.get_last_sunday() + datetime.timedelta(days=j)
                    campus_rests[key].append(helper.build_time_interval(a, b, x))
    keys_swap("The Golden Bear Café", "The Golden Bear Cafe", campus_rests)
    return campus_rests, conv_stores
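
keys_swap is another helper not shown here. A minimal sketch of the assumed behavior, renaming a dictionary key in place:

def keys_swap(old_key, new_key, dictionary):
    # Assumed: moves the value from old_key to new_key, dropping old_key.
    if old_key in dictionary:
        dictionary[new_key] = dictionary.pop(old_key)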
Example #10
import datetime
import re

import bs4 as bs
from selenium import webdriver

import helper

# month_num_to_month_long is assumed to be a module-level mapping from month
# number to the month's full lowercase name.


def scrape_times(index):
    '''Objective: Scrape the dining hours table selected by index.
       Format: An array of (Open, Close, DatetimeObject) intervals, each
       tagged with a Breakfast/Lunch/Dinner note.
       How-to: Find all tables and go through their rows; for each tr,
       determine whether it is Breakfast/Lunch/Dinner and append the opening
       and closing times, adding a datetime object using helper functions.
       Contributed by Varun A'''
    day = str(helper.get_last_sunday().day)
    monthnum = helper.get_last_sunday().month
    month = month_num_to_month_long[monthnum]
    url = "https://caldining.berkeley.edu/locations/hours-of-operation/week-of-{0}-{1}/".format(
        month, day)

    # Selenium is used here, presumably because the hours tables are rendered
    # client-side.
    driver = webdriver.Chrome(executable_path='./chromedriver')
    driver.get(url)
    # Scroll to the bottom so lazily loaded content is rendered.
    driver.execute_script(
        "window.scrollTo(0, document.body.scrollHeight);"
        "var lenOfPage=document.body.scrollHeight;return lenOfPage;")
    results = driver.page_source

    driver.quit()

    soup = bs.BeautifulSoup(results, features='html.parser')
    # Returned format: [(Open, close, date, Breakfast/lunch/dinner), ...]
    data = []  # This is where we'll store the intervals.
    tables = soup.find_all('table')

    tab = tables[index]  #Selects table based on index
    table_body = tab.find('tbody')
    rows = table_body.find_all('tr')

    for row, meal in zip(rows, ["Breakfast", "Lunch", "Dinner"]):
        cols = row.find_all('td')
        cols = [ele.text.strip() for ele in cols]

        for j in range(len(cols)):
            if cols[j] == "Closed":
                a, b = '0', '0'
                bam = helper.get_last_sunday()
                x = bam + datetime.timedelta(days=j)
                current_time = helper.build_time_interval(a, b, x)
                current_time["notes"] = meal

                data.append(current_time)
            else:
                times = cols[j].split(',')
                times = [t.replace(".", "") for t in times]
                # Strip regular and non-breaking spaces.
                times = [t.replace(" ", "") for t in times]
                times = [t.replace("\xa0", "") for t in times]
                times = [t.split("–") for t in times]

                for time in times:
                    # Pull out the am/pm suffixes and the numeric parts
                    # separately.
                    am_pm_arr = [
                        "".join(re.findall("[a-zA-Z]+",
                                           t))[:2].strip().lower()
                        for t in time
                    ]
                    time = [
                        "".join(re.findall("[0-9:]+", t)).strip().lower()
                        for t in time
                    ]

                    # If the first time has no am/pm of its own (e.g.
                    # "11–2pm"), borrow the closing time's suffix for both.
                    if len(am_pm_arr[0]) != 2:
                        time[0] = time[0] + am_pm_arr[-1]
                        time[1] = time[1] + am_pm_arr[-1]
                    else:
                        time[0] = time[0] + am_pm_arr[0]
                        time[1] = time[1] + am_pm_arr[1]

                    a, b = helper.standarize_timestring(
                        time[0]), helper.standarize_timestring(time[1])

                    bam = helper.get_last_sunday()
                    x = bam + datetime.timedelta(days=j)
                    current_time = helper.build_time_interval(a, b, x)
                    current_time["notes"] = meal

                    data.append(current_time)

    return data
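
A hedged usage sketch; which index maps to which dining hall depends on the page's table order, so the 0 here is a made-up value:

intervals = scrape_times(0)  # hypothetical table index
for interval in intervals:
    print(interval["notes"], interval)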