Example #1
    def scrape(self, browser, date=None):
        """ Scrape function. Returns a data object with the reported cases. Uses Selenium and BeautifulSoup to extract the data. """
        if date is None:
            date = datetime.datetime.now()  # evaluate the default per call, not once at import time
        result = dataobject.DataObject(self, date)
        soup = get_html(self.source_website)

        #saveToFile(soup.prettify(), "output.txt")
        result.screenshot_path = self.screenshot(browser)

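        # Locate the statistics table via its "Geografsk område" (geographic area) header cell, then flatten all cells to text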
        objects = soup.find("td", text=re.compile("Geografsk område")).parent.parent.find_all("td")
        table = [i.text for i in objects]

        denmark_cases = clean_number(table[7])
        denmark_deaths = clean_number(table[9])
        denmark_tested = clean_number(table[6])

        faraoe_cases = clean_number(table[12])
        faraoe_deaths = clean_number(table[14])
        faraoe_tested = clean_number(table[11])

        greenland_cases = clean_number(table[17])
        greenland_deaths = clean_number(table[19])
        greenland_tested = clean_number(table[16])

        result.cases = denmark_cases + faraoe_cases + greenland_cases
        result.deaths = denmark_deaths + faraoe_deaths + greenland_deaths
        result.tested = denmark_tested + faraoe_tested + greenland_tested

        return result
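The helper functions used throughout these scrape examples (get_html, clean_number, and friends) are not shown on this page. A minimal sketch of what the two most common ones presumably do, assuming requests and BeautifulSoup, would be:

import re
import requests
from bs4 import BeautifulSoup

def get_html(url):
    # Fetch the page and hand it to BeautifulSoup (the real helper may add
    # headers, retries, or use a different parser).
    return BeautifulSoup(requests.get(url).text, "html.parser")

def clean_number(text):
    # Strip spaces, thousands separators and other non-digit characters,
    # e.g. "1 234" or "1.234" -> 1234; -1 is used as "unknown" elsewhere
    # in these examples.
    digits = re.sub(r"\D", "", text)
    return int(digits) if digits else -1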
Example #2
    def __init__(self, *args, **kwargs):

        tk.Tk.__init__(self, *args, **kwargs)

        container = tk.Frame(self)

        container.pack(side="top", fill="both", expand = True)

        container.grid_rowconfigure(0, weight=1)  # makes everything expand by itself with pack
        container.grid_columnconfigure(0, weight=1)


        #  Make a new database file for each month
        self.db_month = datetime.date.today().isoformat()[:-3]
        #  Don't store more than 6 months of data. Delete the oldest database file when it turns over.
        dbs = sorted(f for f in os.listdir(".") if os.path.isfile(f) and f.endswith(".db"))
        if len(dbs) > 5:
            oldest = dbs[0]
            if oldest != f"data_{self.db_month}.db":
                os.remove(oldest)  # delete the oldest file, never the current month's
        #  Connect to database
        self.database = sqlite3.connect(f"data_{self.db_month}.db")
        self.cursor = self.database.cursor()
        #  Check if data table exists in database and create it if not
        self.cursor.execute("""CREATE TABLE IF NOT EXISTS data
                    (time date, windspeed int, temperature int, humidity int, pitch int,
                        airpressure int, dragforce int, liftforce int)""")
        self.database.commit()

        #  Initialize communication thread
        self.stop_receiver_event = threading.Event()
        self.sensor_data = dataobject.DataObject()
        self.comm = idmserial.SerialCommunicator(self.sensor_data, self.stop_receiver_event)

        #  Start database write loop
        self.after(2000, self.amend_database)



        self.frames = {}

        for F in (StartPage, PageOne, PageTwo, PageThree, PageFour, PageFive):  # the pages to be created in the program

            frame = F(container, self)


            self.frames[F] = frame

            frame.grid_rowconfigure(0, weight=0)  # makes everything expand by itself with grid
            frame.grid_columnconfigure(0, weight=1)

            frame.grid(row=0, column=0, sticky="nsew")

        self.show_frame(StartPage)  # show the first page
        self.title("IDM")
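The database write loop scheduled with self.after(2000, self.amend_database) is not part of this snippet. A hypothetical sketch of such a method, assuming the shared DataObject exposes one attribute per column of the data table (these field names are assumptions), could look like:

    def amend_database(self):
        # Sketch only: write the latest sensor readings as one row, then
        # reschedule this method so it runs roughly every two seconds.
        d = self.sensor_data
        self.cursor.execute(
            "INSERT INTO data VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
            (datetime.datetime.now().isoformat(), d.windspeed, d.temperature,
             d.humidity, d.pitch, d.airpressure, d.dragforce, d.liftforce))
        self.database.commit()
        self.after(2000, self.amend_database)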
Example #3
    def scrape(self, browser):
        """ Scrape function. Returns a data object with the reported cases. Uses Selenium and Beautifulsoup to extract the data """ 
        result = dataobject.DataObject(self)
        soup = get_parsed_javascript_html(self.source_website, browser)

        result.source_update_date = date_formatter(soup.find("strong").text)

        result.cases = clean_number(match(soup.text, "Total cazuri confirmate {}"))
        result.deaths = clean_number(match(soup.text, "Persoane decedate {}"))

        return result
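The match helper takes a template containing a {} placeholder and pulls the corresponding value out of the page text. It is not defined on this page, so the following is only a sketch of the assumed behaviour:

import re

def match(text, pattern):
    # Turn a template such as "Total cazuri confirmate {}" into a regex with
    # a capture group in place of "{}" and return the first captured value
    # (the real helper may treat whitespace or missing matches differently).
    regex = re.escape(pattern).replace(r"\{\}", r"([\d\s.,]+)")
    found = re.search(regex, text)
    return found.group(1) if found else ""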
Example #4
    def scrape(self, browser):
        """ Scrape function. Returns a data object with the reported cases. Uses Selenium and Beautifulsoup to extract the data """ 
        result = dataobject.DataObject(self)
        soup = get_html(self.source_website)

        result.source_update_date = date_formatter(soup.find("p", id="last-modified-datetime").text) #Minute is off
        result.tested = clean_number(soup.find("p", id="count-test").text)
        result.cases = clean_number(soup.find("p", id="count-sick").text)
        result.recovered = clean_number(soup.find("p", id="count-recover").text)
        result.deaths = clean_number(soup.find("p", id="count-dead").text)

        return result
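date_formatter is likewise assumed rather than shown; a plausible sketch using python-dateutil (the real helper probably also copes with locale-specific month names):

from dateutil import parser

def date_formatter(text):
    # Parse a free-form date string scraped from the page into a datetime.
    return parser.parse(text, dayfirst=True, fuzzy=True)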
Example #5
    def scrape(self, browser):
        """ Scrape function. Returns a data object with the reported cases. Uses Selenium and Beautifulsoup to extract the data """ 
        result = dataobject.DataObject(self)
        soup = get_parsed_javascript_html(self.source_website, browser)

        result.cases = clean_number(soup.find("span", class_="d-map__indicator d-map__indicator_sick").parent.find("h3").next)
        result.recovered = clean_number(soup.find("span", class_="d-map__indicator d-map__indicator_healed").parent.find("h3").next)
        result.deaths = clean_number(soup.find("span", class_="d-map__indicator d-map__indicator_die").parent.find("h3").next)

        result.source_update_date = date_formatter(soup.find("small", text=re.compile("По состоянию на")).string + " 2020")

        return result
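get_parsed_javascript_html needs the Selenium browser handle so that JavaScript-rendered markup exists before parsing. A minimal sketch, assuming browser is a Selenium WebDriver:

from bs4 import BeautifulSoup

def get_parsed_javascript_html(url, browser):
    # Load the page in the Selenium-driven browser and hand the rendered DOM
    # to BeautifulSoup (the real helper may wait for specific elements first).
    browser.get(url)
    return BeautifulSoup(browser.page_source, "html.parser")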
Example #6
    def scrape(self, browser):
        """ Scrape function. Returns a data object with the reported cases. Uses Selenium and Beautifulsoup to extract the data """ 
        result = dataobject.DataObject(self)
        soup = get_html(self.source_website)

        elem = soup.find("div", class_="col-lg-8")
        result.cases = clean_number(elem.find("span", class_="badge badge-danger").text)
        result.deaths = clean_number(elem.find("span", class_="badge badge-dark").text)
        result.hospitalised = clean_number(match(elem.text, "{} hospitalizacja"))
        result.tested = clean_number(match(elem.text, "{} wykonane testy"))
        result.source_update_date = date_formatter(elem.find("span", class_="badge badge-light").text)

        return result
Example #7
    def scrape(self, browser, date=None):
        """ Scrape function. Returns a data object with the reported cases. Uses Selenium and BeautifulSoup to extract the data. """
        if date is None:
            date = datetime.datetime.now()  # evaluate the default per call, not once at import time
        result = dataobject.DataObject(self, date)

        result.screenshot_path = self.screenshot(browser)

        soup = get_parsed_javascript_html(self.source_website, browser)

        result.cases = clean_number(soup.find("div", class_="counter-title", text=re.compile("Slučajevi")).parent.find("strong").text)
        result.recovered = clean_number(soup.find("div", class_="counter-title", text=re.compile("Izliječeni")).parent.find("strong").text)
        result.deaths = clean_number(soup.find("div", class_="counter-title", text=re.compile("Preminuli")).parent.find("strong").text)

        return result
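self.screenshot(browser) saves an image of the source page for auditing and returns its path. The method is not shown on this page; a hypothetical sketch (the directory, file naming, and the os/datetime imports are assumptions):

    def screenshot(self, browser):
        # Sketch only: capture the source page with Selenium and return the
        # path of the saved image.
        os.makedirs("screenshots", exist_ok=True)
        path = f"screenshots/{datetime.datetime.now():%Y%m%d_%H%M%S}.png"
        browser.get(self.source_website)
        browser.save_screenshot(path)
        return path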
Example #8
    def scrape(self, browser, date=None):
        """ Scrape function. Returns a data object with the reported cases. Uses Selenium and BeautifulSoup to extract the data. """
        if date is None:
            date = datetime.datetime.now()  # evaluate the default per call, not once at import time
        result = dataobject.DataObject(self, date)
        soup = get_html(self.source_website)

        #saveToFile(soup.prettify(), "output.txt")
        text = soup.find("div", class_="item__layout-inner").text
        result.cases = clean_number(match(text, "{} cas de COVID-19 ont été diagnostiqués"))
        result.deaths = -1
        #result.deaths = clean_number(match(text, "incluant {} décès survenus"))
        #result.hospitalised = clean_number(match(text, "{} cas de COVID-19 étaient hospitalisés"))
        #result.intensive_care = clean_number(match(text, "dont {} en"))
        
        return result
Example #9
    def scrape(self, browser, date=None):
        """ Scrape function. Returns a data object with the reported cases. Uses Selenium and BeautifulSoup to extract the data. """
        if date is None:
            date = datetime.datetime.now()  # evaluate the default per call, not once at import time
        result = dataobject.DataObject(self, date)
        soup = get_parsed_javascript_html(self.source_website, browser)
        #save_to_file(soup.prettify(), "output.txt", )

        #objects = soup.find("td", text=re.compile("Geografsk område")).parent.parent.find_all("td")
        #table = [i.text for i in objects]

        #result.cases = denmark_cases + faraoe_cases + greenland_cases
        #result.deaths = denmark_deaths + faraoe_deaths + greenland_deaths
        #result.tested = denmark_tested + faraoe_tested + greenland_tested

        #soup = get_parsed_javascript_html(self.source2_website, browser)

        return result
Example #10
    def scrape(self, browser, date=None):
        """ Scrape function. Returns a data object with the reported cases. Uses Selenium and BeautifulSoup to extract the data. """
        if date is None:
            date = datetime.datetime.now()  # evaluate the default per call, not once at import time
        result = dataobject.DataObject(self, date)

        result.screenshot_path = self.screenshot(browser)

        soup = get_html(self.source_website)
        # Cases - Fertőzött
        result.cases = clean_number(soup.find("span", class_="label", text=re.compile("Fertőzött")).parent.find("span", class_="number").text)
        # Recovered - Gyógyult
        result.recovered = clean_number(soup.find("span", class_="label", text=re.compile("Gyógyult")).parent.find("span", class_="number").text)
        # Tested - Mintavétel
        result.tested = clean_number(soup.find("span", class_="label", text=re.compile("Mintavétel")).parent.find("span", class_="number").text)
        
        soup = get_html(self.source2_website)
        result.deaths = clean_number(soup.find("tbody").find("tr").find("td").text)

        return result
Example #11
def check_from_sheet(country,
                     country_iso_code,
                     date,
                     ignore_cache_timeout=False):
    log.info("Retrieving data from the Google Sheet database...")
    result = dataobject.DataObject()
    result.date = date
    result.country_name = country
    result.source_update_date = date

    check_drive_cache()

    sheet_country = channel_to_sheet_country(country)

    sheet_path = ""
    if country in bot_data.europe_channels:
        sheet_path = 'data/drive/europe.xlsx'
    elif country in bot_data.us_channels:
        sheet_path = 'data/drive/us.xlsx'
    elif country in bot_data.canada_channels:
        sheet_path = 'data/drive/canada.xlsx'

    if sheet_path != "":
        sheet = pd.read_excel(sheet_path, sheet_name=sheet_country)

        dates = sheet[sheet.columns[0]].tolist()
        cases = sheet[sheet.columns[1]].tolist()
        deaths = sheet[sheet.columns[2]].tolist()
        recovered = sheet[sheet.columns[3]].tolist()

        index = get_sheet_date_index(date, dates)
        if index == -1:
            result.data_validity = "Could not find specified date in the Google Sheet data. Has a new date been added to the sheet yet?"
            return result
        if not math.isnan(cases[index]):
            result.cases = int(cases[index])
            result.deaths = int(deaths[index])
            result.recovered = int(recovered[index])
        else:
            result.data_validity = "Cell was NAN. Has a new date been added to the sheet yet?"

    return result
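get_sheet_date_index maps the requested date onto a row index in the sheet's date column; it is not shown here, so this is only a sketch of the assumed lookup:

def get_sheet_date_index(date, dates):
    # Return the index of the row whose date matches the requested day, or -1
    # if the sheet has no entry for it yet (the real helper may compare
    # formatted strings instead of datetime objects).
    for i, d in enumerate(dates):
        if hasattr(d, "date") and d.date() == date.date():
            return i
    return -1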
Example #12
                                            self.stop_event)
        self.receiver.start()

    def connect_serial(self, port, rate, timeout):
        try:
            return serial.Serial(port=port, baudrate=rate, timeout=timeout)
        except (serial.SerialException, ValueError):
            # return None if the port cannot be opened or the parameters are invalid
            return None

    def close_serial(self):
        #print(__name__, "Close")
        self.stop_event.set()
        self.receiver.join()
        self.ser.close()

    def transmit(self, message):
        self.transmitter.transmit(message)


if __name__ == "__main__":
    print("Start")
    data = dataobject.DataObject()
    event = threading.Event()
    comm = SerialCommunicator(data, event)
    for i in range(4):
        print(i, data.get_data())
        time.sleep(1.5)
    comm.transmit("Fnord")
    comm.close_serial()
    print("End")