Example #1
import glob
import os
from os import listdir

import face_recognition
import pandas as pd

def get_names():
    """Match faces in ./cropped_images against the employee sheet and email the match."""
    person_name = 'not available'
    employee_name = 'not available'
    email_id = ''
    list_personname = []
    if not os.path.exists('./cropped_images'):
        os.makedirs('./cropped_images')
    filenames = listdir('./cropped_images/')
    print(filenames)
    if '.DS_Store' in filenames:
        filenames.remove('.DS_Store')
    # Encode every cropped face and collect the recognized names.
    for f in filenames:
        unknown_image = face_recognition.load_image_file(os.path.join('./cropped_images/', f))
        unknown_image_encoding = face_recognition.face_encodings(unknown_image)
        if len(unknown_image_encoding) > 0:
            person_name = detect_face_names(unknown_image_encoding[0])
            if person_name not in list_personname and person_name != 'not available':
                list_personname.append(person_name)
    print(person_name)
    df = pd.read_excel('employee_database.xlsx')
    if len(list_personname) != 0:
        # Look up the first recognized person in the employee sheet.
        for i in range(df.shape[0]):
            if list_personname[0] == df['EMP_ID'][i]:
                email_id = df['Emp_emaiil_id'][i]  # column name as spelled in the spreadsheet
                employee_name = df['Emp_name'][i]
        if email_id != '':
            send_email(email_id)
    print("The name of the person is", employee_name)
    # Clean up the processed crops.
    files = glob.glob('./cropped_images/*')
    for f in files:
        os.remove(f)
    return employee_name
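
Example #1 calls a detect_face_names helper that is not shown. A minimal sketch, assuming a precomputed list of known employee encodings aligned with their EMP_IDs (both container names are assumptions):

import face_recognition

known_encodings = []  # assumed: encodings precomputed from employee photos
known_emp_ids = []    # assumed: EMP_IDs aligned with known_encodings

def detect_face_names(unknown_encoding):
    # Compare the unknown face against every known encoding and return the
    # first matching EMP_ID, or 'not available' when nothing matches.
    matches = face_recognition.compare_faces(known_encodings, unknown_encoding)
    for match, emp_id in zip(matches, known_emp_ids):
        if match:
            return emp_id
    return 'not available'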
def train(model, epoch, layers):
    """Train the model."""

    # Training dataset.
    dataset_train = CrossarmDataset()
    dataset_train.load_crossarm(glo_var.DATASET_DIR, "train")
    dataset_train.prepare()

    # Validation dataset
    dataset_val = CrossarmDataset()
    dataset_val.load_crossarm(glo_var.DATASET_DIR, "val")
    dataset_val.prepare()

    if layers == "heads":
        print("Training network heads")
        model.train(dataset_train,
                    dataset_val,
                    learning_rate=config.LEARNING_RATE,
                    epochs=epoch,
                    layers='heads')
    else:
        print("Training all layers")
        model.train(dataset_train,
                    dataset_val,
                    learning_rate=config.LEARNING_RATE / 10,
                    epochs=epoch,
                    layers='all')

    subject = "NEURAL NETWORK DONE TRAINING"
    text = "YAY your neural work training is done"
    email_notification.send_email(subject, text)

    return None
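
Several of these examples share an email_notification module whose implementation is not shown. A minimal smtplib-based sketch of the two-argument send_email(subject, text) form used in train(), assuming credentials in environment variables (the SMTP host, port, and variable names are all assumptions):

import os
import smtplib
from email.message import EmailMessage

def send_email(subject, text):
    # Build a plain-text message and send it over SSL; the SMTP host and the
    # NOTIFY_* environment variables are assumptions, not part of the source.
    msg = EmailMessage()
    msg["Subject"] = subject
    msg["From"] = os.environ["NOTIFY_FROM"]
    msg["To"] = os.environ["NOTIFY_TO"]
    msg.set_content(text)
    with smtplib.SMTP_SSL("smtp.gmail.com", 465) as server:
        server.login(os.environ["NOTIFY_FROM"], os.environ["NOTIFY_PASSWORD"])
        server.send_message(msg)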
def main():
    row_count = get_db_row_count(database_name, "sites")

    for x in range(1, row_count + 1):
        date_time = datetime.now()
        site_details = get_site_details(database_name, x)

        try:
            last_update = datetime.strptime(site_details[7], "%Y-%m-%d").date()
        except (ValueError, TypeError):
            # No parseable last-update date on record; treat the site as last
            # updated yesterday so today's after-sunset summary still runs.
            last_update = date.today() - timedelta(1)

        # During daylight, log the running energy total for the day.
        if its_between_dawn_sunset(site_details[3]):
            kwhr = get_energy_values(site_details[4], site_details[0], date_time.date())
            write_energy_to_database(database_name, site_details[0], date_time, kwhr)
            print("{}'s solar system produced {} kWh of electricity so far".format(site_details[2], kwhr))

        # After sunset, store the day's power curve once and send the summary email.
        elif its_after_sunset(site_details[3]) and (last_update < date_time.date()):
            kwhr = get_energy_values(site_details[4], site_details[0], date_time.date())
            power_values = get_power_values(site_details[4], site_details[0], date_time.date(), date_time.date())
            write_power_to_database(database_name, site_details[0], power_values)
            touch_site(database_name, site_details[0], date_time.date())
            if len(site_details[5]) > 0:
                send_email(site_details[2], site_details[5], 'Solar production notification', kwhr)
            print("{}'s solar system produced {} kWh of electricity today".format(site_details[2], kwhr))
def run_scrapping(scrap_pages=600):
    # run through all listing pages, collect the URLs of the ad detail pages, save to CSV
    get_details_url(scrap_pages)

    directory = "car_update_data"
    csv_file_name = "car_list_all_v2_sauto_update.csv"

    # Reuse the output directory if it exists, otherwise create it.
    if not os.path.exists(directory):
        os.makedirs(directory)
        print("creating new dir and dataframe")
    else:
        print("creating new dataframe")
    car_list_all_v2_sauto_update = pd.DataFrame(columns=["car_brand", "car_model"])

    car_detail_url_list = pd.read_csv("car_detail_url_list.csv")
    car_detail_url_list = car_detail_url_list.reset_index(drop=True)

    browser = run_browser()

    for index, url in car_detail_url_list.iterrows():
        if (index + 1) % 100 == 0:
            print("restarting browser...")
            browser.quit()
            time.sleep(5)
            browser = run_browser()
            time.sleep(3)

        try:
            browser.get(url.iloc[0])
        except Exception as e:
            if "crash" in str(e):
                print("browser has crashed")
            # Restart the browser and retry the page once.
            browser.quit()
            browser = run_browser()
            browser.get(url.iloc[0])
            time.sleep(5)

        detail_df = scrape_car_detail(url.iloc[0], browser)
        car_list_all_v2_sauto_update = pd.concat([car_list_all_v2_sauto_update, detail_df], sort=False)
        print(f"details added: {index + 1}", end="\r", flush=True)
        time.sleep(1)
        if (index + 1) % 100 == 0:
            car_list_all_v2_sauto_update.to_csv(f"{directory}/{csv_file_name}", index=False)
            print("100 more details added, saving...")

        if (index + 1) % 1000 == 0:
            email_notification.send_email(f"{index + 1} details done")

    car_list_all_v2_sauto_update.to_csv(f"{directory}/{csv_file_name}", index=False)
    email_notification.send_email(f"got all the {index + 1} details")
def get_details_url(n_pages):
    car_detail_url_list = []
    index = 0
    reload_browser = True
    for n in range(1, (n_pages+1)):
        if reload_browser:
            browser = start_chrome(get_page_url(n), headless=True)
            reload_browser = False

        browser.get(get_page_url(n))
        soup = bs(browser.page_source, "html.parser")
        results = soup.find_all("a", class_="toDetail")
        all_urls = n_pages * 15  # the listing shows 15 ads per page
        for element in results:
            url_part = element.get("href")
            url = f"https://www.sauto.cz{url_part}"
            if url not in car_detail_url_list:
                car_detail_url_list.append(url)
                index += 1 
            print(f"urls added: {index}/{all_urls}", end="\r", flush=True)

        # save to CSV and restart the browser every 25 pages (~375 URLs)
        if (n + 1) % 25 == 0:
            df = pd.DataFrame(car_detail_url_list)
            print(f"saving at {index}")
            df.to_csv("car_detail_url_list.csv", index=False)
            browser.quit()
            reload_browser = True

        time.sleep(2)

    browser.quit()
    df = pd.DataFrame(car_detail_url_list, columns=["url"])
    df.to_csv("car_detail_url_list.csv", index=False)
    print(f"finished with {index} urls saved, approx. run time {n_pages/50} hours")
    time.sleep(20)
    email_notification.send_email(f"got all {index} detail URLs")
        email_notification.send_email(f"{index + 1} details done")

    car_list_all_v2_sauto_update.to_csv(f"{directory}/{csv_file_name}", index=False)
    email_notification.send_email(f"got all the {index + 1} details")

print("started script")
args = parser.parse_args()
try:
    while True:
        now = datetime.datetime.now()
        weekday_now = now.weekday()
        hour_now = now.hour
        minute_now = now.minute

        # Run the daily job at 08:00, or immediately when -t is passed.
        if hour_now == 8 or args.t:
            if args.s:
                print(f"run scrapping on {args.s} pages")
                run_scrapping(scrap_pages = args.s)
            print("updating model")
            if args.u:
                update_model(update_data=True)
            else:
                update_model(update_data=False)

        if args.o:
            break  # one-shot mode: exit after a single pass
        time.sleep(60 * 50)  # check again in 50 minutes
except Exception as er:
    print(er)
    email_notification.send_email(message=f"{er}")
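
The scheduler above reads parser and four flags that are defined elsewhere. A minimal argparse sketch matching that usage (the help texts are assumptions):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument("-t", action="store_true", help="run the job immediately instead of waiting for 08:00")
parser.add_argument("-s", type=int, help="number of listing pages to scrape")
parser.add_argument("-u", action="store_true", help="refresh the data before updating the model")
parser.add_argument("-o", action="store_true", help="run one pass and exit")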
    # Select which weights to start from. The snippet begins mid-chain; the
    # "coco" and "last" branches are reconstructed from the standard Mask R-CNN
    # pattern that the loading code below clearly follows.
    if args.weights.lower() == "coco":
        weights_path = COCO_WEIGHTS_PATH
    elif args.weights.lower() == "last":
        # Resume from the most recently trained weights
        weights_path = model.find_last()
    elif args.weights.lower() == "imagenet":
        # Start from ImageNet trained weights
        weights_path = model.get_imagenet_weights()
    else:
        weights_path = args.weights

    # Load weights
    print("Loading weights ", weights_path)
    if args.weights.lower() == "coco":
        # Exclude the last layers because they require a matching
        # number of classes
        model.load_weights(weights_path,
                           by_name=True,
                           exclude=[
                               "mrcnn_class_logits", "mrcnn_bbox_fc",
                               "mrcnn_bbox", "mrcnn_mask"
                           ])
    else:
        model.load_weights(weights_path, by_name=True)

    # Train or evaluate
    try:
        train(model, args.epoch, args.layers)
    except KeyboardInterrupt:
        print("KeyboardInterrupt Detected")
    except Exception:
        subject = "NEURAL NETWORK TRAINING FAILURE"
        text = traceback.format_exc()
        email_notification.send_email(subject, text)

        print(text)
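
This snippet reads args.weights, args.epoch, and args.layers from a parser defined earlier in the script. A sketch of the matching arguments (defaults and help texts are assumptions):

import argparse

parser = argparse.ArgumentParser(description="Train Mask R-CNN to detect crossarms")
parser.add_argument("--weights", required=True,
                    help="'coco', 'last', 'imagenet', or a path to a .h5 weights file")
parser.add_argument("--epoch", type=int, default=30, help="number of epochs to train")
parser.add_argument("--layers", default="heads", help="'heads' or 'all'")
args = parser.parse_args()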
Example #8
'''
Created on 16 Apr 2018

@author: Kacper
'''

from consumer import *
from flight_webscrap import check_flights
from email_notification import send_email

# Step the model twice, run the flight check, then send the notification email.
empty_model = MoneyModel(10)
for i in range(2):
    empty_model.step()
check_flights(3, 6)
send_email("284")

print("end")