Example #1
def email_gen(logfile, region_name, config):
    # upload the log file to s3
    s3 = boto3.Session(profile_name='trinity').resource(
        's3', region_name=region_name)
    utils.upload_to_s3(s3, 'quantization-benchmark-data', logfile)

    dic = defaultdict(dict)
    for model_zoo_name in model_zoos:
        for model_name in model_zoo_models[model_zoo_name]:
            dic[model_zoo_name][model_name] = {
                'quantized performance': '',
                'original performance': '',
                'quantized accuracy': '',
                'original accuracy': '',
            }

    with open(logfile, 'r') as log_file:
        for line in log_file:
            line = line.rstrip()
            if 'INFO:root:' in line:
                line = line[len('INFO:root:'):]
                if 'quantization failed' in line:
                    # quantization failed
                    model_zoo_name = line.split()[3]
                    model_name = line.split()[4]
                    dic[model_zoo_name][model_name][
                        'quantized performance'] = ''
                    dic[model_zoo_name][model_name][
                        'original performance'] = ''
                    dic[model_zoo_name][model_name]['quantized accuracy'] = ''
                    dic[model_zoo_name][model_name]['original accuracy'] = ''
                if 'latency' in line:
                    model_zoo_name = line.split()[1]
                    model_name = line.split()[2]
                    data = line.split()[4]
                    model_type = line.split()[0]
                    dic[model_zoo_name][model_name][model_type +
                                                    ' performance'] = data
                if 'top1' in line:
                    model_type = line.split()[0]
                    model_zoo_name = line.split()[1]
                    model_name = line.split()[2]
                    top1 = line.split()[4]
                    top5 = line.split()[6]
                    dic[model_zoo_name][model_name][
                        model_type + ' accuracy'] = top1 + '/' + top5

    # format the results into HTML tables and email the report

    import pandas as pd
    html = ''
    for model_zoo_name in model_zoos:
        df = pd.DataFrame(data=dic[model_zoo_name])
        df = df.fillna(' ').T
        html += df.to_html()
    utils.send_email_html('*****@*****.**',
                          '*****@*****.**',
                          'Neo Quantization Report', html,
                          config.get('email', 'ARN'))
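
For reference, the parser above indexes whitespace-separated fields, so it assumes log lines shaped roughly like the one below (the wording is illustrative; only the field positions follow from the split() indices used in the code):

# Hypothetical 'latency' line (after the 'INFO:root:' prefix is stripped).
line = "quantized gluoncv resnet50_v1 latency: 12.3"
parts = line.split()
model_type = parts[0]       # 'quantized' or 'original'
model_zoo_name = parts[1]   # model zoo
model_name = parts[2]       # model name
data = parts[4]             # reported latency value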
def main():
    """
    The main function of the project.
    It iterates over all experiments in the config file, performs each experiment,
    and saves its results to external files.
    :return: None.
    """

    experiment_config_path = _parse_input()
    all_experiments = read_experiments_config(experiment_config_path)

    for experiment_name, experiment_config in all_experiments.items():
        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            results, model = perform_experiment(experiment_config)
            weights_file_name = save_model_weights(experiment_name, model)
            testing_layers_files = save_layers_logs(
                results['Layers Testing Output'], 'Testing')
            training_layers_files = save_layers_logs(
                results['Layers Training Output'], 'Training')

            results.pop('Layers Training Output')
            results.pop('Layers Testing Output')
            print("Testing Data Confusion Matrix")
            print(np.array2string(results['Confusion Matrix']))
            results['Confusion Matrix'] = str(
                results['Confusion Matrix'].tolist())
            print("Experiment Results:")
            print(json.dumps(results, indent=2, sort_keys=True))

            results_file = save_experiment_log(results, experiment_name)
            upload_to_s3([], [], [results_file], [weights_file_name],
                         testing_layers_files + training_layers_files)
Example #3
def create_and_upload(template_s3_url, local_param_file, local_userinput_file,
                      s3bucket):
    '''
    Main function - parse cloudformation template from S3URL and generate
    parameter.json and parameter.yaml files.
    '''
    try:
        result = cfn_client.validate_template(TemplateURL=template_s3_url)

        # create parameter.json structure
        param_obj = result['Parameters']
        for obj in param_obj:
            # keep only the ParameterKey field, then add a templated value
            for k in list(obj.keys()):
                if k != 'ParameterKey':
                    obj.pop(k)
            obj['ParameterValue'] = "{{ " + obj['ParameterKey'] + " }}"

        param_str = json.dumps(param_obj, indent=2, separators=(',', ': '))

        replacements = {"\"{{": "{{", "}}\"": "}}"}
        cfn_params = search_and_replace(param_str, replacements)

        userinput_content = generate_userinput_params(param_obj)
        #cfn_params = search_and_replace(json.dumps(param_obj, indent=2, separators=(',', ': ')))

        # generate user_input yaml parameter file and upload to s3
        utils.write_to_file('/tmp/' + local_userinput_file, userinput_content)
        utils.upload_to_s3(s3bucket, '/tmp/' + local_userinput_file,
                           local_userinput_file)

        # generate parameter.json file and upload to s3
        utils.write_to_file('/tmp/' + local_param_file, cfn_params)
        utils.upload_to_s3(s3bucket, '/tmp/' + local_param_file,
                           local_param_file)

    except Exception as e:
        print(e)
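
The search_and_replace helper is project-specific and not shown in this example. A minimal sketch consistent with how it is called above (undoing the quotes that json.dumps places around the "{{ Key }}" placeholders) could look like this:

# Hypothetical helper: apply each old -> new substitution to the string,
# e.g. turning '"{{ BucketName }}"' into '{{ BucketName }}'.
def search_and_replace(text, replacements):
    for old, new in replacements.items():
        text = text.replace(old, new)
    return text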
    def _crawl_using_requests(self, url):
        html = None
        try:
            id = self.page_id(url)
            r = requests.get(url)
            html = r.text
            #timeDelay = random.randrange(2, 3)
            #time.sleep(timeDelay)
            '''
            with open("/tmp/" + id, "wb") as f:
                f.write(html)

            with open("/tmp/" + id, "rb") as f:
                html = f.read()
            '''
            filename = self.base_folder + self.page_id(
                url) + "/" + self.page_id(url) + ".html"
            randomfile = save_to_random_file(html, filename, as_json=False)
            if self.save_to_s3:
                upload_to_s3(self.bucket_name, randomfile)
            if self.is_delete_cache:
                os.remove(randomfile)
        except Exception as ex:
            error = str(traceback.format_exc())
            print('ERROR: Helper crawl error = ', error, ' for url, ', url)
        return html
    def save_page_screenshot(self, url, driver):
        from slugify import slugify
        try:
            datafilename = slugify(
                url, replacements=[['|', 'or'], ['%', 'percent']])
            S = lambda X: driver.execute_script(
                'return document.body.parentNode.scroll' + X)
            driver.set_window_size(S('Width'),
                                   S('Height'))  # May need manual adjustment
            screenshot_filename = self.base_folder + self.page_id(
                url) + "/screenshot_" + datafilename + ".png"
            driver.find_element_by_tag_name('body').screenshot(
                screenshot_filename)
            #driver.get_screenshot_as_file(screenshot_filename)
            print('Saved screenshot at = ', screenshot_filename, ' for url = ',
                  url)
            if self.save_to_s3:
                upload_to_s3(self.bucket_name, screenshot_filename)
            if self.is_delete_cache:
                os.remove(screenshot_filename)
            return screenshot_filename
        except Exception as ex:
            error = str(traceback.format_exc())
            print('ERROR: Screenshot save error = ', error, ' for url, ', url)
        return None
Example #6
def loop():
    """Loop through the training data directory, upload files to S3, 
    delete after uploading
    """
    while True:
        files = utils.search_path(config.TRAIN_DIR, filetypes=['.pkl', '.txt'])
        LOGGER.info('Uploading %s files', len(files))
        for file in files:
            key = os.path.basename(file)
            try:
                utils.upload_to_s3(BUCKET, file, key)
                os.remove(file)
            except Exception:
                LOGGER.exception('Error while uploading file %s', file)
        time.sleep(60 * 5)
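
utils.upload_to_s3 is not defined in this snippet; a minimal boto3-based sketch matching the (bucket, local_path, key) call order used above might be:

import boto3

# Hypothetical helper: upload a local file to s3://bucket/key using the
# default credential chain.
def upload_to_s3(bucket, local_path, key):
    s3 = boto3.client('s3')
    s3.upload_file(local_path, bucket, key)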
    def _crawl_using_selenium(self, url):
        html = None
        try:
            driver = self.driver
            driver.get(url)

            # scroll down in steps with a short delay so images load on each scroll
            for scroll_y in (400, 600, 900, 1100, 1300, 1500, 1700, 1900):
                driver.execute_script("window.scrollTo(0,{})".format(scroll_y))
                timeDelay = random.randrange(2, 3)
                time.sleep(timeDelay)

            #get html page of url
            content = driver.page_source.encode('utf-8').strip()
            soup = BeautifulSoup(content, 'lxml')
            html = soup.prettify()
            filename = self.base_folder + self.page_id(
                url) + "/" + self.page_id(url) + ".html"
            randomfile = save_to_random_file(html, filename, as_json=False)
            if self.save_to_s3:
                upload_to_s3(self.bucket_name, randomfile)
            if self.is_delete_cache:
                os.remove(randomfile)
        except Exception as ex:
            error = str(traceback.format_exc())
            print('ERROR: Helper crawl error = ', error, ' for url, ', url)
        return html
def lambda_handler(event, context):
    logger.info('## ENVIRONMENT VARIABLES')
    logger.info(os.environ)
    logger.info('## EVENT')
    logger.info(event)
    
    # Lambda inputs
    temp_s3_bucket = event['temp_s3_bucket']
    product_name = event['product_name']
    
    addOn_zip_filename = "alz-qs-" + product_name + ".zip"
    sc_product_template_name = "sc-" + product_name + ".template"

    create_add_on(temp_s3_bucket, addOn_zip_filename, local_file_path, sc_product_template_name)
    utils.upload_to_s3(temp_s3_bucket, local_file_path + sc_product_template_name, sc_product_template_name)

    return {
        'statusCode': 200,
        'body': json.dumps('SC template created successfully!')
    }
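
For context, the handler above only reads two keys from the incoming event; an invocation event would look roughly like this (bucket and product names are placeholders):

# Hypothetical test event for the handler above.
event = {
    "temp_s3_bucket": "my-temp-bucket",
    "product_name": "example-product",
}
# With these inputs the handler builds "alz-qs-example-product.zip" and
# "sc-example-product.template" before uploading the template to S3.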
def main():
    person_group_id = FACE_GROUP_ID
    display_image = False
    
    face_api_wrapper = FaceAPIWrapper(FACE_API_KEY, FACE_BASE_URL)
    create_dir_if_not_exists('captured_images/' + CAMERA_NAME)
   
    print("Capturing Image every ", CAPTURE_INTERVAL, " seconds...")

    i = 0

    while 1:
        try:
            image_filename = 'captured_images/' + CAMERA_NAME + "/" + current_time_to_string() + ".jpg"
            image = Camera().capture_image()
            cv2.imwrite(image_filename, image)

            if display_image:
                cv2.imshow("Camera Image", image)
                cv2.waitKey(1)

            image_link = upload_to_s3(image_filename)
            image_url = 'https://mybuckfucket.s3-eu-west-1.amazonaws.com/public_folder/'+image_filename
            face_ids = face_api_wrapper.detect_faces(image=image_filename)
            i += 1
            print(i, "Captured at ", current_time_to_string())
            print("S3 url",image_url)
            if face_ids:
                person_ids = \
                    face_api_wrapper.identify_faces(face_ids=face_ids,
                                                    large_person_group=person_group_id)
                req_ids = [{id} for id in person_ids]
                print("Detected Faces...", req_ids)
                contents = load_dict_from_file('json.txt')

                #my_data=(contents[0]['faceAttributes'])
                data = {
                    'attributes': contents,
                    'name': "Darragh",
                    'time_date': current_time_to_string(),
                    'face_ids': person_ids,
                    'image_link': image_url,
                }
                requests.post('http://127.0.0.1:8000/students/', data=data)
                print("#####", data)
                #print(contents[0]['faceAttributes']['emotion'])
                #requests.post('http://127.0.0.1:8000/students/', data=contents)
                

            time.sleep(CAPTURE_INTERVAL)
        except Exception as e:
            print(e)
Example #11
def lambda_handler(event, context):
    "Lambda entry point"
    with open('config.json') as data_file:
        CONFIG = json.load(data_file)
    #Read the task completion sheet and filter current quarter data
    spreadsheet_data, headers = utils.read_sheet()
    filtered_data = utils.filter_current_quarter_data(spreadsheet_data)
    #Read the data into a csv
    complete_data = utils.to_csv(spreadsheet_data, headers)
    current_quarter_data = utils.to_csv(filtered_data, headers)
    #Upload the csv files to s3 bucket
    utils.upload_to_s3(complete_data, CONFIG['complete_sheet_s3_key'])
    utils.upload_to_s3(current_quarter_data, CONFIG['current_quarter_s3_key'])
    #Prepare the data to initiate transfer to dynamodb
    prepared_complete_data = utils.prepare_data(
        CONFIG['complete_sheet_s3_key'])
    prepared_quarter_data = utils.prepare_data(
        CONFIG['current_quarter_s3_key'])
    #Store the complete task completion sheet data if the dynamodb table is empty
    utils.migrate_to_dynamodb(prepared_complete_data)
    #Update the dynamodb with edits to current quarter data
    utils.update_dynamodb(prepared_quarter_data)
    return "Read task completion sheet and populated dynamodb"
Example #12
async def move_beets(context):
    """TODO"""
    uploads = []
    for file, local_path in context.extracted_files.items():
        # TODO: fix this via regex extraction here
        if 'forUnitTests' in file:
            destination = f"maven2/org/mozilla/telemetry/glean-forUnitTests/{context.version}/{file}"
        else:
            destination = f"maven2/org/mozilla/telemetry/glean/{context.version}/{file}"

        uploads.append(
            asyncio.ensure_future(
                upload_to_s3(context=context,
                             s3_key=destination,
                             path=local_path)))

        await raise_future_exceptions(uploads)
Example #13
def main():
    person_group_id = FACE_GROUP_ID
    display_image = False

    face_api_wrapper = FaceAPIWrapper(FACE_API_KEY, FACE_BASE_URL)
    create_dir_if_not_exists('captured_images/' + CAMERA_NAME)

    print("Capturing Image every ", CAPTURE_INTERVAL, " seconds...")

    i = 0

    while 1:
        try:
            image_filename = ('captured_images/' + CAMERA_NAME + "/" +
                              current_time_to_string() + ".jpg")
            image = Camera().capture_image()
            cv2.imwrite(image_filename, image)

            if display_image:
                cv2.imshow("Camera Image", image)
                cv2.waitKey(1)

            image_link = upload_to_s3(image_filename)
            face_ids = face_api_wrapper.detect_faces(image=image_filename)
            i += 1
            print(i, "Captured at ", time.time())
            if face_ids:
                person_ids = \
                    face_api_wrapper.identify_faces(face_ids=face_ids,
                                                    large_person_group=person_group_id)
                req_ids = [{id} for id in person_ids]
                print("Detected Faces...", req_ids)

                requests.post(REST_SERVER_URL + 'time-face-id',
                              data={
                                  'lecture_number': get_lecture_number(),
                                  'face_ids': req_ids,
                                  'image-link': image_link,
                                  'camera-name': CAMERA_NAME,
                              })
                print("Present IDs:", req_ids)

            time.sleep(CAPTURE_INTERVAL)
        except Exception as e:
            print(e)
Example #14
async def move_beets(context):
    """TODO"""
    uploads = []
    for file, local_path in context.extracted_files.items():
        for package_name in GLEAN_PACKAGES:
            if file.startswith(f"{package_name}-{context.version}"):
                destination = f"maven2/org/mozilla/telemetry/{package_name}/{context.version}/{file}"
                break
        else:
            # no known package prefix matched this file name; skip it
            continue

        uploads.append(
            asyncio.ensure_future(
                upload_to_s3(context=context,
                             s3_key=destination,
                             path=local_path)))

        await raise_future_exceptions(uploads)
Example #15
def handleUpload():
    if 'x_ray_image' not in request.files:
        print('no file found in upload request')
        return jsonify({'message': 'no file in upload request'})
    file = request.files['x_ray_image']

    if file.filename == '':
        return jsonify({'message': 'No file name provided'})

    file.filename = get_unique_filename() + '.' + file.filename.split('.')[-1]

    if file and allowed_file_extensions(file.filename):
        if not os.path.exists(os.getenv('UPLOAD_FOLDER')):
            os.makedirs(os.getenv('UPLOAD_FOLDER'))
        uploaded_file_path = os.path.join(os.getenv('UPLOAD_FOLDER'), file.filename)
        file.save(uploaded_file_path)
        prediction, all_predictions = perform_inference(uploaded_file_path)
        s3_url = upload_to_s3(uploaded_file_path, str(prediction) + '__' + str(file.filename))

        print('*****************')
        print('*****************')
        print('*****************')
        print(prediction)
        print('*****************')
        print('*****************')
        print('*****************')

        return app.response_class(
            response=json.dumps({"prediction": str(prediction), "file_url": str(s3_url), "all": all_predictions}),
            status=200,
            mimetype='application/json'
        )
    else:
        return app.response_class(
            response=json.dumps({"message": "invalid file provided"}),
            status=500,
            mimetype='application/json'
        )
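
A client could exercise this endpoint roughly as follows; the host and route are assumptions, while the 'x_ray_image' form field name and the response keys come from the code above:

import requests

# Hypothetical client call against the upload endpoint.
with open('chest_xray.jpg', 'rb') as f:
    resp = requests.post('http://localhost:5000/upload',
                         files={'x_ray_image': f})
print(resp.json())  # {"prediction": ..., "file_url": ..., "all": ...}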
    def parse(self, url, html, counter=0):
        from slugify import slugify
        images = []
        # A dictionary table of fields with values extracted from the html
        fields_map = {}
        final_filename = None
        try:
            fields_map['url'] = url
            datafilename = self.base_folder + self.page_id(
                url) + "/" + slugify(
                    url, replacements=[['|', 'or'], ['%', 'percent']])
            driver = self.driver
            driver.get(url)

            self.save_page_screenshot(url, driver)

            # Getting brand name
            brands = driver.find_elements_by_class_name("pdp-title")
            fields_map['brand'] = self.get_text_value(brands)

            # Getting product name
            product_name = driver.find_elements_by_class_name("pdp-name")
            fields_map['name'] = self.get_text_value(product_name)
            if not fields_map['name']:
                counter += 1
                if counter > 1:
                    pass
                else:
                    print('Retrying again URL = ', url, ' with counter = ',
                          counter)
                    timeDelay = random.randrange(3, 5)
                    time.sleep(timeDelay)
                    return self.parse(url, html, counter)

            ## getting prices
            price = driver.find_elements_by_class_name("pdp-price")
            fields_map['price'] = self.get_text_value(price)

            # getting taxes
            taxes = driver.find_elements_by_class_name("pdp-vatInfo")
            fields_map['tax'] = self.get_text_value(taxes)

            # getting product details
            product_details = driver.find_elements_by_class_name(
                "pdp-product-description-content")
            fields_map['product_details'] = self.get_text_value(
                product_details)

            metadescs = driver.find_elements_by_class_name("meta-desc")
            fields_map['meta_desc'] = self.get_text_value(metadescs, True)

            print('Fields map so far = ', fields_map, 'for url = ', url)
            #getting images, and adding to images[]

            soup = BeautifulSoup(html, 'lxml')

            image_links = soup.find_all('div', {"class": "image-grid-image"})
            for link in image_links:
                l1 = link['style']
                # the style attribute looks like: background-image: url("...");
                # slice out the URL between the prefix and the trailing quote/paren
                final_url = l1[23:-3]
                print('Image found so far = ', final_url, 'for url = ', url)
                images.append(final_url)

            fields_map['images'] = images

            image_prefix = self.base_folder + self.page_id(url) + "/"
            s3_images = []
            '''
            for image in images:
                print('Uploading image: ', image, ' to S3')
                if self.save_to_s3:
                    image_file, image_size = download_and_save(image_prefix, image, None, is_override=True, add_type=None)
                    if image_file:
                        s3_images.append({"original":image, "uploaded":image_file, "size":image_size})
                        upload_to_s3(self.bucket_name, image_file)
                        if self.is_delete_cache:
                            os.remove(image_file)
                print('Uploaded image: ', image, ' to S3')
            '''
            fields_map['uploaded_images'] = s3_images
            datajson = self.make_data_json(fields_map)
            randomfile = save_to_random_file(datajson,
                                             datafilename,
                                             as_json=True)
            print('Uploading fields data file: ', randomfile, ' to S3')
            if self.save_to_s3:
                upload_to_s3(self.bucket_name, randomfile)
            print('Uploaded fields data file: ', randomfile, ' to S3')
            if self.is_delete_cache:
                #os.remove(randomfile)
                pass
            final_filename = randomfile
            if fields_map['name'] == '' or fields_map['images'] == []:
                self.bad_url_count += 1
                update_counter_value()
                with open('error_urls.txt', 'a') as eff:
                    eff.write(fields_map['url'] + "\n")
                if self.bad_url_count % 10 == 0:
                    self.bad_url_count = 0
                    #sending email as alert
                    #send_mail("*****@*****.**", "*****@*****.**", "NO DATA FOUND", str(fields_map['url']))
                    pass
            #update_counter_value()
        except Exception as ex:
            error = str(traceback.format_exc())
            print('ERROR: URL parse error = ', error, ' for url, ', url)
        return final_filename
    def parse_using_requests(self, url, html, counter=0):
        from slugify import slugify
        images = []
        # A dictionary table of fields with values extracted from the html
        fields_map = {}
        final_filename = None
        try:
            # the product data is embedded in the page as a JS assignment;
            # slice out the JSON blob between "window.__myx = " and the closing script tag
            maybejson = html.split("window.__myx = ")[1]
            maybejson = maybejson.split('''</script><script>window''')[0]
            #print(maybejson.strip())
            json_data = json.loads(maybejson.strip())

            fields_map['url'] = url
            datafilename = self.base_folder + self.page_id(
                url) + "/" + slugify(
                    url, replacements=[['|', 'or'], ['%', 'percent']])
            json_data = self.make_final_json_from_pdp_data(json_data)
            product_name = None
            if json_data:
                product_name = json_data["name"]
            if not product_name:
                counter += 1
                if counter > 1:
                    pass
                else:
                    print('Retrying again URL = ', url, ' with counter = ',
                          counter)
                    timeDelay = random.randrange(3, 5)
                    time.sleep(timeDelay)
                    return self.parse_using_requests(url, html, counter)

            fields_map = json_data

            image_prefix = self.base_folder + self.page_id(url) + "/"
            images = json_data["images"]
            s3_images = []
            '''
            for image in images:
                print('Uploading image: ', image, ' to S3')
                if self.save_to_s3:
                    image_file, image_size = download_and_save(image_prefix, image, None, is_override=True, add_type=None)
                    if image_file:
                        s3_images.append({"original":image, "uploaded":image_file, "size":image_size})
                        upload_to_s3(self.bucket_name, image_file)
                        if self.is_delete_cache:
                            os.remove(image_file)
                print('Uploaded image: ', image, ' to S3')
            '''
            fields_map['uploaded_images'] = s3_images
            datajson = self.make_data_json(fields_map)
            randomfile = save_to_random_file(datajson,
                                             datafilename,
                                             as_json=True)
            print('Uploading fields data file: ', randomfile, ' to S3')
            if self.save_to_s3:
                upload_to_s3(self.bucket_name, randomfile)
            print('Uploaded fields data file: ', randomfile, ' to S3')
            if self.is_delete_cache:
                #os.remove(randomfile)
                pass
            final_filename = randomfile
            if fields_map['name'] == '' or fields_map['images'] == []:
                self.bad_url_count += 1
                update_counter_value()
                with open('error_urls.txt', 'a') as eff:
                    eff.write(fields_map['url'] + "\n")
                if self.bad_url_count % 10 == 0:
                    self.bad_url_count = 0
                    #sending email as alert
                    #send_mail("*****@*****.**", "*****@*****.**", "NO DATA FOUND", str(fields_map['url']))
                    pass
            #update_counter_value()
        except Exception as ex:
            error = str(traceback.format_exc())
            print('ERROR: URL parse error = ', error, ' for url, ', url)
            with open('error_urls.txt', 'a') as eff:
                eff.write(url + "\n")
        return final_filename
Example #18
def submit():
    """
    Endpoint for the HTML form for new job creation. Uploads the user's file to S3, creates a job entry in the database,
    and triggers asynchronous creation of all tasks needed for the job.

    Parameters
    ----------
    file: html file upload
    n_init: int
    n_experiments: int
    max_k: int
    covars: list(str)
    columns: list(str)
    scale: bool

    Returns
    -------
    redirects to index
    """
    if request.method == 'POST':
        # Ensure that file is part of the post
        if 'file' not in request.files:
            flash("No file part in form submission!", category='danger')
            return redirect(url_for('index'))

        # Ensure that files were selected by user
        file = request.files['file']
        if file.filename == '':
            flash("No selected file!", category='danger')
            return redirect(url_for('index'))

        # Ensure that file type is allowed
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            if not os.path.isdir(UPLOAD_FOLDER):
                os.mkdir(UPLOAD_FOLDER)

            filepath = os.path.join(UPLOAD_FOLDER, filename)
            file.save(filepath)

            n_init = int(request.form.get('n_init'))
            n_experiments = int(request.form.get('n_experiments'))
            max_k = int(request.form.get('max_k'))
            covars = request.form.getlist('covars')
            columns = request.form.getlist('columns')
            scale = 'scale' in request.form
            n_tasks = n_experiments * max_k * len(covars)
            pr = [n_experiments, max_k, columns, filename, n_tasks, scale]

            # Create the job synchronously
            job = Job(n_experiments=n_experiments,
                      n_init=n_init,
                      max_k=max_k,
                      scale=scale,
                      columns=columns,
                      filename=filename,
                      n_tasks=n_tasks,
                      start_time=datetime.utcnow())
            s3_file_key = upload_to_s3(filepath, filename, job.job_id)
            job.s3_file_key = s3_file_key
            db.session.add(job)
            db.session.commit()
            os.remove(filepath)

            # Create all tasks asynchronously
            create_tasks.apply_async((job.job_id, n_init, n_experiments, max_k,
                                      covars, columns, s3_file_key, scale),
                                     queue='high')
            print('creating all tasks asynchronously')
            flash(
                'Your request with job ID "{}" and {} tasks are being submitted. Refresh this page for updates.'
                .format(str(job.job_id), n_tasks),
                category='success')

            return redirect(url_for('status', job_id=str(job.job_id)))

        else:
            filename = secure_filename(file.filename)
            flash('Incorrect file extension for file "{}"!'.format(filename),
                  category='danger')
            return redirect(url_for('index'))
    else:
        return redirect(request.url)
def main():
    person_group_id = 'students'
    display_image = False

    face_api_wrapper = FaceAPIWrapper("##MY_KEY###", 'https://projectface.cognitiveservices.azure.com/face/v1.0')
    create_dir_if_not_exists('capturedImages/' + "Camera 1")
    print("Capture Image every ", 60, " seconds...")

    i = 0

    while 1:
        try:
            image_filename = 'capturedImages/' + "Camera 1" + "/" + current_time_to_string() + ".jpg"
            image = Camera().capture_image()
            cv2.imwrite(image_filename, image)

            if display_image:
                cv2.imshow("Camera Image", image)
                cv2.waitKey(1)

            image_link = upload_to_s3(image_filename)
            face_ids = face_api_wrapper.detect_faces(image=image_filename)
            i += 1
            print(i, "Captured at ", time.time())
            if face_ids:
                person_ids = \
                    face_api_wrapper.identify_faces(face_ids=face_ids,
                                                    large_person_group=person_group_id)
                req_ids = [{id} for id in person_ids]
                print("Detected Faces...", req_ids

            time.sleep(60)
        except Exception as e:
            print(e)



if __name__ == '__main__':
    # initial_setup()
    main()
                      
def current_time_to_string():
    from datetime import datetime
    return datetime.now().strftime("%Y%m%d_%H%M%S%f")

def upload_to_s3(key):
    print("Uploading file to S3...")
    bucket_name = 'mybuckfucket'

    folder_name = "public_folder"
    output_name = folder_name + "/" + key
    location = 'us-west-1'

    s3 = boto3.client('s3')
    s3.upload_file(key, bucket_name, output_name, ExtraArgs={'ACL': 'public-read'})

    url = "https://s3.amazonaws.com/%s/%s/%s" % (bucket_name, folder_name, key)
    return url
                      

def create_dir_if_not_exists(output_dir):
    try:
        os.makedirs(output_dir)
    except OSError:
        if not os.path.isdir(output_dir):
            raise