def get_and_merge_s3_logs(dirname, rootname='log', chunk_pat=None, subdir=None):
    """
    Fetch all lambda log chunk files from a job folder on S3 that match
    rootname and chunk_pat, concatenate them into one local file named
    {rootname}_{dirname}.txt, and upload the combined file back to S3.

    :param dirname: S3 job folder to fetch lambda logs from
    :param rootname: filename root of the chunk files (default 'log')
    :param chunk_pat: regex fragment matching the chunk portion of the names
    :param subdir: optional subdirectory within dirname
    :return: number of chunk files merged

    chunk file name: f"log_{group_root}_{dirname}_chunk_{str(chunk_idx)}.txt"
    """
    utils.sts(f"Getting the {rootname} files from s3 and combining")

    # start from an empty local 'tmp' dir so stale chunks are never merged in
    tmp_dirpath = DB.dirpath_from_dirname('tmp')
    shutil.rmtree(tmp_dirpath, ignore_errors=True)

    sts(f"Downloading all {rootname} files, one per chunk", 3)
    # download only the files matching the chunk-naming pattern
    chunk_file_pat = fr"{rootname}_{chunk_pat}\.txt"
    DB.download_entire_dirname(dirname=dirname, subdir=subdir, file_pat=chunk_file_pat, local_dirname='tmp')

    sts(f"Combining {rootname} files", 3)
    dest_name = f"{rootname}_{dirname}.txt"
    combined_log_filepath = DB.dirpath_from_dirname(dirname=dirname, s3flag=False) + dest_name

    num_files = merge_txt_dirname(dirname='tmp', subdir=subdir, destpath=combined_log_filepath, file_pat=f"{rootname}_*.txt")

    sts(f"Writing combined {rootname} file: {combined_log_filepath} to s3 in dirname:'{dirname}'", 3)
    if os.path.exists(combined_log_filepath):
        DB.upload_file_dirname(dirname, dest_name, local_dirname='tmp')
    return num_files
# Example #2
# 0
    def set_cells_with_images_to_writeins(file_paths):
        """Reads CVR spreadsheet as a ZIP and extracts information from
        the .xml file about the cells that have images in them.
        Then sets null cells in CVR data frame to write-in, if the cell
        has an image within.
        :param file_paths: path, or list of paths, to the CVR file(s).
        @TODO: Need to fix for s3 operation.
                probably first download the file and then perform the work.
        """
        dirpath = DB.dirpath_from_dirname('archives')
        if dirpath.startswith('s3'):
            utils.sts("Cannot convert images to writeins on s3")
            sys.exit(1)

        if isinstance(file_paths, str):
            file_paths = [file_paths]
        for file_path in file_paths:
            # context manager guarantees the archive handle is closed;
            # the prior code leaked the open ZipFile on every path.
            with ZipFile(file_path, 'r') as archive:
                xml_path = 'xl/drawings/drawing1.xml'
                try:
                    xml_file = archive.read(xml_path)
                except KeyError:
                    utils.sts(f'Couldn\'t find {xml_path} in {file_path}')
                    break
                doc = xml.dom.minidom.parseString(xml_file.decode())
                # each twoCellAnchor element anchors one image at a cell;
                # mark the anchored cell in the CVR data frame as a write-in.
                for cellAnchorElement in doc.getElementsByTagName(
                        'xdr:twoCellAnchor'):
                    fromElement = cellAnchorElement.getElementsByTagName(
                        'xdr:from')[0]
                    row = fromElement.getElementsByTagName(
                        'xdr:row')[0].firstChild.data
                    col = fromElement.getElementsByTagName(
                        'xdr:col')[0].firstChild.data
                    # row is offset by 1 relative to the data frame index
                    CVR.data_frame.iat[int(row) - 1, int(col)] = 'write-in:'
def merge_csv_dirname_local(dirname,
                            subdir,
                            dest_name,
                            dest_dirname=None,
                            file_pat=None):
    """ merge all csv files in local dirname meeting file_pat into one to dest_name
        uses header line from first file, discards header in subsequent files.
        all csv files must have the same format.

        :param dirname: local source dirname containing the .csv chunks
        :param subdir: optional subdirectory within dirname
        :param dest_name: name of the combined .csv file to create
        :param dest_dirname: dirname to write dest_name into (defaults to dirname)
        :param file_pat: optional regex; only basenames matching it are merged
    """

    if dest_dirname is None:
        dest_dirname = dirname

    sts(f"Merging csv from {dirname} to {dest_dirname}/{dest_name}", 3)

    src_dirpath = DB.dirpath_from_dirname(dirname, subdir=subdir, s3flag=False)
    dest_dirpath = DB.dirpath_from_dirname(dest_dirname, s3flag=False)
    destpath = os.path.join(dest_dirpath, dest_name)

    # fa is opened lazily on the first matching file; try/finally guarantees
    # it is closed even if an append fails (prior code relied on catching
    # UnboundLocalError and never closed on exception).
    fa = None
    try:
        for infilepath in glob.glob(f"{src_dirpath}*.csv"):
            basename = os.path.basename(infilepath)
            if file_pat is not None and not re.search(file_pat, basename):
                # skip any files that are not the lambda download format, including the one being built
                continue
            if infilepath == destpath:
                # make sure we are not appending dest to itself.
                continue
            if fa is None:
                # first matching file: copy verbatim (keeps its header line),
                # then keep dest open for appending the remaining chunks.
                shutil.copyfile(infilepath, destpath)
                fa = open(destpath, 'a+', encoding="utf8")
                continue
            # the rest of the chunks: strip the header line and append
            with open(infilepath, encoding="utf8") as fi:
                buff = fi.read()
                # everything after the first newline == file minus header line
                fa.write(buff.partition('\n')[2])
    finally:
        if fa is not None:
            fa.close()
def get_logfile_pathname(rootname='log'):
    """ lambdas can only open files in /tmp
        Used only within this module.
        :param rootname: filename root of the log file
        :return: full path of the log file for the current environment
    """
    # on lambda the only writable location is /tmp
    if utils.on_lambda():
        return f"/tmp/{rootname}.txt"
    dirpath = DB.dirpath_from_dirname('logs', s3flag=False)   # this also creates the dir
    return f"{dirpath}{rootname}.txt"
def write_html_summary(html_doc, filename='summary'):
    """ Render html_doc and write it to the local {filename} folder.

        :param html_doc: a rendered-document object (anything with .render())
        :param filename: dirname (and file root) to write the summary under
        :return: absolute path of the written html file
    """
    summary_path = DB.dirpath_from_dirname(filename)
    # exist_ok avoids the check-then-create race of the prior code
    os.makedirs(summary_path, exist_ok=True)
    # NOTE(review): the prior code interpolated a corrupted literal
    # "(unknown)" into the path; the file root is assumed to be `filename`
    # -- confirm against callers.
    html_file_path = f"{summary_path}{filename}.html"
    # with-block guarantees the file is closed even if render/write raises
    with open(html_file_path, 'w') as html_file:
        html_file.write(html_doc.render())
    return os.path.abspath(html_file_path)
def remove_dirname_files_by_pattern(dirname, file_pat=None):
    """ Remove regular files from the dirpath of dirname, KEEPING any file
        whose basename matches regex file_pat; if file_pat is None (or empty)
        every file is removed.  Subdirectories are left untouched.

        NOTE(review): the condition removes NON-matching files, i.e. file_pat
        names the files to keep -- the previous docstring claimed the
        opposite ("remove files that match").  Verify callers' intent before
        changing either.
    """
    dirpath = DB.dirpath_from_dirname(dirname)
    for filename in os.listdir(dirpath):
        full_path = os.path.join(dirpath, filename)
        # delete only regular files that do NOT match file_pat
        if os.path.isfile(full_path) and not (file_pat and bool(
                re.search(file_pat, filename))):
            os.remove(full_path)
def merge_txt_dirname(dirname: str, destpath: str, file_pat: str, subdir=None):
    """
    Local only.
    Consumes all .txt files in a given dirname and merges them into one.
    :param dirname: name of dir of chunks to combine.
    :param destpath: path of file to create with combined files.
    :param file_pat: glob pattern selecting the chunk files to merge.
    :param subdir: optional subdirectory within dirname.
    :return: number of chunk files merged.
    """
    dirpath = DB.dirpath_from_dirname(dirname, subdir=subdir, s3flag=False)
    # materialize the glob into a list: the prior code used iglob and then
    # called len(list(...)) AFTER the loop had exhausted the iterator, so it
    # always returned 0.
    txt_files = glob.glob(os.path.join(dirpath, file_pat))
    with open(destpath, "a+b") as wfh:
        for txt_file in txt_files:
            with open(txt_file, 'rb') as rfh:
                # stream-copy each chunk into the combined file
                shutil.copyfileobj(rfh, wfh)
    return len(txt_files)
# Example #8
# 0
def copy_ballot_pdfs_from_archive_to_report_folder(archive, filepaths,
                                                   ballot_id, dirname):
    """ Extract {ballot_id}i.pdf from the already-open archive into the
        report folder for dirname.  Exits the program if the ballot's pdf
        cannot be found among filepaths.

        :param archive: an open archive object supporting .extract()
        :param filepaths: list of member paths inside the archive
        :param ballot_id: id of the ballot whose pdf is wanted
        :param dirname: report dirname to extract into
    """
    target_filename = f"{ballot_id}i.pdf"
    target_folder = DB.dirpath_from_dirname(dirname)
    pattern = r'[\\/]' + target_filename
    # extract the first archive member whose basename matches and return
    for member_path in filepaths:
        if re.search(pattern, member_path):
            utils.sts(f"Extracting {member_path} from archive", 3)
            archive.extract(member_path, path=target_folder)
            return

    # no member matched -- this is a fatal logic error
    utils.sts(
        f"Logic error: Failed to find ballot_id {ballot_id} in ballot archive.",
        0)
    traceback.print_stack()
    sys.exit(1)
def reprocess_failing_ballots(argsdict):
    """ given list of ballots in inputfile, attempt to align these ballots.

            1. builds single bif table.
            2. looks each ballot up.
            3. using the entry, call
                get_ballot_from_image_filepaths(argsdict:dict, file_paths:list, mode=local)
    """

    dirpath = DB.dirpath_from_dirname('styles')

    for ballot_id in argsdict['ballotid']:
        # each failing ballot image was previously saved under styles/{ballot_id}/
        local_file_paths = [f"{dirpath}{ballot_id}/{ballot_id}.tif"]

        ballot = get_ballot_from_image_filepaths(argsdict, local_file_paths, mode='local')
        # for each image, capture the timing marks to ballot instance.
        ballot.get_timing_marks()
def save_failing_ballots(argsdict):
    """ given list of ballots in inputfile, copy the original ballot image files
        to (job_folder_path)/styles/(ballot_id) folders

        this function
            1. builds single bif table.
            2. looks each ballot up.
            3. using entry, opens the indicated archive and extracts the original file.
            4. saves the file in folder of jobname and ballot_id in styles, see above.

        :param argsdict: job arguments; 'ballotid' holds the list of ballot ids
    """

    full_bif_df = combine_archive_bifs()

    ballot_list = argsdict['ballotid']

    # loop-invariant: the destination root does not depend on the ballot,
    # so compute it once (the prior code recomputed it per iteration).
    dest_dirpath = DB.dirpath_from_dirname('styles')

    # keep at most one archive open at a time, reusing it while consecutive
    # ballots come from the same archive.
    opened_archive_basename = ''
    archive = None

    for ballot_id in ballot_list:
        utils.sts(f"processing ballot_id:{ballot_id}", 3)
        rows = full_bif_df.loc[full_bif_df['ballot_id'] == ballot_id]       # select set of rows with value in column_name equal to some_value.

        archive_basename = rows['archive_basename'].values.item()     # return one item from a row
        file_paths_str = rows['file_paths'].values.item()
        file_paths = file_paths_str.split(';')

        if archive_basename != opened_archive_basename:
            if opened_archive_basename:
                archive.close()
            archive = open_archive(argsdict, archive_basename)
            opened_archive_basename = archive_basename

        for file_path in file_paths:
            basename = os.path.basename(file_path)
            dest_filepath = os.path.join(dest_dirpath, ballot_id, basename)
            extract_file(archive, file_path, dest_filepath)
            utils.sts(f"...extracted:{file_path} to {dest_filepath}", 3)

    if opened_archive_basename:
        archive.close()
# Example #11
# 0
def copy_ballot_pdfs_to_report_folder(argsdict, ballot_id_list, dirname):
    """ Extract the {ballot_id}i.pdf file for every ballot in ballot_id_list
        from the source archives into the report folder for dirname.
        Exits the program if any ballot's pdf is not found in any archive.

        Relies on the module-level `archives` and `file_paths_cache` so the
        archives are opened and scanned only once per run; the archives are
        intentionally left open for subsequent calls.

        :param argsdict: job arguments; 'source' lists the archive paths
        :param ballot_id_list: ids of ballots whose pdfs should be copied
        :param dirname: report dirname to extract into
    """
    utils.sts(
        f"Copying {len(ballot_id_list)} ballot image files classified as {dirname}",
        3)
    if not len(ballot_id_list): return

    target_folder = DB.dirpath_from_dirname(dirname)
    # work on a copy; items are popped below and the caller's list must survive
    mutated_ballot_id_list = ballot_id_list.copy()

    # first create the list of all the archive paths in this archive that are in ballot_id_list
    # and open the archives and leave them open during processing.
    if not file_paths_cache:
        for archive_idx, archive_path in enumerate(argsdict['source']):
            archive = open_archive(argsdict, archive_path)
            archives.append(archive)
            file_paths_list = get_image_file_paths_from_archive(archive)
            file_paths_cache[archive_idx] = file_paths_list

    while mutated_ballot_id_list:
        ballot_id = mutated_ballot_id_list.pop(0)
        target_filename = f"{ballot_id}i.pdf"
        # search each cached archive listing for this ballot's pdf
        for archive_idx in range(len(archives)):
            ballot_paths = [
                x for x in file_paths_cache[archive_idx]
                if re.search(r'[\\/]' + target_filename, x)
            ]
            if len(ballot_paths):
                utils.sts(
                    f"Extracting {ballot_paths[0]} from archive {archive_idx}",
                    3)
                archives[archive_idx].extract(ballot_paths[0],
                                              path=target_folder)
                break
        else:
            # for-else: no archive contained the pdf -- fatal logic error
            mbidl = ', '.join(mutated_ballot_id_list)
            utils.sts(
                f"Logic error: Failed to find some ballot_ids in ballot archives: {mbidl}",
                0)
            traceback.print_stack()
            sys.exit(1)
def create_html_string(COUNTERS, BALLOTLISTS, DISAGREED_INFO_DICT):
    """Creates a HTML string for generating the summary file.

    Accesses the following:
        COUNTERS['ballots_processed']
        COUNTERS['styles_detected']
        COUNTERS['matched_ballots']
        COUNTERS['non_matched_ballots']
        COUNTERS['blank_ballots']
        list of ballot OVERVOTED_BALLOTS
        list of ballot DISAGREED_BALLOTS
            accesses ballot pdf files per precinct and ballot_id
        DISAGREE_INFO_DICT is keyed by ballot_id which provides dict of contests providing error information
                f"{config_dict['RESOURCES_PATH']}{config_dict['DISAGREEMENTS_PATHFRAG']}{ballot.ballotdict['precinct']}/{ballot.ballotdict['ballot_id']}.pdf")

        list STYLES
            style.style_num
            style.number
            style.build_from_count
        files style_summary = glob.glob(f"{config_dict['RESOURCES_PATH']}{config_dict['STYLES_PATHFRAG']}{style.code}.html")[0]
        list VOTES_RESULTS   (results for each contest)
            result_contest['contest_name']
            result_contest['selections']
            result_contest['vote_for']
            result_contest['question']
            result_contest['total_ballots']
            result_contest['total_votes']
            result_contest['undervote']
            result_contest['overvote']

    :return: a dominate document representing the summary page
    """
    script_abs_path = os.path.abspath('assets/copy_to_clipboard.js')
    version = utils.show_version()
    doc = dominate.document(title='Audit Engine version: ' + version)
    with doc.head:
        link(
            rel='stylesheet',
            href=
            'https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css',
            integrity=
            "sha384-ggOyR0iXCbMQv3Xipma34MD+dH/1fQ784/j6cY/iJTQUOhcWr7x9JvoRxT2MZw1T",
            crossorigin="anonymous",
        )
        script(type='text/javascript', src=script_abs_path)

    with doc:
        with div(cls='container'):
            with div(cls='jumbotron'):
                h1('Audit Engine: {version} - vote records summary'.format(
                    version=version))
                build_time = datetime.datetime.now(datetime.timezone.utc)
                p(f'Summary built at: {build_time.strftime("%Y-%m-%d %H:%M:%S")}',
                  cls='lead')
            # Overall counters table
            with table(cls='table table-striped'):
                with tbody():
                    with tr():
                        th('Number of ballots processed')
                        td(COUNTERS['ballots_processed'])
                    with tr():
                        th('Number of different ballot types')
                        td(COUNTERS['styles_detected'])
                    with tr():
                        th('Number of ballots matching the CVR results')
                        td(COUNTERS['matched_ballots'])
                    with tr():
                        th('Number of ballots not matching the CVR results')
                        td(COUNTERS['non_matched_ballots'])
                    with tr():
                        th('Number of completely blank ballots')
                        td(COUNTERS['blank_ballots'])
                    with tr():
                        th('Number of overvotes')
                        td(COUNTERS['overvoted_ballots'])
                    with tr():
                        th('Number of disagreements')
                        td(COUNTERS['disagreed_ballots'])
            with div(cls='my-4'):
                h2('Styles')
            with table(cls='table table-striped'):
                with thead():
                    with tr():
                        th('Style code', scope="col")
                        th('Style number', scope="col")
                        th('Based on number of ballots', scope="col")
                        th('Built at', scope="col")
                with tbody():
                    for style in STYLES:
                        with tr():
                            utc_time = datetime.datetime.utcfromtimestamp(
                                style.timestamp)
                            style_summary = glob.glob(
                                f"{config_dict['RESOURCES_PATH']}{config_dict['STYLES_PATHFRAG']}{style.code}.html"
                            )[0]
                            td(
                                a(style.style_num,
                                  href=os.path.realpath(style_summary),
                                  target="_blank"))
                            td(style.number)
                            td(style.build_from_count)
                            td(f'{utc_time.strftime("%Y-%m-%d %H:%M:%S")}')
            # Tables with contests results:
            with div(cls='my-4'):
                h2('Contests results')
            for result_contest in VOTES_RESULTS:
                contest_name = result_contest['contest_name']
                selections = result_contest['selections']
                vote_for = result_contest['vote_for']
                question = result_contest['question']
                with div(cls='my-4'):
                    h5(f'Contest results "{contest_name}" (vote for {vote_for}):'
                       )
                    if question:
                        h6(f'Question "{question}"')
                with table(cls='table table-striped'):
                    with thead():
                        with tr():
                            th('#', scope="col")
                            th('Candidate', scope="col")
                            th('Votes', scope="col")
                            th('%', scope="col")
                    with tbody():
                        for index, candidate_name in enumerate(
                                sort_option_names(selections.keys())):
                            try:
                                total_votes = result_contest['total_votes']
                                percent = round(
                                    (selections[candidate_name] / total_votes)
                                    * 100, 2)
                            except ZeroDivisionError:
                                percent = 0.0
                            with tr():
                                th(index + 1, scope="row")
                                td(candidate_name)
                                # BUGFIX: 'Votes' column previously repeated
                                # the candidate name; it must show the count.
                                td(selections[candidate_name])
                                td(f'{percent}%')
                with table(cls='table table-striped'):
                    with tbody():
                        with tr():
                            th('Total number of ballots')
                            td(result_contest['total_ballots'])
                        with tr():
                            th('Number of votes')
                            td(result_contest['total_votes'])
                        with tr():
                            th('Number of undervotes')
                            td(result_contest['undervote'])
                        with tr():
                            th('Number of overvotes')
                            td(result_contest['overvote'])
            # Table with overvotes:
            with div(cls='my-4'):
                h2('Ballots with overvotes:')
            with table(cls='table table-striped'):
                with thead():
                    with tr():
                        th('#', scope="col")
                        th('Precinct / Contest name', scope="col")
                        th('Ballot file / Ballot and CVR status', scope="col")
                        th('Overvotes / Contest validation status',
                           scope="col")
                with tbody():
                    dirpath = DB.dirpath_from_dirname('overvotes')
                    for index, ballot_id in enumerate(
                            BALLOTLISTS['overvoted_ballots']):
                        filepathlist = glob.glob(
                            f"{dirpath}**/{ballot_id}i.pdf", recursive=True)
                        if not filepathlist: continue
                        filepath = filepathlist[0]
                        with tr():
                            th(index + 1, scope="row")
                            td('')
                            with td():
                                ballot_image_filepath = os.path.abspath(
                                    filepath)
                                a(ballot_id,
                                  href=ballot_image_filepath,
                                  target="_blank")
                            td('')
            # Table with blank ballots:
            with div(cls='my-4'):
                h2('Blank Ballots:')
            with table(cls='table table-striped'):
                with thead():
                    with tr():
                        th('#', scope="col")
                        th('Precinct / Contest name', scope="col")
                        th('Ballot file / Ballot and CVR status', scope="col")
                        th('Overvotes / Contest validation status',
                           scope="col")
                with tbody():
                    dirpath = DB.dirpath_from_dirname('blank_ballots')
                    for index, ballot_id in enumerate(
                            BALLOTLISTS['blank_ballots']):
                        # BUGFIX: pattern previously began with a stray
                        # literal 'f' ("f{dirpath}...") so no blank-ballot
                        # pdf could ever be found.
                        filepathlist = glob.glob(f"{dirpath}{ballot_id}i.pdf",
                                                 recursive=True)
                        if not filepathlist:
                            continue
                        filepath = filepathlist[0]
                        with tr():
                            th(index + 1, scope="row")
                            td('')
                            with td():
                                ballot_image_filepath = os.path.abspath(
                                    filepath)
                                a(ballot_id,
                                  href=ballot_image_filepath,
                                  target="_blank")
                            td('')
            # Table with disagreements:
            with div(cls='my-4'):
                h2('Ballots with disagreements:')
            with table(cls='table table-striped'):
                with thead():
                    with tr():
                        th('#', scope="col")
                        th('Ballot file', scope="col")
                        th('Disagreement Details', scope="col")
                with tbody():
                    dirpath = DB.dirpath_from_dirname('disagreements')
                    for index, ballot_id in enumerate(
                            BALLOTLISTS['disagreed_ballots']):
                        filepathlist = glob.glob(
                            f"{dirpath}**/{ballot_id}i.pdf", recursive=True)
                        if not filepathlist: continue
                        filepath = filepathlist[0]
                        with tr():
                            th(index + 1, scope="row")
                            with td():
                                ballot_image_filepath = os.path.abspath(
                                    filepath)
                                a(ballot_id,
                                  href=ballot_image_filepath,
                                  target="_blank")
                            td(
                                raw(f"<pre>{DISAGREED_INFO_DICT[ballot_id]}</PRE>"
                                    ))
    return doc
def generate_cmpcvr_report(argsdict):
    """ Build per-precinct discrepancy reports plus a parent summary report
        from the cmpcvr 'disagreed' data and the combined marks data.

        Precincts whose discrepancy percentage is below
        argsdict['precinct_reporting_threshold_percent'] are skipped.

        :param argsdict: job arguments dict
    """
    discrepancy_reports = []
    report_dirpath = DB.dirpath_from_dirname('reports')
    report_path = f"{report_dirpath}Discrepancy Report for Automated Independent Audit.html"

    cmpcvr_disagreed_df = DB.load_data(dirname='cmpcvr',
                                       name='disagreed.csv',
                                       silent_error=True)
    if cmpcvr_disagreed_df is None:
        # no disagreements file: behave as if there were zero disagreements
        cmpcvr_disagreed_df = pd.DataFrame(columns=[
            'ballot_id', 'style', 'precinct', 'contest', 'agreed', 'blank',
            'chunk_name', 'contests_mismatch', 'vote_difference', 'audit_info',
            'cvr_info'
        ])

    # the following will require that all marks_df segments are combined.
    ballot_marks_df = DB.load_data(dirname='marks',
                                   name='marks.csv',
                                   silent_error=True)
    if ballot_marks_df is None:
        # without marks data there is nothing to report on; the prior code
        # crashed here with AttributeError on None.
        utils.sts("No marks data found; cannot generate cmpcvr report", 0)
        return

    num_marks_ballots = len(ballot_marks_df['ballot_id'].unique())

    precincts = ballot_marks_df['precinct'].unique().tolist()
    for precinct in precincts:
        precinct_cmpcvr_disagreed_df = cmpcvr_disagreed_df.loc[
            cmpcvr_disagreed_df['precinct'] == precinct]
        disagreed_rows = len(
            precinct_cmpcvr_disagreed_df['ballot_id'].unique())

        # Pass precincts in which number of disagreed ballots is smaller than the threshold.
        discrepancy = round((disagreed_rows / num_marks_ballots) * 100, 2)
        if discrepancy < argsdict.get('precinct_reporting_threshold_percent',
                                      0):
            continue
        precinct_report_path = f"{report_dirpath}Report - {precinct}.html"
        discrepancy_reports.append({
            'precinct': precinct,
            'ballots': num_marks_ballots,
            'discrepancy': discrepancy,
            'path': precinct_report_path
        })
        precinct_marks_df = ballot_marks_df.loc[ballot_marks_df['precinct'] ==
                                                precinct]
        with open(precinct_report_path, 'w') as html_file:
            doc = build_discrepancy_reports(
                precinct,
                precinct_agreed_df=None,
                precinct_disagreed_df=precinct_cmpcvr_disagreed_df,
                precinct_marks_df=precinct_marks_df)

            html_file.write(doc.render())
    # parent report links all per-precinct reports together
    with open(report_path, 'w') as html_file:
        doc = build_discrepancy_parent_report(discrepancy_reports)
        html_file.write(doc.render())
        utils.sts(os.path.abspath(report_path))
def get_marks_df_path_from_ballot_id(ballot_id):
    """ Return the local path of the per-ballot marks csv file. """
    marks_dirpath = DB.dirpath_from_dirname('marks')
    return f"{marks_dirpath}marks_df_{ballot_id}.csv"
def wait_for_lambdas(argsdict: dict, task_name=None):
    """ Waits for every lambda request added to LambdaTracker.

        Note: not specific to task_name. Only one use of Lambdas at a time
                by a specific job_name.
        We may want to use task_name to create separate folders for any given task.
        So keep task_name for now even though we are not using it.

        :param argsdict: job arguments; 'use_lambdas' gates this entire wait
        :param task_name: currently unused (see note above)
        :return: True if all requests completed, False if any failed;
                 None when lambdas are disabled or nothing is running.
    """
    if not argsdict['use_lambdas']: return

    total_requests = len(LambdaTracker.lambda_requests.keys())
    running_requests = total_requests
    if not running_requests: return

    wait = 10           # seconds between polls
    timeout = 60 * 20   # overall wait budget in seconds
    time.sleep(10)  # Just to be sure that all lambdas tracker files are on the bucket
    s3dirpath_completed = DB.dirpath_from_dirname('lambda_tracker', subdir='Completed')
    s3dirpath_failed = DB.dirpath_from_dirname('lambda_tracker', subdir='Failed')

    completed_requests = 0
    failed_requests = 0
    while timeout > 0 and running_requests:
        time.sleep(wait)
        timeout -= wait

        # each lambda reports completion/failure by dropping a tracker file on s3
        files_completed = s3utils.list_files_in_s3dirpath(s3dirpath_completed)
        files_failed = s3utils.list_files_in_s3dirpath(s3dirpath_failed)
        completed_requests = len(files_completed)
        failed_requests = len(files_failed)
        running_requests = total_requests - completed_requests - failed_requests
        if timeout <= 0 or not running_requests:
            break
        logs.sts(f'Waiting for lambdas. Timeout (s): {timeout}. Running: {running_requests}')

    failed_requests_log_list = s3utils.list_files_in_s3dirpath(s3dirpath_failed)
    all_succeeded = True
    if failed_requests_log_list:
        for failed_request in failed_requests_log_list:
            print(f'Lambda request failed. please check cloudwatch logs for chunks: {failed_request} \n')
        all_succeeded = False

    finished = completed_requests + failed_requests
    if finished:
        logs.sts(f"All lambdas finished; {completed_requests} {round(100 * completed_requests/(completed_requests + failed_requests), 2)}% successful, "
                 f"{failed_requests} {round(100 * failed_requests/(completed_requests + failed_requests), 2)}% failed", 3)
    else:
        # guard: timed out with no tracker files at all -- the prior code
        # raised ZeroDivisionError in the summary message here.
        logs.sts("Lambda wait ended with no completed or failed tracker files found", 3)

    return all_succeeded