def generate_e_file(self):
        """ Write file E to an appropriate CSV. """
        log_data = {'message': 'Starting file E generation', 'message_type': 'ValidatorInfo', 'job_id': self.job.job_id,
                    'submission_id': self.job.submission_id, 'file_type': 'executive_compensation'}
        logger.info(log_data)

        d1 = self.sess.query(AwardProcurement.awardee_or_recipient_uniqu).\
            filter(AwardProcurement.submission_id == self.job.submission_id).\
            distinct()
        d2 = self.sess.query(AwardFinancialAssistance.awardee_or_recipient_uniqu).\
            filter(AwardFinancialAssistance.submission_id == self.job.submission_id).\
            distinct()
        duns_set = {r.awardee_or_recipient_uniqu for r in d1.union(d2)}
        duns_list = list(duns_set)    # get an order

        rows = []
        for i in range(0, len(duns_list), 100):
            rows.extend(fileE.retrieve_rows(duns_list[i:i + 100]))

        # Add rows to database here.
        # TODO: This is a temporary solution until loading from SAM's SFTP has been resolved
        for row in rows:
            self.sess.merge(ExecutiveCompensation(**fileE.row_to_dict(row)))
        self.sess.commit()

        log_data['message'] = 'Writing E file CSV: {}'.format(self.job.original_filename)
        logger.info(log_data)

        write_csv(self.job.original_filename, self.job.filename, self.is_local, fileE.Row._fields, rows)

        log_data['message'] = 'Finished writing E file CSV: {}'.format(self.job.original_filename)
        logger.info(log_data)
Code example #2
def generate_f_file(sess, job, is_local):
    """Write rows from fileF.generate_f_rows to an appropriate CSV.

        Args:
            sess: database session
            job: upload Job
            is_local: True if in local development, False otherwise
    """
    log_data = {
        'message': 'Starting file F generation',
        'message_type': 'ValidatorInfo',
        'job_id': job.job_id,
        'submission_id': job.submission_id,
        'file_type': 'sub_award'
    }
    logger.info(log_data)

    rows_of_dicts = fileF.generate_f_rows(job.submission_id)
    header = [key for key in fileF.mappings]  # keep order
    body = []
    for row in rows_of_dicts:
        body.append([row[key] for key in header])

    log_data['message'] = 'Writing file F CSV'
    logger.info(log_data)
    write_csv(job.original_filename, job.filename, is_local, header, body)

    log_data['message'] = 'Finished file F generation'
    logger.info(log_data)
Code example #3
    def generate_e_file(self):
        """ Write file E to an appropriate CSV. """
        log_data = {'message': 'Starting file E generation', 'message_type': 'ValidatorInfo', 'job_id': self.job.job_id,
                    'submission_id': self.job.submission_id, 'file_type': 'executive_compensation'}
        logger.info(log_data)

        d1 = self.sess.query(AwardProcurement.awardee_or_recipient_uniqu).\
            filter(AwardProcurement.submission_id == self.job.submission_id).\
            distinct()
        d2 = self.sess.query(AwardFinancialAssistance.awardee_or_recipient_uniqu).\
            filter(AwardFinancialAssistance.submission_id == self.job.submission_id).\
            distinct()
        duns_set = {r.awardee_or_recipient_uniqu for r in d1.union(d2)}
        duns_list = list(duns_set)    # get an order

        rows = []
        for i in range(0, len(duns_list), 100):
            rows.extend(fileE.retrieve_rows(duns_list[i:i + 100]))

        # Add rows to database here.
        # TODO: This is a temporary solution until loading from SAM's SFTP has been resolved
        for row in rows:
            self.sess.merge(ExecutiveCompensation(**fileE.row_to_dict(row)))
        self.sess.commit()

        log_data['message'] = 'Writing file E CSV'
        logger.info(log_data)

        write_csv(self.job.original_filename, self.job.filename, self.is_local, fileE.Row._fields, rows)
Code example #4
def generate_e_file(submission_id, job_id, timestamped_name, upload_file_name, is_local):
    """Write file E to an appropriate CSV."""
    with job_context(job_id) as session:
        d1 = session.\
            query(AwardProcurement.awardee_or_recipient_uniqu).\
            filter(AwardProcurement.submission_id == submission_id).\
            distinct()
        d2 = session.\
            query(AwardFinancialAssistance.awardee_or_recipient_uniqu).\
            filter(AwardFinancialAssistance.submission_id == submission_id).\
            distinct()
        duns_set = {r.awardee_or_recipient_uniqu for r in d1.union(d2)}
        duns_list = list(duns_set)    # get an order

        rows = []
        for i in range(0, len(duns_list), 100):
            rows.extend(fileE.retrieve_rows(duns_list[i:i + 100]))

        # Add rows to database here.
        # TODO: This is a temporary solution until loading from SAM's SFTP has been resolved
        for row in rows:
            session.merge(ExecutiveCompensation(**fileE.row_to_dict(row)))
        session.commit()

        write_csv(timestamped_name, upload_file_name, is_local, fileE.Row._fields, rows)
Code example #5
def generate_f_file(submission_id, job_id, timestamped_name, upload_file_name,
                    is_local):
    """Write rows from fileF.generate_f_rows to an appropriate CSV.

        Args:
            submission_id - Submission ID for generation
            job_id - Job ID for upload job
            timestamped_name - Version of filename without user ID
            upload_file_name - Filename to use on S3
            is_local - True if in local development, False otherwise
    """
    log_data = {
        'message': 'Starting file F generation',
        'message_type': 'BrokerInfo',
        'submission_id': submission_id,
        'job_id': job_id,
        'file_type': 'sub_award'
    }
    logger.info(log_data)

    with job_context(job_id):
        rows_of_dicts = fileF.generate_f_rows(submission_id)
        header = [key for key in fileF.mappings]  # keep order
        body = []
        for row in rows_of_dicts:
            body.append([row[key] for key in header])

        log_data['message'] = 'Writing file F CSV'
        logger.info(log_data)
        write_csv(timestamped_name, upload_file_name, is_local, header, body)

    log_data['message'] = 'Finished file F generation'
    logger.info(log_data)
Code example #6
def generate_f_file(task, submission_id, job_id, timestamped_name,
                    upload_file_name, is_local):
    """Write rows from fileF.generateFRows to an appropriate CSV."""
    with job_context(task, job_id) as session:
        rows_of_dicts = fileF.generateFRows(session, submission_id)
        header = [key for key in fileF.mappings]  # keep order
        body = []
        for row in rows_of_dicts:
            body.append([row[key] for key in header])

        write_csv(timestamped_name, upload_file_name, is_local, header, body)
Code example #7
def generate_f_file(task, submission_id, job_id, interface_holder_class,
                    timestamped_name, upload_file_name, is_local):
    """Write rows from fileF.generateFRows to an appropriate CSV. Here the
    third parameter, interface_holder_class, is a bit of a hack. Importing
    InterfaceHolder directly causes cyclic dependency woes, so we're passing
    in a class"""
    with job_context(task, interface_holder_class, job_id) as job_manager:
        rows_of_dicts = fileF.generateFRows(job_manager.session,
                                            submission_id)
        header = [key for key in fileF.mappings]    # keep order
        body = []
        for row in rows_of_dicts:
            body.append([row[key] for key in header])

        write_csv(timestamped_name, upload_file_name, is_local, header,
                  body)
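
The interface_holder_class argument above is a way to break a circular import: the caller hands the class in at call time instead of this module importing InterfaceHolder when it loads. A minimal sketch of that pattern, using hypothetical module, class, and method names rather than the broker's real ones:

# reports.py -- hypothetical illustration of the "pass the class in" workaround.
# Importing InterfaceHolder here at module level would create an import cycle,
# because the module that defines it also imports reports.py.
def run_report(interface_holder_class, submission_id):
    """The caller supplies the class, so this module never imports it."""
    holder = interface_holder_class()        # instantiate the injected class
    try:
        return holder.lookup(submission_id)  # hypothetical method on the holder
    finally:
        holder.close()                       # hypothetical cleanup hook

# In the module that defines InterfaceHolder (the other half of the would-be cycle):
#     from reports import run_report
#     run_report(InterfaceHolder, submission_id=1234)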
Code example #8
    def generate_f_file(self):
        """ Write rows from fileF.generate_f_rows to an appropriate CSV. """
        log_data = {'message': 'Starting file F generation', 'message_type': 'ValidatorInfo', 'job_id': self.job.job_id,
                    'submission_id': self.job.submission_id, 'file_type': 'sub_award'}
        logger.info(log_data)

        rows_of_dicts = fileF.generate_f_rows(self.job.submission_id)
        header = [key for key in fileF.mappings]    # keep order
        body = []
        for row in rows_of_dicts:
            body.append([row[key] for key in header])

        log_data['message'] = 'Writing file F CSV'
        logger.info(log_data)

        write_csv(self.job.original_filename, self.job.filename, self.is_local, header, body)
Code example #9
    def load_d_file(self, url, upload_name, timestamped_name, job_id, isLocal):
        """ Pull D file from specified URL and write to S3 """
        job_manager = self.interfaces.jobDb
        try:
            full_file_path = "".join(
                [CONFIG_BROKER['d_file_storage_path'], timestamped_name])

            CloudLogger.log("DEBUG: Downloading file...",
                            log_type="debug",
                            file_name=self.smx_log_file_name)
            if not self.download_file(full_file_path, url):
                # Error occurred while downloading file, mark job as failed and record error message
                job_manager.markJobStatus(job_id, "failed")
                job = job_manager.getJobById(job_id)
                file_type = job_manager.getFileType(job_id)
                if file_type == "award":
                    source = "ASP"
                elif file_type == "award_procurement":
                    source = "FPDS"
                else:
                    source = "unknown source"
                job.error_message = "A problem occurred receiving data from {}".format(
                    source)

                raise ResponseException(job.error_message,
                                        StatusCode.CLIENT_ERROR)
            lines = self.get_lines_from_csv(full_file_path)

            write_csv(timestamped_name, upload_name, isLocal, lines[0],
                      lines[1:])

            CloudLogger.log("DEBUG: Marking job id of " + str(job_id) +
                            " as finished",
                            log_type="debug",
                            file_name=self.smx_log_file_name)
            job_manager.markJobStatus(job_id, "finished")
            return {"message": "Success", "file_name": timestamped_name}
        except Exception as e:
            CloudLogger.log("ERROR: Exception caught => " + str(e),
                            log_type="debug",
                            file_name=self.smx_log_file_name)
            # Log the error
            JsonResponse.error(e, 500)
            job_manager.getJobById(job_id).error_message = str(e)
            job_manager.markJobStatus(job_id, "failed")
            job_manager.session.commit()
            raise e
Code example #10
def generate_e_file(submission_id, job_id, timestamped_name, upload_file_name,
                    is_local):
    """Write file E to an appropriate CSV.

        Args:
            submission_id - Submission ID for generation
            job_id - Job ID for upload job
            timestamped_name - Version of filename without user ID
            upload_file_name - Filename to use on S3
            is_local - True if in local development, False otherwise
    """
    log_data = {
        'message': 'Starting file E generation',
        'message_type': 'BrokerInfo',
        'submission_id': submission_id,
        'job_id': job_id,
        'file_type': 'executive_compensation'
    }
    logger.info(log_data)

    with job_context(job_id) as session:
        d1 = session.query(AwardProcurement.awardee_or_recipient_uniqu).\
            filter(AwardProcurement.submission_id == submission_id).\
            distinct()
        d2 = session.query(AwardFinancialAssistance.awardee_or_recipient_uniqu).\
            filter(AwardFinancialAssistance.submission_id == submission_id).\
            distinct()
        duns_set = {r.awardee_or_recipient_uniqu for r in d1.union(d2)}
        duns_list = list(duns_set)  # get an order

        rows = []
        for i in range(0, len(duns_list), 100):
            rows.extend(fileE.retrieve_rows(duns_list[i:i + 100]))

        # Add rows to database here.
        # TODO: This is a temporary solution until loading from SAM's SFTP has been resolved
        for row in rows:
            session.merge(ExecutiveCompensation(**fileE.row_to_dict(row)))
        session.commit()

        log_data['message'] = 'Writing file E CSV'
        logger.info(log_data)
        write_csv(timestamped_name, upload_file_name, is_local,
                  fileE.Row._fields, rows)

    log_data['message'] = 'Finished file E generation'
    logger.info(log_data)
Code example #11
def generate_f_file(submission_id, job_id, timestamped_name, upload_file_name, is_local):
    """Write rows from fileF.generate_f_rows to an appropriate CSV."""

    logger.debug('Starting file F generation')

    with job_context(job_id):
        logger.debug('Calling generate_f_rows')
        rows_of_dicts = fileF.generate_f_rows(submission_id)
        header = [key for key in fileF.mappings]    # keep order
        body = []
        for row in rows_of_dicts:
            body.append([row[key] for key in header])

        logger.debug('Writing file F CSV')
        write_csv(timestamped_name, upload_file_name, is_local, header, body)

    logger.debug('Finished file F generation')
Code example #12
def generate_e_file(task, submission_id, job_id, timestamped_name,
                    upload_file_name, is_local):
    """Write file E to an appropriate CSV."""
    with job_context(task, job_id) as session:
        d1 = session.\
            query(AwardProcurement.awardee_or_recipient_uniqu).\
            filter(AwardProcurement.submission_id == submission_id).\
            distinct()
        d2 = session.\
            query(AwardFinancialAssistance.awardee_or_recipient_uniqu).\
            filter(AwardFinancialAssistance.submission_id == submission_id).\
            distinct()
        dunsSet = {r.awardee_or_recipient_uniqu for r in d1.union(d2)}
        dunsList = list(dunsSet)  # get an order

        rows = []
        for i in range(0, len(dunsList), 100):
            rows.extend(fileE.retrieveRows(dunsList[i:i + 100]))
        write_csv(timestamped_name, upload_file_name, is_local,
                  fileE.Row._fields, rows)