def generate_a_file(self, agency_code, file_path):
        """ Write file A to an appropriate CSV. """
        self.job.filename = file_path
        self.job.original_filename = file_path.split('/')[-1]
        self.sess.commit()

        log_data = {'message': 'Starting file A generation', 'message_type': 'ValidatorInfo', 'job_id': self.job.job_id,
                    'agency_code': agency_code, 'file_type': self.job.file_type.letter_name,
                    'start_date': self.job.start_date, 'end_date': self.job.end_date,
                    'filename': self.job.original_filename}
        logger.info(log_data)

        local_file = "".join([CONFIG_BROKER['d_file_storage_path'], self.job.original_filename])
        headers = [key for key in fileA.mapping]
        # add 3 months to account for fiscal year
        period_date = self.job.end_date + relativedelta(months=3)
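        # Worked example of the shift, assuming the U.S. federal fiscal calendar (which starts in
        # October): date(2020, 9, 30) + relativedelta(months=3) == date(2020, 12, 30), so the query
        # below runs with year=2020 and month=12, i.e. fiscal year 2020, period 12.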

        log_data['message'] = 'Writing A file CSV: {}'.format(self.job.original_filename)
        logger.info(log_data)

        query_utils = {"agency_code": agency_code, "period": period_date.month, "year": period_date.year,
                       "sess": self.sess}
        logger.debug({'query_utils': query_utils})

        # Generate the file and put in S3
        write_stream_query(self.sess, a_file_query(query_utils), local_file, self.job.filename, self.is_local,
                           header=headers)
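        # Note: this example passes an explicit column list via header= (built from fileA.mapping
        # above); the E and F examples further down instead set generate_headers=True, which
        # presumably derives the header row from the query's own column names.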
        log_data['message'] = 'Finished writing A file CSV: {}'.format(self.job.original_filename)
        logger.info(log_data)
Example #3
    def generate_f_file(self):
        """ Write rows from fileF.generate_f_rows to an appropriate CSV. """
        log_data = {
            'message': 'Starting file F generation',
            'message_type': 'ValidatorInfo',
            'job_id': self.job.job_id,
            'submission_id': self.job.submission_id,
            'file_type': 'sub_award'
        }
        logger.info(log_data)

        file_f_sql = fileE_F.generate_file_f_sql(self.job.submission_id)

        log_data['message'] = 'Writing F file CSV: {}'.format(
            self.job.original_filename)
        logger.info(log_data)
        # Generate the file and put in S3
        write_stream_query(self.sess,
                           file_f_sql,
                           self.job.original_filename,
                           self.job.filename,
                           self.is_local,
                           generate_headers=True,
                           generate_string=False)

        log_data['message'] = 'Finished writing F file CSV: {}'.format(
            self.job.original_filename)
        logger.info(log_data)
Example #4
    def generate_e_file(self):
        """ Write file E to an appropriate CSV. """
        log_data = {
            'message': 'Starting file E generation',
            'message_type': 'ValidatorInfo',
            'job_id': self.job.job_id,
            'submission_id': self.job.submission_id,
            'file_type': 'executive_compensation'
        }
        logger.info(log_data)

        file_e_sql = fileE_F.generate_file_e_sql(self.job.submission_id)

        log_data['message'] = 'Writing E file CSV: {}'.format(
            self.job.original_filename)
        logger.info(log_data)
        # Generate the file and put in S3
        write_stream_query(self.sess,
                           file_e_sql,
                           self.job.original_filename,
                           self.job.filename,
                           self.is_local,
                           generate_headers=True,
                           generate_string=False)

        log_data['message'] = 'Finished writing E file CSV: {}'.format(
            self.job.original_filename)
        logger.info(log_data)
Example #5
    def generate_d_file(self, file_path):
        """ Write file D1 or D2 to an appropriate CSV. """
        log_data = {
            'message': 'Starting file {} generation'.format(self.file_type), 'message_type': 'ValidatorInfo',
            'agency_code': self.file_generation.agency_code, 'agency_type': self.file_generation.agency_type,
            'start_date': self.file_generation.start_date, 'end_date': self.file_generation.end_date,
            'file_generation_id': self.file_generation.file_generation_id, 'file_type': self.file_type,
            'file_format': self.file_generation.file_format, 'file_path': file_path
        }
        logger.info(log_data)

        original_filename = file_path.split('/')[-1]
        local_file = "".join([CONFIG_BROKER['d_file_storage_path'], original_filename])

        # Prepare file data
        if self.file_type == 'D1':
            file_utils = fileD1
        elif self.file_type == 'D2':
            file_utils = fileD2
        else:
            raise ResponseException('Failed to generate_d_file with file_type:{} (must be D1 or D2).'.format(
                self.file_type))
        headers = [key for key in file_utils.mapping]
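        # The header row is just the keys of the D1/D2 mapping, so CSV column order follows the
        # order in which the mapping defines its fields (insertion order is guaranteed for dicts
        # on Python 3.7+).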

        log_data['message'] = 'Writing {} file {}: {}'.format(self.file_type, self.file_generation.file_format.upper(),
                                                              original_filename)
        logger.info(log_data)

        query_utils = {
            "sess": self.sess, "file_utils": file_utils, "agency_code": self.file_generation.agency_code,
            "agency_type": self.file_generation.agency_type, "start": self.file_generation.start_date,
            "end": self.file_generation.end_date}
        logger.debug({'query_utils': query_utils})

        # Generate the file locally, then place in S3
        write_stream_query(self.sess, d_file_query(query_utils), local_file, file_path, self.is_local, header=headers,
                           file_format=self.file_generation.file_format)
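        # file_format comes from the FileGeneration record (upper-cased in the log messages, e.g.
        # 'CSV'); the older variant of this method further down omits the argument and always logs
        # the output as CSV.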

        log_data['message'] = 'Finished writing {} file {}: {}'.format(self.file_type,
                                                                       self.file_generation.file_format.upper(),
                                                                       original_filename)
        logger.info(log_data)

        self.file_generation.file_path = file_path
        self.sess.commit()

        for job in self.sess.query(Job).filter_by(file_generation_id=self.file_generation.file_generation_id).all():
            copy_file_generation_to_job(job, self.file_generation, self.is_local)
    def generate_d_file(self, file_path):
        """ Write file D1 or D2 to an appropriate CSV. """
        log_data = {
            'message': 'Starting file {} generation'.format(self.file_type), 'message_type': 'ValidatorInfo',
            'agency_code': self.file_generation.agency_code, 'agency_type': self.file_generation.agency_type,
            'start_date': self.file_generation.start_date, 'end_date': self.file_generation.end_date,
            'file_generation_id': self.file_generation.file_generation_id, 'file_type': self.file_type,
            'file_path': file_path
        }
        logger.info(log_data)

        original_filename = file_path.split('/')[-1]
        local_file = "".join([CONFIG_BROKER['d_file_storage_path'], original_filename])

        # Prepare file data
        if self.file_type == 'D1':
            file_utils = fileD1
        elif self.file_type == 'D2':
            file_utils = fileD2
        else:
            raise ResponseException('Failed to generate_d_file with file_type:{} (must be D1 or D2).'.format(
                self.file_type))
        headers = [key for key in file_utils.mapping]

        log_data['message'] = 'Writing {} file CSV: {}'.format(self.file_type, original_filename)
        logger.info(log_data)

        query_utils = {
            "sess": self.sess, "file_utils": file_utils, "agency_code": self.file_generation.agency_code,
            "agency_type": self.file_generation.agency_type, "start": self.file_generation.start_date,
            "end": self.file_generation.end_date}
        logger.debug({'query_utils': query_utils})

        # Generate the file locally, then place in S3
        write_stream_query(self.sess, d_file_query(query_utils), local_file, file_path, self.is_local, header=headers)

        log_data['message'] = 'Finished writing {} file CSV: {}'.format(self.file_type, original_filename)
        logger.info(log_data)

        self.file_generation.file_path = file_path
        self.sess.commit()

        for job in self.sess.query(Job).filter_by(file_generation_id=self.file_generation.file_generation_id).all():
            copy_file_generation_to_job(job, self.file_generation, self.is_local)
    def generate_f_file(self):
        """ Write rows from fileF.generate_f_rows to an appropriate CSV. """
        log_data = {
            'message': 'Starting file F generation',
            'message_type': 'ValidatorInfo',
            'job_id': self.job.job_id,
            'submission_id': self.job.submission_id,
            'file_type': 'sub_award'
        }
        logger.info(log_data)

        f_file_contracts_query, f_file_grants_query = fileF.generate_f_file_queries(
            self.job.submission_id)

        # writing locally first without uploading
        log_data['message'] = 'Writing F file contracts to CSV: {}'.format(
            self.job.original_filename)
        logger.info(log_data)
        local_f_file = self.job.filename if self.is_local else self.job.original_filename
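        # Presumably, in local mode job.filename is already the final on-disk path, so the contracts
        # rows can be written straight to it; in S3 mode they go to a scratch file named after
        # original_filename, which the grants pass below reuses before uploading to job.filename.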
        write_query_to_file(self.sess,
                            f_file_contracts_query,
                            local_f_file,
                            generate_headers=True,
                            generate_string=False)

        # writing locally again but then uploading
        log_data['message'] = 'Writing F file grants to CSV: {}'.format(
            self.job.original_filename)
        logger.info(log_data)
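        # generate_headers=False here presumably keeps the grants rows from repeating the header
        # row already written by the contracts pass above.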
        write_stream_query(self.sess,
                           f_file_grants_query,
                           self.job.original_filename,
                           self.job.filename,
                           self.is_local,
                           generate_headers=False,
                           generate_string=False)

        log_data['message'] = 'Finished writing F file CSV: {}'.format(
            self.job.original_filename)
        logger.info(log_data)
    def generate_f_file(self):
        """ Write rows from fileF.generate_f_rows to an appropriate CSV. """
        log_data = {'message': 'Starting file F generation', 'message_type': 'ValidatorInfo', 'job_id': self.job.job_id,
                    'submission_id': self.job.submission_id, 'file_type': 'sub_award'}
        logger.info(log_data)

        f_file_contracts_query, f_file_grants_query = fileF.generate_f_file_queries(self.job.submission_id)

        # writing locally first without uploading
        log_data['message'] = 'Writing F file contracts to CSV: {}'.format(self.job.original_filename)
        logger.info(log_data)
        local_f_file = self.job.filename if self.is_local else self.job.original_filename
        write_query_to_file(self.sess, f_file_contracts_query, local_f_file, generate_headers=True,
                            generate_string=False)

        # writing locally again but then uploading
        log_data['message'] = 'Writing F file grants to CSV: {}'.format(self.job.original_filename)
        logger.info(log_data)
        write_stream_query(self.sess, f_file_grants_query, self.job.original_filename, self.job.filename,
                           self.is_local, generate_headers=False, generate_string=False)

        log_data['message'] = 'Finished writing F file CSV: {}'.format(self.job.original_filename)
        logger.info(log_data)
    def generate_e_file(self):
        """ Write file E to an appropriate CSV. """
        log_data = {'message': 'Starting file E generation', 'message_type': 'ValidatorInfo', 'job_id': self.job.job_id,
                    'submission_id': self.job.submission_id, 'file_type': 'executive_compensation'}
        logger.info(log_data)

        # Get the raw SQL to work with
        sql_dir = os.path.join(CONFIG_BROKER["path"], "dataactcore", "scripts", "raw_sql")
        with open(os.path.join(sql_dir, 'fileE.sql'), 'r') as file_e:
            file_e_sql = file_e.read()

        # Remove newlines (write_stream_query doesn't like them) and add the submission ID to the query
        file_e_sql = file_e_sql.replace('\n', ' ')
        file_e_sql = file_e_sql.format(self.job.submission_id)
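        # str.format fills a positional placeholder (e.g. '{}' or '{0}') that the raw fileE.sql
        # template is assumed to contain for the submission ID; any literal braces in the SQL
        # itself would need to be escaped as '{{' / '}}'.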

        log_data['message'] = 'Writing E file CSV: {}'.format(self.job.original_filename)
        logger.info(log_data)
        # Generate the file and put in S3
        write_stream_query(self.sess, file_e_sql, self.job.original_filename, self.job.filename, self.is_local,
                           generate_headers=True, generate_string=False)

        log_data['message'] = 'Finished writing E file CSV: {}'.format(self.job.original_filename)
        logger.info(log_data)
            submission = sess.query(Submission).filter_by(
                submission_id=submission_id).one()
            filename = 'SubID-{}_comments_{}.csv'.format(
                submission_id, filename_fyp_sub_format(submission))
            local_file = "".join([CONFIG_BROKER['broker_files'], filename])
            file_path = local_file if is_local else '{}/{}'.format(
                str(submission_id), filename)

            unpublished_query = sess.query(FileType.name, Comment.comment).\
                join(FileType, Comment.file_type_id == FileType.file_type_id).\
                filter(Comment.submission_id == submission_id)

            # Generate the file locally, then place in S3
            write_stream_query(sess,
                               unpublished_query,
                               local_file,
                               file_path,
                               is_local,
                               header=headers)

        logger.info('Finished generating unpublished comments files')

        # Published comments files
        logger.info('Copying published comments files')
        commented_pub_submissions = sess.query(
            PublishedComment.submission_id).distinct()

        for published_submission in commented_pub_submissions:
            submission_id = published_submission.submission_id
            submission = sess.query(Submission).filter_by(
                submission_id=submission_id).one()
            filename = 'SubID-{}_comments_{}.csv'.format(
                submission_id, filename_fyp_sub_format(submission))

        headers = ['File', 'Comment']
        commented_submissions = sess.query(Comment.submission_id).distinct()

        for submission in commented_submissions:
            # Preparing for the comments files
            filename = 'submission_{}_comments.csv'.format(submission.submission_id)
            local_file = "".join([CONFIG_BROKER['broker_files'], filename])
            file_path = local_file if is_local else '{}/{}'.format(str(submission.submission_id), filename)

            uncertified_query = sess.query(FileType.name, Comment.comment).\
                join(FileType, Comment.file_type_id == FileType.file_type_id).\
                filter(Comment.submission_id == submission.submission_id)

            # Generate the file locally, then place in S3
            write_stream_query(sess, uncertified_query, local_file, file_path, is_local, header=headers)

        logger.info('Finished generating uncertified comments files')

        # Certified comments files
        logger.info('Copying certified comments files')
        commented_cert_submissions = sess.query(CertifiedComment.submission_id).distinct()

        for certified_submission in commented_cert_submissions:
            submission_id = certified_submission.submission_id
            submission = sess.query(Submission).filter_by(submission_id=submission_id).one()
            filename = 'submission_{}_comments.csv'.format(str(submission_id))

            # See if we already have this certified file in the list
            existing_cert_history = sess.query(CertifiedFilesHistory).\
                filter(CertifiedFilesHistory.submission_id == submission_id,