def run():
    today = date.today()
    today_datetime = datetime.combine(today, datetime.min.time())
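    # the temporal path is a dated archive folder (today's ISO date fills the template;
    # example #4 below shows the same layout with the literal 'fsl/data/output/archive/{}/')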
    output_path = definitions.oppty_temporal_path.format(today.isoformat())

    batch_id = datetime.now().strftime("%Y%m%d%H%M%S%f")

    # make output directory if it doesn't exist
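    # (only needed when writing locally; with WRITE_MODE=S3 the output goes to S3 instead)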
    if os.environ.get('WRITE_MODE') != 'S3' and not os.path.exists(
            output_path):
        os.makedirs(output_path)

    # generate opportunity shape
    oppty_shape_file = output_path + 'OpportunityShape.csv'
    oppty_shape_gen.run(batch_id, definitions.source_oppty_shape,
                        oppty_shape_file, today_datetime)

    # generate temporal opportunity shape
    oppty_file = output_path + 'Opportunity.csv'
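    # keep only opportunities created within the last two years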
    cutoff_date = today_datetime - timedelta(days=365 * 2)
    oppty_shape_gen.run(
        batch_id, oppty_shape_file, oppty_file, today_datetime,
        lambda cv: dateutil.parser.parse(cv['CreatedDate__c']) >= cutoff_date)

    # generate accounts
    account_file = output_path + 'Account.csv'
    oppty_account_gen.run(batch_id, oppty_file, account_file)

    # generate contacts
    contact_file = output_path + 'Contact.csv'
    oppty_contact_gen.run(batch_id, account_file, contact_file)

    # generate users
    user_file = output_path + 'User.csv'
    manager_file = output_path + 'Manager.csv'
    oppty_user_gen.run(batch_id, oppty_file, user_file, manager_file)

    # generate forecasting quota
    forecasting_quota_file = output_path + 'ForecastingQuota.csv'
    oppty_forecasting_quota_gen.run(batch_id, user_file,
                                    forecasting_quota_file)

    # generate forecasting users
    forecasting_user_file = output_path + 'ForecastingUser.csv'
    oppty_forecasting_user_gen.run(batch_id, user_file, forecasting_user_file)

    # generate quota
    quota_file = output_path + 'Quota.csv'
    oppty_quota_gen.run(batch_id, user_file, quota_file)

    # generate cases
    case_file = output_path + 'Case.csv'
    oppty_case_gen.run(batch_id, account_file, case_file)

    # generate products and pricebook entries
    product_file = output_path + 'Product2.csv'
    pricebook_file = output_path + 'PricebookEntry.csv'
    oppty_product_gen.run(batch_id, oppty_file, product_file, pricebook_file)

    # generate opportunity line items
    line_item_file = output_path + 'OpportunityLineItem.csv'
    oppty_line_item_gen.run(batch_id, oppty_file, line_item_file, product_file,
                            pricebook_file)

    # generate opportunity history
    history_file = output_path + 'OpportunityHistory.csv'
    oppty_history_gen.run(batch_id, oppty_file, history_file, today_datetime)

    # generate events
    event_file = output_path + 'Event.csv'
    oppty_event_gen.run(batch_id, oppty_file, event_file, today_datetime)

    # generate tasks
    task_file = output_path + 'Task.csv'
    oppty_task_gen.run(batch_id, oppty_file, task_file, today_datetime)

    # generate leads
    lead_file = output_path + 'Lead.csv'
    oppty_lead_gen.run(batch_id, oppty_file, lead_file, account_file,
                       contact_file)

    # generate opportunities
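    # (oppty_gen reads and rewrites Opportunity.csv in place: input and output are the same file)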
    oppty_file = output_path + 'Opportunity.csv'
    oppty_gen.run(batch_id, oppty_file, oppty_file)

    # clean up accounts
    oppty_account_cleanup.run(account_file, account_file)

    # copy all files to the latest folder
    latest_output_path = definitions.oppty_latest_path

    if os.environ.get('WRITE_MODE') != 'S3' and not os.path.exists(
            latest_output_path):
        os.makedirs(latest_output_path)

    latest_oppty_shape_file = latest_output_path + 'OpportunityShape.csv'
    copy_data_file.run(oppty_shape_file, latest_oppty_shape_file)

    latest_oppty_file = latest_output_path + 'Opportunity.csv'
    copy_data_file.run(oppty_file, latest_oppty_file)

    latest_account_file = latest_output_path + 'Account.csv'
    copy_data_file.run(account_file, latest_account_file)

    latest_contact_file = latest_output_path + 'Contact.csv'
    copy_data_file.run(contact_file, latest_contact_file)

    latest_user_file = latest_output_path + 'User.csv'
    copy_data_file.run(user_file, latest_user_file)

    latest_manager_file = latest_output_path + 'Manager.csv'
    copy_data_file.run(manager_file, latest_manager_file)

    latest_forecasting_quota_file = latest_output_path + 'ForecastingQuota.csv'
    copy_data_file.run(forecasting_quota_file, latest_forecasting_quota_file)

    latest_forecasting_user_file = latest_output_path + 'ForecastingUser.csv'
    copy_data_file.run(forecasting_user_file, latest_forecasting_user_file)

    latest_quota_file = latest_output_path + 'Quota.csv'
    copy_data_file.run(quota_file, latest_quota_file)

    latest_case_file = latest_output_path + 'Case.csv'
    copy_data_file.run(case_file, latest_case_file)

    latest_product_file = latest_output_path + 'Product2.csv'
    copy_data_file.run(product_file, latest_product_file)

    latest_pricebook_file = latest_output_path + 'PricebookEntry.csv'
    copy_data_file.run(pricebook_file, latest_pricebook_file)

    latest_line_item_file = latest_output_path + 'OpportunityLineItem.csv'
    copy_data_file.run(line_item_file, latest_line_item_file)

    latest_history_file = latest_output_path + 'OpportunityHistory.csv'
    copy_data_file.run(history_file, latest_history_file)

    latest_event_file = latest_output_path + 'Event.csv'
    copy_data_file.run(event_file, latest_event_file)

    latest_task_file = latest_output_path + 'Task.csv'
    copy_data_file.run(task_file, latest_task_file)

    latest_lead_file = latest_output_path + 'Lead.csv'
    copy_data_file.run(lead_file, latest_lead_file)
Example #2
def run():
    """Loads input CSV files for MFG and creates External Ids for all records. Then maps all rows in the files using this newly generated External Ids.

    This data will be loaded either from local disk or S3 depending on the READ_MODE
    environment variable. If READ_MODE=S3, it will load the dataset file using the
    load_dataset_from_s3 function, otherwise it will read from local disk using
    the load_dataset_from_s3 function.

    Parameters
    ----------
    No parameters needed. Everything is specified in mfg_linker.definitions file.

    Returns
    -------
    None
        Generates files in output folders.
    """

    today = date.today()
    today_datetime = datetime.combine(today, datetime.min.time())
    output_path = definitions.mfg_temporal_path.format(today.isoformat())

    configs = json.loads(file_to_string(definitions.config_file))
    files_list = configs.get('configs')

    batch_id = datetime.now().strftime("%Y%m%d%H%M%S%f")

    # make output directory if it doesn't exist
    if os.environ.get('WRITE_MODE') != 'S3' and not os.path.exists(
            output_path):
        os.makedirs(output_path)

    out_files_list = []
    for f in files_list:
        # generate file
        source_file = definitions.mfg_source_path + f['mainInputFile']
        output_file = output_path + f['outputFile']
        mfg_file_gen.run(batch_id, source_file, output_file, f, today_datetime)
        out_files_list.append({
            'filePath': output_file,
            'fileName': f['outputFile']
        })

    # add status files
    out_files_list.append({
        'filePath': output_path + 'Contract.status.ALL.csv',
        'fileName': 'Contract.status.ALL.csv'
    })
    out_files_list.append({
        'filePath': output_path + 'Order.status.ALL.csv',
        'fileName': 'Order.status.ALL.csv'
    })
    out_files_list.append({
        'filePath': output_path + 'SalesAgreement.status.ALL.csv',
        'fileName': 'SalesAgreement.status.ALL.csv'
    })
    out_files_list.append({
        'filePath': output_path + 'SalesAgreement.status.APPROVED.csv',
        'fileName': 'SalesAgreement.status.APPROVED.csv'
    })
    out_files_list.append({
        'filePath': output_path + 'SalesAgreement.status.DISCARDED.csv',
        'fileName': 'SalesAgreement.status.DISCARDED.csv'
    })
    out_files_list.append({
        'filePath': output_path + 'SalesAgreement.status.CANCELLED.csv',
        'fileName': 'SalesAgreement.status.CANCELLED.csv'
    })
    out_files_list.append({
        'filePath': output_path + 'SalesAgreement.status.EXPIRED.csv',
        'fileName': 'SalesAgreement.status.EXPIRED.csv'
    })

    # copy all files to the latest folder
    latest_output_path = definitions.mfg_latest_path

    if os.environ.get('WRITE_MODE') != 'S3' and not os.path.exists(
            latest_output_path):
        os.makedirs(latest_output_path)

    for f in out_files_list:
        # first remove all Id columns
        mfg_file_gen.dropId(f['filePath'], f['filePath'])

        # then copy to 'latest'
        latest_data_file = latest_output_path + f['fileName']
        copy_data_file.run(f['filePath'], latest_data_file)
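The READ_MODE dispatch described in the docstring above might look roughly like the sketch below. This is a minimal illustration only: the local CSV fallback and the load_dataset_from_s3 stub are assumptions, not the project's actual loader.

import csv
import os

def load_dataset_from_s3(path):
    # stand-in for the S3 loader the docstring refers to
    raise NotImplementedError

def load_rows(path):
    # pick the reader based on the READ_MODE environment variable
    if os.environ.get('READ_MODE') == 'S3':
        return load_dataset_from_s3(path)
    with open(path, newline='') as f:
        return list(csv.DictReader(f))
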
Example #3
def run():
    data_gen = DataGenerator()
    today = date.today()
    today_datetime = datetime.combine(today, datetime.min.time())
    output_path = definitions.ss_temporal_path.format(today.isoformat())

    batch_id = datetime.now().strftime("%Y%m%d%H%M%S%f")

    # make output directory if it doesn't exist
    if os.environ.get('WRITE_MODE') != 'S3' and not os.path.exists(output_path):
        os.makedirs(output_path)

    # generate opportunity shape
    oppty_shape_file = output_path + 'OpportunityShape.csv'
    oppty_shape_gen.run(batch_id, definitions.ss_source_oppty_shape, oppty_shape_file, today_datetime)

    # generate temporal opportunity shape
    oppty_file = output_path + 'Opportunity.csv'
    cutoff_date = today_datetime - timedelta(days=365 * 2)
    oppty_shape_gen.run(batch_id, oppty_shape_file, oppty_file, today_datetime,
                        lambda cv: dateutil.parser.parse(cv['CreatedDate__c']) >= cutoff_date)

    # generate accounts
    account_file = output_path + 'Account.csv'
    ss_account_gen.run(batch_id, oppty_file, account_file)

    # generate contacts
    contact_file = output_path + 'Contact.csv'
    ss_contact_gen.run(batch_id, account_file, contact_file)

    # generate products and pricebook entries
    product_file = output_path + 'Product2.csv'
    pricebook_file = output_path + 'PricebookEntry.csv'
    oppty_product_gen.run(batch_id, oppty_file, product_file, pricebook_file)

    # generate opportunity line items
    line_item_file = output_path + 'OpportunityLineItem.csv'
    oppty_line_item_gen.run(batch_id, oppty_file, line_item_file, product_file, pricebook_file)

    # generate opportunity history
    history_file = output_path + 'OpportunityHistory.csv'
    oppty_history_gen.run(batch_id, oppty_file, history_file, today_datetime)

    # generate tasks from opptys
    sales_tasks_file = output_path + 'SalesTask.csv'
    sales_task_gen.run(batch_id, oppty_file, sales_tasks_file, today_datetime)

    # generate events
    sales_event_file = output_path + 'SalesEvent.csv'
    sales_event_gen.run(batch_id, oppty_file, sales_event_file, today_datetime)

    # generate leads
    lead_file = output_path + 'Lead.csv'
    oppty_lead_gen.run(batch_id, oppty_file, lead_file, account_file, contact_file)

    # generate opportunities
    oppty_file = output_path + 'Opportunity.csv'
    oppty_gen.run(batch_id, oppty_file, oppty_file)

    # clean up accounts
    oppty_account_cleanup.run(account_file, account_file)

    # copy all files to the latest folder
    latest_output_path = definitions.ss_latest_path

    if os.environ.get('WRITE_MODE') != 'S3' and not os.path.exists(latest_output_path):
        os.makedirs(latest_output_path)

    latest_oppty_file = latest_output_path + 'Opportunity.csv'
    copy_data_file.run(oppty_file, latest_oppty_file)

    latest_account_file = latest_output_path + 'Account.csv'
    copy_data_file.run(account_file, latest_account_file)

    latest_contact_file = latest_output_path + 'Contact.csv'
    copy_data_file.run(contact_file, latest_contact_file)

    latest_product_file = latest_output_path + 'Product2.csv'
    copy_data_file.run(product_file, latest_product_file)

    latest_pricebook_file = latest_output_path + 'PricebookEntry.csv'
    copy_data_file.run(pricebook_file, latest_pricebook_file)

    latest_line_item_file = latest_output_path + 'OpportunityLineItem.csv'
    copy_data_file.run(line_item_file, latest_line_item_file)

    latest_history_file = latest_output_path + 'OpportunityHistory.csv'
    copy_data_file.run(history_file, latest_history_file)

    latest_sales_task_file = latest_output_path + 'SalesTask.csv'
    copy_data_file.run(sales_tasks_file, latest_sales_task_file)

    latest_sales_event_file = latest_output_path + 'SalesEvent.csv'
    copy_data_file.run(sales_event_file, latest_sales_event_file)

    latest_lead_file = latest_output_path + 'Lead.csv'
    copy_data_file.run(lead_file, latest_lead_file)

    #### section from service starts ####
    
    # generate case shape
    case_shape_file = output_path + 'CaseShape.csv'
    case_shape_gen.run(batch_id, definitions.ss_source_case_shape, case_shape_file, today_datetime)

    # generate case
    case_file = output_path + 'Case.csv'
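    # keep only cases created within the last 60 days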
    cutoff_date = today_datetime - timedelta(days=30 * 2)
    account_dataset = data_gen.load_dataset('account', account_file, ['External_Id__c'])
    account_ids = account_dataset.unique('External_Id__c')
    # we are going to use the same accounts that were generated for Opportunity
    ss_case_gen.run(batch_id, case_shape_file, case_file,
                        lambda cv: dateutil.parser.parse(cv['CreatedDate__c']) >= cutoff_date and cv['Account.External_Id__c'] in account_ids)

    # generate users
    user_file = output_path + 'User.csv'
    manager_file = output_path + 'Manager.csv'
    ss_user_gen.run(batch_id, case_file, user_file, manager_file)

    # generate user presence
    user_presence_file = output_path + 'UserServicePresence.csv'
    case_user_presence_gen.run(batch_id, user_file, user_presence_file)

    ###### sub section from Sales begins ######
    
    # generate forecasting quota
    forecasting_quota_file = output_path + 'ForecastingQuota.csv'
    oppty_forecasting_quota_gen.run(batch_id, user_file, forecasting_quota_file)

    # generate forecasting users
    forecasting_user_file = output_path + 'ForecastingUser.csv'
    oppty_forecasting_user_gen.run(batch_id, user_file, forecasting_user_file)

    # generate quota
    quota_file = output_path + 'Quota.csv'
    oppty_quota_gen.run(batch_id, user_file, quota_file)
    
    ###### end of sub section from Sales ######

    # generate agent work
    agent_work_file = output_path + 'AgentWork.csv'
    agent_work_files = case_agent_work_gen.run(batch_id, case_file, agent_work_file, today_datetime)

    # generate service tasks from cases
    sales_tasks_dataset = data_gen.load_dataset('sales_tasks', sales_tasks_file, ['External_Id__c'])
    service_task_file = output_path + 'ServiceTask.csv'
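    # the sales task count (+1) is passed as an extra argument, presumably an offset so the
    # new records continue after the sales tasks generated above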
    service_task_gen.run(batch_id, case_file, service_task_file, today_datetime, len(sales_tasks_dataset.data) + 1)

    # generate service events from cases
    sales_events_dataset = data_gen.load_dataset('sales_events', sales_event_file, ['External_Id__c'])
    service_event_file = output_path + 'ServiceEvent.csv'
    service_event_gen.run(batch_id, case_file, service_event_file, today_datetime, len(sales_events_dataset.data) + 1)

    # generate case history
    history_file = output_path + 'CaseHistory.csv'
    case_history_gen.run(batch_id, case_file, history_file, today_datetime)

    # generate livechat transcripts
    livechat_file = output_path + 'LiveChatTranscript.csv'
    case_live_chat_gen.run(batch_id, case_file, livechat_file)

    livechat_events_file = output_path + 'LiveChatTranscriptEvent.csv'
    case_live_chat_event_gen.run(batch_id, livechat_file, livechat_events_file)

    # generate case articles
    
    article_file = output_path + 'CaseArticle.csv'
    case_article_gen.run(batch_id, case_file, article_file)

    ka_file = output_path + 'KCSArticle_ka.csv'
    case_knowledge_article_gen.run(batch_id, article_file, ka_file)

    kav_file = output_path + 'KCSArticle_kav.csv'
    case_knowledge_article_version_gen.run(batch_id, article_file, kav_file)

    ka_data_cat_file = output_path + 'KCSArticle_DataCategorySelection.csv'
    case_knowledge_article_data_cat_gen.run(batch_id, article_file, ka_data_cat_file)

    ka_votestat_file = output_path + 'KCSArticle_VoteStat.csv'
    case_knowledge_article_votestat_gen.run(batch_id, article_file, ka_votestat_file)

    ka_viewstat_file = output_path + 'KCSArticle_ViewStat.csv'
    case_knowledge_article_viewstat_gen.run(batch_id, article_file, ka_viewstat_file)

    # end of generate case articles

    # copy all files to the latest folder
    
    latest_case_file = latest_output_path + 'Case.csv'
    copy_data_file.run(case_file, latest_case_file)

    latest_user_file = latest_output_path + 'User.csv'
    copy_data_file.run(user_file, latest_user_file)

    latest_manager_file = latest_output_path + 'Manager.csv'
    copy_data_file.run(manager_file, latest_manager_file)

    latest_forecasting_quota_file = latest_output_path + 'ForecastingQuota.csv'
    copy_data_file.run(forecasting_quota_file, latest_forecasting_quota_file)

    latest_forecasting_user_file = latest_output_path + 'ForecastingUser.csv'
    copy_data_file.run(forecasting_user_file, latest_forecasting_user_file)

    latest_quota_file = latest_output_path + 'Quota.csv'
    copy_data_file.run(quota_file, latest_quota_file)

    latest_user_presence_file = latest_output_path + 'UserServicePresence.csv'
    copy_data_file.run(user_presence_file, latest_user_presence_file)

    for index, aw in enumerate(agent_work_files):
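        # the first agent work file keeps the plain AgentWork.csv name; later files get an index suffix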
        latest_agent_work_file = latest_output_path + 'AgentWork-' + str(index) + '.csv'
        if index == 0:
            latest_agent_work_file = latest_output_path + 'AgentWork.csv'
        copy_data_file.run(aw, latest_agent_work_file)

    latest_service_task_file = latest_output_path + 'ServiceTask.csv'
    copy_data_file.run(service_task_file, latest_service_task_file)

    latest_service_event_file = latest_output_path + 'ServiceEvent.csv'
    copy_data_file.run(service_event_file, latest_service_event_file)

    latest_history_file = latest_output_path + 'CaseHistory.csv'
    copy_data_file.run(history_file, latest_history_file)

    latest_livechat_file = latest_output_path + 'LiveChatTranscript.csv'
    copy_data_file.run(livechat_file, latest_livechat_file)

    latest_livechat_events_file = latest_output_path + 'LiveChatTranscriptEvent.csv'
    copy_data_file.run(livechat_events_file, latest_livechat_events_file)

    latest_article_file = latest_output_path + 'CaseArticle.csv'
    copy_data_file.run(article_file, latest_article_file)

    latest_ka_file = latest_output_path + 'KCSArticle_ka.csv'
    copy_data_file.run(ka_file, latest_ka_file)

    latest_kav_file = latest_output_path + 'KCSArticle_kav.csv'
    copy_data_file.run(kav_file, latest_kav_file)

    latest_ka_data_cat_file = latest_output_path + 'KCSArticle_DataCategorySelection.csv'
    copy_data_file.run(ka_data_cat_file, latest_ka_data_cat_file)

    latest_ka_votestat_file = latest_output_path + 'KCSArticle_VoteStat.csv'
    copy_data_file.run(ka_votestat_file, latest_ka_votestat_file)

    latest_ka_viewstat_file = latest_output_path + 'KCSArticle_ViewStat.csv'
    copy_data_file.run(ka_viewstat_file, latest_ka_viewstat_file)
Example #4
def run():
    today = date.today()
    today_datetime = datetime.combine(today, datetime.min.time())
    source_path = 'fsl/data/input/'
    output_path = 'fsl/data/output/archive/{}/'.format(today.isoformat())

    batch_id = datetime.now().strftime("%Y%m%d%H%M%S%f")

    # make output directory if it doesn't exist
    if os.environ.get('WRITE_MODE') != 'S3' and not os.path.exists(
            output_path):
        os.makedirs(output_path)

    account_file = 'Account.csv'
    assigned_resource_file = 'AssignedResource.csv'
    case_file = 'Case.csv'
    operating_hours_file = 'OperatingHours.csv'
    pricebook_entry_file = 'PricebookEntry.csv'
    product_file = 'Product2.csv'
    product_consumed_file = 'ProductConsumed.csv'
    resource_absence_file = 'ResourceAbsence.csv'
    service_appointment_file = 'ServiceAppointment.csv'
    service_resource_file = 'ServiceResource.csv'
    service_territory_file = 'ServiceTerritory.csv'
    time_slot_file = 'TimeSlot.csv'
    user_file = 'User.csv'
    work_order_file = 'WorkOrder.csv'
    work_type_file = 'WorkType.csv'
    profile_file = 'Profile.csv'

    # add external ids to source files
    add_external_id.run(source_path + account_file, 'Account',
                        output_path + account_file)
    add_external_id.run(source_path + assigned_resource_file,
                        'AssignedResource',
                        output_path + assigned_resource_file)
    add_external_id.run(source_path + case_file, 'Case',
                        output_path + case_file)
    add_external_id.run(source_path + operating_hours_file, 'OperatingHours',
                        output_path + operating_hours_file)
    add_external_id.run(source_path + pricebook_entry_file, 'PricebookEntry',
                        output_path + pricebook_entry_file)
    add_external_id.run(source_path + product_file, 'Product',
                        output_path + product_file)
    add_external_id.run(source_path + product_consumed_file,
                        'W_FSL_ProductConsumed',
                        output_path + product_consumed_file)
    add_external_id.run(source_path + resource_absence_file, 'ResourceAbsence',
                        output_path + resource_absence_file)
    add_external_id.run(source_path + service_appointment_file,
                        'ServiceAppointment',
                        output_path + service_appointment_file)
    add_external_id.run(source_path + service_resource_file, 'ServiceResource',
                        output_path + service_resource_file)
    add_external_id.run(source_path + service_territory_file,
                        'ServiceTerritory',
                        output_path + service_territory_file)
    add_external_id.run(source_path + time_slot_file, 'TimeSlot',
                        output_path + time_slot_file)
    add_external_id.run(source_path + user_file, 'User',
                        output_path + user_file)
    add_external_id.run(source_path + work_order_file, 'WorkOrder',
                        output_path + work_order_file)
    add_external_id.run(source_path + work_type_file, 'WorkType',
                        output_path + work_type_file)

    # generate product consumed
    product_consumed_gen.run(batch_id, output_path + product_consumed_file,
                             output_path + product_consumed_file,
                             output_path + pricebook_entry_file,
                             output_path + work_order_file)

    # generate assigned resources
    assigned_resource_gen.run(batch_id, output_path + assigned_resource_file,
                              output_path + assigned_resource_file,
                              output_path + service_resource_file,
                              output_path + service_appointment_file)

    # generate service appointments
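    # (the returned delta is reused below when generating resource absences)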
    delta = service_appointment_gen.run(
        batch_id, output_path + service_appointment_file,
        output_path + service_appointment_file, output_path + account_file,
        output_path + service_resource_file,
        output_path + service_territory_file, output_path + work_order_file,
        today_datetime)

    assigned_resource_gen.updateCreatedDate(
        output_path + assigned_resource_file,
        output_path + assigned_resource_file,
        output_path + service_appointment_file, today_datetime)

    # generate resource absences
    resource_absence_gen.run(batch_id, output_path + resource_absence_file,
                             output_path + resource_absence_file,
                             output_path + service_resource_file, delta)

    # generate work orders
    work_order_gen.run(batch_id, output_path + work_order_file,
                       output_path + work_order_file, output_path + case_file,
                       output_path + account_file,
                       output_path + work_type_file,
                       output_path + service_appointment_file, today_datetime)

    # generate time slots
    #time_slot_gen.run(batch_id, output_path + time_slot_file, output_path + time_slot_file,
    #                  output_path + operating_hours_file, today_datetime)

    # generate service resources
    service_resource_gen.run(batch_id, output_path + service_resource_file,
                             output_path + service_resource_file,
                             output_path + user_file)

    # generate cases
    case_gen.run(batch_id, output_path + case_file, output_path + case_file,
                 output_path + account_file)

    # generate accounts
    account_gen.run(batch_id, output_path + account_file,
                    output_path + account_file)

    # generate work types
    work_type_gen.run(batch_id, output_path + work_type_file,
                      output_path + work_type_file)

    # generate users
    user_gen.run(batch_id, output_path + user_file, output_path + user_file,
                 source_path + profile_file)

    # generate service territories
    #service_territory_gen.run(batch_id, output_path + service_territory_file, output_path + service_territory_file,
    #                          output_path + operating_hours_file)

    # generate operating hours
    #operating_hours_gen.run(batch_id, output_path + operating_hours_file, output_path + operating_hours_file)

    # generate pricebook entries
    pricebook_entry_gen.run(batch_id, output_path + pricebook_entry_file,
                            output_path + pricebook_entry_file,
                            output_path + product_file)

    # generate product
    product_gen.run(batch_id, output_path + product_file,
                    output_path + product_file)

    product_consumed_gen.update(output_path + product_consumed_file,
                                output_path + product_consumed_file,
                                output_path + work_order_file)

    # copy all files to the latest folder
    latest_output_path = 'fsl/data/output/latest/'

    if os.environ.get('WRITE_MODE') != 'S3' and not os.path.exists(
            latest_output_path):
        os.makedirs(latest_output_path)

    copy_data_file.run(output_path + product_consumed_file,
                       latest_output_path + product_consumed_file)
    copy_data_file.run(output_path + assigned_resource_file,
                       latest_output_path + assigned_resource_file)
    copy_data_file.run(output_path + service_appointment_file,
                       latest_output_path + service_appointment_file)
    copy_data_file.run(output_path + resource_absence_file,
                       latest_output_path + resource_absence_file)
    copy_data_file.run(output_path + work_order_file,
                       latest_output_path + work_order_file)
    #copy_data_file.run(output_path + time_slot_file, latest_output_path + time_slot_file)
    copy_data_file.run(output_path + service_resource_file,
                       latest_output_path + service_resource_file)
    copy_data_file.run(output_path + case_file, latest_output_path + case_file)
    copy_data_file.run(output_path + account_file,
                       latest_output_path + account_file)
    copy_data_file.run(output_path + work_type_file,
                       latest_output_path + work_type_file)
    copy_data_file.run(output_path + user_file, latest_output_path + user_file)
    #copy_data_file.run(output_path + service_territory_file, latest_output_path + service_territory_file)
    #copy_data_file.run(output_path + operating_hours_file, latest_output_path + operating_hours_file)
    copy_data_file.run(output_path + pricebook_entry_file,
                       latest_output_path + pricebook_entry_file)
    copy_data_file.run(output_path + product_file,
                       latest_output_path + product_file)
Example #5
def run():
    today = date.today()
    today_datetime = datetime.combine(today, datetime.min.time())
    output_path = definitions.case_temporal_path.format(today.isoformat())

    batch_id = datetime.now().strftime("%Y%m%d%H%M%S%f")

    # make output directory if it doesn't exist
    if os.environ.get('WRITE_MODE') != 'S3' and not os.path.exists(
            output_path):
        os.makedirs(output_path)

    # generate case shape
    case_shape_file = output_path + 'CaseShape.csv'
    case_shape_gen.run(batch_id, definitions.source_case_shape,
                       case_shape_file, today_datetime)

    # generate case
    case_file = output_path + 'Case.csv'
    cutoff_date = today_datetime - timedelta(days=30 * 2)
    case_gen.run(
        batch_id, case_shape_file, case_file,
        lambda cv: dateutil.parser.parse(cv['CreatedDate__c']) >= cutoff_date)

    # generate accounts
    account_file = output_path + 'Account.csv'
    case_account_gen.run(batch_id, case_file, account_file)

    # generate contacts
    contact_file = output_path + 'Contact.csv'
    case_contact_gen.run(batch_id, account_file, contact_file)

    # generate users
    user_file = output_path + 'User.csv'
    manager_file = output_path + 'Manager.csv'
    case_user_gen.run(batch_id, case_file, user_file, manager_file)

    # generate user presence
    user_presence_file = output_path + 'UserServicePresence.csv'
    case_user_presence_gen.run(batch_id, user_file, user_presence_file)

    # generate agent work
    agent_work_file = output_path + 'AgentWork.csv'
    agent_work_files = case_agent_work_gen.run(batch_id, case_file,
                                               agent_work_file, today_datetime)

    # generate opportunities
    oppty_file = output_path + "Opportunity.csv"
    case_oppty_gen.run(batch_id, account_file, oppty_file, case_shape_file)

    # generate events
    event_file = output_path + 'Event.csv'
    case_event_gen.run(batch_id, case_file, event_file, today_datetime)

    # generate tasks
    task_file = output_path + 'Task.csv'
    case_task_gen.run(batch_id, case_file, task_file, today_datetime)

    # generate case history
    history_file = output_path + 'CaseHistory.csv'
    case_history_gen.run(batch_id, case_file, history_file, today_datetime)

    # generate livechat transcripts
    livechat_file = output_path + 'LiveChatTranscript.csv'
    case_live_chat_gen.run(batch_id, case_file, livechat_file)

    livechat_events_file = output_path + 'LiveChatTranscriptEvent.csv'
    case_live_chat_event_gen.run(batch_id, livechat_file, livechat_events_file)

    # generate case articles
    article_file = output_path + 'CaseArticle.csv'
    case_article_gen.run(batch_id, case_file, article_file)

    ka_file = output_path + 'KCSArticle_ka.csv'
    case_knowledge_article_gen.run(batch_id, article_file, ka_file)

    kav_file = output_path + 'KCSArticle_kav.csv'
    case_knowledge_article_version_gen.run(batch_id, article_file, kav_file)

    ka_data_cat_file = output_path + 'KCSArticle_DataCategorySelection.csv'
    case_knowledge_article_data_cat_gen.run(batch_id, article_file,
                                            ka_data_cat_file)

    ka_votestat_file = output_path + 'KCSArticle_VoteStat.csv'
    case_knowledge_article_votestat_gen.run(batch_id, article_file,
                                            ka_votestat_file)

    ka_viewstat_file = output_path + 'KCSArticle_ViewStat.csv'
    case_knowledge_article_viewstat_gen.run(batch_id, article_file,
                                            ka_viewstat_file)

    # copy all files to the latest folder
    latest_output_path = definitions.case_latest_path

    if os.environ.get('WRITE_MODE') != 'S3' and not os.path.exists(
            latest_output_path):
        os.makedirs(latest_output_path)

    latest_case_shape_file = latest_output_path + 'CaseShape.csv'
    copy_data_file.run(case_shape_file, latest_case_shape_file)

    latest_case_file = latest_output_path + 'Case.csv'
    copy_data_file.run(case_file, latest_case_file)

    latest_account_file = latest_output_path + 'Account.csv'
    copy_data_file.run(account_file, latest_account_file)

    latest_contact_file = latest_output_path + 'Contact.csv'
    copy_data_file.run(contact_file, latest_contact_file)

    latest_user_file = latest_output_path + 'User.csv'
    copy_data_file.run(user_file, latest_user_file)

    latest_manager_file = latest_output_path + 'Manager.csv'
    copy_data_file.run(manager_file, latest_manager_file)

    latest_user_presence_file = latest_output_path + 'UserServicePresence.csv'
    copy_data_file.run(user_presence_file, latest_user_presence_file)

    for index, aw in enumerate(agent_work_files):
        latest_agent_work_file = latest_output_path + 'AgentWork-' + str(
            index) + '.csv'
        if index == 0:
            latest_agent_work_file = latest_output_path + 'AgentWork.csv'
        copy_data_file.run(aw, latest_agent_work_file)

    latest_oppty_file = latest_output_path + "Opportunity.csv"
    copy_data_file.run(oppty_file, latest_oppty_file)

    latest_event_file = latest_output_path + 'Event.csv'
    copy_data_file.run(event_file, latest_event_file)

    latest_task_file = latest_output_path + 'Task.csv'
    copy_data_file.run(task_file, latest_task_file)

    latest_history_file = latest_output_path + 'CaseHistory.csv'
    copy_data_file.run(history_file, latest_history_file)

    latest_livechat_file = latest_output_path + 'LiveChatTranscript.csv'
    copy_data_file.run(livechat_file, latest_livechat_file)

    latest_livechat_events_file = latest_output_path + 'LiveChatTranscriptEvent.csv'
    copy_data_file.run(livechat_events_file, latest_livechat_events_file)

    latest_article_file = latest_output_path + 'CaseArticle.csv'
    copy_data_file.run(article_file, latest_article_file)

    latest_ka_file = latest_output_path + 'KCSArticle_ka.csv'
    copy_data_file.run(ka_file, latest_ka_file)

    latest_kav_file = latest_output_path + 'KCSArticle_kav.csv'
    copy_data_file.run(kav_file, latest_kav_file)

    latest_ka_data_cat_file = latest_output_path + 'KCSArticle_DataCategorySelection.csv'
    copy_data_file.run(ka_data_cat_file, latest_ka_data_cat_file)

    latest_ka_votestat_file = latest_output_path + 'KCSArticle_VoteStat.csv'
    copy_data_file.run(ka_votestat_file, latest_ka_votestat_file)

    latest_ka_viewstat_file = latest_output_path + 'KCSArticle_ViewStat.csv'
    copy_data_file.run(ka_viewstat_file, latest_ka_viewstat_file)