def main():
    sf.default_mode = 'bulk'
    exp_record_type_id = sf.get_recordtype_map()[('LedgerEntry__c',
                                                  'Expenses')].Id
    records = sf.select("""
    SELECT Id, GLCode__c, RecordTypeId, RecordType.Name, HighLevelType__c
    FROM LedgerEntry__c
    WHERE GLCode__c = '50282'
    AND (RecordType.Name != 'Expenses' OR HighLevelType__c != 'Expenses')
    """,
                        return_type='generator')

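    # Yield only the records that actually change, so unchanged rows are not re-sent.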
    def updated_records():
        for record in records:
            snapshot = record.copy()
            record['RecordTypeId'] = exp_record_type_id
            record['HighLevelType__c'] = 'Expenses'
            if snapshot != record:
                yield record

    updated = list(updated_records())

    prompt('Ready?')

    with sf.bypass_settings():
        sf.update(updated)

    return
Example #2
    def clone_user(self, source_sf, target_sf, user, inactive_email=True):
        source_id = user['Id']
        del user['Id']
        if inactive_email and not user['Email'].endswith('.inactive'):
            user['Email'] = user['Email'] + '.inactive'
        elif not inactive_email and user['Email'].endswith('.inactive'):
            user['Email'] = user['Email'].replace('.inactive', '')
        
        old_suffix = '.' + source_sf.instance
        new_suffix = '.' + target_sf.instance
        if user['Username'].endswith(old_suffix):
            user['Username'] = user['Username'].replace(old_suffix, new_suffix)
        if user['SourceSystemId__c'] in ('', None):
            user['SourceSystemId__c'] = prompt(f'Set SourceSystemId for user: {user["Username"]}')
            if prompt('Update Source system?', boolean=True):
                source_sf.update([{'Id': source_id, 'SourceSystemId__c': user['SourceSystemId__c']}])
        
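        # Compare field metadata between the two orgs so only fields writable in the target (plus Id) are copied.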
        source_object_desc = source_sf.get_object_description('User')
        target_object_desc = target_sf.get_object_description('User')
        target_fields_map = {item.name: item for item in target_object_desc.fields}

        source_fields = {
            item.name
            for item in source_object_desc.fields
            if item.name == "Id"
            or (item.name in target_fields_map
                and target_fields_map[item.name]["createable"]
                and (target_fields_map[item.name]["updateable"]
                     or (not target_fields_map[item.name]["nillable"]
                         and not target_fields_map[item.name]["defaultedOnCreate"])))
        }
        user = {k:v for k,v in user.items() if k in source_fields}
        permission_sets = source_sf.select("SELECT *, PermissionSet.Label FROM PermissionSetAssignment WHERE AssigneeId IN @source_id", return_type='flat')
        perm_labels = [item['PermissionSet.Label'] for item in permission_sets]
        target_permission_set_map = {
            item.Label: item.Id
            for item in
            target_sf.select("SELECT Id, Label FROM PermissionSet WHERE Label IN @perm_labels")
        }
        for item in permission_sets:
            item['PermissionSetId'] = target_permission_set_map[item['PermissionSet.Label']]
        groups = source_sf.select("SELECT * FROM GroupMember WHERE UserOrGroupId IN @source_id", return_type='flat')


        new_user = target_sf.upsert('User', [user], 'SourceSystemId__c')[0]
        new_user_id = new_user['Id']
        for item in permission_sets:
            item['AssigneeId'] = new_user_id
        for item in groups:
            item['UserOrGroupId'] = new_user_id

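        # If the upsert updated an existing user, clear their old group memberships and permission set assignments before re-adding them.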
        if new_user.sf_operation == 'updated':
            target_sf.delete(target_sf.select("SELECT Id FROM GroupMember WHERE UserOrGroupId = @new_user_id"))
            target_sf.delete(target_sf.select("SELECT Id FROM PermissionSetAssignment WHERE AssigneeId = @new_user_id"))

        target_sf.insert('PermissionSetAssignment', permission_sets)
        target_sf.insert('GroupMember', groups)
        return new_user
Example #3
def main(sessions, do_fix=False):
    import functions.pandas_helpers as pdh
    from functions.prompt import prompt
    from classes.py_threading import ThreadPool
    threading = ThreadPool()
    sessions = {
        username: sf
        for username, sf in sessions.items() if username in instances_to_run
    }
    return_string = ""

    objects = [
        'EventDateTime__c', 'Deal__c', 'TicketScale__c', 'Deduction__c',
        'LedgerEntry__c'
    ]
    if do_fix is True and prompt("Delete orphan records?",
                                 boolean=True) is False:
        do_fix = False

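    # Run one async worker per instance: query each object for orphaned touring records and optionally delete them.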
    @threading.run_async
    def inner(sf):
        if do_fix:
            sf.bypass_prod_operation_approval()
        results = {}
        for obj in objects:
            obj_fields = {
                f.name: f
                for f in sf.get_object_description(obj).fields
            }
            fields = ['Name', 'EventName__c']
            fields = [f for f in fields if f in obj_fields]
            fields_str = ', '.join(fields)
            query = f"""
            SELECT Id, {fields_str}, Event__c, CreatedBy.Name, CreatedDate, LastModifiedBy.Name, LastModifiedDate
            FROM {obj}
            WHERE IsTouringApp__c = True
            AND Event__c = NULL
            AND CreatedDate >= THIS_YEAR
            """
            results[obj] = threading.new(sf.select, query, mode='bulk')
        for obj in objects:
            records = results[obj].result()
            if len(records) > 0:
                if do_fix:
                    sf.add_bypass_settings()
                    sf.delete(records, mode='bulk')
                    sf.remove_bypass_settings()
        return results

    # Run for each Salesforce instance
    results = {sf.instance: inner(sf) for sf in sessions.values()}
    for sf in sessions.values():
        result = results[sf.instance].result()
        records_by_obj = {}
        for obj in objects:
            records = result[obj].result()
            records_by_obj[obj] = records
            return_string += f"\n{sf.instance.upper()} has {len(records)} {obj} orphaned records with IsTouringApp__c == True"

        pdh.to_excel(records_by_obj,
                     f'({sf.instance}) Orphaned Event Child Records.xlsx')
    return return_string
Example #4
 def sosl_prompt(self, query, object_name, search_in_fields=None, return_fields=None):
     records = self.sf.sosl(query, object_name, search_in_fields, return_fields)
     selected_record_ids = []
     if len(records) == 1:
         selected_record_ids.append(records[0].Id)
     elif len(records) > 1:
         options = {record['Name']: record for record in records[0:9]}
         options['None of the above'] = None
         record_to_add = prompt(f'SOSL Search performed for "{query}". Which record would you like to clone?', options=options)
         if record_to_add is not None:
             selected_record_ids.append(record_to_add.Id)
     return selected_record_ids
def main():

    issues = sf.select("""
    select Event__c, Type__c, count(Id) cnt, sum(OfferRate__c) OfferRate, sum(InHouseRate__c) InHouseRate
    from ledgerentrybreakout__c
    where type__c IN('TSO','TSO Adjustment')
    and event__r.istouringapp__c = true
    and createddate >= LAST_YEAR
    and Event__c != NULL
    group by event__c, type__c
    having count(id) > 1
    """)

    records = sf.select("""
    SELECT Id, Event__r.TourLeg__r.Tour__c, Event__c, Event__r.EventTitle__c, Type__c, OfferRate__c, InHouseRate__c, CreatedDate
    FROM LedgerEntryBreakout__c
    WHERE Event__c IN @issues.Event__c
    AND Event__r.IsTouringApp__c = True
    AND type__c IN('TSO','TSO Adjustment')
    and createddate >= LAST_YEAR
    and Event__c != NULL
    ORDER BY Event__c, CreatedDate ASC
    """,
                        return_type='flat')

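    # Keep the earliest breakout per Event/Type, roll the summed totals onto it, and mark the rest for deletion.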
    to_update = []
    to_delete = []
    for issue in issues:
        evt = issue['Event__c']
        typ = issue['Type__c']
        evt_records = [
            i for i in records if i['Event__c'] == evt and i['Type__c'] == typ
        ]
        first = evt_records[0].copy()
        remaining = evt_records[1:]
        first['PriorOfferRate'] = first['OfferRate__c']
        first['PriorInHouseRate'] = first['InHouseRate__c']
        first['OfferRate__c'] = issue['OfferRate']
        first['InHouseRate__c'] = issue['InHouseRate']
        to_update.append(first)
        to_delete.extend(remaining)

    pdh.to_excel(
        {
            'Issues': issues,
            'Rome Records': records,
            'To Update': to_update,
            'To Delete': to_delete,
        }, 'TSO Dupes')
    if (to_update or to_delete) and prompt('Ready?', boolean=True):
        sf.delete(to_delete, batch_size=5)
        sf.update(to_update, batch_size=5)
    return
Example #6
def main():
    # Picklist Error handling: {'statusCode': 'INVALID_OR_NULL_FOR_RESTRICTED_PICKLIST', 'message': 'Language: bad value for restricted picklist field: en_AU', 'fields': ['LanguageLocaleKey']}
    usernames = [
        # "[email protected]",
        # "[email protected]",
        # "[email protected]",
        # "*****@*****.**",
        # "*****@*****.**",
        "*****@*****.**",
        # "*****@*****.**",
    ]
    sessions = [Salesforce_API(item) for item in usernames if item is not None]
    # users = pd.read_excel('/Users/daniel.hicks_1/Documents/Rome/Rome Downloads/Import Sandbox Test Users.xlsx').query("Filter == True")
    # users = pd.read_excel('/Users/daniel.hicks_1/Documents/Rome/Rome Downloads/Australia Master Data/PROD Release Users.xlsx').query("Filter == True")
    users = pd.read_excel(
        '/Users/daniel.hicks_1/Documents/Rome/Rome Downloads/UK Master Data/Rome UK Users.xlsx'
    ).query("Filter == True")
    if prompt("Look up EOS Ids for these users?", boolean=True):
        sql = SQL_Server_API(sql_creds)
        emails_str = "', '".join(
            users['Email'].str.strip().str.lower().tolist())
        eos_users = (pd.DataFrame(
            sql.query(
                f"SELECT Id, Name, Email FROM IEmsUser WHERE Email IS NULL OR LOWER(Email) IN ('{emails_str}')"
            )).assign(Email=lambda df: df['Email'].str.strip().str.lower()))
        eos_email_map = eos_users.dropna(
            subset=['Email']).set_index('Email').to_dict('index')
        eos_name_map = eos_users.fillna('').query(
            "Email == ''").drop_duplicates(
                subset=['Name'], keep=False).set_index('Name').to_dict('index')

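        # Match each user to an EOS record by email first, falling back to an unambiguous name match.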
        def get_eos_id(row):
            email, name = str(row['Email']).strip().lower(), str(
                row['Name']).strip()
            matching_record = eos_email_map.get(email,
                                                eos_name_map.get(name, None))
            if matching_record:
                return matching_record['Id']
            return None

        if len(eos_users) > 0:
            users['EOSId__c'] = users.apply(get_eos_id, axis=1)
    for session in sessions:
        session.create_users(
            users,
            defaults={
                # 'Country': 'United Kingdom'
                # , 'DEFAULTCURRENCYISOCODE': 'GBP'
                # , 'TIMEZONESIDKEY': 'GMT'
            })
Example #7
 def set_sourcesystemid(row, counter=None):
     val = '{}{}'.format(row.FirstName[0], row.LastName).replace(' ','').replace("'",'') + ('' if counter is None else str(counter))
     if val in usermap_sourcesystemid:
         sf_email = usermap_sourcesystemid[val].Email
         if sf_email == row.Email or sf_email.replace('.invalid','') == row.Email.replace('.invalid','') or sf_email.replace(f'.{self.sf.instance}','') == row.Email.replace(f'.{self.sf.instance}',''):
             return val
         else:
             resp = prompt(f'({self.sf.instance}) For user {row.FirstName} {row.LastName}, found a User record with matching SourceSystemId__c, but a different Email. Should this be matched?\nFile:       {row.Email}\nSalesforce: {sf_email}', boolean=True)
             if resp:
                 return val
             else:
                 return set_sourcesystemid(row, (2 if counter is None else counter + 1))
     else:
         return val
def find_venue_dupes():
    rome_venues = sf.select("""
    SELECT Id, EOSId__c, Name, VenueName__c, BillingCity
    FROM Account
    WHERE EOSId__c <> NULL
    AND RecordType.Name = 'Venue'
    """,
                            mode='bulk')
    eos_venues = sql.query("""
    SELECT 'Venue-' + CAST(Id AS VARCHAR) AS EOSId__c, Name
    FROM Venue
    """)

    eos_venues_by_name = {item['Name']: item for item in eos_venues}

    issues = []
    for venue in rome_venues:
        issue1 = eos_venues_by_name.get(venue.VenueName__c, None)
        issue2 = eos_venues_by_name.get(venue.Name, None)
        if issue1 is not None and issue1['EOSId__c'] != venue['EOSId__c']:
            issues.append({
                'RomeId': venue.Id,
                'RomeLink': f'https://{sf.simple.sf_instance}/{venue.Id}',
                'Name': venue.Name,
                'CurrRomeEOSId': venue.EOSId__c,
                'ChangeRomeEOSIdTo': issue1['EOSId__c'],
            })
        if issue2 is not None and issue2['EOSId__c'] != venue['EOSId__c']:
            issues.append({
                'RomeId': venue.Id,
                'RomeLink': f'https://{sf.simple.sf_instance}/{venue.Id}',
                'Name': venue.Name,
                'CurrRomeEOSId': venue.EOSId__c,
                'ChangeRomeEOSIdTo': issue2['EOSId__c'],
            })
    if issues:
        pdh.to_excel(issues, f'{sf.instance} Rome EOS Venue Issues.xlsx')
    do_fix = len(issues) > 0 and prompt('Fix the venues?', boolean=True)
    if do_fix:
        update_records = pd.DataFrame(issues).rename(
            columns={
                'RomeId': 'Id',
                'ChangeRomeEOSIdTo': 'EOSId__c'
            }).drop(columns=['Name'])
        sf.update(update_records)
    return
Example #9
def main():
    # a = Cache.set('abc', {'a':1})
    # b = Cache.set(('abc', 123), {'a':1})

    def func_value():
        print('waiting...')
        time.sleep(5)
        print('done!')
        return 12

    # c = Cache.define('defg', func_value, calc_on_func_change=func_value)
    d = Cache.define(
        'defg',
        func_value,
        calc_if_true=lambda: prompt('Do you want to re-calculate the value?',
                                    boolean=True))

    return
Example #10
def main(can_terminate=False):
    folders = get_folder_info()
    # deployment_folders = list(folders.keys()) + (['Exit'] if can_terminate else [])
    folder_options = {
        f'{f.name} ({len(f.scripts)})': f
        for f in folders.values()
    }
    if can_terminate:
        folder_options['Exit'] = 'Exit'
    while True:
        folder = prompt(
            'Please select the script set you would like to execute:',
            options=folder_options)
        if folder == 'Exit': break
        username_group = select_username_group()
        for script in folder.scripts:
            console.new_line(f'Executing {script.name}...')
            run_job(script.path, script.task, username_group)
def venue_aa_code_update():

    staged = sf.fupdate(
        f"""
    UPDATE Event__c
    SET EvenkoAACode__c = Venue__r.EvenkoAACodePrefix__c + format_date_yymmdd(EventFirstDate__c) + IF(
            Venue__r.EvenkoAACodePrefix__c == 'WE'
            , REPLACE(Venue__r.LNEMDID__c, 'AC', '')
            , '')
    WHERE Office__r.Division__c = 'Evenko'
    AND EvenkoAACode__c = NULL
    AND EventFirstDate__c != NULL
    AND Status__c = 'Confirmed'
    AND Venue__r.EvenkoAACodePrefix__c != NULL
    """,
        show_columns=['EventTitle__c', 'EventFirstDate__c', 'OfficeName__c'])

    pdh.to_excel(staged, 'Staged Evenko updates')

    if prompt('Update?', boolean=True):
        with sf.bypass_settings():
            sf.update(staged)
def main():
    
    # clone_emails = [
    #     '*****@*****.**',
    #     '*****@*****.**',
    #     '*****@*****.**',
    # ]
    # users = src.select("""
    # SELECT *.writable 
    # FROM User 
    # WHERE Email IN @clone_emails
    # """, return_type='flat')

    users = src.select("""
    SELECT *.writable 
    FROM User
    WHERE CreatedDate = TODAY
    -- WHERE UserRole.Name = 'System Administration'
    -- AND (Name LIKE '%Monica Serrato%'
    -- OR Name LIKE '%Diaz%'
    -- OR Name LIKE '%Oscar%'
    -- OR Name LIKE '%Carlos%'
    -- )
    """, return_type='flat')

    new_users = []
    for user in users:
        new_user = tar.clone_user(src, tar, user, inactive_email=False)
        new_users.append(new_user)
    
    if prompt('Reset user passwords?', boolean=True):
        for user in new_users:
            tar.reset_user_password(user)

    return
def select_username_group(username_group=None):
    if not username_group:
        username_group = prompt('Which sandbox group should be deployed to?',
                                options=list(username_groups.keys()))
    return username_group
def main():

    sf = Salesforce_API('*****@*****.**')

    events = sf.select(f"""
    SELECT Id, TourLeg__r.Tour__r.TourHeadliner__c, TourLeg__r.Tour__r.CurrencyIsoCode, CurrencyIsoCode, TourExchangeRate__c, RecordType.Name, EventTitle__c, PooledHeadlinerInclusiveGuarantees__c
    FROM Event__c
    WHERE IsTouringApp__c = True
    AND TourLeg__r.Tour__r.RecordType.Name = 'Booking'
    AND RecordType.Name = 'Sell Off'
    """,
                       return_type='dataframe')
    existing_deals = sf.select(f"""
    SELECT Id, Event__c, RecordType.Name, Artist__c, CurrencyIsoCode, DealExchangeRate__c, RecordType.Name, Type__c, DealType__c, DealCalculation__c, GuaranteeAmount__c
    FROM Deal__c
    WHERE Event__r.IsTouringApp__c = True
    AND Event__r.TourLeg__r.Tour__r.RecordType.Name = 'Booking'
    AND Event__r.RecordType.Name = 'Sell Off'
    """,
                               return_type='dataframe')

    new_deals = (
        events[[
            'Id', 'PooledHeadlinerInclusiveGuarantees__c',
            'TourLeg__r.Tour__r.CurrencyIsoCode', 'CurrencyIsoCode',
            'TourExchangeRate__c', 'TourLeg__r.Tour__r.TourHeadliner__c'
        ]].rename(
            columns={
                'Id': 'Event__c',
                # 'PooledHeadlinerInclusiveGuarantees__c': 'GuaranteeAmount__c',
                'CurrencyIsoCode': 'EventCurrencyIsoCode',
                'TourLeg__r.Tour__r.CurrencyIsoCode': 'CurrencyIsoCode',
                'TourExchangeRate__c': 'EventTourExchangeRate__c',
                'TourLeg__r.Tour__r.TourHeadliner__c': 'Artist__c',
            }).assign(
                **{
                    'RecordType.Name': 'Artist',
                    'Type__c': 'Primary Headliner',
                    'DealType__c': 'Flat',
                    'DealCalculation__c': 'Guarantee Only',
                }))
    new_deals[
        'DealExchangeRate__c'] = 1 / new_deals['EventTourExchangeRate__c']
    new_deals['GuaranteeAmount__c'] = (
        new_deals['PooledHeadlinerInclusiveGuarantees__c'] /
        new_deals['DealExchangeRate__c']).astype(float).round(2)
    new_deals['RecordTypeId'] = '01236000000nyfQAAQ'

    if len(existing_deals) > 0:
        diff = pdh.diff(
            existing_deals,
            new_deals,
            on=['Event__c'],
        )
        # changed = new_deals[(new_deals['Event__c'] in diff['Event__c'].tolist())]
        print('')
        # merged = existing_deals.merge(new_deals, on='Event__c', indicator=True)

    # diff = pdh.diff()

    pdh.to_excel(
        {
            'eventsbackup': events,
            'existing_deals': existing_deals,
            'new_deals': new_deals
            # , 'merged': merged
        },
        f'({sf.instance}) Inspect Artist Deals Backfill.xlsx')

    if prompt("Update?", boolean=True):
        sf.add_bypass_settings()
        sf.insert('Deal__c', new_deals)
        sf.remove_bypass_settings()

    return
def main(sessions, do_fix=False):
    first_session = sessions[0]
    for s in sessions:
        s.print_messages = False
    session = first_session

    files_list = [
        item for item in listdir("./resources/sf_update_files")
        if isdir("./resources/sf_update_files/" + item) == False
        and item != ".DS_Store" and not item.startswith("~$")
    ]
    files_list = sorted(files_list,
                        key=lambda item: -(path.getmtime(
                            "./resources/sf_update_files/" + item)))

    selected_file = prompt('\nThe following update files are available:',
                           files_list)

    rows = []
    # object_name = lastSettings["objectName"] if file_selection_input == "0" else None
    data_sheet_name = None

    # Settings defaults
    settings = ObjDict({
        "BATCH_SIZE": 2000,
        "OPERATION": None,
        "DO_UPSERT": None,  # Should be deprecated... use OPERATION instead
        "BYPASS_AUTOMATION": None,
        "EXT_ID": None,
        "PARALLEL_CONCURRENCY": True,
        "BULK_API_MODE": True
    })

    # Get data and settings from file
    if selected_file.endswith(".csv"):
        with open("./resources/sf_update_files/" + selected_file,
                  'r',
                  encoding='utf-8-sig') as file:
            reader = csv.DictReader(file)
            for row in reader:
                rows.append(row)
    elif selected_file.endswith(".xlsx"):
        file_path = './resources/sf_update_files/{}'.format(selected_file)
        xlsx_file = pd.ExcelFile(file_path)
        sheets = xlsx_file.sheet_names
        data_sheet_name = sheets[0]
        datadf = pd.read_excel(xlsx_file, data_sheet_name)
        settingsdf = pd.read_excel(
            xlsx_file, 'Settings') if 'Settings' in sheets else None

        # Fill nulls with blank strings
        datadf = datadf.fillna(value='')
        # Set column names to strings
        datadf.columns = datadf.columns.astype(str)
        # Set timestamp columns to string
        for col in datadf.select_dtypes(include=['datetime64']).columns.values:
            datadf[col] = datadf[col].astype(str)
        # Set numeric columns to zero-trimmed string
        for col in datadf.select_dtypes(
                include=['int64', 'float64']).columns.values:
            datadf[col] = datadf[col].astype(float).astype(str)
            datadf[col] = datadf[col].str.replace(r'\.0$', '', regex=True)  # only strip a trailing ".0"

        rows = datadf.to_dict('records')

        if settingsdf is not None:
            inputsettings = settingsdf.set_index('Field').to_dict('index')
            inputsettings = {
                key: val['Value']
                for key, val in inputsettings.items()
            }
            settings.update(inputsettings)

        # xlsx_file = xlrd.open_workbook("./resources/sf_update_files/" + selected_file)
        # sheets = xlsx_file.sheet_names()
        # data_sheet = xlsx_file.sheet_by_index(0)
        # data_sheet_name = sheets[0]
        # settings_sheet = xlsx_file.sheet_by_name("Settings") if "Settings" in sheets else None

        # headers = [str(v) for v in data_sheet.row_values(0)]
        # for row_num in range(1,data_sheet.nrows):
        #     new_row = dict()
        #     src_row = data_sheet.row_values(row_num)
        #     for col_num in range(0,len(headers)):
        #         new_row[headers[col_num]] = src_row[col_num]
        #     rows.append(new_row)

        # if settings_sheet is not None:
        #     for row_num in range(0, settings_sheet.nrows):
        #         src_row = settings_sheet.row_values(row_num)

        #         settings[src_row[0]] = True if src_row[1] == 1 else False if src_row[1] == 0 else src_row[1]
        pass
    else:
        print("No file")

    # Handle for deprecated DO_UPSERT setting
    if settings.DO_UPSERT is True and settings.OPERATION is None:
        settings.OPERATION = 'UPSERT'

    operation = str(
        settings.OPERATION).lower() if settings.OPERATION is not None else None

    # Try to detect Object name from record Ids in file
    # If no record Ids are present, try to use the name of the tab in the file we are loading
    # If no match is found, prompt the user for the object name
    rows_with_id = [r for r in rows if "Id" in r and r["Id"] != ""]
    source_field_names = {key for key in rows[0].keys()}
    if len(rows_with_id) > 0:
        object_name = session.get_object_name(rows_with_id[0]["Id"])
    else:
        all_object_names = [
            item["name"] for item in session.get_org_description()["sobjects"]
        ]
        object_names_in_file_name = [
            item for item in all_object_names
            if " " + item + " " in selected_file
        ]
        if data_sheet_name in all_object_names:
            object_name = data_sheet_name
        elif len(object_names_in_file_name) == 1:
            object_name = object_names_in_file_name[0]
        else:
            object_name = prompt(
                "\nWhat object are the records in this file for?")

    # lastSettings["objectName"] = object_name
    # with open(settingsLoc, 'w') as outfile:
    #     json.dump(lastSettings, outfile)

    object_desc = session.get_object_description(object_name)

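    # Look for external ID fields, both on the object itself and on lookup relationships referenced in the file, to offer an upsert.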
    if operation is None or operation == 'upsert':
        source_fields_relationship_names = {
            f[0:f.find('.')]
            for f in source_field_names if '.' in f
        }

        upsert_matches = [{
            "field": item["name"]
        } for item in object_desc.fields if item["externalId"] == True
                          and item["name"] in source_field_names]

        possible_reference_upsert_matches = [
            {
                "referenceTo":
                item.referenceTo[0],
                "match_string":
                item.relationshipName + ".",
                "reference_object_descs": [
                    threading.new(session.get_object_description, r)
                    for r in item.referenceTo
                ]
            } for item in object_desc.fields if len(item.referenceTo) > 0
            and item.relationshipName in source_fields_relationship_names
        ]

        reference_upsert_matches = []

        for field in source_field_names:
            for match in possible_reference_upsert_matches:
                if field.startswith(match["match_string"]):
                    upsert_match_object_desc = session.get_object_description(
                        match["referenceTo"])
                    reference_upsert_matches.extend(
                        [{
                            "field": field,
                            "matching_object": match["referenceTo"],
                            "matching_field": item["name"]
                        } for item in upsert_match_object_desc["fields"]
                         if item["externalId"] == True
                         and item["name"] == field[field.find(".") + 1:]])

        if len(upsert_matches) > 0:
            print(
                "\nFound the following External ID references for this object: {}"
                .format(", ".join([item["field"] for item in upsert_matches])))
        if len(reference_upsert_matches) > 0:
            print(
                "Found the following External ID references for a lookup object: {}"
                .format(", ".join(
                    [item["field"] for item in reference_upsert_matches])))

        if len(upsert_matches) + len(
                reference_upsert_matches) > 0 and settings.OPERATION is None:
            if prompt("\nWould you like to upsert?", boolean=True):
                operation = 'upsert'

    if operation is None:
        can_insert = len(rows_with_id) == 0  # Rows that already have an Id cannot be inserted
        can_update = len(rows_with_id) == len(rows)  # Rows missing an Id cannot be updated
        if can_insert and not can_update:
            operation = 'insert'
        elif can_update and not can_insert:
            operation = 'update'
        else:
            operation = prompt("\nWhat operation would you like to perform?",
                               options={
                                   'Insert': 'insert',
                                   'Update': 'update'
                               })

    # lastSettings["doUpsert"] = do_upsert
    # with open(settingsLoc, 'w') as outfile:
    #     json.dump(lastSettings, outfile)

    self_external_id = None
    if operation == 'upsert':
        upsert_matches.insert(0, {"field": "Id"})
        self_external_id = settings.EXT_ID

        if self_external_id is None:
            if len(upsert_matches) == 1:
                self_external_id = upsert_matches[0]["field"]
            else:
                self_external_id = prompt(
                    "\nWhat ID field would you like to use for upsert?",
                    options=[item['field'] for item in upsert_matches])
                # print("\nWhat ID field would you like to use for upsert?")
                # counter = 1
                # print_str = ""
                # for item in upsert_matches:
                #     print_str += "{}) {} \n".format(counter, item["field"])
                #     counter += 1
                # print(print_str)
                # self_external_id = upsert_matches[int(input())-1]["field"]

        if len([
                item for item in upsert_matches
                if item["field"] == self_external_id
        ]) == 0:
            raise Exception(
                "External ID field '{}' does not appear in the selected file.".
                format(self_external_id))

    fields_to_update = [
        item["name"] for item in object_desc["fields"]
        if item["name"] in rows[0] and item["updateable"] == True
        and item["calculated"] == False and item["autoNumber"] == False
    ]
    fields_to_update.extend(
        [item["field"] for item in reference_upsert_matches])
    fields_to_ignore = [
        item for item in rows[0] if item not in fields_to_update
    ]

    rows_to_update = [{
        f: v
        for (f, v) in r.items() if f == "Id" or f in fields_to_update
        or f in [mat["field"] for mat in upsert_matches]
    } for r in rows]

    mode = 'bulk' if settings.BULK_API_MODE == True else 'simple'
    if settings.BYPASS_AUTOMATION is None:
        settings.BYPASS_AUTOMATION = prompt(
            f"\nDo you need to bypass automation for this {operation}?",
            boolean=True)

    print("Selected file:     {}".format(selected_file))
    print("Operation:         {}".format(operation.title()) +
          (" (on {})".format(self_external_id
                             ) if self_external_id is not None else ""))
    print("Table:             {}".format(object_name))
    print("Bypass automation: {}".format(settings.BYPASS_AUTOMATION))
    print("Fields to update:  {}".format(", ".join(fields_to_update)))
    print("Fields to ignore:  {}".format(", ".join(fields_to_ignore)))
    do_operation_confirmation = prompt(
        f"\nWill {operation} {len(rows)} records. Are you sure?", boolean=True)

    # Now that all settings have been determined, perform the insert/update/delete in ALL sessions that were passed into the process
    # It is assumed that the system metadata that was queried for the 1st session is the same in the other sessions
    concurrency = "Parallel" if settings.PARALLEL_CONCURRENCY else "Serial"
    if do_operation_confirmation:

        def perform_crud_operation(session):
            if settings.BYPASS_AUTOMATION:
                session.add_bypass_settings()
            else:
                session.remove_bypass_settings()

            if operation == "insert":
                job_result = session.insert_records(object_name,
                                                    rows_to_update,
                                                    concurrency=concurrency)
            elif operation == "update":
                job_result = session.update_records(rows_to_update,
                                                    concurrency=concurrency)
            elif operation == "upsert":
                job_result = session.upsert_records(object_name,
                                                    rows_to_update,
                                                    self_external_id,
                                                    concurrency=concurrency,
                                                    mode=mode)
            else:
                job_result = None

            if job_result is not None and "status" in job_result and job_result[
                    "status"]["numberRecordsFailed"] != "0":
                print("{} records failed.".format(
                    job_result["status"]["numberRecordsFailed"]))
                # results = session.get_job_results(job_result)
                # session.write_file("./resources/sf_update_files/error_logs/error_{}".format(selected_file.replace(".xlsx", ".csv")), results)

            if settings.BYPASS_AUTOMATION:
                session.remove_bypass_settings()

        for session in sessions:
            threading.new(perform_crud_operation, session)
        threading.wait()

        print("\nOperation complete!")
    else:
        print("\nTerminated.")

    pass
Example #16
def hourly():
    jobs = get_folder_jobs('hourly')
    return threading.new(run, "HOURLY", jobs)


def daily():
    jobs = get_folder_jobs('daily')
    return threading.new(run, "DAILY", jobs)


get_new_sessions()
p = prompt(
    "Run jobs now?",
    multiselect=True,
    options={
        'No': lambda: 0,
        'Every 10 Minutes': every10min,
        # 'Hourly': hourly,
        'Daily': daily,
    })
for f in p:
    f()


def start_10_min_jobs():
    schedule.every().hour.at(':00').until('17:30').do(every10min)
    schedule.every().hour.at(':10').until('17:30').do(every10min)
    schedule.every().hour.at(':20').until('17:30').do(every10min)
    schedule.every().hour.at(':30').until('17:30').do(every10min)
    schedule.every().hour.at(':40').until('17:30').do(every10min)
    schedule.every().hour.at(':50').until('17:30').do(every10min)
Example #17
def main():
    sf = Salesforce_API('*****@*****.**')
    prod = Salesforce_API('*****@*****.**')

    reports_path = './resources/report_type_editor/'
    report_type_name = 'Ticket_Scales_Extended'
    # file_path = f'{reports_path}{report_type_name}.reportType.xlsx'
    report_types = {report_type_name: ReportTypeFile(sf, report_type_name)}
    current_report_type = report_types[report_type_name]
    # current_report_type.sf_retrieve()
    # current_report_type.to_excel()

    while True:
        options = [
            'SF Download ReportType',
            'SF Upload ReportType' if current_report_type.tree is not None else '',
            'Write ReportType to Excel' if current_report_type.tree is not None else '',
            'Read ReportType from Excel' if os.path.exists(current_report_type.excel_file_path) else '',
            'Command Line Editor' if current_report_type.tree is not None else '',
            'Remove Fields not in Production' if current_report_type.tree is not None else '',
            'Change Report to Edit',
            'Alphabetize Fields' if current_report_type.tree is not None else '',
        ]
        next_action = prompt(f"\nCurrent Report Type: {report_type_name}", options=[s for s in options if s != ''])
        if next_action == 'SF Download ReportType':
            print('Downloading...')
            current_report_type.sf_retrieve()
        if next_action == 'SF Upload ReportType':
            current_report_type.sf_deploy()
        if next_action == 'Write ReportType to Excel':
            current_report_type.to_excel()
        if next_action == 'Read ReportType from Excel':
            current_report_type.from_excel()
        if next_action == 'Change Report to Edit':
            name = prompt('What Report Type needs to be edited?')
            if name not in report_types:
                report_types[name] = ReportTypeFile(sf, name)
            current_report_type = report_types[name]
            # file_path = f'{reports_path}{report_type_name}.reportType.xlsx'
        if next_action == 'Remove Fields not in Production':
            for obj in current_report_type.meta().join_objects:
                prod_fields = {item.name: item for item in prod.get_object_description(obj.name).fields}
                for field in obj.fields:
                    if field.name not in prod_fields:
                        print(f'Removed {obj.name}.{field.name}')
                        current_report_type.remove_field(obj.relationship, field.name)
            for obj in current_report_type.meta().lookup_objects:
                prod_fields = {item.name: item for item in prod.get_object_description(obj.object_name).fields}
                for section, f in current_report_type.iter_tree_fields():
                    lookup_name, field_name = f.field[:f.field.rfind('.')], f.field[f.field.rfind('.')+1:]
                    if lookup_name == obj.lookup_name and field_name not in prod_fields:
                        print(f'Removed {f.field}')
                        current_report_type.remove_field(f.table, f.field)
            current_report_type.meta()
        if next_action == 'Command Line Editor':
            while True:
                options2 = [
                    'Add fields',
                    'Add a section',
                    'Exit Command Line Editor',
                ]
                next_action2 = prompt(f"\nEditing {report_type_name}:", options=[s for s in options2 if s != ''])
                if next_action2 == 'Add fields':
                    threading.new(current_report_type.meta)
                    obj = prompt('Which Object?', options={
                        item['relationship']: item for item in current_report_type.meta().join_objects
                    })
                    fields = prompt('Which fields?', multiselect=True, options=[
                        item.name for item in obj['unused_fields'].values()
                    ])
                    section_name = prompt(f'{len(fields)} field(s) selected. Add to which section?', options=[s['masterLabel'] for s in current_report_type.meta().sections])
                    # print("DEV: obj['name'] needs to be modified to be the full reference (i.e. 'Tour__c.TourLegs__r.Events__r.LedgerEntries__r')")
                    new_fields = [
                        (section_name, obj['relationship'], f, None)
                        for f in fields
                    ]
                    current_report_type.append_fields(new_fields)
                if next_action2 == 'Add a section':
                    section_name = prompt('What would you like to name the section?')
                    current_report_type.append_section(section_name)
                if next_action2 == 'Exit Command Line Editor':
                    break
        if next_action == 'Alphabetize Fields':
            current_report_type.alphabetize_fields()
import csv
import json
import re
from os import listdir, path
from os.path import isdir
from sys import argv
import pandas as pd
import numpy as np
from functions.prompt import prompt
from classes.salesforce_api import Salesforce_API

from classes.py_threading import ThreadPool
threading = ThreadPool()

from classes.obj_dict import ObjDict

script, instance = ["", ""] if len(argv) == 1 else argv

if instance == "":
    instance = prompt("What Salesforce instance do you want to use?")

settingsLoc = "./resources/sf_CRUD_Operation_Memory.json"

instances = [s.strip() for s in str(instance).split(',')]
usernames = [
    "*****@*****.**" +
    ("" if instance == "lne" or instance == "prod" else "." + instance)
    for instance in instances
]
sessions = [Salesforce_API(username) for username in usernames]

# lastSettings = json.load(open(settingsLoc, "r"))


def main(sessions, do_fix=False):
Example #19
    def dataload_file(self, file_path):
        object_name = None
        external_id_field_name = None
        # Settings defaults
        settings = ObjDict({
            "BATCH_SIZE": 2000
            , "OPERATION": None
            , "BYPASS_AUTOMATION": None
            , "CONCURRENCY": 'Parallel'
            , "MODE": 'simple'
            , "DO_BACKUP": True
            , "DISABLE_ACCOUNT_DUPLICATE_RULE": False
        })
        if file_path.endswith('.csv'):
            df = pd.read_csv(file_path)
            data_sheet_name = None
        elif file_path.endswith('.xlsx'):
            xlsx = pd.ExcelFile(file_path)
            all_sheets = xlsx.sheet_names
            option_sheets = [s for s in all_sheets if s != 'Settings']
            data_sheet_name = option_sheets[0] if len(option_sheets) == 1 else prompt('Which sheet should be loaded?', options=option_sheets)
            df = pd.read_excel(xlsx, data_sheet_name)

            df.columns = df.columns.astype(str)

            if 'Settings' in all_sheets:
                df_settings = pd.read_excel(xlsx, 'Settings')
                inputsettings = {
                    key:val['Value']
                    for key,val in df_settings.set_index('Field').to_dict('index').items()
                }
                settings.update(inputsettings)
                assert type(settings.BATCH_SIZE) is int
                assert settings.OPERATION is None or settings.OPERATION.lower() in ('insert','update','upsert','delete','undelete')
                assert settings.BYPASS_AUTOMATION in (True,False)
                assert settings.CONCURRENCY in ('Parallel','Serial')
                assert settings.MODE in ('simple','bulk')
        else:
            raise Exception('Can only parse files with a .csv or .xlsx extension')

        instances = prompt('For which instance(s) should this file be loaded?', multiselect=True, options={
            (key[key.rindex('.')+1:] + (' prod' if val['sandbox'] == 'False' else '')).strip(): val 
            for key, val in self.sf.instance_credentials.items() if 'security_token' in val
        })
        sessions = [Salesforce_API(c) for c in instances]

        # Try to detect Object based on records in the file
        # If no record Ids are present, try to use the name of the tab in the file
        if 'Id' in df:
            id_list = df[['Id']].fillna('').query("Id != ''")['Id'].to_list()
            if len(id_list) > 0:
                try:
                    object_name = sessions[0].get_object_name(id_list)
                except: pass
        if object_name is None:
            all_object_names = {item.name for item in sessions[0].get_org_description().sobjects}
            if data_sheet_name is not None and data_sheet_name in all_object_names:
                object_name = data_sheet_name
            if object_name is None:
                object_name = prompt("\nWhat object are the records in this file for?", expected=all_object_names)
        if 'RecordType.Name' in df and 'RecordTypeId' not in df:
            df['RecordTypeId'] = df['RecordType.Name'].apply(lambda x: self.get_recordtype_map()[(object_name, x)].Id if (object_name, x) in self.get_recordtype_map() else None)
        object_desc = threading.new(sessions[0].get_object_description, object_name)

        operation = str(settings.OPERATION).lower() if settings.OPERATION is not None else None
        if operation is None:
            all_records_have_id = 'Id' in df and len(df) == len(df.fillna('').query("Id != ''"))
            operation_options = [
                'insert',
                'upsert',
                'update' if all_records_have_id else None,
                'delete' if all_records_have_id else None,
                'undelete' if all_records_have_id else None,
            ]
            operation = prompt('What operation needs to be performed?', options=[o for o in operation_options if o is not None])

        if operation == 'upsert':
            external_id_fields = [f.name for f in object_desc.result().fields if f.externalId is True and f.name in df.columns.values]
            if len(external_id_fields) == 1:
                external_id_field_name = external_id_fields[0]
            elif len(external_id_fields) == 0:
                external_id_field_name = 'Id'
            else:
                external_id_field_name = prompt('Which field would you like to use as the External ID?', options=external_id_fields)

        if 'RecordTypeId' not in df and operation in ('insert','upsert') and len(object_desc.result().recordTypeInfos) > 1:
            default_record_type = [item.name for item in object_desc.result().recordTypeInfos if item.defaultRecordTypeMapping is True][0]
            if prompt(f'No RecordTypeId was specified. New records will be defaulted to RecordType = "{default_record_type}". Proceed?', boolean=True) is False:
                raise Exception('Terminated - No RecordTypeId specified')

        if settings.DO_BACKUP and len(df) > 5000:
            if not prompt(f"Large record count: {len(df)} records. Do you want to run a backup? (Normally automatic)", boolean=True):
                settings.DO_BACKUP=False

        valid_fields = sessions[0]._get_valid_fields_for_operation(operation, object_name, first_row=df.iloc[0].to_dict(), external_id_field_name=external_id_field_name)
        fields_to_update = [f for f in df.columns.values if f.lower() in valid_fields]
        fields_to_ignore = [f for f in df.columns.values if f.lower() not in valid_fields]

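        # Bypass choices: plain yes/no, a data-admin-only preset, or a fully custom bypass configuration.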
        custom_settings_options = {
            'Yes': True,
            'No': False,
            'Set Data Admin Only': {'dataadmin': True, 'automation': False, 'pbflow': False, 'fc': False, 'multicurrency': False},
            'Custom': self.sf.lndatautil._configure_GeneralSettings__c
        }
        
        settings.BYPASS_AUTOMATION = settings.BYPASS_AUTOMATION if settings.BYPASS_AUTOMATION is not None else prompt(f"\nDo you need to bypass automation for this {operation}?", options=custom_settings_options, call_options=True)
        while True:
            external_id_lookups_text = ', '.join([f for f in fields_to_update if '.' in f])
            operation_message_end_text =  f" on {external_id_field_name}" if operation == 'upsert' else ''
            operation_message_end_text += f" and External Id lookup(s) for: {external_id_lookups_text}" if len(external_id_lookups_text) > 0 else ''
            print(f"Selected file:     {file_path[file_path.rindex('/')+1:]}")
            print(f"Operation:         {operation.title()}{operation_message_end_text}")
            print(f"Table:             {object_name}")
            print(f"Bypass Settings:   {settings.BYPASS_AUTOMATION}")
            print(f"Job Settings:      Mode: {settings.MODE}, Batch size: {settings.BATCH_SIZE}, Concurrency: {settings.CONCURRENCY}")
            print(f"Fields to update:  {', '.join(fields_to_update)}")
            print(f"Fields to ignore:  {', '.join(fields_to_ignore)}")
            operation_confirmation = prompt(f"\nWill {operation} {len(df)} records. Are you sure? Press 'M' to modify settings. (Y/N/M)", expected=['Y','N','M'])
            if operation_confirmation == 'M':
                options = {
                    "BYPASS_AUTOMATION": {'options':custom_settings_options, 'call_options': True},
                    "BATCH_SIZE": {'expected_type': int},
                    "CONCURRENCY": {'expected': ['Parallel','Serial','']},
                    "MODE": {'expected': ['simple','bulk','']},
                    "DISABLE_ACCOUNT_DUPLICATE_RULE": {'options': [True, False]},
                }
                while True:
                    modify_options = ['All'] + list(options.keys()) + ['Done']
                    selected_option = prompt('Modify:', modify_options)
                    if selected_option == 'All':
                        for option, val in options.items():
                            new = prompt(f'Set setting {option} (currently {settings[option]}): ', **val)
                            settings[option] = new if new != '' else settings[option]
                        break
                    elif selected_option == 'Done':
                        break
                    else:
                        option, val = selected_option, options[selected_option]
                        new = prompt(f'Set setting {option} (currently {settings[option]}): ', **val)
                        settings[option] = new if new != '' else settings[option]
                continue
            break


        # Now that all settings have been determined, perform the insert/update/delete in ALL sessions that were passed into the process
        # It is assumed that the system metadata that was queried for the 1st sf is the same in the other sessions
        if operation_confirmation == 'Y':
            def perform_operation(sf):
                params = {key.lower(): val for key,val in settings.items() if key not in ('BYPASS_AUTOMATION',)}
                if settings.BYPASS_AUTOMATION is True:
                    sf.add_bypass_settings()
                elif settings.BYPASS_AUTOMATION is False:
                    sf.remove_bypass_settings()
                else:
                    sf.add_bypass_settings(**settings.BYPASS_AUTOMATION)
                if settings.DISABLE_ACCOUNT_DUPLICATE_RULE is True:
                    self.toggle_duplicate_rule('Account.', False)

                if operation == "insert":
                    job_result = sf.insert(object_name, df, **params)
                elif operation == "update":
                    job_result = sf.update(df, **params)
                elif operation == "upsert":
                    job_result = sf.upsert(object_name, df, external_id_field_name, **params)
                elif operation == "delete":
                    job_result = sf.delete(df, **params)
                elif operation == "undelete":
                    job_result = sf.undelete(df, **params)

                num_failed_records = (
                    len([item for item in job_result if 'Error' in item.sf_result]) if settings.MODE == 'simple'
                    else int(job_result["status"]["numberRecordsFailed"]) if job_result is not None and "status" in job_result
                    else 0
                )
                if num_failed_records > 0:
                    print(f"({sf.instance}) {num_failed_records} records failed.")

                if settings.BYPASS_AUTOMATION is not False:
                    sf.remove_bypass_settings()
                if settings.DISABLE_ACCOUNT_DUPLICATE_RULE is True:
                    self.toggle_duplicate_rule('Account.', True)
                return job_result
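            # Snapshot the target records before a destructive operation so they can be restored if something goes wrong.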
            def get_backup(sf, id_field):
                if not (settings.DO_BACKUP and operation in {'upsert','update','delete'}):
                    return None
                try:
                    backup_ids = df[id_field]
                    mode = 'simple' if object_name != 'Event__c' else 'bulk'
                    sf.console.new_line('Running Backup...')
                    return sf.select(f"SELECT * FROM {object_name} WHERE {id_field} != NULL AND {id_field} IN @backup_ids", mode=mode, return_type='dataframe')
                except:
                    return None

            id_field = external_id_field_name if operation == 'upsert' else 'Id'
            results = ObjDict()
            for sf in sessions:
                backup = get_backup(sf, id_field)
                job_thread = threading.new(perform_operation, sf)
                results[sf.instance] = ObjDict({'backup': backup, 'job': job_thread})
            for result in results.values():
                result.job = result.job.result()
                if result.backup is not None:
                    ordered_cols = list({f:f for f in (['Id', id_field] + result.job.sent_fields + list(result.backup.columns.values))})
                    result.backup = result.backup[ordered_cols]

            print("\nAll operations complete!")
            return results
        else:
            print("\nTerminated.")
Example #20
def main(can_quit=False):
    dataloader_files_path = os.path.abspath('./resources/dataloader')
    print(f'\nListening... drop files in this directory to dataload or query:\n{dataloader_files_path}')
    print(f'\nFor query text files, include the prefix "--bulk" at the beginning of the file if you would like to use the Bulk API')
    time.sleep(0.2)
    dir.make_dir(dataloader_files_path, delete_first=False)

    def get_file_name(path):
        return os.path.split(path)[1]
    def move(file_path, to_folder):
        timenow = datetime.datetime.now().strftime("%Y-%m-%d %H.%M")
        dir.make_dir(f'{dataloader_files_path}/{to_folder}', delete_first=False)
        file_name = get_file_name(file_path)
        new_path = f'{dataloader_files_path}/{to_folder}/{timenow} {file_name}'
        os.rename(file_path, new_path)
    def write_result(to_folder, file_name, results):
        timenow = datetime.datetime.now().strftime("%Y-%m-%d %H.%M")
        output_file_path = f'{dataloader_files_path}/{to_folder}/{timenow} {file_name}.csv'
        dir.make_dir(f'{dataloader_files_path}/{to_folder}', delete_first=False)
        pd.DataFrame(results).to_csv(output_file_path, index=False)
        return output_file_path

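    # Poll the drop folder: data files are loaded via dataload_file, plain-text files are treated as queries.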
    while True:
        files = [f for f in os.listdir(dataloader_files_path) if os.path.isfile(f'{dataloader_files_path}/{f}') and not f.startswith('.')]
        if len(files) > 0:
            options = {**{f: f'{dataloader_files_path}/{f}' for f in files if not f.startswith('~')}, **{'Refresh': None}}
            if can_quit: options.update({'Quit': 'QUIT'})
            file_path = prompt('\nWhich file would you like to load?', options)
            if file_path == 'QUIT':
                break
            if file_path is None:
                continue
            file_name = get_file_name(file_path)
            if file_name.endswith('.csv') or file_name.endswith('.xlsx') or file_name.endswith('.xls'):
                results = None
                try:
                    results = sf.dataload_file(file_path)
                    sf.console.clear()
                except Exception as e:
                    traceback.print_exc()
                next_options = ['Keep in working folder', 'Move to Archive',]
                if results is not None:
                    for instance, result in results.items():
                        if result.backup is not None:
                            threading.new(write_result, 'backups', f'({instance}) {result.job.object_name} BACKUP ({len(result.backup)})', result.backup)
                        if result is not None and hasattr(result.job, 'errors') and len(result.job.errors) > 0:
                            next_options.append('View errors')
                            threading.new(write_result, 'errors', f'({instance}) {file_name} ({len(result.job.errors)})', result.job.errors)
                            print(f'({instance}) Errors have been written to the /errors folder.')
                next_action = prompt(f'What would you like to do with the file? ({file_name})', options=next_options)
                if next_action == 'Move to Archive':
                    move(file_path, 'archive')
                elif next_action == 'View errors':
                    for instance, result in results.items():
                        df = pd.DataFrame().from_records(result.job.errors)
                        cols = list({**{'sf_result':None}, **{c:c for c in df.columns.values}})[0:5]
                        print(f'### {instance} ###')
                        print(df[cols].to_string())
            else:
                # Is query
                query_text = open(file_path, 'r').read()
                parsed_query = sf.parse_query(query_text)
                if parsed_query is not None:
                    try:
                        instances = prompt('Which instance(s) would you like to query?', multiselect=True, options={
                            (key[key.rindex('.')+1:] + (' prod' if val['sandbox'] == 'False' else '')).strip(): val 
                            for key, val in sf.instance_credentials.items() if 'security_token' in val
                        })
                        sessions = [Salesforce_API(c) for c in instances]
                        mode = 'bulk' if re.match(r'^\s*--\s*bulk', query_text) else 'simple'
                        queries = {
                            session.instance: threading.new(session.select, query_text, mode=mode, return_type='flat')
                            for session in sessions
                        }
                        threading.wait(queries.values())
                        print('\nResult file(s) written to:')
                        for instance, result in queries.items():
                            result = result.result()
                            output_file_path = write_result('query_results', f'{instance} {parsed_query.object_name} ({len(result)})', result)
                            print(f'/query_results/{os.path.split(output_file_path)[1]}')
                        sf.console.clear()
                    except Exception as e:
                        print(e)
                else:
                    print('Could not parse query file. Please update the file and try again.')
        time.sleep(1)
def eos_venues():
    def get_eos_dupes_analysis():
        records = sql.query("""
        SELECT v.Id, v.Name, v.RomeId, v.IsActive
        FROM Venue v
        LEFT JOIN TownCity t
            ON v.TownCityId = t.Id
        LEFT JOIN Region r
            ON t.RegionId = r.Id
        LEFT JOIN Country c
            ON r.CountryId = c.Id
        WHERE c.Name IN ('United Kingdom')
        """)
        return pdh.find_duplicates(records, 'Name', None,
                                   ['Id', 'RomeId', 'IsActive'])

    def get_deletion_log():
        return pd.DataFrame(
            sql.query("""
        SELECT l.DeletedRecordId__c
        , l.MergedAccount__c
        , l.RecordTypeName__c
        , v1.Id AS EOSIdToDelete
        , v1.Name AS EOSNameToDelete
        , v2.Id AS EOSIdToMergeInto
        , v2.Name AS EOSNameToMergeInto
        , CASE WHEN v1.Id < v2.Id THEN 'Double check this - older ID being deleted' ELSE NULL END AS SpotCheck
        FROM RomeDeletionLog l
        LEFT JOIN Venue v1
            ON v1.RomeId = l.DeletedRecordId__c
            AND l.RecordTypeName__c = 'Venue'
        LEFT JOIN Venue v2
            ON v2.RomeId = l.MergedAccount__c
            AND l.RecordTypeName__c = 'Venue'
        WHERE v1.Id IS NOT NULL
        """))

    eos_dupes = Cache.get('eosdupes', None)
    if eos_dupes is None or prompt('Re-run EOS Venue Dupe Analysis?',
                                   boolean=True):
        eos_dupes = get_eos_dupes_analysis()
        Cache.set('eosdupes', eos_dupes)
    deletion_log = get_deletion_log()

    deleted_ids = set(deletion_log.DeletedRecordId__c)

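    # Classify each duplicate pair by whether its Rome links exist and whether
    # either RomeId appears in the Rome deletion log.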
    def note(row):
        r1, r2 = row['RomeId'], row['m_RomeId']
        if pdh.isnull(r1) and pdh.isnull(r2):
            return 'EOS Merge - No Rome links exist'
        if r1 in deleted_ids and r2 in deleted_ids:
            return 'Both RomeIds are deleted in Rome'
        elif r2 in deleted_ids:
            return 'EOS Merge RIGHT into LEFT - RIGHT Id is deleted in Rome'
        elif r1 in deleted_ids:
            return 'EOS Merge LEFT into RIGHT - LEFT Id is deleted in Rome'
        if pdh.isnull(r1) and pdh.notnull(r2):
            return 'EOS Merge LEFT into RIGHT'
        if pdh.isnull(r2) and pdh.notnull(r1):
            return 'EOS Merge RIGHT into LEFT'
        if r1 == r2:
            return 'EOS Merge - Both EOS Venues have the same RomeId... merge one into the other'
        if r1 not in deleted_ids and r2 not in deleted_ids:
            return 'ROME Merge first, then EOS can be merged'

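    # Keep only pairs whose name-match score (m_%) is above 70.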
    eos_dupes.query("`m_%` > 70", inplace=True)
    eos_dupes['Note'] = eos_dupes.apply(note, axis=1)
    eos_dupes[
        'RomeMergeLink'] = 'https://lne.my.salesforce.com/merge/accmergewizard.jsp?goNext=+Next+&cid=' + eos_dupes[
            'RomeId'] + '&cid=' + eos_dupes['m_RomeId']

    output = {
        'EOS Dupe Analysis': eos_dupes,
        'RomeDeletionLog': deletion_log,
    }

    file_name = loc.uk_folder_path + '/Master Data - Output/Venue Dupe Analysis/EOS Venue Duplicate Analysis ' + datetime.now(
    ).strftime('%Y-%m-%d %H.%M') + '.xlsx'

    pdh.to_excel(output, file_name)
    return
Example #22
def main():
    source_is_modified = ' (Files have been modified)' if source_files_have_been_modified(
    ) else ''
    re_run = prompt(f'Re-run Historical Extract? {source_is_modified}',
                    boolean=True)
    if re_run:
        uk.extract_historical_file_data(sf,
                                        ask_to_regenerate_files=False,
                                        multi=True)
    file_data = uk.read_pickle(loc.uk_historical_migration_combined_pickle)
    grouped_file_data = threading.new(get_file_data_by_tour_and_event,
                                      file_data)
    offer_ids = file_data.Tour__c.EOSId__c.tolist()
    file_offers = file_data.Tour__c[['EOSId__c']]

    offers_to_migrate_eos = pd.DataFrame(
        sql.query(f"""
    SELECT CAST(OfferId AS VARCHAR) AS EOSId__c, ArtistName, OfferStatusName, Company, OracleCode, PromoterName
        , FORMAT(MIN(ShowDate), 'yyyy-MM-dd') AS FirstDate
        , FORMAT(MAX(ShowDate), 'yyyy-MM-dd') AS LastDate
        , COUNT(DISTINCT ShowId) AS ShowCountNotCancelled
        -- , COUNT(DISTINCT (CASE WHEN PostponedDateTBC = 1 THEN ShowId ELSE NULL END)) AS PostponedDateTBCShows
    FROM vwEOSData
    WHERE OfferId IN ({','.join(offer_ids)})
    GROUP BY OfferId, ArtistName, OfferStatusName, CountryName, Company, OracleCode, PromoterName
    ORDER BY MAX(ShowDate) ASC
    """))
    offers_to_migrate = file_offers.merge(offers_to_migrate_eos,
                                          on='EOSId__c',
                                          how='left').to_dict('records')

    shows_to_migrate = uk.query_Event__c(sql, offer_ids)
    shows_by_offer_id = pdh.groupby_unsorted(shows_to_migrate,
                                             lambda x: x['TourEOSId'])
    migrated_offers = lne.select("""
    SELECT Id, Status__c, EOSId__c, OracleProjectCode__c, TourName__c, ShowCount__c, CreatedBy.Name
    FROM Tour__c
    WHERE EOSId__c IN @offer_ids
    """,
                                 return_type='flat')
    migrated_offers_uat = uat.select("""
    SELECT Id, Status__c, EOSId__c, OracleProjectCode__c, TourName__c, ShowCount__c, CreatedBy.Name
    FROM Tour__c
    WHERE EOSId__c IN @offer_ids
    """,
                                     return_type='flat')

    migrated_offer_ids = {item['EOSId__c'] for item in migrated_offers}
    migrated_offer_ids_uat = {item['EOSId__c'] for item in migrated_offers_uat}

    remaining_to_migrate = [
        item for item in offers_to_migrate
        if item['EOSId__c'] not in migrated_offer_ids
    ]
    offer_ids_remaining_to_migrate = [
        item['EOSId__c'] for item in remaining_to_migrate
    ]
    analysis_missing_eos_ids_by_tour = threading.new(
        uk.analysis_missing_eos_ids_by_tour, sql, sf,
        offer_ids_remaining_to_migrate)
    all_missing_eos_ids = set(
        itertools.chain.from_iterable(
            analysis_missing_eos_ids_by_tour.values()))
    missing_rome_master_data_table = threading.new(uk.query_by_eos_ids,
                                                   sql,
                                                   all_missing_eos_ids,
                                                   combined_result=True)

    costing_show_filenames = set(os.listdir(loc.uk_historical_data_cache_path))
    for item in migrated_offers + remaining_to_migrate:
        shows = shows_by_offer_id.get(item['EOSId__c'], [])
        shows_with_costing = [
            show for show in shows
            if f"{show['EOSId__c']}.pickle" in costing_show_filenames
        ]
        item['ShowCountInclCancelled'] = len(shows)
        item['# Shows w Excel Costing'] = len(shows_with_costing)
        item['Migrated in UAT?'] = item['EOSId__c'] in migrated_offer_ids_uat

    for item in remaining_to_migrate:
        item['Missing Master Data'] = ', '.join(
            analysis_missing_eos_ids_by_tour.get(item['EOSId__c'], []))

    def get_multiple_file_issues(df):
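        # Report EOS show ids that appear in more than one source file.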
        if len(df) == 0:
            return []
        groupby = df.groupby(['EventEOSId', 'Source_File'])
        df2 = pd.DataFrame(groupby.groups.keys())
        duplicated = df2.duplicated(subset=[0], keep=False)
        df3 = df2[duplicated]
        chunks = []
        if len(df3) > 0:
            groupby2 = df3.groupby(0)
            for eosid in groupby2.groups:
                files = df2.groupby(0).get_group(eosid)[1].tolist()
                chunks.append(f'Show {eosid} in {", ".join(files)}')
        return chunks

    def tour_events_analysis(tour_costing):
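        # Flag events in this tour that have no LedgerEntryBreakout__c rows in the file data.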
        event_ids = tour_costing.Event__c.EOSId__c.tolist()
        output = {}
        events_missing_lebs = []
        for evt in event_ids:
            leb = file_data_by_event.get(evt, {}).get('LedgerEntryBreakout__c',
                                                      None)
            if leb is None or len(leb) == 0:
                events_missing_lebs.append(evt)
        output['Err_EventsMissingLEBs'] = ', '.join(events_missing_lebs)
        return output

    file_data_by_tour, file_data_by_event = grouped_file_data.result()

    for item in remaining_to_migrate:
        costing = file_data_by_tour.get(item['EOSId__c'], None)
        if costing:
            tour = costing.Tour__c.iloc[0].to_dict()
            lebs = costing.LedgerEntryBreakout__c
            les = costing.LedgerEntry__c
            item.update(tour_events_analysis(costing))
            item['Err_HasNegativeLEBs'] = True if (
                lebs.OfferRate__c < 0).any() == True else None
            # item['Err_TourHasNoShows'] = item['EOSId__c'] not in file_events_by_tour.groups
            item['Err_OracleCodeMismatch'] = (
                None if
                (tour['OracleProjectCode__c'] == item['OracleCode']) else
                f"{item['OracleCode']} in EOS, {tour['OracleProjectCode__c']} in file"
            )
            item['Err_MultipleFileSources'] = ' | '.join(
                get_multiple_file_issues(les) + get_multiple_file_issues(lebs))
    remaining_to_migrate = pd.DataFrame(remaining_to_migrate)
    for col in remaining_to_migrate.columns.values:
        vals = remaining_to_migrate[col]
        if col.startswith('Err') and ((vals.isna()) | (vals == '')).all():
            del remaining_to_migrate[col]

    file_name = 'UK On-Sale Migration Status ' + datetime.now().strftime(
        '%Y-%m-%d') + '.xlsx'
    file_name_2 = loc.uk_historical_migration_status + 'UK On-Sale Migration Status ' + datetime.now(
    ).strftime('%Y-%m-%d %H.%M') + '.xlsx'
    output = {
        'Remaining to Migrate': remaining_to_migrate,
        'Migrated Offers': migrated_offers,
    }
    missing_md = missing_rome_master_data_table.result()
    if len(missing_md) > 0:
        output[f'Missing Master Data'] = missing_md
    output = {f'{k} ({len(v)})': v for k, v in output.items()}

    threading.new(pdh.to_excel, output, file_name)
    threading.new(pdh.to_excel, output, file_name_2)
    threading.wait()

    return
def main():
    session = Salesforce_API("*****@*****.**")
    sql = SQL_Server_API('EOS-stage')

    accounts = session.select("SELECT Id, EOSId__c FROM Account",
                              return_type='dataframe')
    account_ids = set(accounts.set_index('Id').to_dict('index'))
    account_eos_ids = set(
        accounts.fillna('').query("EOSId__c != ''").set_index(
            'EOSId__c').to_dict('index'))

    venue = sql.query(
        f"SELECT *, 'Venue-'+CAST(Id AS VARCHAR(10)) AS EOSId__c FROM Venue")
    artist = sql.query(
        f"SELECT *, 'Artist-'+CAST(Id AS VARCHAR(10)) AS EOSId__c FROM Artist")
    copromoter = sql.query(
        f"SELECT *, 'Copromoter-'+CAST(Id AS VARCHAR(10)) AS EOSId__c FROM Copromoter"
    )
    artistagency = sql.query(
        f"SELECT *, 'ArtistAgency-'+CAST(Id AS VARCHAR(10)) AS EOSId__c FROM ArtistAgency"
    )
    ticketagency = sql.query(
        f"SELECT *, 'TicketAgent-'+CAST(Id AS VARCHAR(10)) AS EOSId__c FROM TicketAgent"
    )

    output = {
        'Venue': venue,
        'Artist': artist,
        'Copromoter': copromoter,
        'ArtistAgency': artistagency,
        'TicketAgent': ticketagency,
    }
    to_wipe_RomeId = {obj: [] for obj in output}
    to_delete = {obj: [] for obj in output}
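    # Wipe the RomeId on EOS records whose EOSId__c already exists on a Rome Account
    # (plus a few hard-coded exceptions); delete integration-created records whose
    # RomeId no longer matches any Account.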
    for obj, data in output.items():
        for record in data:
            if record['EOSId__c'] in account_eos_ids or record['EOSId__c'] in (
                    'ArtistAgency-1', 'ArtistAgency-149', 'ArtistAgency-158'):
                to_wipe_RomeId[obj].append(record)
            elif (record['RomeId'] not in (None, '', 'None')
                  and record['RomeId'] not in account_ids
                  and record['CreatedBy'] == 'Rome_EOS_Integration'):
                to_delete[obj].append(record)
    pdh.to_excel(to_wipe_RomeId, 'EOSDataToWipeRomeId.xlsx')
    pdh.to_excel(to_delete, 'EOSDataToDelete.xlsx')

    def delete(table, data):
        if len(data) > 0:
            ids = pd.DataFrame(data).Id.tolist()
            ids_text = ','.join([str(i) for i in ids])
            s = f'DELETE FROM {table} WHERE Id IN({ids_text})'
            print(s)
            sql.execute(s, commit=True)

    def blank_romeid(table, data):
        if len(data) > 0:
            ids = pd.DataFrame(data).Id.tolist()
            ids_text = ','.join([str(i) for i in ids])
            s = f'UPDATE {table} SET RomeId = NULL WHERE Id IN({ids_text})'
            print(s)
            sql.execute(s, commit=True)

    if prompt('Ready to update EOS?', boolean=True):
        for obj, data in to_delete.items():
            delete(obj, data)
        for obj, data in to_wipe_RomeId.items():
            blank_romeid(obj, data)
    return
def main():

    records = sf.fupdate("""
        UPDATE LedgerEntryBreakout__c
        SET CoPromoterPlanRate__c = CoPromoterReimbursementRate__c
        , CoPromoterPlanRateText__c = CoPromoterReimbursementRateText__c
        WHERE CoPromoterReimbursementRate__c != NULL
        AND CoPromoterPlanRate__c != CoPromoterReimbursementRate__c
        -- LIMIT 20
    """, return_staged=False)
    print('')


    # with sf.bypass_settings():
    #     res1 = sf.fupdate("""
    #     UPDATE TourOnSale__c
    #     SET Type__c = 'Promoter'
    #     WHERE Type__c IN ('Cuffe & Taylor','Live Nation','Metropolis Music')
    #     """, return_staged=False)
    #     res2 = sf.fupdate("""
    #     UPDATE EventOnSale__c
    #     SET Type__c = 'Promoter'
    #     WHERE Type__c IN ('Cuffe & Taylor','Live Nation','Metropolis Music')
    #     """, return_staged=False)
    
    # pdh.to_excel(df, 'Planned TourOnSale Update')

    # touronsales = sf.fselect("""
    # SELECT Id
    #     , Type__c AS OldType
    #     , 'Promoter' AS Type__c
    # FROM TourOnSale__c
    # WHERE Type__c IN ('Cuffe & Taylor','Live Nation','Metropolis Music')
    # """)

    # eventonsales = sf.fselect("""
    # SELECT Id
    #     , Type__c AS OldType
    #     , 'Promoter' AS Type__c
    # FROM EventOnSale__c
    # WHERE Type__c IN ('Cuffe & Taylor','Live Nation','Metropolis Music')
    # """)

    # touronsales = sf.select("""
    # SELECT Id
    #     , Type__c 
    # FROM TourOnSale__c
    # WHERE Type__c IN ('Cuffe & Taylor','Live Nation','Metropolis Music')
    # """, return_type='dataframe')

    # eventonsales = sf.select("""
    # SELECT Id
    #     , Type__c 
    # FROM EventOnSale__c
    # WHERE Type__c IN ('Cuffe & Taylor','Live Nation','Metropolis Music')
    # """, return_type='dataframe')

    # touronsales['Type__c'] = 'Promoter'
    # eventonsales['Type__c'] = 'Promoter'


    # Disabled: touronsales and eventonsales are only built by the commented-out
    # queries above, so these updates would raise a NameError if run as-is.
    # prompt('Press any key to continue')
    # sf.bypass_prod_operation_approval()
    # with sf.bypass_settings():
    #     sf.update(touronsales, mode='bulk')
    #     sf.update(eventonsales, mode='bulk')

    return
Example #25
def main():
    print('####### CLONE UTILITY #######')

    while True:
        source_sf = Salesforce_API(prompt(f'What SF Instance would you like to clone from?', options={
            (key[key.rindex('.')+1:] + (' prod' if val['sandbox'] == 'False' else '')).strip(): val 
            for key, val in Salesforce_API.instance_credentials.items() if 'security_token' in val
        }))
        target_sf = Salesforce_API(prompt(f'What other SF Instance would you like to clone to?', options={
            (key[key.rindex('.')+1:] + (' prod' if val['sandbox'] == 'False' else '')).strip(): val 
            for key, val in Salesforce_API.instance_credentials.items() if 'security_token' in val and val['sandbox'] == 'True' and val != source_sf
        }))
        clone_methods = {
            'Tour__c': target_sf.clone_tours,
            'Event__c': target_sf.clone_events,
            'AdPlan__c': target_sf.clone_adplans,
        }
        clone_object = prompt(f'What would you like to clone?', options={
            'Tours': 'Tour__c',
            'Events': 'Event__c',
            'Ad Plans': 'AdPlan__c',
        })
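        # e.g. 'Tour__c' -> 'TourTitle__c'; used to show record names in search results and confirmations.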
        title_field = clone_object.replace('__c', 'Title__c')
        selected_record_ids = []
        while True:
            selection = prompt(f'What record(s) would you like to clone? Multiple can be selected by a comma-separated entry.\n(You can enter record IDs for an exact match, or enter any other text for SOSL Search)')
            split_selections = selection.split(',')
            for s in split_selections:
                match = re.match(id_regex, s)
                if match:
                    selected_record_ids.append(match.group(1))
                else:
                    records = source_sf.sosl(s, clone_object, None, ['Id', title_field])
                    if len(records) == 1:
                        selected_record_ids.append(records[0].Id)
                    elif len(records) > 1:
                        options = {record[title_field]: record for record in records[0:9]}
                        options['None of the above'] = None
                        record_to_add = prompt(f'SOSL Search performed for "{s}". Which record would you like to clone?', options=options)
                        if record_to_add is not None:
                            selected_record_ids.append(record_to_add.Id)
                    else:
                        print(f'No results found for "{s}"')
            selected_records = source_sf.select(f"SELECT Id, {title_field} FROM {clone_object} WHERE Id IN @selected_record_ids", mute=True)
            if not selected_records:
                continue
            titles = "\n".join([item[title_field] for item in selected_records])
            message = f'\n\nSELECTED:\n{titles}\n'
            message += f'\nThe above {clone_object} record{"(s) have" if len(selected_records) > 1 else " has"} been selected for clone.'
            message += f'\nSOURCE: {source_sf.instance} | TARGET: {target_sf.instance}'
            resp = prompt(message, options={
                f'Ready to clone?': 1,
                f'Search for more {clone_object} records to clone?': 2,
                f'Cancel and restart?': 3
            })
            if resp == 1:
                clone_method = clone_methods[clone_object]
                clone_result = clone_method(
                    [item.Id for item in selected_records]
                    , source_session=source_sf
                    , target_session=target_sf
                )
                source_sf.console.clear()
                target_sf.cloneutil.clone_records_ids_map.clear()
                target_sf.cloneutil.clone_records_ids_processed.clear()
                target_sf.cloneutil.clone_records_mapping_log.clear()
                print("\n\nCLONE COMPLETE\n\n")
                print('\n'.join(pd.DataFrame(clone_result)['Id'].apply(lambda x: f'https://{target_sf.simple.sf_instance}/{x}').tolist()[:10]))
                break
            if resp == 2:
                continue
            if resp == 3:
                break
def venue_primary_office_update():
    venues_to_update_primary_office = [
        '0011Q00002NR3byQAD', '0011Q00002L0CUFQA3', '0013600001YSupjAAD',
        '0013600001YStyOAAT', '0013600001YSv2HAAT', '0011Q00002NR3vwQAD',
        '0013600001YSuq2AAD', '0013600001YSuq8AAD', '0013600001YSuq9AAD',
        '0013600001YSuqAAAT', '0013600001YSv8iAAD', '0013600001YSu3XAAT',
        '0013600001YSup7AAD', '0013600001YSuqYAAT', '0011Q00002NR3guQAD',
        '0011Q00002NR3v7QAD', '0011Q00002NR3gzQAD', '0013600001YT3q5AAD',
        '0011Q00002NR3nYQAT', '0011Q00002NR3fHQAT', '0011Q00002D9VFNQA3',
        '0013600001YSvHoAAL', '0013600001YSvGUAA1', '0011Q00002NR3vfQAD',
        '0013600001YT3x5AAD', '0013600001YSv2EAAT', '0011Q00002NR41EQAT',
        '0013600001YSu7zAAD', '0011Q00002NR3rmQAD', '0011Q00002NR3qDQAT',
        '0013600001YSvRkAAL', '0011Q00002NR40hQAD', '0011Q00002NR3nsQAD',
        '0013600001YT3x1AAD', '0011Q00002NR3jaQAD', '0013600001YSuy2AAD',
        '0013600001YSuy3AAD', '0013600001YSv7hAAD', '0011Q00002NR3fIQAT',
        '0013600001YSulUAAT', '0013600001YSuzgAAD', '0011Q00002L2LWeQAN',
        '0013600001YSu1VAAT', '0011Q00002JMyN7QAL', '0011Q00002NR3fxQAD',
        '0013600001YT3qWAAT', '0011Q00002NR3ftQAD', '0011Q00002NR3g3QAD',
        '0011Q00002NR48wQAD', '0011Q00002NR3cxQAD', '0011Q00002NR3mzQAD',
        '0011Q00002NR46CQAT', '0011Q00002NR3q6QAD', '0013600001YSvLGAA1',
        '0013600001YSv84AAD', '0013600001YSv1UAAT', '0011Q00002NR3hAQAT',
        '0011Q00002NR3cmQAD', '0013600001YSv1SAAT', '0013600001YSv6CAAT',
        '0013600001YSvCyAAL', '0013600001YSv6FAAT', '0013600001YSv6EAAT',
        '0013600001YSv6GAAT', '0011Q00002NR3uzQAD', '0011Q00002NR42gQAD',
        '0013600001YSv6DAAT', '0011Q00002L0CUJQA3', '0013600001YSv25AAD',
        '0013600001YSv2dAAD', '0011Q00002NR41hQAD', '0011Q00002NR47oQAD',
        '0011Q00002NR44VQAT', '0011Q00002NR43zQAD', '0013600001YSu3XAAT',
        '0011Q00002NR3xLQAT', '0011Q00002NR43LQAT', '0011Q00002NR3t0QAD',
        '0011Q00002NR3jsQAD', '0011Q00002NR3hBQAT', '0011Q00002JMyPcQAL',
        '0011Q00002NR3hCQAT', '0013600001YSv7BAAT', '0011Q00002NR45IQAT',
        '0013600001iqVT4AAM', '0013600001YSv6GAAT', '0011Q00002NR427QAD',
        '0011Q00002NR3oSQAT', '0011Q00002NR3k3QAD', '0011Q00002NR3vnQAD',
        '0013600001YSvEtAAL', '0013600001YSvGhAAL', '0011Q00002NR45JQAT',
        '0011Q00002NR44wQAD', '0013600001YSvLuAAL', '0011Q00002NR3zyQAD',
        '0011Q00002NR40oQAD', '0011Q00002NR45nQAD', '0011Q00002NR45oQAD',
        '0011Q00002NR3zWQAT', '0011Q00002NR440QAD', '0011Q00002NR48UQAT',
        '0011Q00002NR43dQAD', '0011Q00002NR3xwQAD', '0011Q00002NR438QAD',
        '0011Q00002NR43sQAD', '0011Q00002NR3uBQAT', '0011Q00002NR44KQAT',
        '0011Q00002NR429QAD', '0011Q00002NR46ZQAT', '0011Q00002NR3ueQAD',
        '0013600001YSvEcAAL', '0011Q00002NR3xNQAT', '0011Q00002NR46nQAD',
        '0013600001YSvPUAA1', '0011Q00002NR42AQAT', '0011Q00002NR40IQAT',
        '0011Q00002NR47ZQAT', '0011Q00002NR48QQAT', '0011Q00002NR413QAD',
        '0011Q00002NR48pQAD', '0011Q00002NR3zqQAD', '0011Q00002NR485QAD',
        '0011Q00002NR3sPQAT', '0011Q00002NR41XQAT', '0011Q00002NR41YQAT',
        '0011Q00002NR3lsQAD', '0011Q00002NR3wVQAT', '0013600001YSv1RAAT',
        '0011Q00002NR47lQAD', '0011Q00002NR43eQAD', '0011Q00002NR43MQAT',
        '0011Q00002NR46rQAD', '0011Q00002NR441QAD', '0011Q00002NR3oxQAD',
        '0011Q00002NR3cyQAD', '0013600001YSvLIAA1', '0011Q00002NR46TQAT',
        '0011Q00002NR42qQAD', '0011Q00002NR43WQAT', '0011Q00002NR3oTQAT',
        '0013600001YStvqAAD', '0011Q00002NR42mQAD', '0011Q00002NR405QAD',
        '0011Q00002NR48sQAD', '0011Q00002NR45DQAT', '0011Q00002NR3kKQAT',
        '0011Q00002NR3kzQAD', '0011Q00002NR3rrQAD', '0011Q00002NR46sQAD',
        '0011Q00002NR42BQAT', '0011Q00002NR48UQAT', '0011Q00002NR47OQAT',
        '0013600001YSvESAA1', '0013600001YSvFoAAL', '0011Q00002NR3wWQAT',
        '0013600001YSvBQAA1', '0011Q00002NR3moQAD', '0011Q00002NR41ZQAT',
        '0013600001YSvDCAA1', '0011Q00002NR46fQAD', '0011Q00002NR44NQAT',
        '0011Q00002NR3rsQAD', '0011Q00002NR47kQAD', '0011Q00002NR3k5QAD',
        '0011Q00002NR3cLQAT', '0011Q00002NR42CQAT', '0011Q00002NR48FQAT',
        '0013600001YSvEPAA1', '0011Q00002NR3csQAD', '0013600001YSvJ1AAL',
        '0013600001YSvEQAA1', '0013600001YSvItAAL', '0011Q00002NR3fKQAT',
        '0011Q00002NR3ufQAD', '0011Q00002NR3izQAD', '0011Q00002NR3ypQAD',
        '0011Q00002NR3rtQAD', '0011Q00002NR432QAD', '0011Q00002NR43gQAD',
        '0011Q00002NR45KQAT', '0011Q00002NR46OQAT', '0011Q00002NR3mJQAT',
        '0011Q00002NR44mQAD', '0011Q00002NR439QAD', '0011Q00002NR44YQAT',
        '0011Q00002NR48IQAT', '0011Q00002NR44yQAD', '0011Q00002NR41aQAD',
        '0011Q00002NR44EQAT', '0011Q00002NR3k6QAD', '0011Q00002NR3mhQAD',
        '0011Q00002NR42uQAD', '0011Q00002NR42EQAT', '0011Q00002NR3xxQAD',
        '0011Q00002NR3srQAD', '0011Q00002NR43XQAT', '0011Q00002NR456QAD',
        '0011Q00002NR3xyQAD', '0011Q00002NR42aQAD', '0011Q00002NR43iQAD',
        '0011Q00002NR43jQAD', '0011Q00002NR486QAD', '0011Q00002NR3xzQAD',
        '0011Q00002NR47gQAD', '0011Q00002NR43DQAT', '0011Q00002NR45MQAT',
        '0011Q00002NR401QAD', '0011Q00002NR3ruQAD', '0011Q00002NR42hQAD',
        '0011Q00002NR3xbQAD', '0011Q00002NR43OQAT', '0011Q00002NR3w4QAD',
        '0013600001YT3wEAAT', '0011Q00002NR479QAD', '0011Q00002NR45rQAD',
        '0011Q00002NR3dxQAD', '0011Q00002NR41mQAD', '0013600001YSvHsAAL',
        '0011Q00002NR419QAD', '0011Q00002NR46uQAD', '0011Q00002NR3slQAD',
        '0011Q00002NR42bQAD', '0011Q00002NR44kQAD', '0011Q00002NR488QAD',
        '0011Q00002NR3hDQAT', '0011Q00002NR3y0QAD', '0013600001YSvLBAA1',
        '0011Q00002NR3rvQAD', '0011Q00002NR46KQAT', '0011Q00002NR40WQAT',
        '0011Q00002NR3y1QAD', '0011Q00002NR3oDQAT', '0011Q00002NR46aQAD',
        '0011Q00002NR3n7QAD', '0011Q00002NR3bwQAD', '0011Q00002NR43jQAD',
        '0011Q00002NR3zlQAD', '0011Q00002NR3yqQAD', '0011Q00002NR3vhQAD',
        '0011Q00002NR40tQAD', '0011Q00002NR42GQAT', '0011Q00002NR472QAD',
        '0011Q00002NR44ZQAT', '0011Q00002NR42rQAD', '0011Q00002NR41LQAT',
        '0011Q00002NR3kUQAT', '0013600001YSvSiAAL', '0011Q00002NR43lQAD',
        '0011Q00002NR42HQAT', '0011Q00002NR3y2QAD', '0011Q00002NR42IQAT',
        '0011Q00002NR408QAD', '0011Q00002NR3w6QAD', '0011Q00002NR42sQAD',
        '0011Q00002NR3tkQAD', '0011Q00002NR45SQAT', '0011Q00002NR41RQAT',
        '0011Q00002NR46bQAD', '0011Q00002NR3eYQAT', '0011Q00002NR3c8QAD',
        '0013600001YSvCzAAL', '0011Q00002NR48WQAT', '0011Q00002NR46RQAT',
        '0011Q00002NR42JQAT', '0011Q00002NR41SQAT', '0011Q00002NR3yNQAT',
        '0013600001YSvIIAA1', '0011Q00002NR3hOQAT', '0011Q00002NR3eZQAT',
        '0011Q00002NR45XQAT', '0011Q00002NR3vcQAD', '0011Q00002NR3zZQAT',
        '0011Q00002NR48RQAT', '0011Q00002NR468QAD', '0011Q00002NR43mQAD',
        '0011Q00002NR45tQAD', '0011Q00002NR3iDQAT', '0011Q00002NR44jQAD',
        '0011Q00002NR44aQAD', '0011Q00002NR474QAD', '0011Q00002NR44bQAD',
        '0011Q00002NR44oQAD', '0011Q00002NR3yJQAT', '0011Q00002NR40xQAD',
        '0011Q00002NR3eaQAD', '0011Q00002NR41GQAT', '0011Q00002NR433QAD',
        '0011Q00002NR3m9QAD', '0011Q00002NR3z1QAD', '0011Q00002NR450QAD',
        '0011Q00002NR3wFQAT', '0011Q00002NR45YQAT', '0011Q00002NR3hEQAT',
        '0011Q00002NR46PQAT', '0011Q00002NR48hQAD', '0011Q00002NR3rwQAD',
        '0011Q00002NR3iiQAD', '0011Q00002JMyM4QAL', '0011Q00002NR46hQAD',
        '0011Q00002NR47UQAT', '0011Q00002NR44FQAT', '0011Q00002NR3ogQAD',
        '0013600001YSvLIAA1', '0011Q00002JMyQVQA1', '0011Q00002NR45ZQAT',
        '0013600001juDRFAA2', '0011Q00002NR443QAD', '0011Q00002NR3wsQAD',
        '0011Q00002NR3y3QAD', '0011Q00002NR3dOQAT', '0011Q00002NR3hFQAT',
        '0011Q00002NR47AQAT', '0011Q00002NR3xXQAT', '0011Q00002NR46vQAD',
        '0011Q00002NR3wXQAT', '0011Q00002NR40KQAT', '0011Q00002NR3cWQAT',
        '0011Q00002NR489QAD', '0011Q00002NR45vQAD', '0011Q00002NR46wQAD',
        '0011Q00002NR44qQAD', '0011Q00002NR469QAD', '0011Q00002NR458QAD',
        '0011Q00002NR41MQAT', '0011Q00002NR46iQAD', '0011Q00002NR40MQAT',
        '0011Q00002NR47CQAT', '0011Q00002NR3luQAD', '0011Q00002NR42dQAD',
        '0011Q00002NR44cQAD', '0011Q00002NR426QAD', '0011Q00002NR43BQAT',
        '0011Q00002NR3fMQAT', '0011Q00002NR3v9QAD', '0011Q00002NR459QAD',
        '0011Q00002NR3dPQAT', '0011Q00002NR46xQAD', '0011Q00002NR3zaQAD',
        '0011Q00002NR444QAD', '0011Q00002NR47SQAT', '0011Q00002NR48JQAT',
        '0011Q00002NR3v1QAD', '0013600001YSvHoAAL', '0011Q00002NR473QAD',
        '0011Q00002NR40uQAD', '0011Q00002NR46IQAT', '0011Q00002NR3vWQAT',
        '0011Q00002NR3ewQAD', '0011Q00002NR42LQAT', '0011Q00002NR41bQAD',
        '0011Q00002NR3ozQAD', '0011Q00002NR3v2QAD', '0011Q00002NR3wtQAD',
        '0011Q00002NR46EQAT', '0011Q00002NR3cnQAD', '0011Q00002NR43PQAT',
        '0011Q00002NR46kQAD', '0011Q00002NR44dQAD', '0011Q00002NR42MQAT',
        '0011Q00002NR43qQAD', '0011Q00002NR45AQAT', '0011Q00002NR445QAD',
        '0011Q00002NR43vQAD', '0011Q00002NR48fQAD', '0011Q00002NR43YQAT',
        '0011Q00002NR3yOQAT', '0011Q00002NR45aQAD', '0011Q00002NR3mAQAT',
        '0011Q00002NR45bQAD', '0011Q00002NR3mBQAT', '0011Q00002NR47EQAT',
        '0011Q00002NR3jcQAD', '0011Q00002NR435QAD', '0011Q00002NR3nZQAT',
        '0011Q00002NR48qQAD', '0011Q00002NR45dQAD', '0011Q00002NR3hsQAD',
        '0011Q00002NR48gQAD', '0011Q00002NR3s0QAD', '0011Q00002NR40iQAD',
        '0011Q00002NR47VQAT', '0011Q00002NR46QQAT', '0011Q00002NR47IQAT',
        '0011Q00002NR45eQAD', '0011Q00002NR3naQAD', '0011Q00002NR3t9QAD',
        '0011Q00002NR3ysQAD', '0011Q00002NR41HQAT', '0011Q00002NR3y5QAD',
        '0011Q00002NR3x3QAD', '0011Q00002NR48DQAT', '0011Q00002NR42OQAT',
        '0011Q00002NR44BQAT', '0011Q00002NR3pjQAD', '0011Q00002NR3ihQAD',
        '0011Q00002NR47PQAT', '0011Q00002NR3ugQAD', '0011Q00002NR3ykQAD',
        '0011Q00002NR46FQAT', '0011Q00002NR45PQAT', '0011Q00002NR3p0QAD',
        '0011Q00002NR3i3QAD', '0011Q00002NR43QQAT', '0011Q00002NR48SQAT',
        '0011Q00002NR3x4QAD', '0011Q00002NR415QAD', '0011Q00002NR41TQAT',
        '0011Q00002NR42iQAD', '0011Q00002NR3p1QAD', '0011Q00002NR3ylQAD',
        '0011Q00002NR42PQAT', '0011Q00002NR44eQAD', '0011Q00002NR45kQAD',
        '0011Q00002NR48TQAT', '0011Q00002NR48HQAT', '0011Q00002NR46yQAD',
        '0011Q00002NR47FQAT', '0011Q00002NR45QQAT', '0011Q00002NR3cMQAT',
        '0011Q00002NR42QQAT', '0011Q00002NR45xQAD', '0011Q00002NR41iQAD',
        '0011Q00002NR3y6QAD', '0011Q00002NR44lQAD', '0011Q00002NR47JQAT',
        '0011Q00002NR47GQAT', '0011Q00002NR4G8QAL', '0011Q00002BvTViQAN',
        '0011Q00002NR45lQAD', '0011Q00002MLNs6QAH', '0011Q00002NR3dbQAD',
        '0011Q00002NR43nQAD', '0011Q00002NR3gHQAT', '0011Q00002NR47HQAT',
        '0011Q00002NR3kQQAT', '0011Q00002NR46zQAD', '0011Q00002JMyN8QAL',
        '0011Q00002NR3cuQAD', '0011Q00002NR3tRQAT', '0011Q00002NR3iEQAT',
        '0011Q00002NR462QAD', '0011Q00002NR3s1QAD', '0013600001baFYcAAM',
        '0011Q00002NR43aQAD', '0011Q00002NR3s2QAD', '0011Q00002NR47iQAD',
        '0011Q00002QhgOIQAZ', '0011Q00002NR3pXQAT', '0011Q00002NR3eFQAT',
        '0011Q00002NR40NQAT', '0011Q00002NR3xcQAD', '0011Q00002NR43wQAD',
        '0011Q00002NR42RQAT', '0011Q00002NR3lrQAD', '0011Q00002NR41IQAT',
        '0011Q00002NR3faQAD', '0011Q00002NR3vqQAD', '0011Q00002NR3kVQAT',
        '0011Q00002NR3twQAD', '0011Q00002NR3qEQAT', '0011Q00002NR41JQAT',
        '0011Q00002QhgZJQAZ', '0011Q00002NR41qQAD', '0011Q00002NR3wuQAD',
        '0011Q00002NR3t2QAD', '0013600001YSvECAA1', '0011Q00002NR3ytQAD',
        '0011Q00002NR48XQAT', '0011Q00002NR3v3QAD', '0011Q00002NR3cXQAT',
        '0011Q00002NR42SQAT', '0011Q00002NR42TQAT', '0011Q00002NR44rQAD',
        '0011Q00002NR416QAD', '0011Q00002NR3oUQAT', '0011Q00002NR3uDQAT',
        '0011Q00002NR3s3QAD', '0011Q00002NR47KQAT', '0011Q00002NR44fQAD',
        '0011Q00002NR3xRQAT', '0011Q00002NR3wZQAT', '0011Q00002NR45BQAT',
        '0011Q00002NR3jtQAD', '0011Q00002NR48VQAT', '0011Q00002NR43bQAD',
        '0011Q00002NR44LQAT', '0011Q00002NR48tQAD', '0011Q00002NR40OQAT',
        '0011Q00002NR42nQAD', '0011Q00002NR42UQAT', '0011Q00002NR42XQAT',
        '0011Q00002NR48nQAD', '0011Q00002NR47LQAT', '0011Q00002NR43pQAD',
        '0011Q00002NR48vQAD', '0011Q00002NR477QAD', '0011Q00002NR44PQAT',
        '0011Q00002NR3v4QAD', '0011Q00002NR46YQAT', '0011Q00002NR42wQAD',
        '0011Q00002NR3gIQAT', '0011Q00002NR3gTQAT', '0011Q00002NR40jQAD',
        '0011Q00002NR3wGQAT', '0011Q00002NR46AQAT', '0011Q00002NR44gQAD',
        '0011Q00002NR451QAD', '0011Q00002NR47NQAT', '0011Q00002NR44CQAT',
        '0011Q00002NR40kQAD', '0011Q00002NR3iWQAT', '0011Q00002QhgXSQAZ',
        '0011Q00002NR48AQAT', '0011Q00002NR42jQAD', '0011Q00002NR48BQAT',
        '0011Q00002NR41nQAD', '0011Q00002NR48PQAT', '0011Q00002NR48aQAD',
        '0011Q00002NR3ukQAD', '0011Q00002NR48uQAD', '0011Q00002NR3bjQAD',
        '0011Q00002NR46BQAT', '0011Q00002NR3u4QAD', '0011Q00002NR41jQAD',
        '0011Q00002NR40lQAD', '0011Q00002NR3o6QAD', '0011Q00002NR40AQAT',
        '0011Q00002NR3miQAD', '0011Q00002NR45cQAD', '0011Q00002NR47jQAD',
        '0011Q000028NZEAQA4', '0011Q00002NR3w8QAD', '0011Q00002NR3psQAD',
        '0011Q00002NR417QAD', '0011Q00002NR3s4QAD', '0011Q00002NR48kQAD',
        '0011Q00002NR3dyQAD', '0011Q00002NR3hGQAT', '0011Q00002NR41kQAD',
        '0011Q00002NR40PQAT', '0011Q00002NR47WQAT', '0011Q00002NR48EQAT',
        '0011Q00002NR44sQAD', '0011Q00002NR44IQAT', '0011Q00002NR3wvQAD',
        '0011Q00002NR48CQAT', '0011Q00002NR43cQAD', '0011Q00002NR46GQAT',
        '0011Q00002NR3dQQAT', '0011Q00002NR3hYQAT', '0011Q00002NR3hHQAT',
        '0011Q00002NR3fNQAT', '0011Q00002NR3l2QAD', '0011Q00002NR3xEQAT',
        '0011Q00002NR3vXQAT', '0011Q00002NR3lNQAT', '0011Q00002NR3ceQAD',
        '0011Q00002NR3s5QAD', '0011Q00002NR3s6QAD', '0011Q00002NR3nbQAD',
        '0011Q00002Qf05SQAR', '0011Q00002Qh2fLQAR', '0011Q00002QhgX9QAJ',
        '0011Q00002QhgOHQAZ', '0013600001YSv27AAD', '0013600001YT3qYAAT',
        '0011Q00002NR3fpQAD', '0011Q00002NR3tLQAT', '0013600001YSuotAAD',
        '0011Q00002NR3hIQAT', '0013600001YSvHgAAL', '0013600001YSvGSAA1',
        '0011Q00002NR3eLQAT', '0011Q00002NR3cxQAD', '0011Q00002NR3fqQAD',
        '0011Q00002NR41FQAT', '0011Q00002NR48KQAT', '0011Q00002NR3k4QAD',
        '0011Q00002NR3fJQAT', '0011Q00002NR48cQAD', '0011Q00002NR43VQAT',
        '0013600001YSvBPAA1', '0011Q00002NR48MQAT', '0011Q00002NR3vbQAD',
        '0011Q00002NR471QAD', '0011Q00002NR3gXQAT', '0011Q00002NR3zXQAT',
        '0011Q00002NR414QAD', '0011Q00002NR47QQAT', '0011Q00002NR3sqQAD',
        '0011Q00002NR47RQAT', '0011Q00002NR45WQAT', '0011Q00002NR3yDQAT',
        '0011Q00002NR442QAD', '0011Q00002NR487QAD', '0011Q00002NR3x2QAD',
        '0011Q00002NR43AQAT', '0011Q00002NR3w3QAD', '0011Q00002NR465QAD',
        '0011Q00002NR3vFQAT', '0011Q00002NR43kQAD', '0011Q00002NR407QAD',
        '0011Q00002NR48rQAD', '0011Q00002NR45TQAT', '0011Q00002NR48mQAD',
        '0011Q00002NR43uQAD', '0011Q00002NR3ltQAD', '0011Q00002NR48NQAT',
        '0011Q00002NR3v0QAD', '0011Q00002NR473QAD', '0011Q00002NR475QAD',
        '0011Q00002NR3fLQAT', '0011Q00002NR3yrQAD', '0011Q00002NR3hiQAD',
        '0011Q00002QhgXSQAZ', '0011Q00002NR3tOQAT', '0011Q00002NR3jbQAD',
        '0011Q00002NR45uQAD', '0011Q00002NR3tPQAT', '0013600001YSvKtAAL',
        '0011Q00002NR42KQAT', '0011Q00002NR3wYQAT', '0011Q00002NR40LQAT',
        '0011Q00002NR45UQAT', '0011Q00002NR46jQAD', '0011Q00002NR47DQAT',
        '0011Q00002NR3jAQAT', '0011Q00002NR3ukQAD', '0011Q00002NR42NQAT',
        '0011Q00002NR3wMQAT', '0011Q00002NR3vGQAT', '0011Q00002NR3yEQAT',
        '0011Q00002NR48ZQAT', '0011Q00002NR43RQAT'
    ]

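    # Build a single-quoted, comma-separated ID list for the IN clause below.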
    ids_text = "'" + "','".join(venues_to_update_primary_office) + "'"

    staged = sf.fupdate(f"""
    UPDATE Account
    SET PrimaryOffice__c = '0011Q00002NPfYwQAL'
    WHERE (PrimaryOffice__r.Name = 'Toronto' OR PrimaryOffice__r.Name = 'Vancouver')
    --WHERE (PrimaryOffice__r.Name != 'Toronto' AND PrimaryOffice__r.Name != 'Vancouver')
    AND PrimaryOffice__r.Name != 'Evenko'
    AND Id IN ({ids_text})
    """,
                        show_columns=['Name', 'PrimaryOffice__r.Name'])

    not_to_update = sf.fupdate(f"""
    UPDATE Account
    SET PrimaryOffice__c = '0011Q00002NPfYwQAL'
    --WHERE (PrimaryOffice__r.Name = 'Toronto' OR PrimaryOffice__r.Name = 'Vancouver')
    WHERE (PrimaryOffice__r.Name != 'Toronto' AND PrimaryOffice__r.Name != 'Vancouver')
    AND PrimaryOffice__r.Name != 'Evenko'
    AND Id IN ({ids_text})
    """,
                               show_columns=['Name', 'PrimaryOffice__r.Name'])

    pdh.to_excel({
        'To Update': staged,
        'NOT UPDATING': not_to_update
    }, 'Staged Evenko venue updates')

    if prompt('Update?', boolean=True):
        sf.update(staged, batch_size=10)

    return
def main():
    re_run = prompt('Re-run On-Sale?', boolean=True)
    costing_data_by_tour = threading.new(get_costing_data_by_tour, re_run)
    offers_to_migrate = sql.query("""
    SELECT CAST(OfferId AS VARCHAR) AS EOSId__c, ArtistName, OfferStatusName, Company, OracleCode, PromoterName, USVenueType, (SELECT CreatedDateTime FROM Offer WHERE Id = OfferId) AS OfferCreatedDate
        , FORMAT(MIN(ShowDate), 'yyyy-MM-dd') AS FirstDate
        , FORMAT(MAX(ShowDate), 'yyyy-MM-dd') AS LastDate
        , COUNT(DISTINCT ShowId) AS ShowCount
        , COUNT(DISTINCT (CASE WHEN PostponedDateTBC = 1 THEN ShowId ELSE NULL END)) AS PostponedDateTBCShows
    FROM vwEOSData
    WHERE (ShowDate>=GetDate() OR PostponedDateTBC=1)
    AND CountryId = 1
    AND OfferStatusName IN ('Confirmed','On Sale','Settled','Draft')
    AND PromoterId NOT IN (1286)
    GROUP BY OfferId, ArtistName, OfferStatusName, CountryName, Company, OracleCode, PromoterName, USVenueType
    ORDER BY MAX(ShowDate) ASC
    """)
    offer_ids = [item['EOSId__c'] for item in offers_to_migrate]
    shows_to_migrate = uk.query_Event__c(sql, offer_ids)
    shows_by_offer_id = pdh.groupby_unsorted(shows_to_migrate,
                                             lambda x: x['TourEOSId'])
    migrated_offers = lne.select("""
    SELECT Id, Status__c, EOSId__c, OracleProjectCode__c, TourName__c, ShowCount__c, CreatedBy.Name
    FROM Tour__c
    WHERE EOSId__c <> NULL
    """,
                                 return_type='flat')
    migrated_offers_uat = uat.select("""
    SELECT Id, Status__c, EOSId__c, OracleProjectCode__c, TourName__c, ShowCount__c, CreatedBy.Name
    FROM Tour__c
    WHERE EOSId__c <> NULL
    """,
                                     return_type='flat')

    migrated_offer_ids = {item['EOSId__c'] for item in migrated_offers}
    migrated_offer_ids_uat = {item['EOSId__c'] for item in migrated_offers_uat}

    remaining_to_migrate = [
        item for item in offers_to_migrate
        if item['EOSId__c'] not in migrated_offer_ids
    ]
    offer_ids_remaining_to_migrate = [
        item['EOSId__c'] for item in remaining_to_migrate
    ]
    analysis_missing_eos_ids_by_tour = threading.new(
        uk.analysis_missing_eos_ids_by_tour, sql, sf,
        offer_ids_remaining_to_migrate)

    costing_show_filenames = set(os.listdir(loc.uk_onsale_data_cache_path))
    for item in migrated_offers + remaining_to_migrate:
        shows = shows_by_offer_id.get(item['EOSId__c'], [])
        shows_with_costing = [
            show for show in shows
            if f"{show['EOSId__c']}.pickle" in costing_show_filenames
        ]
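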
        item['Migrated in UAT?'] = item['EOSId__c'] in migrated_offer_ids_uat
        item['# EOS EVT (w cancelled)'] = len(shows)
        item['# Shows w Excel Costing'] = len(shows_with_costing)

    for item in remaining_to_migrate:
        item['Missing Master Data'] = ', '.join(
            analysis_missing_eos_ids_by_tour.get(item['EOSId__c'], []))

    def get_multiple_file_issues(df):
        if len(df) == 0:
            return []
        groupby = df.groupby(['EventEOSId', 'Source_File'])
        df2 = pd.DataFrame(groupby.groups.keys())
        duplicated = df2.duplicated(subset=[0], keep=False)
        df3 = df2[duplicated]
        chunks = []
        if len(df3) > 0:
            groupby2 = df3.groupby(0)
            for eosid in groupby2.groups:
                files = df2.groupby(0).get_group(eosid)[1].tolist()
                chunks.append(f'Show {eosid} in {", ".join(files)}')
        return chunks

    for item in remaining_to_migrate:
        costing = costing_data_by_tour.get(item['EOSId__c'], None)
        if costing:
            tour = costing.Tour__c.iloc[0].to_dict()
            lebs = costing.LedgerEntryBreakout__c
            les = costing.LedgerEntry__c
            ts = costing.TicketScale__c
            item['Err_HasNegativeLEBs'] = True if (
                lebs.OfferRate__c < 0).any() == True else None
            # item['Err_TourHasNoShows'] = item['EOSId__c'] not in file_events_by_tour.groups
            item['Err_OracleCodeMismatch'] = (
                None if
                (tour['OracleProjectCode__c'] == item['OracleCode']) else
                f"{item['OracleCode']} in EOS, {tour['OracleProjectCode__c']} in file"
            )
            item['Err_DupePriceLevels'] = True if ts.duplicated(
                subset=['SourceSystemId__c']).any() == True else None
            item['Err_MultipleFileSources'] = ' | '.join(
                get_multiple_file_issues(les) +
                get_multiple_file_issues(lebs) + get_multiple_file_issues(ts))
    remaining_to_migrate = pd.DataFrame(remaining_to_migrate)
    for col in remaining_to_migrate.columns.values:
        vals = remaining_to_migrate[col]
        if col.startswith('Err') and ((vals.isna()) | (vals == '')).all():
            del remaining_to_migrate[col]

    file_name = 'UK On-Sale Migration Status ' + datetime.now().strftime(
        '%Y-%m-%d') + '.xlsx'
    file_name_2 = loc.uk_onsale_migration_status + 'UK On-Sale Migration Status ' + datetime.now(
    ).strftime('%Y-%m-%d %H.%M') + '.xlsx'
    threading.new(
        pdh.to_excel, {
            'Remaining to Migrate': remaining_to_migrate,
            'Migrated Offers': migrated_offers,
        }, file_name)
    threading.new(
        pdh.to_excel, {
            'Remaining to Migrate': remaining_to_migrate,
            'Migrated Offers': migrated_offers,
        }, file_name_2)
    threading.wait()

    return
def main():
    dist_path = 'output/python-salesforce/'
    dist_zip_path = 'output/python-salesforce.zip'
    inclusions = [
        r'example-credentials.json',
        r'.vscode/launch.json',
        r'.vscode/settings.json',
        r'locations.py',
        r'classes/',
        r'functions/',
        r'dataloader.py',
        r'clone_utility.py',
        r'utility_sf_tasks.py',
        r'requirements.txt',
        r'resources/ant/(?:build|fullpullpackage).xml',
        # r'resources/chromedriver'
    ]
    exclusions = [
        r'.*?__pycache__',
        r'.*?.DS_Store',
        r'classes/gui.py',
        r'classes/concept_parsing_sql_command',
        r'classes/salesforce_api_helpers.py',
        r'classes/csv_file.py',
        r'classes/formula_parser.py',
        r'classes/table_file.py',
    ]

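    # Anchor every pattern so it only matches from the start of a relative path.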
    inclusions = ['^' + s for s in inclusions]
    exclusions = ['^' + s for s in exclusions]

    all_files = [
        f[f.find('python-salesforce') + len('python-salesforce') + 1:]
        for f in dir.get_files_list('./', True)
    ]
    filtered_files = []
    for f in all_files:
        matches_a_path = any([re.match(reg, f) != None for reg in inclusions])
        matches_no_exclusion = all(
            [re.match(reg, f) == None for reg in exclusions])
        if matches_a_path and matches_no_exclusion:
            filtered_files.append(f)
    files = [(f, os.path.join(dist_path, f)) for f in filtered_files]

    dir.del_dir(dist_path)
    for src, tar in files:
        s = dir.read_file(src)
        if src.endswith('.py'):
            s = s.replace('*****@*****.**', new_user_email)
            s = s.replace('daniel.hicks_1', new_user_folder_name)
        if 'example-credentials' in src:
            tar = tar.replace('example-credentials', 'credentials')
        dir.create_file(tar, s)

    prompt(
        f'\nFiles have been written to {dist_path}. When ready, press Enter to zip the folder'
    )

    # writing files to a zipfile
    with zipfile.ZipFile(dist_zip_path, 'w') as zip_file:
        files_to_zip = dir.listdir(dist_path, recursive=True)
        for f in files_to_zip:
            zip_file.write(f, f.replace('output/python-salesforce/', ''))
    dir.del_dir(dist_path)
    print(f'Zip file created:\n{os.path.abspath(dist_zip_path)}')
    return
def migrate_uk_data(filepaths, multi=False):
    issues = []
    filedata = get_file_data(filepaths, multi)
    offer_ids_list = filedata.Tour__c1.EOSId__c.tolist()
    data = ObjDict(get_eos_data(offer_ids_list))

    duplicate_tourids = filedata.Tour__c1[filedata.Tour__c1.duplicated('EOSId__c', False)][['EOSId__c', 'Source_File']]
    duplicate_tourids['Issue'] = 'Tour EOS Id in >1 Source File'
    issues.append(duplicate_tourids)
    filedata.Tour__c1.drop_duplicates(subset='EOSId__c', inplace=True)

    data.update(filedata)

    data.Tour__c = data.Tour__c.merge(data.Tour__c1, on=['EOSId__c'], how='outer', suffixes=['_EOS',None], indicator=True)
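    # Map the RecordType name from the file to its Salesforce RecordTypeId.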
    data.Tour__c['RecordTypeId'] = data.Tour__c['RecordType.Name'].apply(lambda x: session.record_type_id_mapper()('Tour__c', x).Id)
    data.TourDeal__c = (
        data.TourDeal__c.merge(data.TourDeal__c1, on=['SourceSystemId__c'], how='outer', suffixes=['_EOS',None], indicator=True)
        .query("_merge != 'left_only'")
        .drop(columns='_merge')
    )
    data.TourLeg__c = (
        data.TourLeg__c.merge(data.TourLeg__c1, on=['SourceSystemId__c'], how='outer', suffixes=['_EOS',None], indicator=True)
        .query("_merge != 'left_only'")
        .drop(columns='_merge')
    )
    data.Event__c = data.Event__c.merge(data.Event__c1, on='EOSId__c', suffixes=['_EOS',None], indicator=True)
    data.Deal__c = data.Deal__c.merge(data.Deal__c1, on='Event__r.EOSId__c', suffixes=['_EOS',None], indicator=True)
    
    del data['Tour__c1']
    del data['TourDeal__c1']
    del data['TourLeg__c1']
    del data['Event__c1']
    del data['Deal__c1']

    set_computed_fields(data)
    data.Issues = pd.concat(data.Issues + issues + migration_data_issues(data))

    pdh.to_excel(data, f'{folder}OUTPUT - UK Data to Migrate.xlsx')

    # Questions:
    # Add Deductions?

    if True or prompt('Ready to migrate data to Rome?', boolean=True):
        session.add_bypass_settings()
        results = {}
        def upsert(func, *args, **kwargs):
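            # Store each result keyed by object name (the first positional argument).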
            results[args[0]] = func(*args, **kwargs)
        upsert(session.upsert, 'Tour__c', data.Tour__c, 'EOSId__c')
        upsert(session.upsert, 'TourDeal__c', data.TourDeal__c, 'SourceSystemId__c')
        upsert(session.upsert, 'TourLeg__c', data.TourLeg__c, 'SourceSystemId__c')
        upsert(session.upsert, 'TourOnSale__c', data.TourOnSale__c, 'SourceSystemId__c')
        event_results = session.create_events(data, upsert_field_names={
            'Event__c': 'EOSId__c'
            , 'EventDateTime__c': 'SourceSystemId__c'
            , 'Deal__c': 'SourceSystemId__c'
            , 'TicketScale__c': 'EOSId__c'
            # , 'Deduction__c': 'SourceSystemId__c'
            , 'LedgerEntry__c': 'SourceSystemId__c'
            , 'LedgerEntryBreakout__c': 'SourceSystemId__c'
        })
        upsert(session.upsert, 'EventOnSale__c', data.EventOnSale__c, 'SourceSystemId__c')
        session.remove_bypass_settings()

        success_results = {obj: result.success for obj, result in results.items()}
        success_results.update(event_results.success)
        error_results = {obj: result.errors for obj, result in results.items()}
        error_results.update(event_results.errors)

        pdh.to_excel({f'{key} ({len(val)})':val for key,val in success_results.items()}, f'{folder}OUTPUT - UK Data Migration Results - SUCCESS.xlsx')
        pdh.to_excel({f'{key} ({len(val)})':val for key,val in error_results.items()}, f'{folder}OUTPUT - UK Data Migration Results - ERROR.xlsx')
        print('')
    return