# NOTE: these snippets are excerpts from a larger script; they assume
# module-level imports and helpers defined elsewhere (e.g. datetime, json,
# os, re, pandas as pd, functions.pandas_helpers as pdh, Salesforce_API,
# SQL_Server_API, ObjDict, TableFile, and a default username).
def main():

    psdev = Salesforce_API('*****@*****.**')
    sit = Salesforce_API('*****@*****.**')
    lne = Salesforce_API('*****@*****.**')
    session = lne
    session.save_record_snapshot_on_select = True
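    # Presumably this flag makes the session snapshot records as they are
    # selected, so a later session.update() can diff against the snapshot.
    # (Hedged: Salesforce_API is a project-specific wrapper, not a public API.)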
    
    tours = session.select("SELECT Id, TourName__c FROM Tour__c WHERE LastModifiedDate >= LAST_WEEK", contentType='JSON')
    tourlegs = session.select("""
        SELECT Id, Tour__c, LegName__c, Order__c, TicketScalePriceLevels__c 
        FROM TourLeg__c 
        WHERE Tour__c IN ('{}')
        ORDER BY Tour__c, Order__c ASC
    """.format("','".join([item.Id for item in tours])), contentType='JSON')
    ticketscales = session.select("""
        SELECT Id, Event__r.TourLeg__r.Tour__r.TourTitle__c, Event__r.TourLeg__r.LegName__c, Event__r.EventName__c, Event__r.TourLeg__c, Event__c, Type__c, Label__c
        FROM TicketScale__c 
        WHERE Event__r.TourLeg__r.Tour__c IN ('{}')
    """.format("','".join([item.Id for item in tours])), contentType='JSON')

    issues = []

    for tourleg in tourlegs:
        tourleg.ticketscales = [item for item in ticketscales if item.Event__r.TourLeg__c == tourleg.Id]
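        # Assumed shape of TicketScalePriceLevels__c (hypothetical example):
        #   [{"type": "P1", "label": "Front Floor"}, {"type": "P2", "label": "Rear Floor"}]
        # i.e. a JSON list mapping each ticket-scale Type to a custom Label.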
        labels = None if tourleg.TicketScalePriceLevels__c is None else ObjDict.deepclone(json.loads(tourleg.TicketScalePriceLevels__c))
        label_map = {item['type']: item for item in labels} if labels is not None else None
        for ts in tourleg.ticketscales:
            config = label_map[ts.Type__c] if labels is not None and ts.Type__c in label_map else None
            ts.tsOriginalLabel = ts.Label__c
            ts.tourLegLabel = config.label if config is not None else None
            if config is not None and config.type != config.label and ts.Label__c is None:
                issues.append({'ts':ts, 'issue': "Ticket Scale missing label when custom label is set"})
                ts.Label__c = config.label
            elif config is not None and ts.Label__c != config.label and ts.Label__c is not None:
                issues.append({'ts':ts, 'issue': "Ticket Scale label doesn't match Tour Leg Type-Label"})
                ts.Label__c = config.label
            elif config is None and ts.Label__c is not None:
                if ts.Label__c == ts.Type__c:
                    issues.append({'ts':ts, 'issue': "Tour Leg has no Type-Label maps, but Ticket Scale has a value in Label__c that matches Type__c"})
                else:
                    issues.append({'ts':ts, 'issue': "Tour Leg has no Type-Label maps, but Ticket Scale has a custom value in Label__c that is different than Type__c"})
                    # ts.Label__c = None # Need to use simple api
        tourleg.labels = labels
    print('\n'.join([item['issue'] for item in issues]))
    
    if len(issues) > 0:

        pdh.multiple_df_to_excel({'Sheet1': pd.DataFrame(pdh.flatten(issues))}, 'Data Issues - TS Label__c.xlsx')

        session.add_bypass_settings()
        session.update(ticketscales)
        session.remove_bypass_settings()

    return
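
# The Type/Label reconciliation above reduces to a small pure function. A
# minimal sketch for illustration only (classify_label_issue is hypothetical,
# not part of the original script; config is one entry from the label map):
def classify_label_issue(ts_type, ts_label, config):
    """Return an issue description, or None when the label is consistent."""
    if config is not None and config['type'] != config['label'] and ts_label is None:
        return "Ticket Scale missing label when custom label is set"
    if config is not None and ts_label is not None and ts_label != config['label']:
        return "Ticket Scale label doesn't match Tour Leg Type-Label"
    if config is None and ts_label is not None:
        if ts_label == ts_type:
            return "Tour Leg has no Type-Label maps, but Label__c matches Type__c"
        return "Tour Leg has no Type-Label maps, but Label__c is a custom value"
    return None

# classify_label_issue('P1', None, {'type': 'P1', 'label': 'Front Floor'})
# -> "Ticket Scale missing label when custom label is set"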
# Example #2
def main():
    # 'session' is not defined in this excerpt; assume a default org
    # connection as in the other examples.
    session = Salesforce_API(username)

    data = session.select("""
    SELECT Id, ActualGrossToDate__c, ActualSalesToDate__c, ActualSalestoDateFormula__c, AfterAdjustedDeductions__c, AverageLiftPerTicket__c, BeforeAdjustedDeductions__c, Capacity__c, Comps__c, Division__c, EventDateTime__c, EventIDStageTypeDateTime__c, EventIDStageType__c, Event__c, FaceValue__c, FirstDayCount__c, FirstDayGross__c, ForecastGrossToGo__c, ForecastSalesToGo__c, Geography__c, GrossSales__c, Holds__c, Kills__c, LiftAfterTaxes__c, LiftBeforeFee__c, LiftBeforeTaxes__c, MatchesEventStage__c, NeedsNotification__c, NetCapacity__c, Notes__c, OfficeName__c, Opens__c, ParentISOCode__c, PlatinumAfterAdjusted_Deductions__c, PlatinumBeforeAdjustedDeductions__c, PriceLevelID__c, PriceLevel__c, Price__c, PrimaryVenueOffice__c, ProjectedGrossSales__c, ProjectedPaidTicketsAdjustedForZeroPrice__c, ProjectedPaidTickets__c, RecordModificationCheck__c, SecondDayCount__c, SecondDayGross__c, SellableCapacity__c, ShiftDate__c, ShowOnOffer__c, ShowPlatinumOnOffer__c, SoldValidation__c, SourceSystemId__c, StageType__c, TMFeePercent__c, TMFee__c, ThirdDayCount__c, ThirdDayGross__c, TicketCountDate1__c, TicketCountDate2__c, TicketCountDate3__c, TicketCountDate4__c, TicketCountDay1__c, TicketCountDay2__c, TicketCountDay3__c, TicketCountDay4__c, TicketGrossDay1__c, TicketGrossDay2__c, TicketGrossDay3__c, TicketGrossDay4__c, TicketScaleType__c, TicketsSoldLast4Days__c, TotalPlatinumSeatProceeds__c, Type__c, Unsold__c, UsingAudit__c, VarianceGrossSales__c, VariancePaidTickets__c, VenueOwnership__c, fxActualGrossToDate__c, fxAfterAdjustedDeductions__c, fxAverageLiftPerTicket__c, fxBeforeAdjustedDeductions__c, fxFaceValue__c, fxFirstDayGross__c, fxForecastGrossToGo__c, fxGrossSales__c, fxLiftAfterTaxes__c, fxLiftBeforeFee__c, fxLiftBeforeTaxes__c, fxPlatinumAfterAdjusted_Deductions__c, fxPlatinumBeforeAdjustedDeductions__c, fxPrice__c, fxProjPaidTicketsAdjForZeroPrice__c, fxProjectedGrossSales__c, fxSecondDayGross__c, fxTMFee__c, fxThirdDayGross__c, fxTicketGrossDay1__c, fxTicketGrossDay2__c, fxTicketGrossDay3__c, fxTicketGrossDay4__c, fxTotalPlatinumSeatProceeds__c, LastSaveContext__c, Price_Level_Formula__c, TicketPriceLevelforASR__c, fxProjPaidTicketsAdjustedForZeroPrice__c, fxVarianceGrossSales__c, MatchedEventStageOnly__c, fxHolds_Plus_Unsold__c, IsATemplateFormula__c, IsATemplate__c, DataIntegrityCheck__c, NumOfDaysAppliesTo__c, CapacityInputFormula__c, IsTouringApp__c, Label__c, HiddenVenueOffice__c, EOSId__c, LinkedTicketBand__c, NewPrice__c, TicketBandType__c, TicketHoldType__c, ForecastedTicketsPercentage__c FROM TicketScale__c
    WHERE StageType__c IN ('Settlement','Audit')
    AND Event__r.Status__c = 'Flash Complete'
    AND Event__r.EventFirstDate__c > 2019-06-01
    AND Event__r.EventFirstDate__c < 2019-07-01
    ORDER BY Event__r.EventFirstDate__c DESC, Event__c
    """)

    import functions.pandas_helpers as pdh
    import pandas as pd
    pdh.multiple_df_to_excel({'d': pd.DataFrame(data)},
                             'Sample Settlement Data.xlsx')

    # session.add_bypass_settings(pbflow=False,automation=False)
    # session.remove_bypass_settings()
    return
# Example #3
def main():
    session = Salesforce_API(username)
    queries = {
        'Venues':
        """
            SELECT Id, VenueName__c, BillingCity, BillingState, BillingCountry
            FROM Account
            WHERE RecordType.Name = 'Venue'
            AND Status__c = 'Active'
            AND BillingCountry IN ('United States','Canada')
            ORDER BY BillingCountry DESC, BillingState, BillingCity, VenueName__c
        """,
        'Artists':
        """
            SELECT Id, Name
            FROM Account
            WHERE RecordType.Name = 'Artist'
            AND Status__c = 'Active'
            ORDER BY Name
        """,
        'Co-Promoters':
        """
            SELECT Id, Name
            FROM Account
            WHERE RecordType.DeveloperName = 'CoPromoter'
            AND Status__c = 'Active'
            ORDER BY Name
        """,
        'Record Types':
        """
            SELECT Id, SobjectType, Name
            FROM RecordType
            WHERE SobjectType IN('Account','Event__c','LedgerEntry__c')
            ORDER BY SobjectType, Name
        """
    }
    datasets = {key: session.select(val) for key, val in queries.items()}
    pdh.multiple_df_to_excel(datasets, 'ROME Master Data.xlsx')
def main():
    # 'session' is used for tooling queries below but not defined in this
    # excerpt; assume a default org connection as in the other examples.
    session = Salesforce_API(username)
    def get_global_value_sets(session):
        globalvaluesets = session.tooling_query("SELECT DeveloperName FROM GlobalValueSet")
        allpicklistoptions = []

        for item in globalvaluesets:
            result = session.tooling_query("SELECT Metadata FROM GlobalValueSet WHERE DeveloperName = '{}'".format(item.DeveloperName))[0]
            metadata = result.Metadata
            item.Metadata = metadata
            item.picklistoptions = metadata.customValue
            for p in item.picklistoptions:
                p.GlobalValueSetName = item.DeveloperName
            allpicklistoptions.extend(metadata.customValue)

        picklistoptions_df = pd.DataFrame(allpicklistoptions)
        filtered_df = picklistoptions_df[['GlobalValueSetName', 'label', 'valueName', 'default', 'description', 'isActive']]
        return filtered_df

    def compare(master_df, other_df, key_fields):
        shared_fields = [f for f in master_df.columns.values if f in other_df.columns.values and f not in key_fields]
        master_df['exists'] = True
        other_df['exists'] = True
        # Outer merge on the keys; pandas suffixes the overlapping columns _x/_y
        df = pd.merge(master_df, other_df, how='outer', on=key_fields)

        def calc_change(row):
            if row['exists_x'] is True and pd.isnull(row['exists_y']):
                return "NEW"
            if row['exists_y'] is True and pd.isnull(row['exists_x']):
                return "DELETED"
            changed_fields = []
            for f in shared_fields:
                if row[f + '_x'] != row[f + '_y']:
                    # changed_fields.append('{} to {}'.format(f, row[f + '_x']))
                    changed_fields.append(f)
            if len(changed_fields) > 0:
                return "Changed fields: {}".format(', '.join(changed_fields))
            return ""
        df['Change'] = df.apply(calc_change, axis=1)
        df = df[df['Change'] != ""]
        # df = df[df['GlobalValueSetName'] == 'VenueSetup']
        return df
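
    # Minimal usage sketch for compare() (illustrative only, not part of the
    # original run):
    # _a = pd.DataFrame([{'GlobalValueSetName': 'S', 'valueName': 'v1', 'label': 'One'}])
    # _b = pd.DataFrame([{'GlobalValueSetName': 'S', 'valueName': 'v1', 'label': 'Uno'}])
    # compare(_a, _b, ['GlobalValueSetName', 'valueName'])
    # # -> one row with Change == "Changed fields: label"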

    dev2_session = Salesforce_API('*****@*****.**')
    qa2_session = Salesforce_API('*****@*****.**')
    uat_session = Salesforce_API('*****@*****.**')
    prod_session = Salesforce_API('*****@*****.**')

    dev2_records = get_global_value_sets(dev2_session)
    qa2_records = get_global_value_sets(qa2_session)
    uat_records = get_global_value_sets(uat_session)
    prod_records = get_global_value_sets(prod_session)

    comparisons = {
          "DEV2 to PROD": compare(dev2_records, prod_records, ['GlobalValueSetName', 'valueName'])
        , "DEV2 to QA2": compare(dev2_records, qa2_records, ['GlobalValueSetName', 'valueName'])
        , "DEV2 to UAT": compare(dev2_records, uat_records, ['GlobalValueSetName', 'valueName'])
    }
    
    pdh.multiple_df_to_excel(comparisons, "Global Picklist Changes.xlsx")


    metadata_entities = [
        'EntityDefinition'
        , 'CustomObject'
        # , 'EntityLimit'
        , 'Flow'
        , 'GlobalValueSet'
        # , 'StandardValueSet'
        , 'MatchingRule'
        , 'Profile'

    ]
    
    metadata_fields = session.tooling_query("""
    SELECT EntityDefinition.QualifiedApiName, DurableId, QualifiedApiName, EntityDefinitionId, NamespacePrefix, DeveloperName, MasterLabel, Label, Length, DataType, ValueTypeId, ExtraTypeInfo, IsCalculated, IsHighScaleNumber, IsHtmlFormatted, IsNameField, IsNillable, IsWorkflowFilterable, IsCompactLayoutable, Precision, Scale, IsFieldHistoryTracked, IsIndexed, IsApiFilterable, IsApiSortable, IsListFilterable, IsListSortable, IsApiGroupable, IsListVisible, ControllingFieldDefinitionId, LastModifiedDate, LastModifiedById, PublisherId, RunningUserFieldAccessId, RelationshipName, ReferenceTo, ReferenceTargetField, IsCompound, IsSearchPrefilterable, IsPolymorphicForeignKey, BusinessOwnerId, BusinessStatus, SecurityClassification, Description
    FROM FieldDefinition 
    WHERE EntityDefinition.QualifiedApiName IN('{}')
    """.format("','".join(metadata_entities)))
    for item in metadata_fields:
        del item['attributes']
        del item['EntityDefinition']['attributes']
    
    
    
    # objects = []
    # objects.extend(session.tooling_query("SELECT Id, DeveloperName FROM CustomObject"))
    # objects.extend(session.tooling_query("SELECT Id, DeveloperName FROM EntityDefinition"))
    # fields = session.tooling_query("SELECT Id, DeveloperName, ManageableState, NamespacePrefix, TableEnumOrId FROM CustomField")
    # df = pd.DataFrame(fields)
    # object_lookup = {}
    # object_lookup.update({item.DeveloperName: item.Id for item in objects})
    # object_lookup.update({item.Id: item.Id for item in objects})

    # for item in fields:
    #     item.ObjectName = object_lookup[item.TableEnumOrId]

    d = {
        "Metadata Fields": pd.DataFrame(pdh.flatten(metadata_fields))
        # ,"Objects": pd.DataFrame(objects)
        # ,"Fields": pd.DataFrame(fields)
    }

    for m in metadata_entities:
        query = "SELECT {} FROM {}{}".format(
            ','.join([
                f.QualifiedApiName for f in metadata_fields
                if f.EntityDefinition.QualifiedApiName == m
                and f.QualifiedApiName != 'FullName'
                and (f.QualifiedApiName != 'Metadata' or m == 'GlobalValueSet')])
            , m
            , " WHERE DeveloperName = 'BuyingGroup'" if m == 'GlobalValueSet' else ""
        )
        data = session.tooling_query(query)
        for item in data:
            del item['attributes']
        d[m] = pd.DataFrame(data)



    pdh.multiple_df_to_excel(d, pdh.get_download_folder()+"/Fields.xlsx")
    # prod = Salesforce_API('*****@*****.**')
    # sand = session

    # records = prod.select_records("SELECT AccessLevel__c, Role__c, User__c FROM TourTeamMember__c WHERE Tour__c = 'a1s1Q000005Mss1QAC'")

    # for item in records:
    #     item.Tour__c = 'a1sJ0000005BRRFIA4'


    # sand.default_batch_size = 1
    # sand.insert_records('TourTeamMember__c', records)
    return
# Example #5
def diff_instances(metadata, sessions, dataset_functions, multi, master_instance, other_instance):
    # if all([item in datasets[list(datasets)[0]] for item in [master_instance, other_instance]]) is False:
    #     return
    timenow = datetime.datetime.now().strftime("%Y-%m-%d %H.%M")
    output = {}

    diffs_to_run = [
        (func, sessions[master_instance], sessions[other_instance], metadata[master_instance].result(), metadata[other_instance].result())
        for func in dataset_functions
    ]
    # pool = mp.Pool()
    # 'mp' appears to be a project helper wrapping multiprocessing; the
    # 'multi' flag presumably toggles parallel vs. serial execution.
    results = mp.starmap(diff_dataset, diffs_to_run, multi)
    # pool.close()
    # pool.join()
    for item in results:
        output.update(item)

    # for name, fields in dataset_key_fields.items():
    #     if name not in datasets:
    #         continue
    #     df1 = datasets[name][master_instance].result()
    #     df2 = datasets[name][other_instance].result()
    #     diff = pdh.diff(df1, df2, fields, suffixes=[f'_{master_instance}', f'_{other_instance}'])
    #     diffs[name] = diff
    #     if 'FileData' in diff:
    #         diff['FileData'] = '(Binary data not shown)'
    #     output['{} ({})'.format(name, len(diff))] = diff
    
    # def add_to_destructive_changes(name, items):
    #     if len(items) == 0:
    #         return ''
    #     output = '        <types>'
    #     for item in items:
    #         output += '\n            <members>{}</members>'.format(item)
    #     output += '\n            <name>{}</name>'.format(name)
    #     output += '\n        </types>'
    #     return output

    # sharing_df = output['Sharing Rules'][output['Sharing Rules']["Diff"] == f'{other_instance}_only']
    # sharing_criteria_rules_to_delete = sharing_df[sharing_df['RuleType'] == 'SharingCriteriaRule']['ObjectName.Name'].tolist()
    # sharing_owner_rules_to_delete = sharing_df[sharing_df['RuleType'] == 'SharingOwnerRule']['ObjectName.Name'].tolist()
    # sharing_criteria_rules_to_delete.sort()
    # sharing_owner_rules_to_delete.sort()

    # custom_metadata_df = output['Custom Metadata'][output['Custom Metadata']["Diff"] == f'{other_instance}_only']
    # custom_metadata_to_delete = custom_metadata_df['ObjectName.Name'].tolist()
    # custom_metadata_to_delete.sort()

    sharing_criteria_rules_to_delete = output['Sharing Rules'].query(f"RuleType == 'SharingCriteriaRule' and Diff == '{other_instance}_only'")['ObjectName.Name'].tolist()
    sharing_owner_rules_to_delete    = output['Sharing Rules'].query(f"RuleType == 'SharingOwnerRule'    and Diff == '{other_instance}_only'")['ObjectName.Name'].tolist()
    custom_metadata_to_delete = sorted(output['Custom Metadata'].query(f"Diff == '{other_instance}_only'")['ObjectName.Name'].tolist())

    destructive_changes = AntMigrationTool.build_destructive_changes_xml({
        'SharingCriteriaRule': sharing_criteria_rules_to_delete
        , 'SharingOwnerRule': sharing_owner_rules_to_delete
        , 'CustomMetadata': custom_metadata_to_delete
    })
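
    # Assumed output shape of build_destructive_changes_xml (based on the
    # commented-out builder above): a destructiveChanges.xml body like
    #   <types>
    #       <members>SomeRuleName</members>
    #       <name>SharingCriteriaRule</name>
    #   </types>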

    diffcount = sum([len(diff) for diff in output.values()])

    file_name = "{} SF DIFF ({}) - {} to {}.xlsx".format(timenow, diffcount, master_instance.upper(), other_instance.upper())
    final_output = {
        "Summary": [
            {"Data Set Name": datasetname, "# of Diffs": len(data)}
            for datasetname, data in output.items()
        ]
    }
    if "<members>" in destructive_changes:
        final_output["destructivechanges.xml"] = destructive_changes.split('\n')
    final_output.update({key[:30]:val for key,val in output.items() if len(val) > 0})

    f = pdh.multiple_df_to_excel(final_output
        , file_name
        , max_col_width=100)
    print(f['location'])
    return
def main():
    session = Salesforce_API(username)
    # sql = SQL_Server_API('EOS-prod')  # toggle: prod vs. stage
    sql = SQL_Server_API('EOS-stage')
    

    # whereClause = """
    # WHERE o.OracleCode = '1573007431'
    # -- AND o.RomeId IS NULL
    # """

    # whereClause = """
    # WHERE o.Id IN(43748
    #     ,22660
    #     ,49573)
    # """

    def read_sql(path):
        with open(path, 'r', encoding='utf-8-sig') as f:
            return f.read()

    artistsSQL = read_sql(sqlFileArtists)
    coPromotersSQL = read_sql(sqlFileCoPromoters)
    venuesSQL = read_sql(sqlFileVenues)
    artistAgentsSQL = read_sql(sqlFileArtistAgents)
    artistAgenciesSQL = read_sql(sqlFileArtistAgencies)
    ticketAgenciesSQL = read_sql(sqlFileTicketAgencies)

    # stageSQL = open(stageSQLFile, 'r', encoding='utf-8-sig').read().replace("QUERY_WHERE_CLAUSE_HERE", whereClause)
    # querySQL = open(querySQLFile, 'r', encoding='utf-8-sig').read()
    # tourOnSalesQuerySQL = open(tourOnSalesQuerySQLFile, 'r', encoding='utf-8-sig').read().replace("QUERY_WHERE_CLAUSE_HERE", whereClause)
    # eventOnSalesQuerySQL = open(eventOnSalesQuerySQLFile, 'r', encoding='utf-8-sig').read().replace("QUERY_WHERE_CLAUSE_HERE", whereClause)
    
    print("EOS queries in progress")
    eosArtists =        sql.query(artistsSQL)
    eosCoPromoters =    sql.query(coPromotersSQL)
    eosVenues =         sql.query(venuesSQL)
    eosArtistAgents =   sql.query(artistAgentsSQL)
    eosArtistAgencies = sql.query(artistAgenciesSQL)
    eosTicketAgencies = sql.query(ticketAgenciesSQL)
    print("EOS queries complete")

    eos_accounts = eosArtists + eosCoPromoters + eosVenues + eosArtistAgencies + eosTicketAgencies
    eos_contacts = eosArtistAgents

    eos_dfs = {
        'Artists': pd.DataFrame(eosArtists)
        ,'Co-Promoters': pd.DataFrame(eosCoPromoters)
        ,'Venues': pd.DataFrame(eosVenues)
        ,'ArtistAgents': pd.DataFrame(eosArtistAgents)
        ,'ArtistAgencies': pd.DataFrame(eosArtistAgencies)
        ,'TicketAgencies': pd.DataFrame(eosTicketAgencies)
    }
    eos_dfs = {'{} ({})'.format(key, len(val)): val for key,val in eos_dfs.items()}
    pdh.multiple_df_to_excel(eos_dfs, '/Users/daniel.hicks_1/Documents/Rome/Rome Downloads/EOS Master Data.xlsx')

    # df_eosArtists =        eosArtists
    # df_eosCoPromoters =    eosCoPromoters
    # df_eosVenues =         eosVenues
    # df_eosArtistAgents =   eosArtistAgents
    # df_eosArtistAgencies = eosArtistAgencies
    # df_eosTicketAgencies = eosTicketAgencies
    # df_eosArtists['RecordType.Name'] = 'Artist'
    # df_eosCoPromoters['RecordType.Name'] = 'Co-Promoter'
    # df_eosVenues['RecordType.Name'] = 'Venue'
    # df_eosArtistAgents['RecordType.Name'] = 'Artist Agent'
    # df_eosArtistAgencies['RecordType.Name'] = 'Agency'
    # df_eosTicketAgencies['RecordType.Name'] = 'Ticket Agency'

    # df_eosaccounts = pd.concat(df_eosArtists, df_eosCoPromoters, df_eosVenues, df_eosArtistAgencies, df_eosTicketAgencies)


    accounts = session.select_records("""SELECT Id, Name, SourceSystemId__c, EOSId__c, RecordType.Name
    FROM Account
    WHERE RecordType.Name IN ('Artist','Co-Promoter','Venue','Agency','Ticket Agency')
    """)
    contacts = session.select_records("""SELECT Id, Name, SourceSystemId__c, EOSId__c
    FROM Contact
    WHERE Account.RecordType.Name = 'Agency'
    OR EOSId__c <> NULL
    """)

    # df_romeaccounts = pd.DataFrame(accounts)

    account_eos_ids = {item['RecordType.Name'] + item["EOSId__c"]: item["Id"] for item in accounts if item["EOSId__c"] != ""}
    contact_eos_ids = {item["EOSId__c"]: item["Id"] for item in contacts if item["EOSId__c"] != ""}
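
    # The account lookup key is RecordType.Name + EOSId__c, e.g. a hypothetical
    # 'Venue12345' -> '001XXXXXXXXXXXXXXX', so the same EOS id can map to
    # different Rome accounts per record type.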

    for item in eos_accounts:
        key = item["RecordType"] + item["EOSId__c"]
        item["RomeId"] = account_eos_ids[key] if key in account_eos_ids else ""
    for item in eos_contacts:
        item["RomeId"] = contact_eos_ids[item["EOSId__c"]] if item["EOSId__c"] in contact_eos_ids else ""

    account_ids_map = {
        item.Id: item
        for item in accounts
    }
    account_names_map = {}
    for item in accounts:
        key = (item['RecordType.Name'] + item["Name"]).lower()
        if key not in account_names_map:
            account_names_map[key] = []
        account_names_map[key].append(item)

    for item in eos_accounts:
        key = (item['RecordType'] + item["Name"]).lower()
        if item['RomeId'] != '':
            item['Note'] = 'Mapped'
            item['Case Mismatch'] = True if account_ids_map[item['RomeId']]['Name'] != item['Name'] else ''
        elif item["RomeId"] == '' and key in account_names_map and len(account_names_map[key]) == 1:
            item['Note'] = '1-1 mapping found'
            item['Case Mismatch'] = True if account_names_map[key][0]['Name'] != item['Name'] else ''
            item['Matched record'] = account_names_map[key][0]['Id']
        elif item["RomeId"] == '' and key in account_names_map and len(account_names_map[key]) > 1:
            item['Note'] = '1-many mapping found'
            item['Matched record'] = ', '.join([item['Name'] for item in account_names_map[key]])
        else:
            item['Note'] = 'No match found'
    
    record_types = {item['RecordType'] for item in eos_accounts}
    account_dfs = {
        key: pd.DataFrame([
            item for item in eos_accounts 
            if item['RecordType'] == key 
            # and item['Note'] != 'Mapped'
        ])
        for key in record_types
    }
    pdh.multiple_df_to_excel(account_dfs, '/Users/daniel.hicks_1/Documents/Rome/Rome Downloads/EOS Master Data Diff to Rome.xlsx')
    

    print("Push data to {}?".format(session.instance))
    if str(input()).upper() == 'Y':
        session.add_bypass_settings()
        for key, df in account_dfs.items():
            matched = df[df['Note'] == '1-1 mapping found'].copy()
            matched['Id'] = matched['Matched record']
            matched = matched[['Id', 'EOSId__c']].to_dict('records')
            new = df[df['Note'] == 'No match found'].to_dict('records')
            # print("Push {} {} to {}?".format(len(matched + new), key, session.instance))
            # if str(input()).upper() == 'Y':
            session.insert('Account', new)
            session.update(matched)
        session.remove_bypass_settings()




    # session.write_file(sqlFileArtists.replace("EOS Data/EOS", "EOS Data/Data | EOS").replace(".sql",".csv"), eosArtists)
    # session.write_file(sqlFileCoPromoters.replace("EOS Data/EOS", "EOS Data/Data | EOS").replace(".sql",".csv"), eosCoPromoters)
    # session.write_file(sqlFileVenues.replace("EOS Data/EOS", "EOS Data/Data | EOS").replace(".sql",".csv"), eosVenues)
    # session.write_file(sqlFileArtistAgents.replace("EOS Data/EOS", "EOS Data/Data | EOS").replace(".sql",".csv"), eosArtistAgents)
    # session.write_file(sqlFileArtistAgencies.replace("EOS Data/EOS", "EOS Data/Data | EOS").replace(".sql",".csv"), eosArtistAgencies)
    # session.write_file(sqlFileTicketAgencies.replace("EOS Data/EOS", "EOS Data/Data | EOS").replace(".sql",".csv"), eosTicketAgencies)



    for item in contacts:
        name = item["Name"]
        item["RecordType.Name"] = "Contact"
        if name not in account_names_map:
            account_names_map[name] = []
        account_names_map[name].append(item)

    # session.write_file(sqlFileArtists.replace("EOS Data/EOS", "EOS Data/Data Unmapped | EOS").replace(".sql",".csv"), [item for item in eosArtists if item["RomeId"] == ""])
    # session.write_file(sqlFileCoPromoters.replace("EOS Data/EOS", "EOS Data/Data Unmapped | EOS").replace(".sql",".csv"), [item for item in eosCoPromoters if item["RomeId"] == ""])
    # session.write_file(sqlFileVenues.replace("EOS Data/EOS", "EOS Data/Data Unmapped | EOS").replace(".sql",".csv"), [item for item in eosVenues if item["RomeId"] == ""])
    # session.write_file(sqlFileArtistAgencies.replace("EOS Data/EOS", "EOS Data/Data Unmapped | EOS").replace(".sql",".csv"), [item for item in eosArtistAgencies if item["RomeId"] == ""])
    # session.write_file(sqlFileArtistAgents.replace("EOS Data/EOS", "EOS Data/Data Unmapped | EOS").replace(".sql",".csv"), [item for item in eosArtistAgents if item["RomeId"] == ""])
    # session.write_file(sqlFileTicketAgencies.replace("EOS Data/EOS", "EOS Data/Data Unmapped | EOS").replace(".sql",".csv"), [item for item in eosTicketAgencies if item["RomeId"] == ""])


    # ################################################################################################
    # exactMatchArtists = [
    #     {
    #         "Id": account_names_map[item["Name"].lower()][0]["Id"]
    #         , "EOSId__c": item["EOSId__c"]
    #         , "Rome Name": account_names_map[item["Name"].lower()][0]["Name"]
    #         , "EOS Name": item["Name"]
    #         , "Already Mapped": account_names_map[item["Name"].lower()][0]["EOSId__c"] != ""
    #         , "Case Mismatch": item["Name"] != account_names_map[item["Name"].lower()][0]["Name"]
    #     } 
    #     for item in eosArtists
    #     if item["Name"].lower() in account_names_map and len(account_names_map[item["Name"].lower()]) == 1 and account_names_map[item["Name"].lower()][0]["RecordType.Name"] == "Artist"
    # ]

    # session.write_file(sqlFileArtists.replace("EOS Data/EOS", "EOS Data/Data Matches | EOS").replace(".sql",".csv"), exactMatchArtists)

    # # session.add_bypass_settings()
    # # session.update_records([item for item in exactMatchArtists if item["RomeId"] == ""])
    # # session.remove_bypass_settings()

    # # # For first insert attempt, try to insert Active. Then use In Review for the remaining rows which failed due to duplicate checker
    # # for item in eosArtists:
    # #     item["Status__c"] = 'Active'
    # # session.add_bypass_settings()
    # # session.insert_records("Account", [item for item in eosArtists if item["RomeId"] == ""])
    # # session.remove_bypass_settings()

    # # For remaining failure rows, attempt to find other partial matches before doing forced insert

    # ################################################################################################
    # exactMatchCoPromoters = [
    #     {
    #         "Id": account_names_map[item["Name"]][0]["Id"]
    #         , "EOSId__c": item["EOSId__c"]
    #         , "Rome Name": account_names_map[item["Name"]][0]["Name"]
    #         , "EOS Name": item["Name"]
    #         # , "RomeId": item["RomeId"]
    #         , "Already Mapped": account_names_map[item["Name"].lower()][0]["EOSId__c"] != ""
    #         , "Case Mismatch": item["Name"] != account_names_map[item["Name"].lower()][0]["Name"]
    #     } 
    #     for item in eosCoPromoters
    #     if item["Name"] in account_names_map and len(account_names_map[item["Name"]]) == 1 and account_names_map[item["Name"]][0]["RecordType.Name"] == "Co-Promoter"
    # ]

    # session.write_file(sqlFileCoPromoters.replace("EOS Data/EOS", "EOS Data/Data Matches | EOS").replace(".sql",".csv"), exactMatchCoPromoters)

    # # session.add_bypass_settings()
    # # session.update_records([item for item in exactMatchCoPromoters if item["RomeId"] == ""])
    # # session.remove_bypass_settings()

    # # # For first insert attempt, try to insert Active. Then use In Review for the remaining rows which failed due to duplicate checker
    # # # for item in eosCoPromoters:
    # # #     item["Status__c"] = 'Active'
    # # session.add_bypass_settings()
    # # session.insert_records("Account", [item for item in eosCoPromoters if item["RomeId"] == ""])
    # # session.remove_bypass_settings()

    # # For remaining failure rows, attempt to find other partial matches before doing forced insert

    # ################################################################################################
    # exactMatchArtistAgencies = [
    #     {
    #         "Id": account_names_map[item["Name"]][0]["Id"]
    #         , "EOSId__c": item["EOSId__c"]
    #         , "Rome Name": account_names_map[item["Name"]][0]["Name"]
    #         , "EOS Name": item["Name"]
    #         # , "RomeId": item["RomeId"]
    #         , "Already Mapped": account_names_map[item["Name"].lower()][0]["EOSId__c"] != ""
    #         , "Case Mismatch": item["Name"] != account_names_map[item["Name"].lower()][0]["Name"]
    #     } 
    #     for item in eosArtistAgencies
    #     if item["Name"] in account_names_map and len(account_names_map[item["Name"]]) == 1 and account_names_map[item["Name"]][0]["RecordType.Name"] == "Agency"
    # ]

    # session.write_file(sqlFileArtistAgencies.replace("EOS Data/EOS", "EOS Data/Data Matches | EOS").replace(".sql",".csv"), exactMatchArtistAgencies)

    # # session.add_bypass_settings()
    # # session.update_records([item for item in exactMatchArtistAgencies if item["RomeId"] == ""])
    # # session.remove_bypass_settings()

    # # # For first insert attempt, try to insert Active. Then use In Review for the remaining rows which failed due to duplicate checker
    # # for item in eosArtistAgencies:
    # #     item["Status__c"] = 'Active'
    # # session.add_bypass_settings()
    # # session.insert_records("Account", [item for item in eosArtistAgencies if item["RomeId"] == ""])
    # # session.remove_bypass_settings()

    # # For remaining failure rows, attempt to find other partial matches before doing forced insert

    # ################################################################################################
    # exactMatchTicketAgencies = [
    #     {
    #         "Id": account_names_map[item["Name"]][0]["Id"]
    #         , "EOSId__c": item["EOSId__c"]
    #         , "Rome Name": account_names_map[item["Name"]][0]["Name"]
    #         , "EOS Name": item["Name"]
    #         # , "RomeId": item["RomeId"]
    #         , "Already Mapped": account_names_map[item["Name"].lower()][0]["EOSId__c"] != ""
    #         , "Case Mismatch": item["Name"] != account_names_map[item["Name"].lower()][0]["Name"]
    #     } 
    #     for item in eosTicketAgencies
    #     if item["Name"] in account_names_map and len(account_names_map[item["Name"]]) == 1 and account_names_map[item["Name"]][0]["RecordType.Name"] == "Ticket Agency"
    # ]

    # session.write_file(sqlFileTicketAgencies.replace("EOS Data/EOS", "EOS Data/Data Matches | EOS").replace(".sql",".csv"), exactMatchTicketAgencies)

    # # session.add_bypass_settings()
    # # session.update_records([item for item in exactMatchTicketAgencies if item["RomeId"] == ""])
    # # session.remove_bypass_settings()

    # # # For first insert attempt, try to insert Active. Then use In Review for the remaining rows which failed due to duplicate checker
    # # for item in eosTicketAgencies:
    # #     item["Status__c"] = 'Active'
    # # session.add_bypass_settings()
    # # session.insert_records("Account", [item for item in eosTicketAgencies if item["RomeId"] == ""])
    # # session.remove_bypass_settings()

    # # For remaining failure rows, attempt to find other partial matches before doing forced insert

    # ################################################################################################
    # exactMatchArtistAgents = [
    #     {
    #         "Id": account_names_map[item["FirstName"] + " " + item["LastName"]][0]["Id"]
    #         , "EOSId__c": item["EOSId__c"]
    #         , "Rome Name": account_names_map[item["FirstName"] + " " + item["LastName"]][0]["Name"]
    #         , "EOS Name": item["FirstName"] + " " + item["LastName"]
    #         , "RomeId": item["RomeId"]
    #     } 
    #     for item in eosArtistAgents
    #     if item["FirstName"] + " " + item["LastName"] in account_names_map and len(account_names_map[item["FirstName"] + " " + item["LastName"]]) == 1 and account_names_map[item["FirstName"] + " " + item["LastName"]][0]["RecordType.Name"] == "Contact"
    # ]

    # session.write_file(sqlFileArtistAgents.replace("EOS Data/EOS", "EOS Data/Data Matches | EOS").replace(".sql",".csv"), exactMatchArtistAgents)

    # # session.add_bypass_settings()
    # # session.update_records([item for item in exactMatchArtistAgents if item["RomeId"] == ""])
    # # session.remove_bypass_settings()

    # # session.add_bypass_settings()
    # # session.upsert_records("Contact", [item for item in eosArtistAgents if item["RomeId"] == ""], "EOSId__c")
    # # session.upsert_records("Contact", eosArtistAgents, "EOSId__c")
    # # session.remove_bypass_settings()

    # return

    pass
# Example #7
def main():
    output = {}

    for object_name in object_list:
        javascript_file_name = source_control_functions_directory + '/{}.tsx'.format(
            object_name.replace('__c', ''))
        result = []
        if os.path.exists(javascript_file_name):
            javascript_file_text = open(javascript_file_name,
                                        'r',
                                        encoding='utf-8').read()
            result = re.findall(javascript_function_regex,
                                javascript_file_text)

        functions_map = {res[1].lower(): res for res in result}
        function_names_map = {res[1].lower(): res[1] for res in result}

        # Salesforce field info
        desc = session.get_object_description(object_name)
        field_info = [
            {
                'API Name': field.name,
                'Name': field.label,
                'Datatype': get_datatype(field).datatype,
                'Datatype Info': get_datatype(field).info,
                'External ID?': True if field.externalId else None,
                'Comments': None
            } for field in desc.fields
            if field.calculatedFormula is None and 'Rollup field' not in str(
                get_datatype(field).info) and field.name not in [
                    'IsDeleted', 'Name', 'CreatedDate', 'LastModifiedDate',
                    'SystemModstamp', 'MayEdit', 'IsLocked', 'LastViewedDate',
                    'LastReferencedDate', 'CreatedById', 'LastModifiedById',
                    'OwnerId', 'OfficeName__c', 'PrimaryVenueOffice__c',
                    'VenueOwnership__c', 'Geography__c', 'Division__c',
                    'LastSaveContext__c', 'IsTouringApp__c',
                    'HiddenVenueOffice__c', 'IsATemplate__c'
                ] and field.name.lower().endswith('sellout__c') is False
            and field.name.lower().endswith('projection__c') is False
            and field.name.lower().endswith('variance__c') is False
            and field.name.lower().endswith('potential__c') is False
            # Suffixes must be lowercase to match field.name.lower()
            and field.name.lower().endswith('varfc__c') is False
            and field.name.lower().endswith('sellfc__c') is False
            and field.name.lower().endswith('projfc__c') is False
            and field.name.lower().startswith('combined') is False
            and field.name.lower().startswith('projected') is False
            and field.name.lower().startswith('sellout') is False
            and field.name.lower().startswith('settlement') is False
            and field.name.lower().startswith('potential') is False
        ]
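
        # Three stable sorts, applied least- to most-significant: alphabetical
        # by label, then reference fields first, then Id/external-id fields
        # pinned to the top (everything else sorts as 'Z').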
        field_info.sort(key=lambda x: x['Name'])
        field_info.sort(key=lambda x: 0 if x['Datatype'] == 'reference' else 1)
        field_info.sort(key=lambda x: x['API Name'] if x['API Name'] in [
            'Id', 'SourceSystemId__c', 'ExternalID__c', 'ExternalId__c'
        ] else 'Z')
        output[object_name] = field_info

    timenow = datetime.datetime.now().strftime("%Y-%m-%d %H.%M")
    pdh.multiple_df_to_excel(
        output,
        "/Users/daniel.hicks_1/downloads/{} {} Object Model.xlsx".format(
            timenow, session.instance.upper()),
        word_wrap=True,
        font_size=14,
        max_col_width=100)

    return
# Example #8
def main():

    # data = session.helpers.get_tours_data("SELECT Id FROM Tour__c WHERE Id = 'a1sJ0000005BDMjIAO'")

    # getattr(data.ticketscales[0], 'Event__r.Name')

    # output = ObjDict()
    event_data_manual = session.helpers.get_events_data(
        "SELECT Id FROM Event__c WHERE TourLeg__r.Tour__c = 'a1sJ0000005BP5OIAW'"
    )

    # Clone In 1
    # event_data_cloned = session.helpers.get_events_data("SELECT Id FROM Event__c WHERE TourLeg__r.Tour__c = 'a1sJ0000005BP5JIAW'")

    # Clone In 2
    event_data_cloned = session.helpers.get_events_data(
        "SELECT Id FROM Event__c WHERE TourLeg__r.Tour__c = 'a1sJ0000005BPp0IAG'"
    )

    def get_populated_fields(data):
        output = ObjDict()
        for p, d in data.items():
            if len(d) == 0:
                continue
            output[p] = set()
            object_name = session.get_object_name(d[0].Id)
            editable_fields = [
                item['name']
                for item in session.get_object_description(object_name).fields
                if item['calculated'] is False and item['nillable'] is True
            ]
            default_false_fields = [
                item['name']
                for item in session.get_object_description(object_name).fields
                if item['type'] == 'boolean' and item['defaultValue'] is False
            ]
            default_true_fields = [
                item['name']
                for item in session.get_object_description(object_name).fields
                if item['type'] == 'boolean' and item['defaultValue'] is True
            ]
            for r in d:
                for f in editable_fields:
                    if f in r and r[f] is not None:
                        output[p].add(f)
                for f in default_false_fields:
                    if f in r and r[f] is not False:
                        output[p].add(f)
                for f in default_true_fields:
                    if f in r and r[f] is not True:
                        output[p].add(f)
        return output
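
    # get_populated_fields returns, per dataset key, the set of field API names
    # holding a non-default value in at least one record, e.g. (hypothetical):
    #   {'events': {'EventName__c', ...}, 'deals': {'Artist__c', ...}}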

    manual_event_fields = get_populated_fields(event_data_manual)
    cloned_event_fields = get_populated_fields(event_data_cloned)

    possible_issue_fields = []
    possible_issue_fields.extend([
        'Event__c.' + item for item in cloned_event_fields.events
        if item not in manual_event_fields.events
    ])
    possible_issue_fields.extend([
        'EventDateTime__c.' + item
        for item in cloned_event_fields.eventdatetimes
        if item not in manual_event_fields.eventdatetimes
    ])
    possible_issue_fields.extend([
        'Deal__c.' + item for item in cloned_event_fields.deals
        if item not in manual_event_fields.deals
    ])
    possible_issue_fields.extend([
        'TicketScale__c.' + item for item in cloned_event_fields.ticketscales
        if item not in manual_event_fields.ticketscales
    ])
    possible_issue_fields.extend([
        'Deduction__c.' + item for item in cloned_event_fields.deductions
        if item not in manual_event_fields.deductions
    ])
    possible_issue_fields.extend([
        'LedgerEntry__c.' + item for item in cloned_event_fields.ledgerentries
        if item not in manual_event_fields.ledgerentries
    ])
    possible_issue_fields.extend([
        'LedgerEntryBreakout__c.' + item
        for item in cloned_event_fields.ledgerentrybreakouts
        if item not in manual_event_fields.ledgerentrybreakouts
    ])

    # Filter out fields from "possible issues" list that I have already noted in a story
    possible_issue_fields = [
        f for f in possible_issue_fields if f not in acknowledged_issue_fields
    ]

    print('\n'.join(possible_issue_fields))

    def flatten(records):
        # Keep only scalar values (None included); nested dicts/lists are dropped
        return [{
            key: val
            for key, val in record.items()
            if val is None or type(val) in [str, bool, int, float]
        } for record in records]

    data_out = {}
    data_out['PossibleIssueFields'] = pd.DataFrame(possible_issue_fields)
    data_out.update({
        key: pd.DataFrame(flatten(val))
        for key, val in event_data_cloned.items()
    })

    # Optional step to filter the columns to only "possible issue fields" columns
    for obj, df in data_out.items():
        if len(df) == 0 or 'Id' not in df:
            continue
        object_name = session.get_object_name(df.loc[0]['Id'])
        issue_fields = [
            f[f.find('.') + 1:] for f in possible_issue_fields
            if f.startswith(object_name)
        ]

        data_out[obj] = df[issue_fields]
    data_out = {
        key: val
        for key, val in data_out.items() if len(val.columns) > 0
    }
    ##

    pdh.multiple_df_to_excel(data_out, 'Cloned Event Fields.xlsx')

    # excel_writer = pd.ExcelWriter('/Users/daniel.hicks_1/Downloads/Cloned Event Fields.xlsx')
    # pdh.df_to_excel(pd.DataFrame(possible_issue_fields), excel_writer, sheet_name='PossibleIssueFields')
    # for key, data in event_data_cloned.items():
    #     pdh.df_to_excel(pd.DataFrame(flatten(data)), excel_writer, sheet_name=key)
    # excel_writer.close()
    return

    # data = get_tour_data("SELECT Id FROM Tour__c WHERE Id = 'a1sJ0000005BDMPIA4'")

    # AverageNetTicketForBreakEven = sum([
    #     item.ProjectedNetGross__c * item.TourExchangeRate__c for item in data.events
    #     if item.BusinessPlanOption__c == False
    # ]) / data.tours[0].ruProjectedPaidTickets__c
    # print(AverageNetTicketForBreakEven)

    # AverageAncilPerHeadForBreakEven = data.tours[0].ruProjectedLNAncillaryRevenue__c / data.tours[0].ruProjectedPaidTickets__c
    # print(AverageAncilPerHeadForBreakEven)

    # ArtistBonusPerHeadForBreakEven = sum([
    #     item.BonusPotentialatProjection__c * item.Deal__r.DealExchangeRate__c * item.Deal__r.Event__r.TourExchangeRate__c
    #     for item in data.bonusdetails
    #     if item.BonusType__c == 'Per Ticket' and item.ApplyTo__c == 'Regardless' and item.Deal__r.Event__r.BusinessPlanOption__c == False
    # ]) / data.tours[0].ruProjectedPaidTickets__c
    # print(ArtistBonusPerHeadForBreakEven)

    return
# Example #9
def main():
    fullpullpackagexmltext = open('./resources/ant/fullpullpackage.xml').read()
    path = './resources/ant/dev1 retrieve 2021-03-02 10.29.15.095199'

    # results = session.ant.retrieve(fullpullpackagexmltext)
    results = session.ant.retrieve(path)
    metadata = results.get_results('dataframe')
    # b = results('binaryfilelist')

    obj = metadata.objects
    obj_data = obj['Event__c.object']

    import pandas as pd

    dfs = []
    for file_name, data in metadata.permissionsets.items():
        for attr in data:
            if isinstance(data[attr], list) is False:
                continue
            df = pd.DataFrame(data[attr])
            df['PermissionSetName'] = file_name.split('.')[0]
            df['Attribute'] = attr
            dfs.append(df)

    pdh.multiple_df_to_excel({"Sheet": pd.concat(dfs)}, 'Test.xlsx')

    return
    # import os
    # from classes.xmltodictconfig import XmlParser
    # resultspath = './resources/ant/dev2 2021-02-10 SAVE/pullResults'
    # results = {}
    # for item in os.listdir(resultspath):
    #     itempath = resultspath + '/' + item
    #     if item == 'package.xml':
    #         continue
    #     results[item] = {}
    #     for item2 in os.listdir(itempath):
    #         itempath2 = itempath + '/' + item2
    #         if os.path.isdir(itempath2) is False:
    #             try:
    #                 results[item][item2] = XmlParser(file_path=itempath2)
    #             except:
    #                 results[item][item2] = open(itempath2, 'rb').read()
    #         else:
    #             # Skip
    #             # print(itempath2)
    #             pass

    # flatresults = []
    # groupedresults = {}
    # for metadatatype, files in results.items():
    #     groupedresults[metadatatype] = []
    #     for file, data in files.items():
    #         if isinstance(data, bytes):
    #             row = {'MetadataType': metadatatype, 'FileName': file, 'FileData': data}
    #             flatresults.append(row)
    #             groupedresults[metadatatype].append(row)
    #             continue
    #         list_properties = {key: val for key, val in data.items() if isinstance(val, list)}
    #         other_properties = {key: val for key, val in data.items() if key not in list_properties}
    #         if len(other_properties) > 0:
    #             row = {'MetadataType': metadatatype, 'FileName': file}
    #             row.update(other_properties)
    #             flatresults.append(row)
    #             groupedresults[metadatatype].append(row)
    #         for p, l in list_properties.items():
    #             for item in l:
    #                 row = {'MetadataType': metadatatype, 'FileName': file, 'ChildProperty1': p}
    #                 if isinstance(item, dict):
    #                     row.update(item)
    #                 else:
    #                     row['Value'] = item
    #                 flatresults.append(row)
    #                 groupedresults[metadatatype].append(row)
    # # import pandas as pd
    # # {
    # #     key:pd.DataFrame(val)
    # #     for key,val in groupedresults.items()
    # # }

    # # import functions.pandas_helpers as pdh
    # # pdh.multiple_df_to_excel({'test':pd.DataFrame(flatresults)}, 'test.xlsx')

    # # venues = session.select_records("""
    # # SELECT Id, Name, PrimaryOffice__c, PrimaryOffice__r.Name, CreatedBy.Name, SourceSystemId__c
    # # FROM Account
    # # WHERE RecordType.Name = 'Venue'
    # # AND Id NOT IN (SELECT Venue__c FROM VenueOffice__c)
    # # """)

    # # new_venue_offices = [
    # #     {"Venue__c": item.Id
    # #     , "Office__c": item.PrimaryOffice__c
    # #     , "PrimaryOffice__c": True}
    # #     for item in venues
    # # ]

    # # session.insert_records("VenueOffice__c", new_venue_offices)

    return
# Example #10
def main():

    # 'dev2_session' and 'session' are assumed to be module-level connections
    # in the original script (see the earlier examples).
    valid_types = dev2_session.select_records("""
    SELECT Label FROM PicklistOption__mdt
    WHERE PicklistName__c = 'Ticket Scale Type'
    AND AppScope__c = 'NAC'
    """)

    valid_types = [item.Label for item in valid_types]

    ticketscales = session.select_records("""
    SELECT Id, Event__r.TourLeg__r.Tour__c, Event__r.TourLeg__r.Tour__r.TourTitle__c, Event__c, Type__c, Label__c, Event__r.TourLeg__r.Tour__r.CreatedBy.Name
    FROM TicketScale__c
    WHERE IsTouringApp__c = TRUE
    AND Event__r.TourLeg__r.Tour__r.RecordType.Name = 'Booking'
    AND Type__c NOT IN ('{}')
    ORDER BY Event__r.TourLeg__r.Tour__c, Event__c, Type__c
    """.format("','".join(valid_types)))

    tour_ids = {item['Event__r.TourLeg__r.Tour__c'] for item in ticketscales}
    tours = session.select_records(
        "SELECT Id, TicketScaleTypeLabels__c FROM Tour__c WHERE Id IN ('{}')".format("','".join(tour_ids)),
        mode='simple')
    tourlegs = session.select_records(
        "SELECT Id, PriceLevelForecastedPercentages__c, TicketScaleTypeFilter__c FROM TourLeg__c WHERE Tour__c IN ('{}')".format("','".join(tour_ids)),
        mode='simple')

    # Filter out ticket scales we aren't worried about
    keepinvalid_ticketscaletypes = [
        'VIP', 'Platinum', 'Reserved', 'P4/P5 Flex', 'Lawn - Tier 4'
    ]
    # ticketscales = [item for item in ticketscales if item.Type__c not in keepinvalid_ticketscaletypes]

    TableFile('Downloads/TouringTicketScalesNeedUpdating.csv').write_csv(ticketscales)

    re_label_map = {
        'GA 2': 'GA Floor',
        'Lawn - 1st Week': 'Lawn - 1st Weekend',
        'P1/P2 Flex': 'Flex - P1 / P2',
        'P2/P3 Flex': 'Flex - P2 / P3',
        'P3/P4 Flex': 'Flex - P3 / P4',
        'First 5 Rows': 'Gold Circle',
        'Row 2': 'Second Row',
        'Row 3': 'Rows 3 - 5',
        'Row 4': 'Rows 3 - 5',
        'Row 5': 'Rows 3 - 5',
        'Rsvd - 4 Pack': 'Reserved - Value Channel',
        'Rsvd - Value Channel': 'Reserved - Value Channel'
    }
    for row in ticketscales:
        row.OldType = row.Type__c
    for row in tourlegs:
        row.OldPriceLevelForecastedPercentages = row.PriceLevelForecastedPercentages__c
        row.OldTicketScaleTypeFilter = row.TicketScaleTypeFilter__c
    for row in tours:
        row.OldTicketScaleTypeLabels = row.TicketScaleTypeLabels__c

    for old, new in re_label_map.items():
        for row in ticketscales:
            if row.Type__c == old:
                row.Type__c = new

    for row in tourlegs:
        row.TicketScaleTypeFilter__c = None
        if row.PriceLevelForecastedPercentages__c is not None:
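            # Assumed stored format: "Type:Pct|Type:Pct", e.g. "P1:25|Lawn:30"
            # (hypothetical values; keys are ticket-scale type names).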
            parsed_forecasted_percentages = {
                s.split(":", 1)[0]: s.split(":", 1)[1]
                for s in row.PriceLevelForecastedPercentages__c.split("|")
            }
            for old, new in re_label_map.items():
                if old in parsed_forecasted_percentages:
                    parsed_forecasted_percentages[new] = parsed_forecasted_percentages.pop(old)
            filtered_forecasted_percentages = {
                key: val
                for key, val in parsed_forecasted_percentages.items()
                if key not in ['Forecast', 'Revenue Lift', 'Deductions']
            }
            new_val = "|".join([
                key + ":" + val
                for key, val in filtered_forecasted_percentages.items()
            ])
            row.PriceLevelForecastedPercentages__c = new_val
    for row in tours:
        if row.TicketScaleTypeLabels__c is not None:
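            # Assumed stored format: newline-separated "Type|Label" pairs, e.g.
            # "GA 2|GA Floor\nRow 2|Second Row" (hypothetical values).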
            parsed_type_labels = {
                s.split("|", 1)[0]: s.split("|", 1)[1]
                for s in row.TicketScaleTypeLabels__c.split("\n")
            }
            for old, new in re_label_map.items():
                if old in parsed_type_labels:
                    parsed_type_labels[new] = parsed_type_labels[old]
                    del parsed_type_labels[old]
            filtered_type_labels = {
                key: val
                for key, val in parsed_type_labels.items()
                if key in valid_types
            }
            # Join only the filtered (valid) type labels back together
            new_val = "\n".join(
                [key + "|" + val for key, val in filtered_type_labels.items()])
            row.TicketScaleTypeLabels__c = new_val

    pdh.multiple_df_to_excel(
        {
            'tours': tours,
            'tourlegs': tourlegs,
            'ticketscales': ticketscales
        }, 'Touring Ticket Scale Types Update.xlsx')

    tourlegs = [{
        "Id": item.Id,
        "PriceLevelForecastedPercentages__c":
        item.PriceLevelForecastedPercentages__c,
        "TicketScaleTypeFilter__c": item.TicketScaleTypeFilter__c
    } for item in tourlegs]

    session.add_bypass_settings()
    session.update_records(ticketscales)
    session.upsert_records('TourLeg__c', tourlegs, 'Id', mode='simple')
    session.update_records(tours)
    session.remove_bypass_settings()

    #     a1s1Q000005MrBkQAK
    #     a1s1Q000005MrCTQA0
    #     a1s1Q000005MssfQAC

    return
def main():
    output = {}

    for object_name in object_list:
        javascript_file_name = source_control_functions_directory + '/{}.tsx'.format(
            object_name.replace('__c', ''))
        result = []
        if os.path.exists(javascript_file_name):
            javascript_file_text = open(javascript_file_name,
                                        'r',
                                        encoding='utf-8').read()
            result = re.findall(javascript_function_regex,
                                javascript_file_text)

        functions_map = {res[1].lower(): res for res in result}
        function_names_map = {res[1].lower(): res[1] for res in result}

        # Salesforce field info
        desc = session.get_object_description(object_name)

        field_info = [{
            'API Name': field.name,
            'Name': field.label,
            'Javascript Function Name':
                functions_map[field.name.replace('__c', '').lower()][1]
                if field.name.replace('__c', '').lower() in functions_map else '',
            'Datatype': get_datatype(field).datatype,
            'Datatype Info': get_datatype(field).info,
            'Default Value': field.defaultValue,
            'Formula': field.calculatedFormula,
            'Javascript App Calculation':
                functions_map[field.name.replace('__c', '').lower()][0]
                if field.name.replace('__c', '').lower() in functions_map else '',
            'External ID?': field.externalId,
            'Picklist Options': get_picklist_options(desc, field)
        } for field in desc.fields]
        if include_javascript_functions:
            # Add functions in file which were not mapped to Salesforce field
            field_info.extend(
                [{
                    'Javascript Function Name': functions_map[lowername][1],
                    'Datatype': functions_map[lowername][2],
                    'Javascript App Calculation': functions_map[lowername][0]
                } for lowername, name in function_names_map.items()
                 if name not in
                 [item['Javascript Function Name'] for item in field_info]])
        df = pd.DataFrame(field_info)
        if include_javascript_functions is False:
            df.drop(columns=['Javascript App Calculation'], inplace=True)
        output[object_name] = df

    timenow = datetime.datetime.now().strftime("%Y-%m-%d %H.%M")
    pdh.multiple_df_to_excel(
        output,
        "/Users/daniel.hicks_1/downloads/{} {} Object Model.xlsx".format(
            timenow, session.instance.upper()),
        word_wrap=True,
        font_size=14,
        max_col_width=100)

    return