def main():
    import functions.pandas_helpers as pdh
    from classes.py_threading import ThreadPool
    threading = ThreadPool()
    session = Salesforce_API("*****@*****.**")

    query = """
    SELECT Id, CreatedDate, CreatedBy.Name, SourceSystemId__c
    FROM {}
    WHERE IsTouringApp__c = True
    AND Event__c = NULL
    AND CreatedDate >= 2020-09-01T00:00:00Z
    ORDER BY CreatedDate DESC
    """
    objects = [
        'EventDateTime__c', 'Deal__c', 'TicketScale__c', 'Deduction__c',
        'LedgerEntry__c', 'LedgerEntryBreakout__c'
    ]
    data = {}
    for o in objects:
        data[o] = threading.new(session.select, query.format(o), mode='bulk')

    for key, val in data.items():
        val = val.result()
        prefix = session.get_object_description(key).keyPrefix
        for row in val:
            if str(row.SourceSystemId__c)[:3] == prefix:
                row.IsLikelyClone = True
        data[key] = val
    pdh.to_excel(data,
                 'Orphaned child records {}.xlsx'.format(session.instance))
    return
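

# These examples lean on a small ThreadPool wrapper (classes.py_threading)
# whose source isn't shown in this collection. A minimal sketch of the
# assumed interface, built on concurrent.futures; the real class may differ.
import concurrent.futures


class ThreadPool:
    def __init__(self, max_workers=8):
        self._pool = concurrent.futures.ThreadPoolExecutor(max_workers)
        self._futures = []

    def new(self, fn, *args, **kwargs):
        # Submit a call and return a future; callers join with .result()
        future = self._pool.submit(fn, *args, **kwargs)
        self._futures.append(future)
        return future

    def run_async(self, fn):
        # Decorator form: each call to the wrapped function runs on the pool
        def wrapper(*args, **kwargs):
            return self.new(fn, *args, **kwargs)
        return wrapper

    def wait(self):
        # Block until every submitted call has finished
        concurrent.futures.wait(self._futures)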
def example9():
    import json
    gb_settings = uat.select("SELECT * FROM GBLite__GridBuddySettings__c")
    gb_meta = uat.select("SELECT * FROM GBLite__GB_Global_Meta__c")

    for item in gb_meta:
        try:
            item.meta = json.loads(item.GBLite__Picklist_Meta__c)
            item.update(item.meta)
        except (TypeError, ValueError):
            # Field is empty or holds invalid JSON
            item.meta = None
    actions = [
        item for item in gb_meta if item.get('actionLabel', None) is not None
    ]

    pdh.to_excel(
        {
            'GBLite__GridBuddySettings__c': gb_settings,
            'GBLite__GB_Global_Meta__c': gb_meta,
        }, 'GBLite Data.xlsx')

    # monitor = DataChangeMonitor()
    # monitor.set_frequency(5)
    # monitor.add_job('GBLite__GridBuddySettings__c', "Id", uat.select, "SELECT * FROM GBLite__GridBuddySettings__c", mute=True)
    # monitor.set_log_destination(None, 'GBLite__GridBuddySettings__c Changes')
    # monitor.start()
    # print('Monitor started')
    return


# example9()
def find_artist_dupes():
    rome_artists = sf.select("""
    SELECT Id, EOSId__c, Name
    FROM Account
    WHERE EOSId__c <> NULL
    AND RecordType.Name = 'Artist'
    """,
                             mode='bulk')
    eos_artists = sql.query("""
    SELECT 'Artist-' + CAST(Id AS VARCHAR) AS EOSId__c, Name
    FROM Artist
    """)

    eos_artists_by_name = {item['Name']: item for item in eos_artists}

    issues = []
    for artist in rome_artists:
        issue = eos_artists_by_name.get(artist.Name, None)
        if issue is not None and issue['EOSId__c'] != artist['EOSId__c']:
            issues.append({
                'RomeId': artist.Id,
                'RomeLink': f'https://{sf.simple.sf_instance}/{artist.Id}',
                'Name': artist.Name,
                'CurrRomeEOSId': artist.EOSId__c,
                'ChangeRomeEOSIdTo': issue['EOSId__c'],
            })
    if issues:
        pdh.to_excel(issues, f'{sf.instance} Rome EOS Artist Issues.xlsx')
    return
# Example 4
def fix(sf=None, sql=None, vars=None, data=None):
    import functions.pandas_helpers as pdh
    import pandas as pd
    venueownership = vars.get('venueownership')
    officename = vars.get('officename')
    primaryvenueoffice = vars.get('primaryvenueoffice')
    division = vars.get('division')
    geography = vars.get('geography')
    pdh.to_excel(
        {
            'VenueOwnership__c': venueownership,
            'OfficeName__c': officename,
            'PrimaryVenueOffice__c': primaryvenueoffice,
            'Division__c': division,
            'Geography__c': geography,
            'All': pd.concat([
                venueownership, officename, primaryvenueoffice, division,
                geography
            ])[['Id']].drop_duplicates('Id'),
        }, 'Events Need Security Update.xlsx')

    sf.bypass_prod_operation_approval()
    with sf.bypass_settings():
        sf.update_event_security_fields(data.to_dict('records'))
# Example 5
def main(sessions, do_fix=False):
    import functions.pandas_helpers as pdh
    from functions.prompt import prompt
    from classes.py_threading import ThreadPool
    threading = ThreadPool()
    sessions = {
        username: sf
        for username, sf in sessions.items() if username in instances_to_run
    }
    return_string = ""

    objects = [
        'EventDateTime__c', 'Deal__c', 'TicketScale__c', 'Deduction__c',
        'LedgerEntry__c'
    ]
    if do_fix and not prompt("Delete orphan records?", boolean=True):
        do_fix = False

    @threading.run_async
    def inner(sf):
        if do_fix:
            sf.bypass_prod_operation_approval()
        results = {}
        for obj in objects:
            obj_fields = {
                f.name: f
                for f in sf.get_object_description(obj).fields
            }
            fields = ['Name', 'EventName__c']
            fields = [f for f in fields if f in obj_fields]
            fields_str = ', '.join(fields)
            query = f"""
            SELECT Id, {fields_str}, Event__c, CreatedBy.Name, CreatedDate, LastModifiedBy.Name, LastModifiedDate
            FROM {obj}
            WHERE IsTouringApp__c = True
            AND Event__c = NULL
            AND CreatedDate >= THIS_YEAR
            """
            results[obj] = threading.new(sf.select, query, mode='bulk')
        for obj in objects:
            records = results[obj].result()
            if len(records) > 0:
                if do_fix:
                    sf.add_bypass_settings()
                    sf.delete(records, mode='bulk')
                    sf.remove_bypass_settings()
        return results

    # Run for each Salesforce instance
    results = {sf.instance: inner(sf) for sf in sessions.values()}
    for sf in sessions.values():
        result = results[sf.instance].result()
        resolved = {}
        for obj in objects:
            records = result[obj].result()
            resolved[obj] = records
            return_string += f"\n{sf.instance.upper()} has {len(records)} {obj} orphaned records with IsTouringApp__c == True"

        # Export the resolved records, not the inner futures
        pdh.to_excel(resolved,
                     f'({sf.instance}) Orphaned Event Child Records.xlsx')
    return return_string
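

# The prompt() helper imported from functions.prompt above isn't shown in
# this collection. A minimal sketch of the assumed behaviour; the real
# helper may differ.
def prompt(message, boolean=False):
    answer = input(f'{message} ')
    if boolean:
        # Interpret a y/n style answer as True/False
        return answer.strip().lower() in ('y', 'yes', 'true', '1')
    return answer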
# Example 6
def main():

    tours = sf.select("""
    SELECT Id, TourName__c, TourBooker__r.Name, TourHeadliner__r.Name, TourCoHeadliner__r.Name, CreatedDate
    FROM Tour__c
    WHERE AppScope__c = 'UK'
    """,
                      return_type='flat')
    events = sf.select("""
    SELECT Id, TourLeg__r.Tour__c, EventFirstDate__c
    FROM Event__c
    WHERE TourLeg__r.Tour__c IN @tours.Id
    AND EventCancelled__c = False
    AND BusinessPlanOption__c = False
    """,
                       return_type='flat')
    events_by_tour = pdh.groupby_unsorted(events,
                                          lambda x: x['TourLeg__r.Tour__c'])
    for tour in tours:
        tour['FirstEventDate'] = min([
            item.EventFirstDate__c for item in events_by_tour.get(tour.Id, [])
            if item.EventFirstDate__c is not None
        ] or [''])
        tour['LastEventDate'] = max([
            item.EventFirstDate__c for item in events_by_tour.get(tour.Id, [])
            if item.EventFirstDate__c is not None
        ] or [''])

    pdh.to_excel(tours, 'Tour Renaming Workbook.xlsx')
    return
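

# pdh.groupby_unsorted above comes from functions.pandas_helpers, which isn't
# shown. A plausible equivalent: group items by a key function without
# requiring sorted input (unlike itertools.groupby).
from collections import defaultdict


def groupby_unsorted(items, key):
    groups = defaultdict(list)
    for item in items:
        groups[key(item)].append(item)
    return groups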
def main():
    ts = sf.select("""
    SELECT Id, Event__r.TourLeg__r.Tour__r.EOSId__c, Event__r.EOSId__c, Type__c, LastModifiedDate, LastModifiedBy.Name
    FROM TicketScale__c
    WHERE StageType__c IN ('Ticket Band','Ticket Hold','Ticket Upgrade')
    AND IsTouringApp__c = True
    AND Event__r.TourLeg__r.Tour__r.AppScope__c = 'UK'
    AND LastModifiedDate = TODAY
    ORDER BY LastModifiedDate DESC
    """,
                   return_type='flat')

    event_ids = [t['Event__r.EOSId__c'] for t in ts]
    all_events = sf.select("""
    SELECT Id, EOSId__c, TourLeg__r.Tour__r.EOSId__c
    FROM Event__c
    WHERE EOSId__c IN @event_ids
    """,
                           return_type='flat')

    # Bucket the records into groups modified within 60 seconds of one
    # another (a proxy for a single bulk save); ts is ordered by
    # LastModifiedDate DESC, so times only decrease within a group
    last = None
    groups = []
    curr_group = []
    groups.append(curr_group)
    for t in ts:
        # Seconds since midnight, parsed from the ISO timestamp
        s = t.LastModifiedDate[11:19]
        time = int(s[0:2]) * 60 * 60 + int(s[3:5]) * 60 + int(s[6:8])
        t.time = time
        if last is None or last.time - 60 <= time:
            curr_group.append(t)
        else:
            curr_group = []
            curr_group.append(t)
            groups.append(curr_group)
        t.group_index = len(groups) - 1
        last = t

    out = []
    for i, group in enumerate(groups):
        events = {t['Event__r.EOSId__c'] for t in group}
        tours = {t['Event__r.TourLeg__r.Tour__r.EOSId__c'] for t in group}
        all_possible_events = {
            e.EOSId__c
            for e in all_events if e['TourLeg__r.Tour__r.EOSId__c'] in tours
        }
        missing_events = all_possible_events - events
        out.append({
            'index': i,
            'events': len(events),
            'tours': len(tours),
            'missing_events': missing_events
        })

    pdh.to_html(ts, 'TS Data')
    pdh.to_excel([ts, out], 'TS Data')
    return
def main():
    do_fix = False

    venues = sf.select("""
    SELECT Id, Name, PrimaryOffice__r.Name, LocalTerms__c
    FROM Account
    WHERE RecordType.Name = 'Venue'
    """,
                       return_type='flat')

    lookfor1 = 'the night of the show'
    lookfor2 = ''

    reg1 = re.compile(
        r'(([\n\r]*.*?)If the net gross ticket revenue on night of show is lower than the all-in talent fee, then the talent fee will be adjusted to the net gross ticket revenue. Net gross ticket revenue is after deducting taxes, fees, FMF, bundles and parking \(where applicable\)\.)'
    )

    def output():
        for item in venues:
            if item.LocalTerms__c is None:
                continue
            item.Result = None
            res = re.search(reg1, item.LocalTerms__c)
            if res is not None:
                item.Result = 'Matched local terms to be removed'
                item.PrefixCharsToRemove = res.group(2)
            elif lookfor1 in item.LocalTerms__c:
                item.Result = 'Matched local terms to be removed'
            # elif lookfor2 in item.LocalTerms__c:
            #     item.Result = 'Matched outlier text in local terms'
            else:
                continue
            if do_fix:
                del item['Name']
                del item['PrimaryOffice__r.Name']
                if res:
                    item.LocalTerms__c = item.LocalTerms__c.replace(
                        res.group(0), '')
                else:
                    item.LocalTerms__c = item.LocalTerms__c.replace(
                        lookfor1, '')
            # if item.Id in [
            #         '0011Q00002NSRJfQAP',
            #         # '0013600001YSvCUAA1',
            #         # '0013600001YSvK7AAL',
            #         # '0011Q000021frucQAA',
            #     ]:
            yield item

    results = list(output())

    pdh.to_excel(
        results, f'{sf.instance} 2021-11-10 with "the night of the show".xlsx')
    # write_pickle(f'{sf.instance} 2021-10-05 Venues to Update Local Terms.pickle', results)

    # if do_fix:
    #     with sf.bypass_settings():
    #         sf.update(results)
    return
# Example 9
def main():
    ev = pd.read_excel(data_path, 'Evenko')
    rome = pd.read_excel(data_path, 'Rome')

    # diff = pdh.diff_and_fuzzy_match(rome, ev, 'Id', 'Type__c', None, None, None, False)
    diff = pdh.compare_datasets_for_merge(rome, ev, 'Type__c', None, 'Type__c')
    pdh.to_excel(diff, 'Evenko-Rome Expenses Diff Result.xlsx')
    return
def write_missing_eos_ids_file(offer_ids=None):
    offer_ids = offer_ids or []
    onsale_offer_ids = get_onsale_data()['Offer']['OfferId'].tolist()
    all_offer_ids = onsale_offer_ids + offer_ids
    missing, missing_by_tour = find_missing_eos_ids(offer_ids=all_offer_ids)
    missing_records = uk.query_by_eos_ids(sql, missing)
    pdh.to_excel(missing_records,
                 loc.uk_onsale_migration_missing_eos_ids_output)
    return missing, missing_by_tour
def main():
    usernames = [
        None
        # , "[email protected]"
        # , "[email protected]"
        # , "[email protected]"
        # , "*****@*****.**"
        # , "*****@*****.**"
        # , "*****@*****.**"
        , "*****@*****.**"
    ]
    sessions = [Salesforce_API(item) for item in usernames if item is not None]
    for session in sessions:
        session.save_record_snapshot_on_select = True
        events = session.select("""
        SELECT Id
        , ShowCount__c
        , (SELECT GrossSales__c, ProjectedGrossSales__c FROM TicketScales__r WHERE StageType__c = 'Plan')
        FROM Event__c
        WHERE IsTouringApp__c = True
        AND TourLeg__c <> NULL
        """, mode='simple')
        for event in events:
            event.SelloutGrossSales__c = event.ShowCount__c * sum(item.GrossSales__c for item in event.TicketScales__r)
            event.ProjectedGrossSales__c = event.ShowCount__c * sum(item.ProjectedGrossSales__c for item in event.TicketScales__r)

        deals = session.select("""
        SELECT Id, Event__r.TourLeg__r.Tour__r.ArtistAgency__c, Event__r.TourLeg__r.Tour__r.ArtistAgent__c, Type__c, DealType__c, BackendPercent__c, SplitPercentage__c, SplitBackendPercent__c
        FROM Deal__c
        WHERE Event__c <> NULL
        AND Event__r.IsTouringApp__c = True
        AND Event__r.TourLeg__c <> NULL
        AND RecordType.Name = 'Artist'
        """, return_type='dataframe', mode='simple')

        pdh.to_excel({
            'Events': events,
            'Deals': deals
        }, 'Inspect Deal data backfill BEFORE.xlsx')

        deals['IncludeToBeSharedAmount__c'] = True
        deals.loc[deals['Type__c'] == 'Support', 'DealType__c'] = 'Flat'
        deals.loc[deals['Type__c'] == 'Primary Headliner', 'SplitPercentage__c'] = 100
        deals.loc[deals['Type__c'] == 'Primary Headliner', 'SplitBackendPercent__c'] = deals[deals['Type__c'] == 'Primary Headliner']['BackendPercent__c']
        deals.fillna('', inplace=True)

        pdh.to_excel({
            'Events': events,
            'Deals': deals
        }, 'Inspect Deal data backfill.xlsx')

        session.add_bypass_settings()
        # job1 = session.update(events, mode='bulk')
        job2 = session.update(deals, mode='bulk', batch_size=1000)
        threading.wait()
        session.remove_bypass_settings()
    return
# Example 12
def main(sessions, do_fix=False):
    import pandas as pd
    import functions.pandas_helpers as pdh
    from classes.py_threading import ThreadPool, Thread
    threading = ThreadPool()
    sessions = {
        username: sf
        for username, sf in sessions.items() if username in instances_to_run
    }
    return_string = ""
    fields_to_match = [
        'Venue__r.Name',
        'Office__r.Name',
        'PrimaryHeadlinerArtist__c',
        # 'EventFirstDate__c',
    ]

    @threading.run_async
    def inner(sf):
        fields_to_match_str = ', '.join(f'{f}, BusinessPlanEvent__r.{f}'
                                        for f in fields_to_match)
        query = f"""
        SELECT Id, Tour__r.TourTitle__c, BusinessPlanEvent__c, EventTitle__c, {fields_to_match_str}
        FROM Event__c
        WHERE BusinessPlanEvent__c <> NULL
        """
        records = sf.select(query, mode='simple',
                            return_type='dataframe').fillna('')

        # pdquery = ' or '.join(f'(`{f}` != `BusinessPlanEvent__r.{f}`)' for f in fields_to_match)

        def diff(row):
            differences = [
                f for f in fields_to_match
                if row[f] != row['BusinessPlanEvent__r.' + f]
            ]
            return ', '.join(differences)

        # mismatched = records.query(pdquery)
        records['Diff'] = records.apply(diff, axis=1)
        return records.query('Diff != ""')

    # Run for each Salesforce instance
    results = {sf.instance: inner(sf) for sf in sessions.values()}
    output = {}
    for sf in sessions.values():
        result = results[sf.instance].result()
        for f in fields_to_match:
            mismatches_for_field = result.query(
                f"`{f}` != `BusinessPlanEvent__r.{f}`")
            # Key by instance and field so instances don't overwrite each other
            output[f'{sf.instance} {f} Mismatches'] = mismatches_for_field
            return_string += f'\n{sf.instance.upper()} has {len(mismatches_for_field)} mismatches on the {f} field between BP and Local Events'

    if do_fix:
        print(return_string)
        pdh.to_excel(output, 'Local Events with Field Mismatches.xlsx')
    return return_string
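

# The backtick quoting in .query() above is standard pandas (0.25+): it lets
# a query reference columns whose names aren't valid Python identifiers, such
# as the dotted relationship fields these flat Salesforce selects return.
# A small self-contained demonstration:
import pandas as pd

df = pd.DataFrame({
    'Venue__r.Name': ['Arena A', 'Arena B'],
    'BusinessPlanEvent__r.Venue__r.Name': ['Arena A', 'Arena C'],
})
mismatches = df.query('`Venue__r.Name` != `BusinessPlanEvent__r.Venue__r.Name`')
print(mismatches)  # only the row where the two venue columns differ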
# Example 13
def main():
    usernames = [
        # "[email protected]",
        # "[email protected]",
        # "[email protected]",
        # "*****@*****.**",
        # "*****@*****.**",
        # "*****@*****.**",
        "*****@*****.**",
    ]
    sessions = [Salesforce_API(item) for item in usernames if item is not None]
    for session in sessions:
        accounts = session.select("""
        SELECT Id, RecordType.Name, Name, VenueName__c, BillingCity, BillingCountry, SourceSystemId__c, EOSId__c, CreatedBy.Name
        FROM Account
        """,
                                  return_type='dataframe')

        output = {}
        recordtypes = [
            'Venue',
            # 'Co-Promoter',
            # 'Artist',
            # 'Agency',
            # 'Ticket Agency',
        ]

        def wipe_same_record(x):
            # num_matches=4 below produces m1_ through m4_ columns
            for i in range(1, 5):
                if x[f'm{i}_Id'] == x['Id']:
                    for key in x.keys():
                        if key.startswith(f'm{i}_'):
                            x[key] = ''
            return x

        for r in recordtypes:
            filtered_accounts = accounts.query("`RecordType.Name` == @r")
            print(f'Processing {len(filtered_accounts)} {r} records')
            matching_fields = ['VenueName__c', 'BillingCity'
                               ] if r == 'Venue' else ['Name']
            df = pdh.compare_datasets_for_merge(
                filtered_accounts,
                filtered_accounts,
                matching_fields,
                fields_to_return=[
                    'Id', 'BillingCity', 'EOSId__c', 'SourceSystemId__c'
                ] + matching_fields,
                num_matches=4)
            df = df.apply(wipe_same_record, axis=1)
            df.sort_values(by=['m1_%', 'm2_%', 'm3_%', 'm4_%'],
                           ascending=False,
                           inplace=True)
            # df.drop(columns=[c for c in df.columns.values if c.startswith('m1_')], inplace=True)
            output[r] = df
        pdh.to_excel(output, 'Rome Duplicate Analysis.xlsx')
    return
# Example 14
def fix_venue_names():
    rome_venues = sf.select("""
    SELECT Id, VenueName__c, BillingCity, EOSId__c
    FROM Account
    WHERE RecordType.Name = 'Venue'
    AND EOSId__c <> NULL
    """)
    rome_venues_by_id = {item.EOSId__c: item for item in rome_venues}

    eos_venues = sql.query("""
    SELECT Id, 'Venue-' + CAST(Id AS VARCHAR) AS EOSId__c, Name
    FROM Venue
    WHERE RomeId IS NOT NULL
    """)

    toupdate = []
    # issue_rome_venues = [item for item in rome_venues if f'({item.BillingCity})' in item.VenueName__c]
    # for item in issue_rome_venues:
    #     item.NewName = item.VenueName__c.replace(f'({item.BillingCity})', '').strip()
    # issue_rome_venues = [item for item in rome_venues if item.VenueName__c.endswith(str(item.BillingCity))]
    # for item in issue_rome_venues:
    #     item.NewName = item.VenueName__c.replace(f', {item.BillingCity}', '').replace(f',{item.BillingCity}', '').replace(f'{item.BillingCity}', '').strip()

    for venue in eos_venues:
        rome_venue = rome_venues_by_id.get(venue['EOSId__c'], None)
        if rome_venue is None:
            continue
        venue['RomeName'] = rome_venue.VenueName__c
        venue['RomeCity'] = rome_venue.BillingCity
        venue['OldName'] = venue['Name']
        venue['Name'] = f'{rome_venue.VenueName__c} ({rome_venue.BillingCity})'
        if rome_venue.BillingCity is not None and venue['OldName'] != venue['Name']:
            toupdate.append(venue)
    # pdh.to_excel(issue_rome_venues, 'issue_rome_venues.xlsx')
    pdh.to_excel(toupdate, 'EOS Venues Update.xlsx')

    update_records = [{
        key: val
        for key, val in record.items() if key in {'Id', 'Name'}
    } for record in toupdate]

    success = []
    failure = []
    for i, chunk in enumerate(chunks(update_records, 10)):
        try:
            print(f'updating chunk {i}')
            sql.update('Venue', 'Id', chunk)
            success.extend(chunk)
        except Exception:
            failure.extend(chunk)
    # To review
    # 0:{'Id': 1043, 'Name': 'Hartwall Arena (Helsinki)'}
    # 1:{'Id': 1306, 'Name': 'Telenor Arena (Oslo)'}
    return
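

# chunks() above is assumed to be a fixed-size slicing helper; its source
# isn't shown in this collection. A minimal sketch:
def chunks(items, size):
    # Yield successive slices of at most `size` items
    for i in range(0, len(items), size):
        yield items[i:i + size]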
# Example 15
def run_job(file_name,
            data_function,
            fix_function,
            usernames,
            do_fix=False,
            download_result=False,
            sf_sessions=None,
            sql_sessions=None):
    sessions = get_sessions(usernames, sf_sessions, sql_sessions)
    job_name = os.path.split(file_name)[1].replace('.py', '')
    return_strings = []
    threads = []
    results = []
    total_len = 0
    for sf, sql in sessions:
        thread = threading.new(data_function, sf, sql)
        threads.append((sf, sql, thread))

    for sf, sql, thread in threads:
        data, vars, error_message = thread.result()
        vars = vars or {}
        if isinstance(data, dict) and not error_message:
            error_message = '\n'.join(
                f'{sf.instance.upper()} {k}: {len(v)} problem records'
                for k, v in data.items())
            total_len += sum(len(v) for v in data.values())
        else:
            error_message = f'{sf.instance.upper()}: ' + (
                error_message or f'{len(data)} problem records')
            total_len += len(data)
        if len(data) > 0:
            results.append((sf, sql, data, vars, error_message))
            return_strings.append(error_message)

    if download_result:
        timenow = datetime.now().strftime("%Y-%m-%d %H.%M")
        output = {}
        for sf, sql, data, vars, error_message in results:
            if isinstance(data, dict):
                for k, v in data.items():
                    output[f'{sf.instance.upper()} {k} ({len(v)})'] = v
            else:
                output[f'{sf.instance.upper()} ({len(data)})'] = data
        to_excel(output, f'{timenow} {job_name} ({total_len}).xlsx')

    if do_fix and total_len > 0:
        print(job_name)
        for sf, sql, data, vars, error_message in results:
            if len(data) > 0:
                print(error_message)
                fix_function(sf, sql, vars, data)

    return '\n'.join(return_strings)
# Example 16
def to_excel(self, file_path=None, additional_sheets=None):
    file_path = file_path or self.excel_file_path
    flat = SalesforceMetadataFile.flatten_dict(self.tree)
    dir.make_dir(self.excel_files_folder_path, False)
    output = {self.metadata_type(): flat}
    if additional_sheets is not None:
        output.update(additional_sheets)
    # Honor the file_path argument rather than always using the default
    pdh.to_excel(output, file_path)
    print(
        f'Open report at the following location:\n{os.path.abspath(file_path)}'
    )
def main():

    issues = sf.select("""
    SELECT Event__c, Type__c, COUNT(Id) cnt, SUM(OfferRate__c) OfferRate, SUM(InHouseRate__c) InHouseRate
    FROM LedgerEntryBreakout__c
    WHERE Type__c IN ('TSO','TSO Adjustment')
    AND Event__r.IsTouringApp__c = True
    AND CreatedDate >= LAST_YEAR
    AND Event__c != NULL
    GROUP BY Event__c, Type__c
    HAVING COUNT(Id) > 1
    """)

    records = sf.select("""
    SELECT Id, Event__r.TourLeg__r.Tour__c, Event__c, Event__r.EventTitle__c, Type__c, OfferRate__c, InHouseRate__c, CreatedDate
    FROM LedgerEntryBreakout__c
    WHERE Event__c IN @issues.Event__c
    AND Event__r.IsTouringApp__c = True
    AND Type__c IN ('TSO','TSO Adjustment')
    AND CreatedDate >= LAST_YEAR
    AND Event__c != NULL
    ORDER BY Event__c, CreatedDate ASC
    """,
                        return_type='flat')

    to_update = []
    to_delete = []
    for issue in issues:
        evt = issue['Event__c']
        typ = issue['Type__c']
        evt_records = [
            i for i in records if i['Event__c'] == evt and i['Type__c'] == typ
        ]
        first = evt_records[0].copy()
        remaining = evt_records[1:]
        first['PriorOfferRate'] = first['OfferRate__c']
        first['PriorInHouseRate'] = first['InHouseRate__c']
        first['OfferRate__c'] = issue['OfferRate']
        first['InHouseRate__c'] = issue['InHouseRate']
        to_update.append(first)
        to_delete.extend(remaining)

    pdh.to_excel(
        {
            'Issues': issues,
            'Rome Records': records,
            'To Update': to_update,
            'To Delete': to_delete,
        }, 'TSO Dupes')
    if (to_update or to_delete) and prompt('Ready?', boolean=True):
        sf.delete(to_delete, batch_size=5)
        sf.update(to_update, batch_size=5)
    return
# Example 18
def main():

    package_licenses = session.select(
        "SELECT Id, NamespacePrefix FROM PackageLicense WHERE PackageLicense.NamespacePrefix IN('Loop','SLCA2','GBLite')"
    )
    users = session.select("""
    SELECT Id, Name, Email, UserRole.Name, Profile.Name
    FROM User
    WHERE IsActive = True
    AND Profile.Name LIKE '%Internal%'
    """)
    existing_user_package_licenses = session.select(
        "SELECT UserId, PackageLicenseId FROM UserPackageLicense")

    df1 = pd.DataFrame(package_licenses)
    df2 = pd.DataFrame(users)
    # Profile-specific subsets, kept for ad-hoc inspection (unused below)
    df4 = df2[df2['Profile.Name'] == 'LNE Internal User']
    df5 = df2[df2['Profile.Name'] == 'LNE Personnel Community']

    df3 = pd.DataFrame(existing_user_package_licenses)

    df1.rename(columns={'Id': 'PackageLicenseId'}, inplace=True)
    df2.rename(columns={'Id': 'UserId'}, inplace=True)

    # Cross-join every user with every package license, then anti-join against
    # the existing assignments: rows marked 'left_only' by the merge indicator
    # are user/license pairs that don't exist yet
    output = pd.merge(left=df1, right=df2, how='cross')
    output = pd.merge(output,
                      df3,
                      how='outer',
                      on=['UserId', 'PackageLicenseId'],
                      indicator=True)
    output = output[output['_merge'] == 'left_only'].copy()
    output.drop(columns='_merge', inplace=True)

    if len(output) > 0:
        pdh.to_excel(output, 'Users that need Package Licenses.xlsx')
        # session.insert('UserPackageLicense', output.to_dict('records'))

    # records = session.select("""
    # SELECT Id, Event__r.EventTitle__c, PublicOnSale__c
    # FROM EventDateTime__c
    # WHERE Event__r.BusinessPlanEvent__c <> NULL
    # AND Event__c NOT IN (SELECT Event__c FROM AdPlan__c)
    # AND PublicOnSale__c <> NULL
    # """, mode='simple')
    return
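

# The indicator merge above is the standard pandas anti-join pattern. A small
# self-contained demonstration of the same idea:
import pandas as pd

left = pd.DataFrame({'UserId': [1, 1, 2], 'PackageLicenseId': ['a', 'b', 'a']})
existing = pd.DataFrame({'UserId': [1], 'PackageLicenseId': ['a']})
merged = left.merge(existing, how='outer', on=['UserId', 'PackageLicenseId'], indicator=True)
missing = merged[merged['_merge'] == 'left_only'].drop(columns='_merge')
print(missing)  # (1, 'b') and (2, 'a'): the assignments still to be created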
def main(sessions, do_fix=False):
    import pandas as pd
    import functions.pandas_helpers as pdh
    from classes.py_threading import ThreadPool, Thread
    threading = ThreadPool()
    sessions = {
        username: sf
        for username, sf in sessions.items() if username in instances_to_run
    }
    return_string = ""

    @threading.run_async
    def inner(sf):
        query = """
        SELECT Id, Event__r.EventTitle__c, Type__c, Label__c, Capacity__c, ProjectedPaidTickets__c, Price__c
        FROM TicketScale__c
        WHERE CreatedDate >= THIS_YEAR
        AND IsTouringApp__c = True
        AND StageType__c IN ('Plan','Projection')
        AND (Capacity__c = NULL OR ProjectedPaidTickets__c = NULL OR Price__c = NULL)
        """
        records = sf.select(query, mode='simple', return_type='dataframe')
        return records

    # Run for each Salesforce instance
    results = {sf.instance: inner(sf) for sf in sessions.values()}
    for sf in sessions.values():
        result = results[sf.instance].result()
        if len(result) > 0:
            return_string += f"\n{sf.instance.upper()} has {len(result)} Ticket Scales with blank Capacity, ProjectedPaidTickets, or Price__c"

    if do_fix:
        fix = lambda x: 0 if pd.isnull(x) else x
        for sf in sessions.values():
            result = results[sf.instance].result()
            if len(result) == 0:
                continue
            fixed = result.copy()
            fixed['Capacity__c'] = fixed['Capacity__c'].apply(fix)
            fixed['ProjectedPaidTickets__c'] = fixed['ProjectedPaidTickets__c'].apply(fix)
            fixed['Price__c'] = fixed['Price__c'].apply(fix)
            pdh.to_excel(
                {
                    'Original': result,
                    'Fixed': fixed
                },
                f'({sf.instance}) Ticket Scales with blank Capacity ProjectedPaidTickets or Price__c.xlsx')
            sf.add_bypass_settings()
            sf.update(fixed[[
                'Id', 'Capacity__c', 'ProjectedPaidTickets__c', 'Price__c'
            ]])
            sf.remove_bypass_settings()
    return return_string
def example1():
    events = uat.select(
        "SELECT Id, EventTitle__c, PrimaryHeadlinerArtist__c FROM Event__c LIMIT 100",
        return_type='flat')
    deals = uat.select(
        "SELECT Id, Artist__r.Name, GuaranteeAmount__c FROM Deal__c WHERE RecordType.Name = 'Artist' LIMIT 1000",
        return_type='flat')
    ledgerentries = uat.select(
        "SELECT Id, GLCode__c, CurrentFlash__c FROM LedgerEntry__c LIMIT 3000",
        mode='bulk',
        return_type='flat')

    pdh.to_excel([events, deals, ledgerentries], 'example1.xlsx')
def main():
    session = Salesforce_API("*****@*****.**")
    
    records = session.select("""
    SELECT Id, Event__r.EventTitle__c, Capacity__c, SellableCapacity__c, ProjectedPaidTickets__c, LastModifiedDate
    FROM TicketScale__c
    WHERE LastModifiedDate >= THIS_YEAR
    ORDER BY Event__r.EventYear__c, Event__c
    """, return_type='dataframe')
    
    issues = records.query("SellableCapacity__c < ProjectedPaidTickets__c")

    pdh.to_excel(issues, 'Ticket Scales with Projected greater than Sellable Capacity.xlsx')
        
    return
# Example 22
def main():
    sf = Salesforce_API('*****@*****.**')
    sql = SQL_Server_API('EOS-prod')
    # sql = SQL_Server_API('EOS-pre-prod')

    offer_ids = sf.select('SELECT EOSId__c FROM Tour__c WHERE EOSId__c <> NULL', return_type='dataframe').EOSId__c.astype('int').tolist()
    # offer_ids = [54441, 57933, 57965, 58400, 58737, 58050, 57930, 57263, 53728, 55808]

    result, bycolumn = uk.diff_eos_offers_from_backup(sql, 'Prod 2021-08-16', offer_ids, True)
    result = {f'{key} ({len(val)})': val for key,val in result.items() if len(val) > 0}
    bycolumn = {key: {f'{f} ({len(df)})': df for f, df in dfs.items() if len(df) > 0} for key,dfs in bycolumn.items()}
    pdh.to_excel(result, 'UK Tour Data Changes.xlsx')
    for key, val in bycolumn.items():
        if len(val) > 0:
            pdh.to_excel(val, f'UK Tour Data Changes By Column - {key}.xlsx')
    return
def find_venue_dupes():
    rome_venues = sf.select("""
    SELECT Id, EOSId__c, Name, VenueName__c, BillingCity
    FROM Account
    WHERE EOSId__c <> NULL
    AND RecordType.Name = 'Venue'
    """,
                            mode='bulk')
    eos_venues = sql.query("""
    SELECT 'Venue-' + CAST(Id AS VARCHAR) AS EOSId__c, Name
    FROM Venue
    """)

    eos_venues_by_name = {item['Name']: item for item in eos_venues}

    issues = []
    for venue in rome_venues:
        # Match on VenueName__c first, then fall back to Name; report at most
        # one issue per venue so the later update doesn't get duplicate Ids
        for match in (eos_venues_by_name.get(venue.VenueName__c, None),
                      eos_venues_by_name.get(venue.Name, None)):
            if match is not None and match['EOSId__c'] != venue['EOSId__c']:
                issues.append({
                    'RomeId': venue.Id,
                    'RomeLink': f'https://{sf.simple.sf_instance}/{venue.Id}',
                    'Name': venue.Name,
                    'CurrRomeEOSId': venue.EOSId__c,
                    'ChangeRomeEOSIdTo': match['EOSId__c'],
                })
                break
    if issues:
        pdh.to_excel(issues, f'{sf.instance} Rome EOS Venue Issues.xlsx')
    do_fix = len(issues) > 0 and prompt('Fix the venues?', boolean=True)
    if do_fix:
        update_records = pd.DataFrame(issues).rename(
            columns={
                'RomeId': 'Id',
                'ChangeRomeEOSIdTo': 'EOSId__c'
            }).drop(columns=['Name', 'RomeLink', 'CurrRomeEOSId'])
        sf.update(update_records)
    return
def example8():
    xlsx = pd.ExcelFile(
        '/Users/daniel.hicks_1/Downloads/ExampleEventImportData.xlsx')
    import_data = {
        'Event__c': pd.read_excel(xlsx, 'Event__c'),
        'EventDateTime__c': pd.read_excel(xlsx, 'EventDateTime__c'),
        'Deal__c': pd.read_excel(xlsx, 'Deal__c'),
        'TicketScale__c': pd.read_excel(xlsx, 'TicketScale__c'),
        'LedgerEntry__c': pd.read_excel(xlsx, 'LedgerEntry__c'),
    }

    model = sfc.SalesforceLiveNationModelCompute(
        uat, **import_data, set_null_datasets_to_empty_list=True)
    computed = model.compute_all()
    data = {key: val for key, val in computed.data2.items() if val is not None}
    pdh.to_excel(data, 'COMPUTED - ExampleEventImportData.xlsx')
    print('')
# Example 25
def touring_app_analysis():
    gh.pull_remote(repo_path, ignore_errors=True)
    files = dir.get_files_list(f'{repo_path}/resources/react-sfdc')
    args = [(f, ) for f in files if f.endswith('.tsx')
            if 'SettlementExpenseGridRow' in f]
    results = mp.starmap(get_function_definitions, args, multi=False)
    all_funcs = [f._asdict() for f in itertools.chain.from_iterable(results)]
    pdh.to_excel(all_funcs, 'Touring App Functions')
    dupes = pdh.find_duplicates(all_funcs, 'body_nowhitespace', ['name'],
                                ['name', 'file_path'], 5, False, True)
    out = {
        'All': all_funcs,
    }
    for i, df in enumerate(dupes):
        out[f'List {i}'] = df
    # Export the assembled sheets dict rather than the raw dupes list
    pdh.to_excel(out, 'Touring App Functions Duplicate Analysis')
    return results
# Example 26
def inner(sf):
    lebs_thread = threading.new(sf.select,
                                """
        SELECT Id, Event__r.EventTitle__c, LedgerEntry__r.Event__r.EventTitle__c, Event__r.IsTouringApp__c
        , LedgerEntry__r.Event__r.IsTouringApp__c, Event__c, GLCodePicklist__c, LedgerEntry__c, LedgerEntry__r.Event__c
        , CreatedDate, CreatedBy.Name, LastModifiedDate, LastModifiedBy.Name
        FROM LedgerEntryBreakout__c
        WHERE CreatedDate >= YESTERDAY
        AND (
            Event__c = NULL
            OR Event__r.BusinessPlanEvent__c <> NULL
            OR IsTouringApp__c = TRUE
        )
        """,
                                mode='bulk',
                                return_type='dataframe')
    les_thread = threading.new(sf.select,
                               """
        SELECT Id, Event__c, GLCode__c
        FROM LedgerEntry__c
        WHERE CreatedDate >= THIS_MONTH
        AND Event__c <> NULL
        AND (
            Event__r.BusinessPlanEvent__c <> NULL
            OR IsTouringApp__c = TRUE)
        """,
                               mode='bulk',
                               return_type='dataframe')
    # Resolve the futures before using the DataFrames
    lebs = lebs_thread.result().fillna('')
    les = les_thread.result()
    issues = lebs[lebs['Event__c'] != lebs['LedgerEntry__r.Event__c']]

    # les_map = les.set_index(['Event__c', 'GLCode__c']).to_dict('index')
    # def get_corrected_le(row):
    #     tup = (row['Event__c'], row['GLCodePicklist__c'])
    #     return les_map[tup]['Id'] if tup in les_map else None
    # if len(issues) > 0:
    #     issues['Corrected LedgerEntry__c'] = issues.apply(get_corrected_le, axis=1)
    print(len(issues))
    pdh.to_excel(
        issues,
        f'{sf.instance} LEBs with Event__c different than LE ({len(issues)}).xlsx'
    )
    # sf.add_bypass_settings()
    # sf.delete(issues[issues['Event__c'] == ''].copy(), mode='bulk')
    # sf.remove_bypass_settings()
    return issues
def main():
    events = sf.select("""
    SELECT Id, Office__r.Name, Venue__r.Name, EventName__c, Status__c, Venue__r.EvenkoAACodePrefix__c, ShowCount__c, EventFirstDate__c, EvenkoAACode__c, InitialConfirmedDate__c
    FROM Event__c
    WHERE EvenkoAACode__c = NULL
    AND Venue__r.EvenkoAACodePrefix__c != NULL
    AND Status__c IN ('Confirmed','Flash Started','Flash Completed')
    AND Division__c = 'Evenko'
    """, return_type='flat')
    for item in events:
        s = item['Venue__r.EvenkoAACodePrefix__c'] + item['EventFirstDate__c'][2:].replace('-','')
        item['EvenkoAACode__c'] = s
    pdh.to_html(pd.DataFrame(events), 'New Evenko Codes')
    pdh.to_excel(pd.DataFrame(events), 'New Evenko Codes')
    with sf.bypass_settings():
        sf.update(events)
    return
# Example 28
def main(username):
    session = Salesforce_API(username)

    query = """
    SELECT Id
    FROM Account
    """
    result = session.select(query)
    # For a bulk query, use:
    # result = session.select(query, mode='bulk')

    sheets_data = {
        'Sheet1': result,
        'Sheet2': pd.DataFrame(),  # can export multiple sheets in the same excel file
    }

    pdh.to_excel(sheets_data, 'Query Result.xlsx')
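

# pdh.to_excel (functions.pandas_helpers) isn't shown in this collection. A
# plausible equivalent using plain pandas, assuming it maps dict keys to
# worksheet names; the real helper likely does more (lists, formatting, etc.).
import pandas as pd


def to_excel(sheets, file_path):
    with pd.ExcelWriter(file_path) as writer:
        for sheet_name, data in sheets.items():
            df = data if isinstance(data, pd.DataFrame) else pd.DataFrame(data)
            # Excel caps worksheet names at 31 characters
            df.to_excel(writer, sheet_name=str(sheet_name)[:31], index=False)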
def rome_accounts():
    match_threshold = 60

    accounts = sf.select("""
    SELECT Id, Name, VenueName__c, BillingCity, RecordType.Name
    FROM Account
    -- WHERE RecordType.Name IN ('Venue','Ticket Agency')
    """,
                         return_type='dataframe')

    grouped = accounts.groupby('RecordType.Name')

    match_fields = {'Venue': ['VenueName__c', 'BillingCity']}

    @threading.run_async
    def get_dupe_analysis(recordtype, *args, **kwargs):
        print(f'Running duplicate check for {recordtype}')
        result = pdh.find_duplicates(*args, **kwargs)
        output = {}
        for i, df in enumerate(result):
            if len(df) > 0:
                df = df[df['m_%'] >= match_threshold].copy()
                df['MergeLink'] = df.apply(lambda row: (
                    f'https://{sf.simple.sf_instance}/merge/accmergewizard.jsp?goNext=+Next+&cid={row["Id"]}&cid={row["m_Id"]}'
                ),
                                           axis=1)
                output[f'{recordtype} {i+1}'] = df
        return output

    threads = []
    for recordtype, indexes in grouped.groups.items():
        match_on = match_fields.get(recordtype, 'Name')
        threads.append(
            get_dupe_analysis(recordtype,
                              accounts.iloc[indexes],
                              match_on,
                              None,
                              'Id',
                              num_matches=3))
    # get_dupe_analysis runs async and returns a future; resolve each result
    # before merging it into the output dict
    results = {}
    for thread in threads:
        results.update(thread.result())

    pdh.to_excel(results,
                 f'({sf.instance}) Rome Account Duplicate Analysis.xlsx')
    return
def venue_aa_code_update():

    staged = sf.fupdate(
        f"""
    UPDATE Event__c
    SET EvenkoAACode__c = Venue__r.EvenkoAACodePrefix__c + format_date_yymmdd(EventFirstDate__c) + IF(
            Venue__r.EvenkoAACodePrefix__c == 'WE'
            , REPLACE(Venue__r.LNEMDID__c, 'AC', '')
            , '')
    WHERE Office__r.Division__c = 'Evenko'
    AND EvenkoAACode__c = NULL
    AND EventFirstDate__c != NULL
    AND Status__c = 'Confirmed'
    AND Venue__r.EvenkoAACodePrefix__c != NULL
    """,
        show_columns=['EventTitle__c', 'EventFirstDate__c', 'OfficeName__c'])

    pdh.to_excel(staged, 'Staged Evenko updates')

    if prompt('Update?', boolean=True):
        with sf.bypass_settings():
            sf.update(staged)