Example #1
 def inner(path, node):
     for item in os.listdir(path):
         itempath = path + '/' + item
         if os.path.isdir(itempath):
             newnode = ObjDict()
             node[item] = newnode
             inner(itempath, newnode)
         else:
             if item == 'package.xml':
                 continue
             name = itempath.split('/')[-1]
             if trim_suffixes:
                 name = name[:name.find('.')]
             try:
                 if return_type == 'binary':
                     node[name] = open(itempath, 'rb').read()
                 elif return_type == 'dict':
                     node[name] = ObjDict.deepclone(
                         xmltodict.parse(
                             open(itempath, 'rb').read()))
                 elif return_type == 'records':
                     node[name] = flatten_dict(
                         xmltodict.parse(open(itempath, 'rb').read()))
                 elif return_type == 'parsed':
                     node[name] = ObjDict.deepclone(
                         XmlParser(file_path=itempath))
             except Exception:
                 # fall back to the raw file contents if parsing fails
                 node[name] = open(itempath, 'rb').read()
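These examples rely on an ObjDict helper that is never shown. A minimal stand-in, assuming it is a dict subclass with attribute access plus a deepclone helper that converts nested containers; the real class may differ:

# Hypothetical ObjDict stand-in (an assumption, for running the examples)
class ObjDict(dict):
    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError:
            raise AttributeError(name)

    def __setattr__(self, name, value):
        self[name] = value

    @classmethod
    def deepclone(cls, obj):
        # recursively convert nested dicts/lists into ObjDict instances
        if isinstance(obj, dict):
            return cls({k: cls.deepclone(v) for k, v in obj.items()})
        if isinstance(obj, list):
            return [cls.deepclone(item) for item in obj]
        return obj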
Example #2
    def _get_opener(self, sheet_name=None, convert_to_text=True):
        def conv_excel_date(tup):
            t = [str(s) for s in tup]
            if tup[3] + tup[4] + tup[5] == 0:
                return "{}-{}-{}".format(t[0].zfill(4), t[1].zfill(2),
                                         t[2].zfill(2))
            else:
                return "{}-{}-{}T{}:{}.{}Z".format(
                    t[0].zfill(4), t[1].zfill(2), t[2].zfill(2), t[3].zfill(2),
                    t[4].zfill(2), t[5].zfill(3))

        if self.file_location.endswith(".csv"):
            with open(self.file_location, 'r', encoding='utf-8-sig') as file:
                csv_reader = csv.DictReader(file)
                for row in csv_reader:
                    yield ObjDict(row)
        elif self.file_location.endswith(('.xlsx', '.xls', '.xlsm')):
            xlsx_file = xlrd.open_workbook(self.file_location)
            sheets = xlsx_file.sheet_names()
            if len(sheets) > 1 and sheet_name is None:
                print("\nWhat sheet would you like to read?")
                print(self.file_location)
                print("\n".join([
                    "\t{}: {}".format(i + 1, sheet)
                    for i, sheet in enumerate(sheets)
                ]))
                sheet_name = sheets[int(input()) - 1]
            elif sheet_name is None:
                sheet_name = sheets[0]
            sheet_to_open = xlsx_file.sheet_by_name(sheet_name)
            headers = sheet_to_open.row_values(0)
            for row_num in range(1, sheet_to_open.nrows):
                new_row = dict()
                src_row = sheet_to_open.row(row_num)
                for col_num, field in enumerate(headers):
                    src_cell = src_row[col_num]
                    if src_cell.ctype == 1:  # text
                        new_row[field] = src_cell.value
                    elif src_cell.ctype == 2 and convert_to_text:  # number
                        new_row[field] = str(
                            src_cell.value).rstrip("0").rstrip(".")
                    elif src_cell.ctype == 3 and convert_to_text:  # date
                        new_row[field] = conv_excel_date(
                            xlrd.xldate_as_tuple(src_cell.value,
                                                 xlsx_file.datemode))
                    elif src_cell.ctype == 4 and convert_to_text:  # bool
                        new_row[field] = "true" if src_cell.value == 1 else "false"
                    else:
                        new_row[field] = src_cell.value
                yield ObjDict(new_row)
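A quick check of the date conversion above, assuming xlrd's (year, month, day, hour, minute, second) tuples from xldate_as_tuple:

def demo_conv_excel_date(tup):
    t = [str(s) for s in tup]
    if tup[3] + tup[4] + tup[5] == 0:  # zero time component: date-only cell
        return "{}-{}-{}".format(t[0].zfill(4), t[1].zfill(2), t[2].zfill(2))
    return "{}-{}-{}T{}:{}:{}Z".format(t[0].zfill(4), t[1].zfill(2),
                                       t[2].zfill(2), t[3].zfill(2),
                                       t[4].zfill(2), t[5].zfill(2))

assert demo_conv_excel_date((2021, 1, 1, 0, 0, 0)) == "2021-01-01"
assert demo_conv_excel_date((2021, 1, 1, 9, 30, 5)) == "2021-01-01T09:30:05Z"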
Example #3
 def elem_passes_filter(curr_path_depth, elem):
     if filter is None: return True
     local_tree = ObjDict()
     node = local_tree
     # build the element's path as a nested tree so a dotted filter
     # expression (e.g. "a.b.c == 1") can resolve against it
     for s in curr_path_depth[:-1]:
         node[s] = ObjDict()
         node = node[s]
     node[curr_path_depth[-1]] = elem
     try:
         return eval(filter, {}, local_tree) is True
     except AttributeError:
         return True
     except Exception:
         return False
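A sketch of how the eval-based filter works, assuming the ObjDict stand-in above: the element is wrapped in a one-branch tree so a dotted filter string resolves against it.

# element at path ['event', 'name'] (hypothetical data)
local_tree = ObjDict({'event': ObjDict({'name': 'Arena Show'})})
assert eval("event.name == 'Arena Show'", {}, local_tree) is True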
Example #4
 def unflatten_records(records):
     if isinstance(records, pd.DataFrame):
         records = records.to_dict('records')
     output = ObjDict()
     for row in records:
         field_values = ObjDict({
             key[key.find('.') + 1:]: val
             for key, val in row.items()
             if key not in ('Location', 'Type') and pd.notnull(val)
         })
         field_values = ObjDict({
             key:
             ([] if val == '#list#' else {} if val == '#dict#' else val)
             for key, val in field_values.items()
         })
         node = output
         for loc in row['Location'].split('|')[1:]:
             if isinstance(node, dict):
                 if loc not in node:
                     node[loc] = ObjDict() if row['Type'] == 'property' else []
                 node = node[loc]
             elif isinstance(node, list):
                 if loc not in node[-1]:
                     node[-1][loc] = ObjDict() if row['Type'] == 'property' else []
                 node = node[-1][loc]
         if row['Type'] == 'property':
             node.update(field_values)
         elif row['Type'] == 'listitem':
             node.append(field_values)
         elif row['Type'] == 'listvalue':
             node.append(field_values['value'])
     return output
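A usage sketch, assuming pandas and the ObjDict stand-in are available: each record carries a 'Location' path and a 'Type' of property, listitem, or listvalue, and field keys are stripped up to the first dot.

records = [  # hypothetical flattened rows
    {'Location': 'root|config', 'Type': 'property', 'f.name': 'demo'},
    {'Location': 'root|config|tags', 'Type': 'listvalue', 'f.value': 'a'},
]
assert unflatten_records(records) == {'config': {'name': 'demo', 'tags': ['a']}}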
Example #5
def get_file_data_by_tour_and_event(file_data):
    offer_ids = file_data.Tour__c['EOSId__c'].tolist()
    event_ids = file_data.Event__c['EOSId__c'].tolist()
    file_data_tour_groups_by_table = {
        k: v.groupby('TourEOSId')
        for k, v in file_data.items() if len(v) > 0
    }
    file_data_by_tour = {
        eosid: ObjDict({
            k: file_data_tour_groups_by_table[k].get_group(eosid) if eosid
            in file_data_tour_groups_by_table[k].groups else pd.DataFrame()
            for k in file_data_tour_groups_by_table
        })
        for eosid in offer_ids
    }
    file_data_event_groups_by_table = {
        k: v.groupby('EventEOSId')
        for k, v in file_data.items() if 'EventEOSId' in v
    }
    file_data_by_event = {
        eosid: {
            k: file_data_event_groups_by_table[k].get_group(eosid) if eosid
            in file_data_event_groups_by_table[k].groups else pd.DataFrame()
            for k in file_data_event_groups_by_table
        }
        for eosid in event_ids
    }
    return file_data_by_tour, file_data_by_event
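The partitioning pattern above in a minimal standalone form: groupby splits each table by key, and get_group raises KeyError for absent keys, hence the membership check against .groups.

import pandas as pd

df = pd.DataFrame({'TourEOSId': [1, 1, 2], 'value': ['a', 'b', 'c']})
groups = df.groupby('TourEOSId')
tour_1 = groups.get_group(1) if 1 in groups.groups else pd.DataFrame()  # two rows
tour_9 = groups.get_group(9) if 9 in groups.groups else pd.DataFrame()  # empty frame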
Example #6
 def get_populated_fields(data):
     output = ObjDict()
     for p, d in data.items():
         if len(d) == 0:
             continue
         output[p] = set()
         object_name = session.get_object_name(d[0].Id)
         # fetch the object description once instead of three times
         desc_fields = session.get_object_description(object_name).fields
         editable_fields = [
             item['name'] for item in desc_fields
             if item['calculated'] is False and item['nillable'] is True
         ]
         default_false_fields = [
             item['name'] for item in desc_fields
             if item['type'] == 'boolean' and item['defaultValue'] is False
         ]
         default_true_fields = [
             item['name'] for item in desc_fields
             if item['type'] == 'boolean' and item['defaultValue'] is True
         ]
         for r in d:
             for f in editable_fields:
                 if f in r and r[f] is not None:
                     output[p].add(f)
             for f in default_false_fields:
                 if f in r and r[f] is not False:
                     output[p].add(f)
             for f in default_true_fields:
                 if f in r and r[f] is not True:
                     output[p].add(f)
     return output
Example #7
def get_costing_data_by_tour(re_run):
    if re_run:
        uk.extract_onsale_file_data(sf,
                                    multi=True,
                                    create_combined_file=True,
                                    create_event_files=True,
                                    ask_to_regenerate_files=re_run)
    onsale_data = uk.get_cached_combined_file(
        loc.uk_onsale_migration_combined_pickle)
    offer_ids = onsale_data.Tour__c['EOSId__c'].tolist()
    onsale_data_tour_groups_by_table = {
        k: v.groupby('TourEOSId')
        for k, v in onsale_data.items()
    }
    costing_data_by_tour = {
        eosid: ObjDict({
            k: onsale_data_tour_groups_by_table[k].get_group(eosid) if eosid
            in onsale_data_tour_groups_by_table[k].groups else pd.DataFrame()
            for k in onsale_data
        })
        for eosid in offer_ids
    }
    # onsale_data_event_groups_by_table = {
    #     k: v.groupby('EventEOSId')
    #     for k,v in onsale_data.items()
    # }
    # costing_data_by_event = {
    #     eosid: {
    #         k: onsale_data_event_groups_by_table[k].get_group(eosid) if eosid in onsale_data_event_groups_by_table[k].groups else pd.DataFrame()
    #         for k in onsale_data
    #     }
    #     for eosid in event_ids
    # }
    return costing_data_by_tour
Example #8
 def from_xml(self, file_path=None):
     file_path = file_path or (self.source_file_path
                               if self.source_file_type == 'xml' else None)
     self.xml_file_path = os.path.abspath(file_path)
     with open(file_path, 'r') as f:
         self.tree = ObjDict.deepclone(xmltodict.parse(f.read()))
     self._meta_is_dirty = True  # assumption: flag the reloaded tree as dirty
     return self.tree
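from_xml delegates parsing to xmltodict, which maps elements to dict keys and repeated elements to lists; a minimal illustration:

import xmltodict

tree = xmltodict.parse('<root><item>1</item><item>2</item></root>')
assert tree['root']['item'] == ['1', '2']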
Example #9
 def get_recordtype_map(self, key=('SobjectType', 'Name')):
     cname = 'get_recordtype_map'
     assert isinstance(key, tuple), "Key parameter must be a tuple"
     if cname not in self.cache:
         self.cache[cname] = self.sf.select("SELECT * FROM RecordType", mute=True, mode='simple')
     return ObjDict({
         tuple([item[s] for s in key]): item
         for item in self.cache[cname]
     })
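Usage sketch (object and record-type names are hypothetical): the map is keyed by tuples built from the key parameter.

# rt_map = session.get_recordtype_map()
# rt_map[('Event__c', 'Touring')].Id        # default ('SobjectType', 'Name') key
# session.get_recordtype_map(key=('Id',))   # a single-field tuple key also works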
Example #10
def finalize(output):
    threading.wait()

    for obj, records in output.items():
        if len(records) == 0:
            continue
        object_name = session.get_object_name(records[0].Id)
        object_desc = session.get_object_description(object_name)
        for record in records:
            for child_relationship in object_desc['childRelationships']:
                record[child_relationship['relationshipName']] = []

    for obj, records in output.items():
        if len(records) == 0:
            continue
        object_name = session.get_object_name(records[0].Id)
        object_desc = session.get_object_description(object_name)
        lookup_fields = [
            ObjDict(field) for field in object_desc['fields']
            if field["type"] == "reference"
        ]

        for field in lookup_fields:
            for record in records:
                if record[field.name] is not None and record[field.name] in record_map:
                    parent_record = record_map[record[field.name]]
                    # Link parent record to child record
                    record[field.relationshipName] = parent_record
                    # Link child record to parent record
                    parent_record_object_name = session.get_object_name(
                        parent_record.Id)
                    child_relationship_name = [
                        item for item in session.get_object_description(
                            parent_record_object_name)['childRelationships']
                        if 'childSObject' in item and item['childSObject'] ==
                        object_name and item['field'] == field.name
                    ]
                    if len(child_relationship_name) == 1:
                        parent_record[child_relationship_name[0]['relationshipName']].append(record)

    for event in output.events:
        event.deals = event.Deals__r
        # event.bonusdetails = event.Deals__r.BonusDetails__r
        # event.stepups = event.Deals__r.ArtistRetroStepUpDetails__r
        event.ticketscales = event.TicketScales__r
        event.deductions = event.Deductions__r
        event.ledgerentrybreakouts = event.LedgerEntryBreakouts__r
        event.expenses = [
            item for item in event.ledgerentrybreakouts
            if item.LedgerEntry__r.RecordType.Name == 'Expenses'
        ]
        event.ancillaries = [
            item for item in event.ledgerentrybreakouts
            # assumption: ancillaries are the breakouts that are not expenses
            if item not in event.expenses
        ]
Example #11
def main():

    psdev = Salesforce_API('*****@*****.**')
    sit = Salesforce_API('*****@*****.**')
    lne = Salesforce_API('*****@*****.**')
    session = lne
    session.save_record_snapshot_on_select = True
    
    tours = session.select("SELECT Id, TourName__c FROM Tour__c WHERE LastModifiedDate >= LAST_WEEK", contentType='JSON')
    tourlegs = session.select("""
        SELECT Id, Tour__c, LegName__c, Order__c, TicketScalePriceLevels__c 
        FROM TourLeg__c 
        WHERE Tour__c IN ('{}')
        ORDER BY Tour__c, Order__c ASC
    """.format("','".join([item.Id for item in tours])), contentType='JSON')
    ticketscales = session.select("""
        SELECT Id, Event__r.TourLeg__r.Tour__r.TourTitle__c, Event__r.TourLeg__r.LegName__c, Event__r.EventName__c, Event__r.TourLeg__c, Event__c, Type__c, Label__c
        FROM TicketScale__c 
        WHERE Event__r.TourLeg__r.Tour__c IN ('{}')
    """.format("','".join([item.Id for item in tours])), contentType='JSON')

    issues = []

    for tourleg in tourlegs:
        tourleg.ticketscales = [item for item in ticketscales if item.Event__r.TourLeg__c == tourleg.Id]
        labels = None if tourleg.TicketScalePriceLevels__c is None else ObjDict.deepclone(json.loads(tourleg.TicketScalePriceLevels__c))
        label_map = {item['type']: item for item in labels} if labels is not None else None
        for ts in tourleg.ticketscales:
            config = label_map[ts.Type__c] if labels is not None and ts.Type__c in label_map else None
            ts.tsOriginalLabel = ts.Label__c
            ts.tourLegLabel = config.label if config is not None else None
            if config is not None and config.type != config.label and ts.Label__c is None:
                issues.append({'ts':ts, 'issue': "Ticket Scale missing label when custom label is set"})
                ts.Label__c = config.label
            elif config is not None and ts.Label__c != config.label and ts.Label__c is not None:
                issues.append({'ts':ts, 'issue': "Ticket Scale label doesn't match Tour Leg Type-Label"})
                ts.Label__c = config.label
            elif config is None and ts.Label__c is not None:
                if ts.Label__c == ts.Type__c:
                    issues.append({'ts':ts, 'issue': "Tour Leg has no Type-Label maps, but Ticket Scale has a value in Label__c that matches Type__c"})
                else:
                    issues.append({'ts':ts, 'issue': "Tour Leg has no Type-Label maps, but Ticket Scale has a custom value in Label__c that is different than Type__c"})
                    # ts.Label__c = None # Need to use simple api
        tourleg.labels = labels
    print('\n'.join([item['issue'] for item in issues]))
    
    if len(issues) > 0:

        pdh.multiple_df_to_excel({'Sheet1': pd.DataFrame(pdh.flatten(issues))}, 'Data Issues - TS Label__c.xlsx')

        session.add_bypass_settings()
        session.update(ticketscales)
        session.remove_bypass_settings()

    return
Example #12
def get_file_data(filepaths, multi=False):

    copromotersmap = {item.Name: item for item in copromoters.result()}

    # dfs_ledgerentry = []
    # dfs_ledgerentrybreakout = []
    # for path in filepaths:
    #     try:
    #         xlsx_file = pd.ExcelFile(path)
    #         sheets = xlsx_file.sheet_names
    #         le_sheet = [s for s in sheets if s in ('Ledger Entry Export', 'Ledger Entry Report', 'Ledger entry export')][0]
    #         leb_sheet = [s for s in sheets if s in ('Ledger Entry Breakout Export', 'Breakout export')][0]
    #         le = pd.read_excel(xlsx_file, sheet_name=le_sheet)
    #         leb = pd.read_excel(xlsx_file, sheet_name=leb_sheet)
    #         le['Source_File'] = os.path.split(path)[-1]
    #         leb['Source_File'] = os.path.split(path)[-1]
    #         dfs_ledgerentry.append(le)
    #         dfs_ledgerentrybreakout.append(leb)
    #     except Exception as e:
    #         print(f'\n## Could not parse {path}:\n{e}\nIf list index out of range, then a sheet is missing')
    

    all_data = ObjDict()
    if multi:
        params = [
            (
                path
                , expenses_custom_metadata.result()
                , copromotersmap
            )
            for path in filepaths
        ]
        # params = [
        #     (
        #         dfs_ledgerentry[i].fillna('').query("`LedgerEntry__c.SourceSystemId__c` != ''")
        #         , dfs_ledgerentrybreakout[i].fillna('')
        #         , expenses_custom_metadata.result()
        #         , copromotersmap
        #     )
        #     for i in range(len(dfs_ledgerentry))
        # ]
        pool = multiprocessing.Pool()
        results = pool.starmap(get_file_data_inner, params)
        pool.close()
        pool.join()
    else:
        results = [
            get_file_data_inner(path, expenses_custom_metadata.result(), copromotersmap)
            for path in filepaths
        ]
        # df1 = pd.concat(dfs_ledgerentry).fillna('').query("`LedgerEntry__c.SourceSystemId__c` != ''")
        # df2 = pd.concat(dfs_ledgerentrybreakout).fillna('')
        # return get_file_data_inner(df1, df2, expenses_custom_metadata.result(), copromotersmap)
    for obj in results[0]:
        all_data[obj] = pd.concat([res[obj] for res in results if res is not None])
    return all_data
Example #13
def get_datatype(field):
    output = ObjDict({'datatype': field.type, 'info': ''})
    field_type = field.type  # avoid shadowing the built-in type()

    if field_type in ['string', 'textarea']:
        output.datatype = field.extraTypeInfo if field.extraTypeInfo == 'richtextarea' else field_type
        output.info = field.length
    elif field_type in ['double', 'currency', 'percent']:
        output.info = '({},{})'.format(field.precision - field.scale,
                                       field.scale)
    elif field_type in ['reference']:
        output.info = '({})'.format(', '.join(field.referenceTo))

    if field.calculated is True and field.calculatedFormula is None:
        output.info += ' Rollup field'

    # if type in ['string','textarea']:
    #     type = field.extraTypeInfo if field.extraTypeInfo == 'richtextarea' else type
    #     return '{} ({})'.format(type, field.length)
    # elif type in ['double','currency','percent']:
    #     return '{} ({},{})'.format(type, field.precision - field.scale, field.scale)
    # elif type in ['reference']:
    #     return '{} ({})'.format(type, ', '.join(field.referenceTo))
    # elif type in ['boolean','picklist','multipicklist']:
    #     return '{}'.format(type)
    # else:
    #     return type
    return output
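Usage sketch with a hypothetical currency field description (ObjDict stand-in assumed):

field = ObjDict({'type': 'currency', 'precision': 18, 'scale': 2,
                 'calculated': False, 'calculatedFormula': None})
result = get_datatype(field)
assert result.datatype == 'currency' and result.info == '(16,2)'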
Example #14
def read_excel(file_path, sheets=None, **kwargs):
    xlsx = pd.ExcelFile(file_path)
    if sheets is None:
        sheets = xlsx.sheet_names
    elif isinstance(sheets, str):
        sheets = [sheets]
    missing_sheets = [
        sheet for sheet in sheets if sheet not in xlsx.sheet_names
    ]
    assert not missing_sheets, f'The following sheets are missing from XLSX file "{file_path}"\n{missing_sheets}'
    return ObjDict(
        {sheet: pd.read_excel(xlsx, sheet, **kwargs)
         for sheet in sheets})
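Usage sketch (file and sheet names are hypothetical); the ObjDict return lets sheets be read as attributes:

# data = read_excel('workbook.xlsx', sheets=['Events', 'Deals'])
# data.Events.head()
# data['Deals'].shape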
Example #15
    def to_xml(self, file_path=None):
        file_path = file_path or self.xml_file_path

        copy = ObjDict.deepclone(self.tree)
        self.fix_quotes(copy, 'GlobalValueSet.description')
        self.fix_quotes(copy, 'GlobalValueSet.customValue.fullName')
        self.fix_quotes(copy, 'GlobalValueSet.customValue.label')

        s = xmltodict.unparse(copy,
                              encoding='UTF-8',
                              short_empty_elements=True,
                              pretty=True,
                              indent='    ') + '\n'
        s = s.replace('&quot;', '"').replace('&apos;', "'")
        if file_path is not None:
            dir.create_file(file_path, s)
        return s
Example #16
def get_object_fields(session, metadata):
    objects = ObjDict.deepclone(session.get_org_description()['sobjects'])
    object_descs = {
        obj.name: threading.new(session.get_object_description, obj.name)
        for obj in objects
        if obj.custom is True
        or obj.name in ['Account','Contact']}
    
    all_fields = []
    for object_name, desc in object_descs.items():
        # desc = session.get_object_description(object_name)
        fields = pdh.flatten(desc.result().fields)
        for f in fields:
            f['ObjectName'] = object_name
        all_fields.extend(fields)
    df = pd.DataFrame(all_fields)
    get_object_fields.dataset_name = 'Object Fields'
    get_object_fields.merge_fields = ['ObjectName', 'name']
    return df
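Several examples call threading.new(fn, *args) and then .result() or .wait(); that is a custom wrapper, not the stdlib threading module. A rough stand-in for the submit/result part with concurrent.futures (the .wait() and .then() chaining seen elsewhere is assumed, not reproduced):

import concurrent.futures

_executor = concurrent.futures.ThreadPoolExecutor()

def new(fn, *args, **kwargs):
    # Future.result() blocks until fn finishes, matching how the examples use it
    return _executor.submit(fn, *args, **kwargs)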
Example #17
def get_object_picklist_options(session, metadata):
    objects = ObjDict.deepclone(session.get_org_description()['sobjects'])
    object_descs = {
        obj.name: threading.new(session.get_object_description, obj.name)
        for obj in objects
        if obj.custom is True
        or obj.name in ['Account','Contact']}
    
    all_picklist_options = []
    for object_name, desc in object_descs.items():
        # desc = session.get_object_description(object_name)
        fields = [f for f in desc.result().fields if f.type in ['picklist','multipicklist']]
        for f in fields:
            for p in f.picklistValues:
                p['ObjectName'] = object_name
                p['Field Name'] = f.name
            all_picklist_options.extend(f.picklistValues)
    df = pd.DataFrame(all_picklist_options)
    get_object_picklist_options.dataset_name = 'Object Picklist Options'
    get_object_picklist_options.merge_fields = ['ObjectName', 'Field Name', 'label']
    return df
Example #18
 def get_multilookup_object_fields(self, object_name, fields_to_lookup):
     result = ObjDict()
     root_fields = self.sf.get_object_fields(object_name)
     root_relationships = self.sf.get_object_fields(object_name, 'lookupFieldsRelationshipName')
     for field_name in fields_to_lookup:
         if field_name in root_fields:
             result[field_name] = root_fields[field_name]
             continue
         curr_object = object_name
         curr_fields = root_fields
         curr_relationships = root_relationships
         for snippet in field_name.split('.'):
             relation = curr_relationships.get(snippet, None)
             rel_field = curr_fields.get(snippet, None)
             if relation:
                 curr_object = relation.referenceTo[0]
                 curr_fields = self.sf.get_object_fields(curr_object)
                 curr_relationships = self.sf.get_object_fields(curr_object, 'lookupFieldsRelationshipName')
             elif rel_field:
                 result[field_name] = rel_field
     return result
Example #19
def parse_query(query):
    query = str(query).replace(u'\xa0', u' ')  #Clean up bad space character
    query = query.strip()
    splitlines = query.split('\n')
    query = '\n'.join(s for s in splitlines
                      if s.strip().startswith('--') is False)

    regex = re.compile(
        r'SELECT|[A-Z]+\([A-Z\d_]+\)\s+FROM|(\([^)]+\)|([A-Z]+)\([A-Z\d_]+\)(?:\s+([A-Z\d_]+))?|[A-Z\d_\.]+|\*(?:.([A-Z]+))?)',
        re.IGNORECASE)
    where_clause_regex = re.compile(
        r'\([^)]+\)[\s\S]|FROM\s[A-Z\d_]+|(WHERE[\s\S]+)', re.IGNORECASE)
    fields = []
    unique_fields = {}
    subqueries = []
    where_clause = None
    for res in re.finditer(regex, query):
        group0, group1, group2, group3, group4 = res.group(0, 1, 2, 3, 4)
        if group1 == 'FROM' or group0.upper().endswith('FROM'): break
        if group1 is None: continue

        out = ObjDict({
            'name': group1,
            'is_star': group1 == '*',
            'star_option': group4,
            'is_subquery': group1.startswith('('),
            'is_aggregate_function': group2 is not None,
            'aggregate_function': group2,
            'aggregate_label': group3,
        })
        fields.append(out)
        if out.is_subquery: subqueries.append(out)
        if not out.is_star: unique_fields[group1] = None
    for res in re.finditer(where_clause_regex, query):
        group1 = res.group(1)
        if group1 is None: continue
        where_clause = group1

    # assumption: callers want the parsed pieces back
    return ObjDict({
        'fields': fields,
        'unique_fields': list(unique_fields),
        'subqueries': subqueries,
        'where_clause': where_clause,
    })
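A usage sketch, assuming the reconstructed return value above:

# parsed = parse_query("SELECT Id, Name FROM Event__c WHERE Year__c > 2020")
# parsed.unique_fields   # e.g. ['Id', 'Name']
# parsed.where_clause    # e.g. "WHERE Year__c > 2020"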
Example #20
def get_onsale_data():
    data = {
        'Offer':
        sql.query("""
            SELECT OfferId, ArtistName, OfferStatusName, Company, OracleCode
                , FORMAT(MIN(ShowDate), 'yyyy-MM-dd') AS FirstDate
                , FORMAT(MAX(ShowDate), 'yyyy-MM-dd') AS LastDate
                , COUNT(*) AS ShowCount
                , SUM(CAST(PostponedDateTBC AS INT)) AS PostponedDateTBCShows
            FROM vwEOSShow
            WHERE (ShowDate>=GetDate() OR PostponedDateTBC=1)
            AND CountryId = 1
            AND OfferStatusName IN ('Confirmed','On Sale','Settled','Draft')
            GROUP BY OfferId, ArtistName, OfferStatusName, CountryName, Company, OracleCode
            ORDER BY MAX(ShowDate) ASC
        """),
        'ItineraryShow':
        sql.query("""
            SELECT DISTINCT OfferId, ShowId, ShowDate, VenueName, ArtistName, OfferStatusName
            FROM vwEOSShow
            WHERE ShowDate>=GetDate() OR PostponedDateTBC=1
        """),
    }
    return ObjDict({key: pd.DataFrame(val) for key, val in data.items()})
Example #21
def get_metadata_reference_records():

    metadata_reference_url = 'https://lneallaccess-my.sharepoint.com/:x:/g/personal/mike_wishner_lyv_livenation_com/EfcTIOXoayhCvDFq3t-kGEwB4aQc20-iALSiKqu-uLAxqw?e=0JJnGu&download=1'
    # url = 'https://lneallaccess-my.sharepoint.com/personal/mike_wishner_lyv_livenation_com/_layouts/15/download.aspx?UniqueId=e52013f7%2D6be8%2D4228%2Dbc31%2D6adedfa4184c'
    r = requests.get(metadata_reference_url, allow_redirects=True)

    file_name = "./resources/Rome Touring Object Model.xlsx"
    with open(file_name, 'wb') as write_file:
        write_file.write(r.content)

    xlsx_file = xlrd.open_workbook(file_name)
    sheets = xlsx_file.sheet_names()

    output = ObjDict()

    for object_name, tab_name in metadata_tables.items():
        excel_ref_sheet = xlsx_file.sheet_by_name(
            tab_name) if tab_name in sheets else None

        file_records = []
        headers = []
        if excel_ref_sheet is not None:
            headers = excel_ref_sheet.row_values(0)
            for row_num in range(1, excel_ref_sheet.nrows):
                new_row = ObjDict()
                src_row = excel_ref_sheet.row_values(row_num)
                for col_num in range(0, len(headers)):
                    new_row[headers[col_num]] = src_row[col_num]

                new_row.sObject = object_name
                new_row.full_name = new_row.sObject + '.' + new_row.DeveloperName
                file_records.append(ObjDict(new_row))

        if object_name == 'PicklistOption__mdt':
            for item in file_records:
                item.Default__c = "true" if item.Default__c == 1 else "false"
                item.AlwaysShown__c = "true" if item.AlwaysShown__c == 1 else "false"
                item.AllowMultiple__c = "true" if item.AllowMultiple__c == 1 else "false"
                item.GLCode__c = str(item.GLCode__c)[0:5]
        output[object_name] = file_records

    return output
Example #22
def main(dataset_functions, multi, latest_from_cache=False):
    sessions = ObjDict()
    # sessions.dev1 = Salesforce_API('[email protected]')
    # sessions.qa1 = Salesforce_API('[email protected]')
    # sessions.psdev = Salesforce_API('*****@*****.**')
    # sessions.sit = Salesforce_API('*****@*****.**')
    sessions.uat = Salesforce_API('*****@*****.**')
    sessions.lne = Salesforce_API('*****@*****.**')

    # dataset_functions = {
    #     "Global Value Sets": get_global_value_set_options
    #     # , "Object Fields": get_object_fields
    #     # , "Object Picklists": get_object_picklist_options
    #     , 'Metadata Files': get_filebinary
    #     , 'Object Fields': get_object_fields
    #     , 'Object Field Picklists': get_object_field_picklistvalues
    #     # , 'Object List Views': get_object_listviews
    #     , 'Object Validation Rules': get_object_validationrules
    #     , 'Object Record Types': get_object_recordtypes
    #     , 'Object RecType Picklists': get_object_recordtype_picklistvalues
    #     , 'Roles': get_roles
    #     , 'Groups': get_groups
    #     , 'Profiles': get_profiles
    #     , 'Permission Sets': get_permissionsets
    #     , "Sharing Rules": get_sharingrules
    #     , "Custom Metadata": get_custom_metadata
    #     # , ""
    # }
    if latest_from_cache:
        def last_saved(instance):
            print(f'Using last-cached data for {instance}')
            root_path = './resources/ant/'
            folder_path = sorted([f for f in os.listdir(root_path) if f'{instance} retrieve' in f], reverse=True)[0]
            return AntMetadataFolder(root_path + folder_path)
        metadata = {
            instance: threading.new(last_saved, instance)
            for instance, session in sessions.items()
        }
    else:
        metadata = {
            instance: threading.new(metadata_full_pull, session)
            for instance, session in sessions.items()
        }

    # datasets = {
    #     dataset_name: {
    #         instance: (function, session, metadata[instance])
    #         for instance, session in sessions.items()
    #     }
    #     for dataset_name, function in dataset_functions.items()
    # }
    # dataset_key_fields = {
    #     "Global Value Sets": ['GlobalValueSetName', 'valueName']
    #     , 'Metadata Files': ['FileName']
    #     , "Object Fields": ['ObjectName', 'name']
    #     , "Object Picklists": ['ObjectName', 'Field Name', 'label']
    #     , 'Object Fields': ['ObjectName', 'fullName']
    #     , 'Object Field Picklists': ['ObjectName', 'FieldName', 'fullName']
    #     , 'Object List Views': ['ObjectName', 'fullName']
    #     , 'Object Validation Rules': ['ObjectName', 'fullName']
    #     , 'Object Record Types': ['ObjectName', 'fullName']
    #     , 'Object RecType Picklists': ['ObjectName', 'RecordType', 'Picklist', 'fullName']
    #     , 'Roles': ['FileName','name']
    #     , 'Groups': ['FileName','name']
    #     , 'Profiles': ['ProfileName','Attribute','field','application','apexClass','layout','object','apexPage','recordType','tab','name']
    #     , 'Permission Sets': ['PermissionSetName','Attribute','field','application','apexClass','object','apexPage','recordType','tab','name']
    #     , "Sharing Rules": ['RuleType', 'ObjectName.Name', 'fullName']
    #     , "Custom Metadata": ['ObjectName.Name', 'ObjectName', 'DeveloperName']
    # }

    # diff_instances(metadata, sessions, dataset_functions, multi, 'uat', 'psdev')
    diff_instances(metadata, sessions, dataset_functions, multi, 'uat', 'lne')
    # # diff_instances(datasets, dataset_key_fields, 'dev1', 'psdev')
    # diff_instances(datasets, dataset_key_fields, 'dev1', 'qa1')
    # diff_instances(datasets, dataset_key_fields, 'dev1', 'uat')
    # diff_instances(datasets, dataset_key_fields, 'psdev', 'sit')
    # diff_instances(datasets, dataset_key_fields, 'psdev', 'uat')
    # diff_instances(datasets, dataset_key_fields, 'psdev', 'lne')
    threading.wait()
    return
Example #23
def main():
    downloads_folder = dir.get_download_folder()
    files = [(f, os.path.getctime(f))
             for f in dir.listdir(downloads_folder, False, True)
             if f.endswith('.zip') and 'OneDrive_' in f]
    files.sort(key=lambda item: item[1], reverse=True)
    source_zip_file_path = files[0][0]
    timenow = datetime.datetime.now().strftime('%Y-%m-%d %H.%M')
    # evenko_folder_path = '/Users/daniel.hicks_1/Documents/Rome/Rome Downloads/Evenko/'
    # import_zip_path = f'{evenko_folder_path}Templates.zip'
    # import_folder_path = f'{evenko_folder_path}Templates/'
    migration_folder_path = f'/Users/daniel.hicks_1/Documents/Rome/Rome Downloads/Evenko/Migration Source/Evenko Migration {timenow}/'
    with zipfile.ZipFile(source_zip_file_path, "r") as zip_ref:
        zip_ref.extractall(migration_folder_path)

    import_data_path = f'{migration_folder_path}/Historical data'

    # import_folder_path = '/Users/daniel.hicks_1/Documents/Rome/Rome Downloads/Historical data Evenko 2021-08-11'
    def map_sheet_name(v):
        return ('EventDateTime__c' if 'EventDateTime' in v
                else 'Event__c' if 'Event' in v
                else 'Deal__c' if 'Deal' in v
                else 'TicketScale__c' if 'TicketScale' in v
                else 'Deduction__c' if 'Deduction' in v
                else 'LedgerEntry__c' if 'LedgerEntry_' in v
                else 'LedgerEntryBreakout__c' if 'LedgerEntryActive' in v
                else None)

    data = ObjDict({
        # map_sheet_name(f): pd.read_excel(import_data_path+'/'+f)
        map_sheet_name(f): pd.read_csv(import_data_path + '/' + f,
                                       encoding='ISO-8859-1')
        for f in os.listdir(import_data_path) if map_sheet_name(f) is not None
    })
    for obj, df in data.items():
        df.replace('#COMA#', ',', inplace=True, regex=True)
        df.replace('#CHAR13#', '\r', inplace=True, regex=True)  # CHAR13 = carriage return
        df.replace('#CHAR10#', '\n', inplace=True, regex=True)  # CHAR10 = newline
        df.replace('NULL', np.nan, inplace=True)
    data.update(
        {key: pd.DataFrame()
         for key in objects_to_import if key not in data})
    lneaccounts = threading.new(lne.select,
                                "SELECT Id, SourceSystemId__c FROM Account",
                                mute=True)
    uataccounts = threading.new(uat.select,
                                "SELECT Id, SourceSystemId__c FROM Account",
                                mute=True)
    evenkoofficeid = uat.select(
        "SELECT Id FROM Account WHERE RecordType.Name = 'Office' AND Name = 'Evenko'",
        mute=True,
        mode='simple',
        cache_duration=24 * 60)[0].Id
    user_email_mapper = uat.user_mapper(
        'Email',
        return_field='Id',
        where_clause="WHERE UserRole.Name LIKE '%Evenko%'")
    # glaccountsmap = {
    #     item.GLCode__c: item for item in
    #     uat.select("SELECT Id, Name, Type__c, GLCode__c, Category__c FROM GLAccount__c WHERE ActiveFlag__c = True", mute=True, mode='simple', cache_duration=24*60)
    # }
    glaccountsmap = (threading.new(
        uat.select,
        "SELECT Id, Name, Type__c, GLCode__c, Category__c FROM GLAccount__c WHERE ActiveFlag__c = True",
        mute=True,
        mode='simple',
        cache_duration=24 *
        60).then(lambda result: {item.GLCode__c: item
                                 for item in result}))

    # FIXES
    data.Deal__c.Artist__c.replace('Coheadlner', np.nan, inplace=True)
    data.Event__c.rename(columns={'MDAID': 'EvenkoAACode__c'}, inplace=True)

    evt = data.Event__c
    edt = data.EventDateTime__c
    deal = data.Deal__c
    ts = data.TicketScale__c
    ded = data.Deduction__c
    le = data.LedgerEntry__c
    leb = data.LedgerEntryBreakout__c
    # leb = leb[leb['OfferRate__c'].str.isnumeric().fillna(False)]

    evt['Office__c'] = evenkoofficeid
    # evt['Promoter__c'] = evt['Promoter__r'].apply(lambda x: user_email_mapper(x.lower()) if type(x) is str else None)
    # evt['ProductionManager__c'] = evt['ProductionManager__r'].apply(lambda x: user_email_mapper(x.lower()) if type(x) is str else None)
    # evt['TicketingManager__c'] = evt['TicketingManager__r'].apply(lambda x: user_email_mapper(x.lower()) if type(x) is str else None)

    # pdh.to_excel(data, import_folder_path+'/'+'Combined.xlsx')

    # # If only Plan or only Projection Ticket Scale records were provided for an event, then copy the Ticket Scales so that both Plan and Projection are inserted
    # copied_ts_dfs = []
    # for event_id in evt['SourceSystemId__c'].tolist():
    #     evt_ts_df = ts[ts['Event__r.SourceSystemId__c'] == event_id]
    #     plan = evt_ts_df[evt_ts_df['StageType__c'] == 'Plan']
    #     projection = evt_ts_df[evt_ts_df['StageType__c'] == 'Projection']
    #     if len(plan) > 0 and len(projection) == 0:
    #         new = plan.copy()
    #         new['StageType__c'] = 'Projection'
    #         copied_ts_dfs.append(new)
    #     if len(projection) > 0 and len(plan) == 0:
    #         new = projection.copy()
    #         new['StageType__c'] = 'Plan'
    #         copied_ts_dfs.append(new)
    # ts = pd.concat([ts] + copied_ts_dfs)

    if le is not None:
        if 'GLAccount__r.GLCode__c' in le.columns:
            le['GLCode'] = le['GLAccount__r.GLCode__c']
            le['RecordType.Name'] = le['GLAccount__r.GLCode__c'].apply(
                lambda x: glaccountsmap[x].Type__c)
        le['BookingSettlement__c'] = True
        data.LedgerEntry__c = le

    # artistnamesmap = {item.Id:item.Name for item in artistnames.result()}
    # primaryheadlinerdeals = deal.copy()[deal['Type__c'] == 'Primary Headliner']
    # primaryheadlinerdeals['SourceSystemId__c'] = primaryheadlinerdeals['Event__r.SourceSystemId__c']
    # primaryheadlinerdeals['PrimaryHeadlinerArtist__c'] = primaryheadlinerdeals['Artist__c'].apply(lambda x: artistnamesmap[x])
    # primaryheadlinerdeals = primaryheadlinerdeals[['SourceSystemId__c', 'PrimaryHeadlinerArtist__c']]

    # evt = evt.merge(right=primaryheadlinerdeals, how='left', on='SourceSystemId__c')

    # Data fixes
    # evt.rename(columns={'Event__c.Venue__c': 'Venue__c'}, inplace=True)
    ded.rename(
        columns={'Event__r.SourceSystemId__c  ': 'Event__r.SourceSystemId__c'},
        inplace=True)
    # deal['RecordType.Name'] = 'Artist'
    # deal['Type__c'] = 'Primary Headliner'
    deal.rename(columns={
        'Deal__c.Agency__c': 'Agency__c',
        'Deal__c.Agent__c': 'Agent__c'
    },
                inplace=True)
    if le is not None and len(le) > 0:
        # le.rename(columns={'GLCode__c': 'GLAccount__r.GLCode__c', 'SourceSystemId__c': 'Event__r.SourceSystemId__c'}, inplace=True)
        le.fillna('', inplace=True)
        le.query("`GLCode__c` != ''", inplace=True)

    event_lookup = {
        item['SourceSystemId__c']: item
        for item in evt.to_dict('records')
    }
    edt['Venue__c'] = edt['Event__r.SourceSystemId__c'].apply(
        lambda x: event_lookup[x]['Venue__c'] if x in event_lookup else '')
    # del data['LedgerEntry__c']

    # event_filter = 'EvenkoDeal-12986'
    # # uat.delete_events(f"SELECT Id FROM Event__c WHERE SourceSystemId__c = '{event_filter}'")
    # evt.query("SourceSystemId__c == @event_filter", inplace=True)
    # edt.query("`Event__r.SourceSystemId__c` == @event_filter", inplace=True)
    # ded.query("`Event__r.SourceSystemId__c` == @event_filter", inplace=True)
    # le.query("`Event__r.SourceSystemId__c` == @event_filter", inplace=True)
    # ts.query("`Event__r.SourceSystemId__c` == @event_filter", inplace=True)
    # deal.query("`Event__r.SourceSystemId__c` == @event_filter", inplace=True)

    # Re-names in order to insert records to UAT instead of PROD
    # if uat.instance == 'uat':
    #     uat_ids_map = {item.Id: item for item in uataccounts.result()}
    #     uat_src_ids_map = {item.SourceSystemId__c: item for item in uataccounts.result()}
    #     def get_uat_id(id):
    #         if id not in uat_ids_map:
    #             if id in uat_src_ids_map:
    #                 return uat_src_ids_map[id].Id
    #             else:
    #                 return ''
    #         return id
    #     evt['Venue__c']       = evt['Venue__c'].apply(get_uat_id)
    #     evt['Office__c']      = evt['Office__c'].apply(get_uat_id)
    #     deal['Artist__c']     = deal['Artist__c'].apply(get_uat_id)
    #     deal['CoPromoter__c'] = deal['CoPromoter__c'].apply(get_uat_id)
    #     deal['Agency__c']     = deal['Agency__c'].apply(get_uat_id)
    #     deal['Agent__c']      = deal['Agent__c'].apply(get_uat_id)

    del data['LedgerEntry__c']
    # Model to populate all remaining fields
    model = SalesforceLiveNationModelCompute(
        uat,
        **data,
        set_null_datasets_to_empty_list=True,
    )
    computed = model.compute_all(
        keep_exception_columns=True,
        to_compute={
            'Event__c': {
                'RecordTypeId', 'ShowCount__c', 'ShowCountWithDates__c',
                'EventFirstDate__c', 'EventLastDate__c', 'EventYear__c',
                'PrimaryHeadlinerArtist__c', 'Artists__c', 'IsTouringApp__c',
                'OfficeName__c', 'Division__c', 'Geography__c',
                'PrimaryVenueOffice__c', 'VenueOwnership__c',
                'HiddenVenueOffice__c'
            }
        })
    print(f'Failed: {computed.failed}')
    # if 'LedgerEntry__c' in computed.data2:
    # del computed.data2.LedgerEntry__c['GLAccount__c']

    pdh.to_excel(computed.data2,
                 migration_folder_path + '/' + 'Combined Import Data.xlsx')
    os.remove(source_zip_file_path)

    # Filters for testing purposes
    # data.Event__c = evt[pd.notnull(evt['Venue__c'])].copy()

    # event_src_ids = set(data.Event__c['SourceSystemId__c'].tolist())
    # for obj, df in data.items():
    #     if 'Event__r.SourceSystemId__c' in df.columns:
    #         data[obj] = df[df['Event__r.SourceSystemId__c'].apply(lambda x: x in event_src_ids) == True].copy()

    # pdh.to_excel(data, import_folder_path+'/'+'CombinedFiltered.xlsx')
    # # Deletion for testing purposes, to have a fresh slate
    # uat.delete_events("SELECT Id FROM Event__c WHERE SourceSystemId__c IN ('{}')".format("','".join(evt['SourceSystemId__c'].tolist())))

    upsert_event_data_to_rome(uat, computed.data2, True)

    # result = uat.create_events(
    #     computed.data2
    #     , {
    #         'Event__c': 'SourceSystemId__c'
    #         , 'EventDateTime__c': 'SourceSystemId__c'
    #     }
    #     , delete_old_child_records=True
    #     , run_fc=False
    # )
    # pdh.to_excel({key:val for key,val in result.errors.items() if len(val) > 0}, import_folder_path+'/'+'Migration Results - ERRORS.xlsx')
    # pdh.to_excel(result.success, import_folder_path+'/'+'Migration Results - SUCCESS.xlsx')
    return
Example #24
def upsert_event_data_to_rome(sf,
                              all_data,
                              delete_events_first=False,
                              **kwargs):
    all_data = ObjDict(all_data)
    all_data.setdefault(None)
    sf.default_mode = 'bulk'
    if delete_events_first:
        event_ids_to_delete = all_data.Event__c['SourceSystemId__c']
        sf.delete_events(
            "SELECT Id FROM Event__c WHERE SourceSystemId__c IN @event_ids_to_delete"
        )
    with sf.bypass_settings():
        internalcopro_deal_data = all_data.Deal__c.query(
            '`RecordType.Name` == "Co-Promoter" and Type__c == "Internal"')
        other_deal_data = all_data.Deal__c.query(
            '`RecordType.Name` != "Co-Promoter" or Type__c != "Internal"')

        tou = threading.new(sf.upsert, 'Tour__c', all_data.Tour__c,
                            'SourceSystemId__c').wait()
        tde = threading.new(sf.upsert, 'TourDeal__c', all_data.TourDeal__c,
                            'SourceSystemId__c').wait()
        leg = threading.new(sf.upsert, 'TourLeg__c', all_data.TourLeg__c,
                            'SourceSystemId__c').wait()
        tos = threading.new(sf.upsert, 'TourOnSale__c', all_data.TourOnSale__c,
                            'SourceSystemId__c')
        evt = threading.new(sf.upsert, 'Event__c', all_data.Event__c,
                            'SourceSystemId__c').wait()
        edt = threading.new(sf.upsert, 'EventDateTime__c',
                            all_data.EventDateTime__c, 'SourceSystemId__c')
        eos = threading.new(sf.upsert, 'EventOnSale__c',
                            all_data.EventOnSale__c, 'SourceSystemId__c')
        de1 = threading.new(sf.upsert, 'Deal__c', internalcopro_deal_data,
                            'SourceSystemId__c').wait()
        de2 = threading.new(sf.upsert, 'Deal__c', other_deal_data, 'Id')
        led = threading.new(sf.upsert_ledgerentries, 'LedgerEntry__c',
                            all_data.LedgerEntry__c, 'SourceSystemId__c', evt,
                            'SourceSystemId__c')
        ded = threading.new(sf.upsert_deductions, 'Deduction__c',
                            all_data.Deduction__c, 'SourceSystemId__c', evt,
                            'SourceSystemId__c')
        ts1 = threading.new(sf.upsert_ticketscales, 'TicketScale__c',
                            all_data.TicketScale__c, 'Id', evt,
                            'SourceSystemId__c')
        led.wait()
        leb = threading.new(sf.upsert, 'LedgerEntryBreakout__c',
                            all_data.LedgerEntryBreakout__c, 'Id')

        sf.run_fc_async([item['Id'] for item in evt.result() if 'Id' in item])

        threads = [var for var in locals().values() if type(var) is Thread]
        jobs = ObjDict({th.result().object_name: th for th in threads})
        for job in jobs.values():
            if len(job.errors) > 0:
                errors_df = pd.DataFrame(job.errors)[['sf_result']]
                print(f'{job.object_name} errors:\n{errors_df}')
        if len(jobs.Event__c.results) < 10:
            print('New Events:')
            print('\n'.join(
                pd.DataFrame(jobs.Event__c.results)['Id'].apply(
                    lambda x: f'https://{sf.simple.sf_instance}/{x}').tolist()
                [:10]))
        sf.default_mode = 'simple'
        return jobs
Example #25
def main():
    # sql = SQL_Server_API(eos_prod_creds)
    # sf = Salesforce_API('*****@*****.**')
    sql = SQL_Server_API(eos_stage_creds)
    sf = Salesforce_API('*****@*****.**')

    sf.bypass_prod_operation_approval()
    delete_tours_first = False
    skip_already_created_tours = False

    # all_21_sept = [37225,31270,31270,22916,22695,53206,52018,52018,54352,52872,52090,51322,51322,50940,42648,20125,22958,29920,23463,22398,19370,19370,22884,28579,26342,35517,38949,38949,38586,35627,33668,28753,31358,35591,39302,35581,22495,52850,44233,44265,44265,51030,51028,43995,41284,41284,43884,42729,53383,49144,33642,40596,38839,41018,41018,44235,44235,37310,48932,50809,49168,39253,39253,39874,39874,46689,42649,42649,40842,51665,42653,49209,50770,40975,51555,51555,51560,35693,31353,31220,31220,37306,30794,23619,23619,38877,38877,30697,31576,37381,38894,32096,32096,33484,33484,30044,34283,23121,19405,22356,22356,23668,24095,25145,28657,19175,23122,23152,20541,20367,23745,23744,22532,23024,23505,25144,20225,20225,22689,20448,20448,20374,20374,30189,54035,54544,40974,41007,31962,33940,33940,34009,39123,22324,22835,22835,22825,22825,22870,23123,23123,23155,32282,52020,37205,37205,43740,53714,48756,50791,50791,39719,52059,52428,51243,51243,51589,50762,50762,47739,38843,51102,45676,45676,49117,48857,48857,51261,48847,41321,40872,50865,43765,41338,38841,44251,41404,41404,45686,44488,44488,35803,40531,41303,40578,42612,40965,43873,43873,40866,38790,38790,40986,40986,40867,40867,40748,40031,40397,39421,37335,37335,40453,37140,37140,22832,22832,39128,37509,34220,39715,38839,37174,37006,39178,31041,34156,39736,37426,37426,35557,35721,35721,31854,37251,37251,32278,38549,38540,39186,38959,32052,37453,40102,38976,37029,37029,39020,31054,31558,37485,31844,35620,33628,33966,30910,30910,30697,31664,31664,30811,30811,31542,31542,31029,33684,31974,31309,31345,31001,30696,30583,28593,30315,23619,23619,24113,24113,28769,30172,30172,28622,23622,23622,23795,23795,23597,23849,23849,30209,25157,30684,23869,23869,23882,30731,23845,23845,23845,25178,25178,24021,24021,22914,25117,22945,23690,25163,29923,25179,25179,28701,28701,30036,28684,28571,24100,23657,23656,23791,26332,22829,30244,28582,24098,24098,23713,23319,23319,28699,23366,24108,23500,22045,22045,22452,22831,22831,22836,22836,20881,22029,23498,22446,20440,20440,22425,19446,19446,20882,20773,20773,22436,19396,20637,20373,20373,20484,20484,22596,20451,19343,19343,18763,18633,20830,22256,54015,51782,53686,51104,51352,40287,51534,48753,51404,51404,48899,49137,49137,40952,45535,49190,49190,51624,51624,40860,42670,44103,52470,45678,40258,43800,43800,41446,39966,39638,40229,40235,37514,39713,37435,38633,37128,40997,39935,39935,37027,37047,34385,38921,37342,39112,38922,35684,35684,30310,30501,30501,30246,30947,30947,31113,31055,22918,30268,28575,24101,24101,25126,23938,23938,23919,22535,22524,22524,22524,20237,20565,28635,28635,29885,18623,31737,39346,39346,39263,52027,52028,41009,44119,44373,49243,50779,51153,51153,37515,31116,22312,24037,24037,29836,29836,22261,52324,40252,40252,39707,39707,31374,45657,49321,49321,53613,54619,40132,41317,49210,35543,51594,53300,24022,35585,39184,20954,49573,48970,48970,39304,39304,23871,45685,43742,43742,20844,20844,20844,34376,23523,23523,22553,22553,23323,28576,22438,39113,30360,30360,34232,34232,50646,51002,39728,22993,22448,22448,26195,20713,22192,20478,52718,53284,54553,39945,40425,40518,48880,39566,40218,51159,50625,38981,43950,51469,52362,52362,41037,41104,44505,49143,51780,52383,50918,40457,40457,39629,39629,38983,38811,43818,51511,49244,49244,43903,40984,41008,41008,35839,35839,49520,49520,49441,49441,32090,31636,35889,38829,33860,31949,33996,28616,28616,31368,31935,30848,33680,35780,37309,37370,30507,30507,31075,34289,37167,33505,34028,34028,40579,34309,38694,38701,39648,31959,33484,33484,35659,34065,40127,32376,
    # 39745,23488,23873,23873,29829,26200,22449,24106,24106,19990,19990,22040,22550,22662,23522,23522,30175,30375,25124,28637,29956,29956,20304,20294,22454,20685,20685,24040,23653,26354,30177,24093,24093,19752,22264,38575,38575,40385,40385,40894,51911,51911,41194,44374,44374,44402,50807,50998,52686,53459,40699,40987,41001,49335,52160,52422,52348,31551,31544,31545,31546,31547,31548,37463,34399,33616,33616,39571,39125,23642,22271,38840,37431,22660,22660,24022,33642,28635,28635,38973,49322,49322,46730,23622,23622,52851,31711,20480,22406,38702,31947,33723,33723,32150,51045,51045,51359,51359,53542,38853,35592,22612,20210,52242,52242,28770,30537,30537,52721,53925,51266,43845,43845,53076,40290,41340,49533,40540,40958,44177,44177,33679,36979,35939,37130,37130,13218,13218,23297,23911,22147,23145,23145,51011,51011,39678,39678,28821,28821,31531,30487,51651,51651,30476,24067,24067,20194,52018,52018,30190,32059,32061,49450,51208,51208,30851,30851,34043,51504,41002,51384,51384,18201,20155,52210,52445,52637,52322,52322,52322,53888,40261,40261,44248,44526,51448,49534,40135,43763,43763,40549,40833,40833,39727,30854,30854,39173,36996,32398,32398,30835,30835,34423,34423,30950,30950,39391,33965,39514,33702,31945,34244,19855,19855,22816,20709,20350,20350,23788,20599,52832,50870,53129,53129,46730,53516,51653,51750,52357,52470,50792,50792,48980,50827,50827,39043,39043,45569,41467,51509,51405,51591,53245,51756,50637,50951,41206,48894,40239,40239,44382,38833,41493,41493,40538,32283,39515,39515,39730,38545,39615,39615,37432,38673,38673,39288,39489,35943,33674,34405,35948,34007,32395,32395,33757,35683,33955,31061,33781,30961,31110,31552,31312,31831,31831,30493,30293,30293,30276,30547,24008,24008,24009,23291,24015,23157,24011,19446,19446,23189,22830,21986,22834,22834,22038,22273,18889,19659,20566,20573,20573,19413,30530]
    # all_22 = [37225,31270,22916,22695,53206,52018,54352,52872,52090,51322,50940,42648,20125,22958,29920,23463,22398,19370,22884,28579,26342,35517,38949,38586,35627,33668,28753,31358,35591,39302,35581,22495,52850,44233,44265,51030,51028,43995,41284,43884,42729,53383,49144,33642,40596,38839,41018,44235,37310,48932,34107,50809,49168,39253,39874,46689,42649,40842,51665,41576,42653,49209,50770,40975,51555,51560,35693,31353,31220,37306,30794,23619,38877,30697,31576,40326,40323,40179,37381,38894,32096,33484,30044,34283,23121,19405,22356,23668,24095,25145,28657,19175,23122,23152,20541,20367,23745,23744,23480,22532,23024,23505,25144,20225,22689,20448,20374,30189,19750,54035,54544,40974,41007,31962,33940,34009,30882,39123,22324,23670,41563,22835,22825,22870,23123,23155,32282,38815,52020,39743,41085,37205,43740,53714,48756,50791,39719,52059,52428,51243,51589,50762,47739,38843,51102,45676,49117,48857,51261,48847,41321,40872,50865,43765,41338,38841,44251,41404,45686,44488,35803,40531,41303,40578,42612,40965,43873,40866,38790,40986,40867,40748,40031,40397,39421,37335,40453,37140,22832,39128,37509,34220,39715,38839,37174,37006,39178,31041,34156,39736,37426,35557,35721,31854,37251,32278,38549,38540,39186,38959,32052,37453,30106,40102,38976,37029,39943,39020,31054,31558,37485,31844,35620,33628,33966,30910,30697,31664,30811,31542,31029,33684,31974,31309,31345,31001,30696,30583,28593,30315,23619,24113,28769,30172,28683,28622,23622,23795,23597,23849,30209,25157,30684,23869,23882,23129,30731,23845,25178,24021,22118,22914,25117,22945,23690,25163,23337,29923,25179,19750,28701,30036,28684,28571,24100,23657,23656,23791,26332,22829,30244,28582,24112,24098,23713,23319,28699,23366,24108,23500,22045,22452,22831,22836,20881,22029,23498,22260,22446,20440,22425,19446,20882,20773,22436,20447,19560,19396,20637,20373,19392,20484,22596,20451,19343,18763,18633,20830,22256,54015,51782,53686,51104,51352,40287,51534,48753,51404,48899,49137,40952,45535,49190,51624,40860,42670,44103,52470,45678,40258,43800,41446,39966,39638,40229,40235,37514,39713,37435,38633,37128,40997,39935,37027,37047,34385,38921,37342,39112,38922,35684,30310,30501,30246,30947,31113,31055,22918,30268,28575,24101,25126,23938,23919,22535,22524,20237,20565,28635,29885,20777,18623,31737,39346,39263,30882,52027,52028,41009,44119,44373,49243,50779,51153,37515,31116,22312,24037,29836,22261,52324,40252,39707,31374,45657,49321,53613,54619,40132,41317,34107,49210,35543,51594,53300,24022,35585,39184,20954,49573,48970,39304,23871,45685,43742,20844,20764,34376,23523,22553,23323,28576,22438,39113,30360,34232,50646,51002,39728,22993,22448,26195,20713,22192,28564,20478,52718,53284,54553,39945,40425,39700,40518,48880,39566,40218,51159,50625,38981,43950,51469,52362,41037,41104,44505,49143,51780,34107,52383,50918,40457,39629,38983,38811,43818,51511,49244,43903,40984,41008,35839,49520,49441,32090,31636,35889,38829,33860,31949,33996,28616,31368,31935,30848,32065,33680,35780,37309,37370,30507,31075,34289,37167,33505,34028,40579,34309,38694,38701,39648,31959,33484,35659,34065,40127,32376,39745,23488,23873,29829,26200,22449,24106,19990,22040,20925,22550,22662,23522,30175,30375,25124,28637,29956,20304,20294,22454,20685,24040,23653,26354,30177,24093,19752,22264,38575,40385,40894,51911,41194,44374,44402,50807,50998,52686,53459,40699,40987,41001,49335,52160,52422,52348,31551,31544,31545,31546,31547,31548,37463,34399,33616,39571,39125,23642,23595,22271,38840,37431,22660,24022,33642,28635,38973,49322,39961,46730,23622,52851,31711,20480,22406,38702,51354,39424,31947,33723,32150,51045,51359
    # ,53542,38853,35592,22612,20210,52242,28770,30537,41248,52721,53925,51266,43845,53076,40290,41340,49533,40540,40958,44177,33679,36979,35939,37130,13218,23297,23911,22147,23145,51011,39678,28821,31531,30487,51651,30476,24067,20194,52018,30190,32059,32061,49450,51208,30851,34043,41468,51504,41002,51384,18201,20155,52210,52445,52637,52322,53888,40261,44248,44526,51448,49534,40135,43763,40549,40833,39727,30854,39173,36996,32398,30835,34423,30950,39391,33965,39514,33702,31945,34244,19855,22816,20709,20350,23788,20599,52832,50870,53129,46730,53516,51653,51131,51750,52357,52470,50792,48980,50827,39043,45569,48942,41467,51509,51405,51591,53245,51756,50637,50951,41206,48894,39216,40239,44382,38833,41493,40538,32283,39515,39730,40125,38545,39615,37432,38673,39288,39489,35943,33674,31566,34405,35948,34007,32395,33757,35683,33955,31061,33781,30961,31110,31552,31312,31831,30493,30293,30276,30547,22088,24008,24009,23291,24015,23157,24011,19446,23189,22830,19750,21986,22834,19555,22038,22273,18889,19659,20566,20573,19413,39193,30530,20474,30573,49546]
    # lewis_1102 = [22829,22831,22660,22662,22830,22695,22816,22495,22524,22596,22535,30177,30189,30268,30293,30310,30315,30360,30476,30487,30501,30530,30696,30583,30276,30684,30493,30209,30507,30244,30375,30547,30246,28657,28701,28753,28821,29829,29920,30172,30175,29923,29836,28684,29885,28699,22832,19659,23463,23505,23522,20155,19370,19396,19413,23323,23498,20194,23488,19446,19405,19752,20125,22449,22452,22454,23157,23189,23795,23882,23919,23938,24015,24021,23668,23690,24022,30794,30811,23713,23791,23291,23319,24067,30854,30848,22045,22264,22356,22398,22406,22446,24008,30731,23744,23745,23788,24037,24040,30910,22448,23845,23656,23657,23297,22261,22436,23849,24009,22273,23911,22438,24011,22192,22312,22324,24093,20350,20367,13218,18633,18763,18201,18623,20374,20882,22029,33781,33860,34007,40952,34043,41009,41018,34309,20713,20830,20881,33684,33940,41007,35581,34220,34232,20954,41194,34244,34283,33668,34009,41037,41284,34385,34399,21986,34289,34405,33674,34423,22038,40958,40965,40975,33955,34028,35543,28616,28622,33679,35557,33680,33757,33965,40974,40986,41206,40987,40997,33966,41001,33996,18889,20448,20451,20478,20484,20541,20480,20565,20440,32283,23873,32395,33616,33628,31962,31974,32096,32278,32376,23869,32398,41303,31959,32090,32052,39874,52362,52445,52637,52686,52718,52721,52832,52357,52850,52383,52422,39745,52851,39935,52428,38877,38894,51624,51665,51780,51782,51911,38540,38545,38586,38673,38701,38790,38811,38829,38833,38840,38843,38853,52242,52028,38694,51651,52027,52059,52090,52160,52322,52210,38633,38839,38549,38841,38921,38959,38575,38922,51589,51750,52348,51756,51594,51653,52324,39253,36996,39186,50791,50792,50865,50870,50951,50998,51002,51045,51102,51159,39288,37027,37029,37128,39302,37130,37140,39304,37205,51011,51153,51243,51266,39263,39346,51104,51208,37167,37174,39391,39489,51384,50625,50807,51352,50918,51322,50646,50809,50940,50779,40833,40842,40866,40867,40872,40860,40894,39966,40031,40132,40218,40235,40239,39945,40102,40135,40229,53284,53459,53542,53686,53888,54015,54035,54553,54619,40252,40261,40287,53300,53516,41321,41446,53925,54352,43742,52872,43763,43903,43884,43873,44233,54544,31636,44265,53129,53383,31711,44374,53714,45535,45685,48880,53613,38983,37431,48980,49143,48894,53206,49209,53245,39184,28575,28576,25126,25145,25163,25179,26195,26200,26342,28579,26354,28582,24113,25144,25178,26332,28593,25117,28571,23622,23642,23653,23024,22993,23121,46689,37306,37310,43765,43818,43845,37370,37426,44505,37251,43740,37335,44526,45686,37309,45657,37342,37381,45676,43995,44177,44235,44382,44488,30950,42729,48847,48932,48970,44103,31029,42649,43950,41317,41404,41493,30961,48857,44119,44248,42653,48753,30947,44251,42612,48756,44373,48899,41338,42670,41340,23597,22914,22918,22884,20210,20237,35693,35721,38981,35780,35803,35889,51504,35683,51405,51509,51511,51534,20294,35684,39020,49190,49244,49322,20304,35627,51448,51555,51560,35839,51469,51404,38976,39112,39113,39125,39128,39173,39123,35939,35943,35948,36979,31220,31309,24100,24106,31353,31358,40290,40385,40425,31552,31558,31664,24108,40453,40457,31374,31075,31110,31116,24101,31737,31368,24098,40397,40578,35591,35592,31854,24095,40258,40518,31531,40531,40538,40549,40596,35620,40748,31054,35585,31844,31542,31547,31551,31055,31544,31545,31548,40699,49117,49137,49168,49321,49450,49520,49534,49335,49533,49144,49441,20685,20709,39515,39566,39571,39615,39629,20573,20599,39678,39736,39707,39713,39715,39719,39727,39728,39730,39638,39648,37435,37485,37509,37515,37432,37453,37463,37514]
    all_1104 = [
        37225, 31270, 22916, 52018, 42648, 22958, 35517, 38949, 51030, 51028,
        33642, 34107, 41576, 50770, 23619, 30697, 31576, 40326, 40323, 40179,
        33484, 30044, 19175, 23122, 23152, 23480, 22532, 20225, 22689, 19750,
        30882, 23670, 41563, 22835, 22825, 22870, 23123, 23155, 32282, 38815,
        52020, 39743, 41085, 50762, 47739, 51261, 39421, 37006, 39178, 31041,
        34156, 30106, 39943, 30697, 31345, 31001, 23619, 28769, 28683, 25157,
        23129, 22118, 22945, 23337, 19750, 30036, 24112, 23366, 23500, 22836,
        22260, 22425, 20773, 20447, 19560, 20637, 20373, 19392, 19343, 22256,
        52470, 45678, 43800, 37047, 31113, 28635, 20777, 30882, 49243, 34107,
        49210, 49573, 23871, 20844, 20764, 34376, 23523, 22553, 28564, 39700,
        41104, 34107, 40984, 41008, 31949, 31935, 32065, 33505, 40579, 35659,
        34065, 40127, 19990, 22040, 20925, 22550, 25124, 28637, 29956, 44402,
        31546, 23595, 22271, 33642, 28635, 38973, 39961, 46730, 38702, 51354,
        39424, 31947, 33723, 32150, 51359, 22612, 28770, 30537, 41248, 53076,
        40540, 22147, 23145, 30190, 32059, 32061, 30851, 41468, 41002, 30835,
        39514, 33702, 31945, 19855, 46730, 51131, 50827, 39043, 45569, 48942,
        41467, 51591, 50637, 39216, 40125, 31566, 31061, 31312, 31831, 22088,
        19750, 22834, 19555, 20566, 39193, 20474, 30573, 49546
    ]

    offer_ids = all_1104
    # offer_ids = [37225]

    if sf.instance == 'lne' or skip_already_created_tours:
        current_tours = {
            item.EOSId__c for item in sf.select(
                """SELECT EOSId__c FROM Tour__c WHERE EOSId__c <> NULL
                AND IsHistoricalTour__c = False""")
        }
        tours_to_not_import = [
            item for item in offer_ids if str(item) in current_tours
        ]
        if len(tours_to_not_import) > 0:
            print(
                f'Skipping the following tours because they are already in Production: {tours_to_not_import}'
            )
            offer_ids = [
                item for item in offer_ids if str(item) not in current_tours
            ]

    assert len(offer_ids) > 0

    eos_data = uk.query_tours(sql, offer_ids, is_onsale=False)
    if len(eos_data.Tour__c) == 0:
        raise Exception('No Offers to migrate')
    eos_data_with_remapped_eos_ids, remapped_eos_ids = uk.replace_duplicate_eos_ids(
        eos_data)
    eos_data_with_split_headliners, artist_ids_missing_in_rome_by_tour = uk.split_headliner_and_coheadliner(
        sf, eos_data_with_remapped_eos_ids)
    eos_data_with_missing_ids_removed, eos_ids_missing_in_rome, removed_eos_ids_by_tour = uk.remove_eos_ids_missing_in_rome(
        sf, eos_data_with_split_headliners)

    all_missing_eos_ids_by_tour = combine_missing_ids_dicts(
        removed_eos_ids_by_tour, artist_ids_missing_in_rome_by_tour)
    eos_ids_missing_in_rome.update(
        itertools.chain.from_iterable(
            artist_ids_missing_in_rome_by_tour.values()))
    assert len(
        eos_ids_missing_in_rome
    ) == 0 or sf.instance != 'lne', f'Some EOS Ids are missing: {eos_ids_missing_in_rome}\nThe following tours have missing data: {[int(s) for s in all_missing_eos_ids_by_tour]}'

    eos_data_dfs = ObjDict({
        obj: pd.DataFrame(data)
        for obj, data in eos_data_with_missing_ids_removed.items()
    })
    eos_data_with_file_data = uk.merge_eos_data_with_file_data(eos_data_dfs,
                                                               is_onsale=False)

    eos_data_computed = uk.add_computed_fields(sf, eos_data_with_file_data)

    validations(eos_data_computed, eos_ids_missing_in_rome,
                sf.credentials['sandbox'] == 'False')

    threading.new(pdh.to_excel, eos_data_computed.data2,
                  'Migrate EOS Historical Tours.xlsx')
    sf.bypass_prod_operation_approval()
    rome_results = uk.upsert_eos_data_to_rome(
        sf,
        eos_data_computed.data2,
        is_onsale=False,
        delete_tours_first=delete_tours_first)
    tour_results = itertools.chain.from_iterable(
        [job.results for job in rome_results if job.object_name == 'Tour__c'])
    event_results = itertools.chain.from_iterable(
        [job.results for job in rome_results if job.object_name == 'Event__c'])
    # Do NOT Update RomeIds for Offers in EOS, for historical Tours: uk.update_romeids_in_eos
    # Do NOT set Tour Personnel for Historical Tours

    if eos_ids_missing_in_rome:
        missing_eos_id_info = uk.query_by_eos_ids(
            sql, eos_ids_missing_in_rome,
            ['Name', 'FirstName', 'LastName', 'Email', 'EmailAddress'])
        pdh.to_excel(missing_eos_id_info, 'Missing EOS Data.xlsx')
        print(f'Missing EOS Ids in Rome: {eos_ids_missing_in_rome}')

    # tourlegs = sf.select("""
    # SELECT Id
    # FROM TourLeg__c
    # WHERE Tour__r.AppScope__c = 'UK'
    # AND CreatedBy.Name = 'DataMigration User'
    # AND Id NOT IN (SELECT TourLeg__c FROM Event__c)
    # """)
    # sf.delete(tourlegs)

    return
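
# combine_missing_ids_dicts is called above but not shown in this example.
# A minimal sketch, assuming each argument maps a tour id to an iterable of
# missing EOS ids (the name and shapes are inferred from the call sites, not
# taken from the original implementation):
def combine_missing_ids_dicts(*dicts):
    # Merge {tour_id: [missing_eos_ids]} dicts, concatenating the id lists
    # for tours that appear in more than one input.
    combined = {}
    for d in dicts:
        for tour_id, ids in d.items():
            combined.setdefault(tour_id, []).extend(ids)
    return combined
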
def main(sessions, do_fix=False):
    sessions = {
        username: sf
        for username, sf in sessions.items() if username in instances_to_run
    }
    return_list = []
    return_string = ""

    source_control_directory = r'/Users/daniel.hicks_1/Documents/Tower/liveNationSFDC UK/src/customMetadata/'

    url = 'https://lneallaccess-my.sharepoint.com/:x:/g/personal/mike_wishner_lyv_livenation_com/EfcTIOXoayhCvDFq3t-kGEwB4aQc20-iALSiKqu-uLAxqw?e=0JJnGu&download=1'
    r = requests.get(url, allow_redirects=True)

    file_name = "./../Rome Touring Object Model.xlsx"

    write_file = open(file_name, 'wb')
    write_file.write(bytearray(r.content))
    write_file.close()

    xlsx_file = xlrd.open_workbook(file_name)
    sheets = xlsx_file.sheet_names()
    picklist_ref_sheet = xlsx_file.sheet_by_name(
        "Picklist Ref") if "Picklist Ref" in sheets else None

    file_records = []
    headers = []
    if picklist_ref_sheet is not None:
        headers = picklist_ref_sheet.row_values(0)
        for row_num in range(1, picklist_ref_sheet.nrows):
            new_row = dict()
            src_row = picklist_ref_sheet.row_values(row_num)
            for col_num in range(0, len(headers)):
                new_row[headers[col_num]] = src_row[col_num]
            file_records.append(ObjDict(new_row))

    for item in file_records:
        item.Default__c = "true" if item.Default__c == 1 else "false"
        item.AlwaysShown__c = "true" if item.AlwaysShown__c == 1 else "false"
        item.AllowMultiple__c = "true" if item.AllowMultiple__c == 1 else "false"
        item.GLCode__c = str(item.GLCode__c)[0:5]
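
    # The "Picklist Ref" sheet is assumed to look roughly like this (layout
    # inferred from the fields read above; the rows are illustrative):
    #   DeveloperName | Default__c | AlwaysShown__c | AllowMultiple__c | GLCode__c | Ignore?
    #   Genre_Rock    | 1          | 0              | 0                | 40100.0   |
    #   Old_Option    | 0          | 0              | 0                | 40200.0   | Delete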

    # Run for each Salesforce instance
    for username, sf in sessions.items():
        instance = re.search("(.+\\.rome)(?:\\.(.+))?", username,
                             re.IGNORECASE).group(2)
        instance = "lne" if instance == None else instance

        picklist_option_desc = sf.get_object_description("PicklistOption__mdt")
        rome_fields = [item["name"] for item in picklist_option_desc["fields"]]

        rome_records = sf.select_records("""
        SELECT {}
        FROM PicklistOption__mdt
        """.format(",".join(rome_fields)),
                                         mode='simple')

        for item in rome_records:
            item.GLCode__c = str(item.GLCode__c)[0:5]

        new_records = [
            item for item in file_records
            if item["Ignore?"] not in ['Yes', 'Delete'] and item.DeveloperName
            not in [item2.DeveloperName
                    for item2 in rome_records] and item.DeveloperName != ""
        ]
        deleted_records = [
            item for item in rome_records if item.DeveloperName not in [
                item2.DeveloperName for item2 in file_records
                if item2["Ignore?"] != 'Delete'
            ]
        ]
        updated_records = []
        changes = []

        for rome_record in rome_records:
            matching_file_records = [
                item for item in file_records
                if item.DeveloperName == rome_record.DeveloperName
            ]
            if len(matching_file_records) == 0:
                continue
            file_record = matching_file_records[0]

            numChanges = 0
            for field in rome_record:
                if field not in file_record or file_record["Ignore?"] == "Yes":
                    continue
                v1 = str(rome_record[field]).strip()
                v2 = str(file_record[field]).strip()

                if v1 != v2:
                    numChanges += 1
                    changes.append({
                        "DeveloperName": rome_record.DeveloperName,
                        "field": field,
                        "old": v1,
                        "new": v2
                    })
                    if numChanges == 1:
                        updated_records.append(file_record)

        return_list.extend((instance, item) for item in deleted_records)

    if len(return_list) > 0:
        return_string = "Custom Metadata needs to be deleted in some sandboxes:\n"
        return_string += "\n".join([
            "https://lne{}.lightning.force.com/lightning/setup/CustomMetadata/page?address=%2F{}"
            .format("" if instance == "lne" else "--" + instance, item.Id)
            for instance, item in return_list
        ])
    return return_string
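
# A hypothetical driver for the sync above (the usernames and the contents of
# instances_to_run are assumptions for illustration, not part of this example):
# sessions = {u: Salesforce_API(u) for u in ['admin@lne.rome', 'admin@lne.rome.uat']}
# print(main(sessions))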
def get_cached_historical_data():
    xlsx = pd.ExcelFile(f'{folder}Output/HistoricalFileData.xlsx')
    return ObjDict({
        'Tour__c': pd.read_excel(xlsx, 'Tour__c'),
        'Event__c': pd.read_excel(xlsx, 'Event__c'),
    })
Example #28
0
def dfconcerts_get_file_data(top=None):
    xlsx = pd.ExcelFile('/Users/daniel.hicks_1/Documents/Rome/Rome Downloads/UK Migration/DF/DFC Actuals 2019 w DH notes on GLAccount.xlsx')
    df = pd.read_excel(xlsx, 'Data', header=2)
    if top:
        df = df.iloc[:top]
    df = df[~df['Tour__c.EOSId__c'].isna()]
    df['Event__c.EOSId__c'] = pdh.int_to_str(df['Event__c.EOSId__c'])
    df['Tour__c.EOSId__c'] = pdh.int_to_str(df['Tour__c.EOSId__c'])
    glaccounts = pd.read_excel(xlsx, 'GLAccounts').set_index('Column').to_dict('index')

    # tours = df[[f for f in df.columns.values if str(f).startswith('Tour__c.')]]
    # events = df[[f for f in df.columns.values if str(f).startswith('Event__c.')]]

    leb_dfs = []
    le_dfs = []
    event_map = df.set_index('Event__c.EOSId__c').to_dict('index')
    for col, val in glaccounts.items():
        meta = ObjDict(val)
        if meta.IsExpenseLEB:
            leb_df = (
                df[['Event__c.EOSId__c', col]]
                .copy()
                .rename(columns={
                'Event__c.EOSId__c': 'Event__r.EOSId__c',
                })
            )
            leb_df = leb_df[(~leb_df[col].isna()) & (leb_df[col] != 0)]
            leb_df['LedgerEntry__r.SourceSystemId__c'] = leb_df["Event__r.EOSId__c"].apply(lambda x: f'DF-Historical-{x}-{meta.GLCode}')
            leb_df['SourceSystemId__c'] = leb_df["Event__r.EOSId__c"].apply(lambda x: f'DF-Historical-{x}-{meta.GLCode}-{meta.Type__c}')
            leb_df['GLCodePicklist__c'] = meta.GLCode
            leb_df['TouringCategory__c'] = meta.TouringCategory__c
            leb_df['Type__c'] = meta.Type__c
            leb_df['Label__c'] = meta.Type__c
            leb_df['OfferRateType__c'] = 'Flat'
            leb_df['InHouseRateType__c'] = 'Flat'
            leb_df['SettlementRateType__c'] = 'Flat'
            leb_df['OfferRate__c'] = leb_df[col]
            leb_df['InHouseRate__c'] = leb_df[col]
            leb_df['Settlement__c'] = leb_df[col]
            leb_df['SettlementOnly__c'] = False
            leb_df['CurrencyIsoCode'] = 'GBP'
            leb_df.drop(columns=col, inplace=True)
            leb_dfs.append(leb_df)
            if not pd.to_numeric(df[col].dropna(), errors='coerce').notnull().all():
                raise Exception(f'Column {col} contains non-numeric data')

        # A LedgerEntry__c row is always built for this column
        le_df = df[['Event__c.EOSId__c', col]].copy().rename(columns={
            'Event__c.EOSId__c': 'Event__r.EOSId__c',
            col: 'CurrentFlash__c'
        })
        le_df['SourceSystemId__c'] = le_df["Event__r.EOSId__c"].apply(lambda x: f'DF-Historical-{x}-{meta.GLCode}')
        le_df['GLAccount__r.GLCode__c'] = meta.GLCode
        le_df['CurrencyIsoCode'] = 'GBP'

        le_dfs.append(le_df)
        if not pd.to_numeric(df[col].dropna(), errors='coerce').notnull().all():
            raise Exception(f'Column {col} contains non-numeric data')

    leb_df = pd.concat(leb_dfs)
    le_df = pd.concat(le_dfs)
    le_df = (
        le_df.groupby(by=['Event__r.EOSId__c', 'GLAccount__r.GLCode__c', 'SourceSystemId__c'], sort=False)
        .sum()
        .reset_index()
    )

    return ObjDict({
        'All': df,
        'LedgerEntryBreakout__c': leb_df,
        'LedgerEntry__c': le_df,
        'EventMap': event_map,
    })
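
# A quick illustration of the LedgerEntry__c roll-up above: rows sharing an
# event, GL code and SourceSystemId__c are summed into a single
# CurrentFlash__c value (toy data; the ids are illustrative):
def _demo_ledger_entry_rollup():
    toy = pd.DataFrame({
        'Event__r.EOSId__c': ['100', '100'],
        'GLAccount__r.GLCode__c': ['40100', '40100'],
        'SourceSystemId__c': ['DF-Historical-100-40100'] * 2,
        'CurrentFlash__c': [250.0, 750.0],
    })
    rolled = toy.groupby(
        by=['Event__r.EOSId__c', 'GLAccount__r.GLCode__c', 'SourceSystemId__c'],
        sort=False).sum().reset_index()
    assert rolled['CurrentFlash__c'].tolist() == [1000.0]
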
def main():
    # CURRENT KNOWN ISSUES
    # If an event is not marked as a roll date in EOS, but it has different onsales than other events, we are going to mark it as a roll date
    # We have no handling for the "ITB" OfferType. What to do?
    # What to do with Offers which do not have a tour currency of GBP? These are broken. Out of scope?
    # Changed fields: ArtistGuaranteeCurrencyId, LastUpdatedBy, LastUpdatedDateTime, OfferTypeId, OfficeId, OwnerId, PromoterProfitCurrencyId, UpdateId
    # TODO: Map the Artist Guarantee amounts (and artist currency?)
    # POTENTIAL ISSUE: Offer Artist Payments @ 1.00 Exch Rate, for artist currency other than GBP?
    # TODO: Identify problematic tours which are on the list to migrate:
    #   Tours with EOS Ids missing in prod
    #   Tours with tour currency that is not GBP
    #   Tours with all past dates? Finance - need to identify if we will be treating past dates in an onsale tour as historical

    # username = "******"
    # username = '******'

    # sql = SQL_Server_API(eos_prod_creds)
    # sf = Salesforce_API('*****@*****.**')
    sql = SQL_Server_API(eos_stage_creds)
    sf = Salesforce_API('*****@*****.**')

    keep_connection_alive(sf)
    # recs = sf.select("SELECT Id, EOSLastModifiedDate__c, EOSIntegrationSyncDate__c FROM Tour__c WHERE EOSId__c <> NULL", return_type='dataframe')
    # recs['EOSIntegrationSyncDate__c'] = recs['EOSLastModifiedDate__c']
    # sf.update(recs)

    # {'ArtistAgent-974', '2469', 'Artist-22297', 'Artist-22296', 'ArtistAgent-748', 'nan', 'ArtistAgency-2774', 'Artist-19279', '1299'}

    delete_tours_first = True
    skip_already_created_tours = False
    # offer_ids = [54766]
    # offer_ids = [44189]
    # offer_ids = [52474]
    # offer_ids = [54441, 57933, 57965, 58400, 58737, 58050, 57930, 57263, 53728, 55808]

    # all_offer_ids_with_costings = [53981,57933,56534,56602,57906,58892,53556,57278,52720,58451,58490,55051,59093,58432,58803,55163,57965,58400,58737,54711,54297,52356,54441,57424,53846,57707,57276,55166,54006,58070,58406,57406,56517,58029,58073,58114,57273,57256,57337,53176,58802,54555,54980,57350,59121,53943,57996,56116,59140,58492,53909,52453,56561,56469,52474,58741,57923,58404,56813,53911,52477,58824,56527,58115,58050,52693,56321,55513,54326,58875,58121,57013,43748,57404,56537,58032,53890,57242,57843,54573,57788,58839,57894,57165,57992,53449,57006,57957,54134,53617,55138,44189,58560,57930,57469,57263,55224,56487,58842,51264,54069,55469,56957,58820,53816,53870,39597,56518,58821,58905,58739,58430,57279,52382,54888,57713,57030,58757,57403,55486,57847,58039,58505,56449,57773,56117,53683,58762,57086,58813,57943,57000,54932,56525,56998,53612,55222,54639,53722,53847,57037,57878,58691,53568,52697,57417,53944,54554,58770,57338,51663,54039,53728,52605,54206,58405,54195,53739,57972,55808,57291,54020,58606,58953,45648,57206,55741,57255,53602,53986,52427,54847,57709,56381,53623,57223,58086,58493,53812,55823,57011,55139,52419,53500,54443,52021,58804,53163,59151,56567,57269,51776,56173,55745,57024,53825,56440,54255,57167,56169,55405,52109,55466,58504,54208,56412,57959,58777,58423,58055,57186,54256,53182,54181,54633,53824,57418,57792,58794,58812,54634]
    # offer_ids = all_offer_ids_with_costings

    # safe_offer_ids_with_costings = [53981,57933,56534,56602,57906,58892,53556,57278,52720,58451,58490,55051,59093,58803,57965,58400,58737,54711,52356,54441,57424,53846,57276,58070,58406,57406,56517,58029,58073,58114,57273,57337,53176,58802,52453,56561,56469,52474,58741,57923,58404,56813,53911,52477,58824,58050,52693,56321,58121,53617,58560,57930,57469,57263,55224,56487,58842,54069,55469,56957,58820,53816,53870,56518,58905,58430,57279,52382,54888,57713,57030,58757,57403,55486,57847,58039,56449,56117,53683,58762,57086,57943,57000,54932,56525,53612,55222,54639,53722,53847,57037,57878,53568,52697,57417,53944,54554,58770,57338,51663,53728,52605,54206,57972,55808,57291,54020,58953,57206,55741,53986,54847,53825,56440,54255,56169,52109,55466,58504,54208,56412,57959,58777,58423,58055,57186,54256,54181,54633,53824,57418,58794,58812,54634]
    # offer_ids = safe_offer_ids_with_costings
    # offer_ids_with_no_costings = [52015,55146,55487,54925,54837,54766,54561,55451,54838,55458,54382,55134,55033,54743,55624,53591,55538,52725,54501,41498,53204,54815,57025,55588,57253,53328,53864,54926,57145,56889,56399,54243,55131,54662,56909,55164,55796,58088,58041,55161,58508,57354,57292,57863,56871,58455,58487,58417,58522,58523,59133,58998,57970,57852,56885,56869,58445,58447,59085,58868,55589,58774,59114,57886,53410,57887,58890,58823,58756,57939,56886,56874,57940,58510,58518,58519,59079,58450,57873,54249,53680,54313,57941,57911,58898,58771,58107,58858,54965,58833,58834,58017,52629,55168,58056,58798,56867,56959,57014,57034,53631,55313,54530,54307,57384,58427,54581,54593,54216,58092,57197,58089,59003,58091,55816,53530,56454,58809,58151,57527,57919,57907,57849,58801,58743,55599,53904,57739,58825,57869,54874,57741,58165,59127,57711,57039,52852,54975,56588,56436,54226,58429,58955,58395,58081,58064,58805,57925,53923,54386,57736,58537,54897,54200,58876,56106,54833,55440,53858,58191,58639,58909,57720,58118,58399,57968,58065,58456,58796,54203,55590,58453,58420,58443,56523,56425,55148,55123,54209,55477,57366,58002,58066,58896,56560,54350,57860,58507,56450,54199,56470,58817,59112,58431,54883,54611,56815,58144,58521,56509,55557,58440,58036,56862,56411,55402,59002,59158,59104,59142,56533,56566,57920,58410,55586,54198,59030,54207,57910,57271,58067,58108,57867,56400,58845,58763,58828,54080,57875,57876,53201,56520,57227,57184,54800,54221,57423,57201,57408,54522,55912,57230,57264,57457,57786,58690,58085,53815,54337,53203,58018,57331,59144,59083,57493,58793,56589,58035,54817,54161,54401,54622,58844,58530,57035,59152,56505,53202,55159,55584,57433,54769,54879,59162,58598,57884,57311,56519,56260,57730,56834,58123,58038,57169,57723,57877,58996,55119,53632,54759,57218,59080,59089,57989,55943,55806,56579,54540,58075,59134,58448,57297,57732,56906,58811,56424,57444,57127,58444,58795,53494,54556,57915,55735,58991,57848,54409,57528,59090,59081,58994,57936,53457,59136,59109,58877,58879,54830,53492,59153,57842,58037,58641,58826,58854,58259,59115,57855,58841,45568,59150,58740,54954,59157,55467,58850,57846,56456,55544,55457,57974,58764,52804,54803,58077,58019,57232,57219,52676,58843,58829,58558,53134,53538,53855,57021,56577,52962,57850,55760,58766,53687,55819,58792,57355,53881,52957,56865,53158,58765,58761,57987,54310,56866,55231,59145,57262,55117,54786,55797,55798,55800,57053,56868,55799,52902,56863,57328,57329,53826,51729,57185,55661,57892,53998,54818,58001,58779,57284,53892,58871,58884,58889,58886,58872,58900]
    # offer_ids_no_costings_18 = [54895,54638,55461,54840,55160,52733,54375,54240,54964,56595,53616,54779,58479,58480,58495,58496,58473,58895,54499,58497,58435,59177,59233,58498,58499,58753,58117,59163,58754,56383,57675,58109,55515,57529,57893,57369,58442,58768,52584,56582,54642,54315,58744,57198,59201,59286,54887,59278,59269,59231,58989,59254,59274,55313,54664,54307,57018,57859,58776,57293,58993,53358,57772,57414,56310,56219,59222,58475,54826,57714,53021,55587,56864,57916,57162,54377,54359,59197,56256,57845,57865,57415,57243,56455,55944,59168,54517,57719,56526,56596,54604,59271,59261,59221,57928,56903,56557,57674,58008,58897,58723,55046,56538,55764,56604,54493,58952,58903,56468,57270,58667,58424,58456,57922,57826,56591,58005,54589,56430,55583,57089,57771,57856,57888,58083,56446,54636,57728,56524,59284,54230,57166,57984,55053,54967,58848,58506,58282,56114,56441,56431,58816,59132,58507,56416,54785,54620,53867,54282,55807,57800,57912,57742,57839,56442,54641,54277,56594,58469,57981,59225,59226,59253,57333,56218,54426,53482,54909,56531,56968,57150,57726,56162,54541,57426,57917,57287,58501,57737,56504,56405,59165,52462,58663,56216,58470,58840,57298,56513,57471,59227,59161,59148,58525,58126,59199,59203,54800,54212,57056,57216,57201,58031,58869,59189,56414,57782,57182,59264,59171,55598,58742,58491,57207,57882,57990,54388,54911,56444,56472,59234,56574,54254,57718,59220,59232,56828,58822,53692,56593,56420,56206,59244,59257,59267,59194,59159,58095,58142,58425,54373,56543,59172,57299,55776,56522,57971,58967,58902,59178,57991,57724,56217,54876,55047,54612,58116,58411,58167,58520,59170,59241,59169,59218,57456,57260,54371,56254,56255,59242,59146,57978,57356,58815,57787,55349,55911,59182,57163,57073,58051,58472,58030,57412,57191,59116,58082,58090,57840,54423,57738,59166,57999,56541,56178,59117,59249,58494,53496,52982,53312,59175,57187,57889,56423,54719,58068,55961,58441,57442,56506,58152,57500,59237,56996,57202,59111,59243,57960,54765,58867,53177,57007,58020,59282,53519,59108,54717,55082,58760,58797,54204,57962,59262,57909,54801,54867,55453,57862,59107,59235,54592,54777,59193,59245,55198,58028,59135,56486,54609,54809,59216,58166,59185,58052,58800,59105,49139,56395,57927,59200,58788,58345,57857,53735,59276,55008,54570,54799,56170,56528,57967,58883,56438,58906,59275,53466,55763,56516,58736,57979,58143,59122,59154,57286,55552,54534,58990,58827,58449,58113,58488,57793,59123,49521,58418,58511,58799,56153,56422,59103,57866,59095,53715,55347,57272,57161,57986,56421,56094,58080,53733,59266,56572,59155,52083,59259,58759,54335,52986,57851,53286,54211,57790,53433,53737,59260,59240,53167,53743,55414,56510,52459,56558,59209,52896,56445,58076,58806,56121,52884,53823,58901,54961,59191,59187,57020,54848,54413,59213,59212,59224,59211,59214]
    # offer_ids = offer_ids_no_costings_18
    # for_prod = [58504,58794,56469,57959,58842,57713,54441,54206,57186,55741,58050,53728,56449,53617,54069,55466,56169,56517,57418,54639,58802,53816,53870,58905,57206,58820,58406,58404,57000,53568,57037,53176,57273,53846,56602,57291,57847,57923,55486,58073,58812,52453,59093,53722,57906,58741,53847,54847,57030,56534,58953,58560,55469,53986,52605,56957,53911,57424,58029,57263,52356,57933,52474,54208,53825,53612,57086,58423,52382,58451,55222,58892,54554]
    # offer_ids = for_prod[0:5]
    # offer_ids = for_prod
    # dawn_list = [52382,53176,53556,53612,53825,53846,56412,57206,57273,57276,57278,58406,58423,58777]
    # dawn_list_17 = [55138,54039,58821,57269,58430,57037,57279,57792,58839,58820,58406,57206,45648,58050,58953,53825,53612,52382,58423,52021,57086,56567,53846,56412,57403,53500,59140,57273,53176,57242,57276,58777,57923,56813,53556,57278,52477,52697,53182,55486,57957,57878,58073,57256,57337,57338,53683,56487]
    # dawn_list_18 = [53568,53722,53722,53824,53824,52419,54633,54181,53449,55139,54573,57000,54932,57424,54441,56537,56998,56525,56537,56998,54441,57424,57424,56169,53890,54639,57709,54555,56169,52109,56169,57843,53943,52109,54208,54980,57709,54888,54888,55222,57959,55222,56381,57713,57713,57223,57350,57350,57223,57165,57350,57011,57992,59121,58070,59121,57186,58606,57186,56116,58493,54256,57011,57418,54443,56517,56517,57417,58794,58794,53944,53944,56518,58812,54554,54554,58770,57847,57847,58812,58039,58842,58842,54006,54195,58804,58804,53163,53163,59151,56117,54634,58762,58762,58405,53909,56440,56440,51663,53847,57404,57404,54255,56173,57788,58504,56173,54255,57788,58504,57707,57707,57469,57030,55166,53617,57030,58055,57030,57894,57406,57006,57006,58691,58691,58029,58114,54206,54206,53870,54020,58802,58802,53816,54134,52427]
    # dawn_list_19_uat = [58032,58560,57996,54069,58824,58492,58505,57263,55405,44189,53623]
    # dawn_list_19 = [52720,58905,57291,57291,58739,57965,53739,58400,57930,58737,56561,56561,55469,56957,56957,56602,56534,56602,57906,57906,52109,52109,56469,52474,54354,58757,58741,58741,58757,55224,58404,58892,58892,53911,57894,53911,56527,54297,54711,58451,58490,59093,39597,57013,52693,56321,55051,57013,58432,58432,53986,53986,58115,58813,58813,58813,58803,54326,58875,58121,58121,55163]
    # dawn_list_20 = [58032,58560,57996,54069,58824,58492,58505,57263,55405,44189,53623]
    # lewis_list_01 = [59196,59198,59094,59291,59251,59238,59268,59246,59270,58449]
    # dawn_list_02 = [55454,55014,59184,59286,52629,59379,59205,58093,59368,59328,59358,58032,57167,59326,59332,59256,59333,59287,59384,59281,59366,59315,57127,59292,53457,59295,59329,58492,59228,59371,53158,55117,59272,52034,54510,54508]
    # dawn_red_list_02 = [54847,51776,58086,59268,59246,53602,55513]
    # all_remaining_02 = [54625,52936,52019,53423,54341,54605,54845,54760,55157,55137,55357,54631,55353,54590,55616,55124,54889,54107,54381,53874,58087,52592,52922,59286,59294,56539,59278,59231,59274,56814,53875,52874,57368,59205,59379,57585,55352,59222,56568,57028,58810,57254,53757,56453,52825,57002,57295,59358,59328,58428,57958,54846,58421,51541,57870,56221,55147,57400,53446,54000,59321,53619,58610,57727,57108,56564,57982,57938,56447,59293,53866,59219,57275,56583,55165,57881,56172,59223,57179,57708,59323,58419,54376,59196,54623,59326,59336,52819,57734,53357,59332,57282,55464,59318,59291,59322,59251,59385,55455,59283,59256,58885,59333,59287,57334,58561,58855,57783,56545,57041,59303,59384,57985,55350,59281,59364,55783,53227,58069,55528,59366,58584,53900,55189,59380,57175,57858,54576,58458,58426,57983,59315,59252,58159,54802,54385,58457,56140,57087,59327,57178,55759,52478,59279,59280,57924,53729,55404,57729,59306,57854,59390,57785,56275,59305,59143,57386,59292,59388,58838,59324,54797,59361,59268,57966,59190,59250,58836,56448,57419,57716,56508,59317,59393,59295,59330,58481,59314,59300,53910,57770,59110,54822,59329,58927,59391,55140,58071,53605,59082,59285,59215,59087,59141,57721,57733,58778,55474,54721,59316,59228,59360,59290,59248,59246,57861,54197,58856,58156,52376,53332,53614,57413,59311,59230,58482,54038,59091,58559,53157,59371,57769,55480,54446,54003,54910,59386,59265,52695,57045,59301,59374,58808,59369,58010,52042,58141,57791,58446,59398,59270,59359,58775,55532,58531,56515,45559,58738,59195,59247,59363,52985,58503,53882,54149,58852,58468,57170,59375,52726,59272,54868,56443,53744,54311,54899,54900,54898,54903,54904,55143,59149,59387,57280,59373,54338,57407,59210]
    # club_shows_03 = [55160,59286,59294,59231,59278,59274,55352,59222,59358,59328,58897,59321,59293,59326,59336,59291,59318,59283,59256,59333,59094,59303,59384,59281,59366,57787,59315,59327,55513,59279,59305,54717,59324,59317,57857,59316,59360,59311,59374,59398,59359,59270,59363,58806,59387,59373,52356,58032,54847,59322,59287,59292,59295,59332,59385,59306]
    # lewis_list_03 = [51541,55466,55823,53812,59190,59250,59215,59141,59290,59369,55143,52453]
    # all_remaining_03 = [59231,59278,59274,59379,59205,59222,59219,59223,59323,59196,59251,59364,59380,59252,59280,59390,59388,59361,59268,59190,59250,59393,59330,59300,59329,59391,59285,59228,59290,59248,59246,59230,59371,59265,59369,59195,59247,59375,59272,59210]
    # lewis_list_07 = [51541,55466,55823,53812,59190,59215,59290,55143,52453]
    # dawn_list_09 = [55454,55014,59198,52629,58093,53812,51541,57400,55466,55823,52034,51776,43748,53728,55745,57127,55741,53457,58492,59141,53602,57773,54510,53158,56449,55117,51264,52605,55143,54508,59272,59371,59228,59379,59205,59190,59329,59290]
    df_list_09 = [
        55353, 55160, 54107, 54381, 53874, 52922, 59231, 59278, 59274, 53875,
        52874, 57368, 57585, 59222, 56568, 57028, 58810, 57254, 53757, 52825,
        57002, 57295, 58897, 57958, 54846, 58421, 56221, 53446, 54000, 53619,
        57727, 57108, 56564, 57938, 56447, 53866, 59219, 57275, 55165, 57881,
        56172, 59223, 57179, 57708, 59323, 58419, 54376, 54623, 57734, 53357,
        57282, 55464, 55455, 58885, 57334, 58561, 58855, 57783, 56545, 57041,
        55350, 59364, 55783, 53227, 58069, 55528, 58584, 57787, 53900, 59380,
        57218, 57175, 57858, 54576, 58458, 58426, 57983, 59252, 58159, 54802,
        58457, 56140, 53729, 57087, 57178, 55759, 52478, 59280, 57924, 55404,
        53729, 57729, 59188, 57854, 59390, 57785, 56275, 59388, 54717, 58838,
        54797, 59361, 57966, 58836, 56448, 57419, 59393, 56508, 59330, 58481,
        57857, 59300, 53910, 57770, 59110, 54822, 58927, 59391, 55140, 58071,
        53605, 59082, 59285, 59087, 57721, 57733, 58778, 55474, 54721, 59248,
        57861, 54197, 58856, 58156, 52376, 53614, 53332, 57413, 59230, 58482,
        54038, 59091, 58559, 53157, 57769, 55480, 54446, 54003, 54910, 59265,
        57045, 52695, 59301, 58808, 58010, 52042, 58141, 57791, 58446, 58775,
        55532, 58531, 56515, 45559, 58738, 59247, 52985, 58503, 53882, 58806,
        54149, 58852, 58468, 57170, 59375, 52726, 54868, 56443, 53744, 54311,
        54899, 54900, 54898, 54903, 54904, 59149, 57280, 54338, 57407
    ]
    # all_remaining_09 = [55160,59231,59278,59274,59222,53812,58897,57400,55466,55823,52034,59196,59251,43748,57787,53728,55745,55741,43748,55741,53812,55745,54717,59268,59250,57857,59141,59246,53602,53602,57773,57773,59369,59195,56449,58806,51264,52605,59479,55143,51264,52453,59210,57255,57255,57024,57024]
    # all_remaining_10 = [55160,59274,59592,59222,53812,58897,57400,55466,55823,52034,59196,59251,43748,57787,53728,55745,55741,43748,55741,55745,53812,54717,59268,59250,57857,59141,59246,53602,53602,57773,57773,59369,59195,56449,58806,59523,59549,59589,59590,51264,52605,59479,55143,51264,52453,59210,57255,57255,57024,57024]
    # all_remaining_13 = [55160,59222,53812,58897,57400,55466,55823,52034,59196,59251,43748,57787,53728,55745,55741,43748,55741,55745,53812,54717,59268,59250,57857,59141,59246,53602,53602,57773,57773,59369,59195,56449,58806,59523,59549,59589,59590,59592,51264,52605,59479,55143,51264,52453,59210,57255,57255,57024,57024]
    # dawn_list_13 = [53812,55466,55823,52034,55745,55741,51264,52605,55143,52453,57255,57024] # TO DO
    dawn_list_14 = [59196, 59251, 59268, 59250, 59246, 59369]
    all_nov_19 = [
        60219, 60203, 60183, 60088, 59772, 59604, 59592, 59590, 59589, 59549,
        59523, 59386, 59314, 59210, 59195, 59143, 59141, 58806, 57857, 57787,
        57773, 57773, 57716, 56449, 55189, 55160, 54717, 53602, 53602
    ]

    offer_ids = [56449, 53602]

    # sf.delete_tours("""SELECT Id FROM Tour__c WHERE EOSId__c IN @offer_ids""")

    # new_tours = pd.DataFrame(sf.select("SELECT EOSId__c, LastModifiedDate FROM Tour__c WHERE EOSId__c IN @offer_ids", mode='simple'))
    # tours_to_update = new_tours.assign(
    #     EOSIntegrationSyncDate__c = lambda df: df['LastModifiedDate'],
    #     EOSLastModifiedDate__c    = lambda df: df['LastModifiedDate'],
    #     IsHistoricalTour__c       = False,
    # ).drop(columns=['LastModifiedDate'])
    # sf.upsert('Tour__c', tours_to_update, 'EOSId__c', mode='simple')

    # tours_with_leb_0_issue = [54847]
    tours_with_null_venue_event = []  # [57894]
    tours_with_missing_booker_eos_id = []  # [54382, 53158, 52629, 53457, 52109]
    offer_ids = [
        item for item in offer_ids
        if item not in tours_with_missing_booker_eos_id
        and item not in tours_with_null_venue_event
    ]

    # 2469 Josh Casey DF ignore
    # 1299 Chris Loomes
    missing_1299_booker = [
        54717, 57787, 57857, 58473, 58480, 58496, 58499, 58753, 58754, 58806,
        58897, 59222, 59231, 59274, 59278
    ]
    missing_2469_booker = [55160]
    missing_974_artist_agent = []  # [59286]
    offer_ids = [
        item for item in offer_ids
        if item not in missing_1299_booker and item not in missing_2469_booker
        and item not in missing_974_artist_agent
    ]

    # temporary_tours_with_missing_eos_ids = [54307, 55313, 58456, 54382, 58507, 54243, 55588, 54800, 57127, 53158, 52629, 53457, 55117, 57218, 57201] + [59286, 58499, 58897, 58496, 58480, 58473, 57787, 57857, 58753, 58754, 59278, 59222, 59231, 59274, 58806, 55160, 54717, 59282, 59235]
    # offer_ids = [item for item in offer_ids if item not in temporary_tours_with_missing_eos_ids]

    # offer_ids = [59111,54840]

    # # To re-migrate all tours
    # offer_ids = [int(item.EOSId__c) for item in sf.select("""
    # SELECT Id, EOSId__c
    # FROM Tour__c
    # WHERE IsHistoricalTour__c = False
    # AND EOSId__c != NULL
    # AND SourceSystemId__c LIKE '%Offer%'
    # """)]

    # sample_eos_offers = sql.query("""
    # SELECT TOP 1 o.Id
    # FROM Offer o
    # LEFT JOIN Currency ppc
    #         ON ppc.Id = o.PromoterProfitCurrencyId
    # WHERE o.Id IN (
    #     SELECT DISTINCT OfferId
    # 	FROM vwEOSShow
    # 	WHERE (ShowDate>=GetDate() OR PostponedDateTBC=1)
    # 	AND CountryId = 1
    # 	AND OfferStatusName IN ('Confirmed','On Sale','Settled','Draft')
    # )
    # AND o.RomeId IS NULL
    # AND o.ArtistGuaranteeAmount > 0
    # AND o.CopromoterId IS NOT NULL
    # AND ppc.IsoCode = 'GBP'
    # """)
    # offer_ids = [item['Id'] for item in sample_eos_offers]

    if sf.instance == 'lne' or skip_already_created_tours:
        current_tours = {
            item.EOSId__c for item in sf.select(
                """SELECT EOSId__c FROM Tour__c WHERE EOSId__c <> NULL
                AND IsHistoricalTour__c = False""")
        }
        tours_to_not_import = [
            item for item in offer_ids if str(item) in current_tours
        ]
        if len(tours_to_not_import) > 0:
            print(
                f'Skipping the following tours because they are already in Production: {tours_to_not_import}'
            )
            offer_ids = [
                item for item in offer_ids if str(item) not in current_tours
            ]

    # offer_ids = offer_ids[:50]
    # offer_ids = [53943]
    assert len(offer_ids) > 0

    eos_data = uk.query_tours(sql, offer_ids, is_onsale=True)
    pdh.to_excel(eos_data,
                 'Migrate EOS On-Sale Tours - Raw EOS Query Data.xlsx')
    if len(eos_data.Tour__c) == 0:
        raise Exception('No Offers to migrate')
    eos_data_with_remapped_eos_ids, remapped_eos_ids = uk.replace_duplicate_eos_ids(
        eos_data)
    eos_data_with_split_headliners, artist_ids_missing_in_rome_by_tour = uk.split_headliner_and_coheadliner(
        sf, eos_data_with_remapped_eos_ids)
    eos_data_with_missing_ids_removed, eos_ids_missing_in_rome, removed_eos_ids_by_tour = uk.remove_eos_ids_missing_in_rome(
        sf, eos_data_with_split_headliners)

    all_missing_eos_ids_by_tour = combine_missing_ids_dicts(
        removed_eos_ids_by_tour, artist_ids_missing_in_rome_by_tour)
    eos_ids_missing_in_rome.update(
        itertools.chain.from_iterable(
            artist_ids_missing_in_rome_by_tour.values()))
    # print([int(k) for k,v in all_missing_eos_ids_by_tour.items() if '1299' in v])
    assert len(
        eos_ids_missing_in_rome
    ) == 0 or sf.instance != 'lne', f'Some EOS Ids are missing: {eos_ids_missing_in_rome}\nThe following tours have missing data: {[int(s) for s in all_missing_eos_ids_by_tour]}'

    eos_data_dfs = ObjDict({
        obj: pd.DataFrame(data)
        for obj, data in eos_data_with_missing_ids_removed.items()
    })
    eos_data_with_file_data = uk.merge_eos_data_with_file_data(eos_data_dfs,
                                                               is_onsale=True)

    eos_data_computed = uk.add_computed_fields(sf, eos_data_with_file_data)

    validations(eos_data_computed, eos_ids_missing_in_rome,
                sf.credentials['sandbox'] == 'False')

    threading.new(pdh.to_excel, eos_data_computed.data2,
                  'Migrate EOS On-Sale Tours.xlsx')
    # monitor_eos = uk.monitor_eos_tours(sql, [item['EOSId__c'] for item in eos_data.Tour__c])
    # monitor_rome = uk.monitor_rome_tours(sf, [item['EOSId__c'] for item in eos_data.Tour__c])
    # sf.dynamic_upsert(eos_data_computed.data2, mode='dynamic')
    sf.bypass_prod_operation_approval()
    rome_results = uk.upsert_eos_data_to_rome(
        sf,
        eos_data_computed.data2,
        is_onsale=True,
        delete_tours_first=delete_tours_first)
    tour_results = itertools.chain.from_iterable(
        [job.results for job in rome_results if job.object_name == 'Tour__c'])
    event_results = itertools.chain.from_iterable(
        [job.results for job in rome_results if job.object_name == 'Event__c'])
    uk.update_romeids_in_eos(sql, tour_results, event_results)
    if sf.instance == 'lne':
        uk.add_default_tour_personnel(
            sf, [item['EOSId__c'] for item in tour_results])

    failed_deal_jobs = [
        job.errors for job in rome_results
        if job.object_name == 'Deal__c' and len(job.errors) > 0
    ]

    if eos_ids_missing_in_rome:
        missing_eos_id_info = uk.query_by_eos_ids(
            sql, eos_ids_missing_in_rome,
            ['Name', 'FirstName', 'LastName', 'Email', 'EmailAddress'])
        pdh.to_excel(missing_eos_id_info, 'Missing EOS Data.xlsx')
        print(f'Missing EOS Ids in Rome: {eos_ids_missing_in_rome}')
        # print(pd.concat(missing_eos_id_info.values()).to_string())

    # tourlegs = sf.select("""
    # SELECT Id
    # FROM TourLeg__c
    # WHERE Tour__r.AppScope__c = 'UK'
    # AND CreatedBy.Name = 'DataMigration User'
    # AND Id NOT IN (SELECT TourLeg__c FROM Event__c)
    # """)
    # sf.delete(tourlegs)

    # OfferID	Currency	Exchange Rate
    # 54297	USD	0.72314944
    # 55166	USD	0.71428572
    # 54326	USD	0.72825256
    # 58875	USD	0.74074073
    # 54134	USD	0.82644628
    # 39597	USD	0.77363454
    # 58821	USD	0.70611495
    # 56998	USD	1
    # 45648	USD	1
    # 55139	USD	1

    # print(f'EOS Monitor in progress')
    return
Example #30
0
def main(sessions, do_fix=False):
    first_session = sessions[0]
    for s in sessions:
        s.print_messages = False
    session = first_session

    files_list = [
        item for item in listdir("./resources/sf_update_files")
        if isdir("./resources/sf_update_files/" + item) == False
        and item != ".DS_Store" and not item.startswith("~$")
    ]
    files_list = sorted(files_list,
                        key=lambda item: -(path.getmtime(
                            "./resources/sf_update_files/" + item)))

    selected_file = prompt('\nThe following update files are available:',
                           files_list)

    rows = []
    # object_name = lastSettings["objectName"] if file_selection_input == "0" else None
    data_sheet_name = None

    # Settings defaults
    settings = ObjDict({
        "BATCH_SIZE": 2000,
        "OPERATION": None,
        "DO_UPSERT": None  # Should be deprecated... use OPERATION instead
        ,
        "BYPASS_AUTOMATION": None,
        "EXT_ID": None,
        "PARALLEL_CONCURRENCY": True,
        "BULK_API_MODE": True
    })

    # Get data and settings from file
    if selected_file.endswith(".csv"):
        with open("./resources/sf_update_files/" + selected_file,
                  'r',
                  encoding='utf-8-sig') as file:
            reader = csv.DictReader(file)
            for row in reader:
                rows.append(row)
    elif selected_file.endswith(".xlsx"):
        file_path = './resources/sf_update_files/{}'.format(selected_file)
        xlsx_file = pd.ExcelFile(file_path)
        sheets = xlsx_file.sheet_names
        data_sheet_name = sheets[0]
        datadf = pd.read_excel(xlsx_file, data_sheet_name)
        settingsdf = pd.read_excel(
            xlsx_file, 'Settings') if 'Settings' in sheets else None

        # Fill nulls with blank strings
        datadf = datadf.fillna(value='')
        # Set column names to strings
        datadf.columns = datadf.columns.astype(str)
        # Set timestamp columns to string
        for col in datadf.select_dtypes(include=['datetime64']).columns.values:
            datadf[col] = datadf[col].astype(str)
        # Set numeric columns to zero-trimmed string
        for col in datadf.select_dtypes(
                include=['int64', 'float64']).columns.values:
            datadf[col] = datadf[col].astype(float).astype(str)
            # Strip only a trailing '.0' so values like '10.05' are not mangled
            datadf[col] = datadf[col].str.replace(r'\.0$', '', regex=True)
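        # e.g. an int64 value 42 becomes '42.0' after astype(float).astype(str)
        # and '42' after the trailing-'.0' strip; '3.14' is left unchanged.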

        rows = datadf.to_dict('records')

        if settingsdf is not None:
            inputsettings = settingsdf.set_index('Field').to_dict('index')
            inputsettings = {
                key: val['Value']
                for key, val in inputsettings.items()
            }
            settings.update(inputsettings)
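        # The Settings tab is read as two columns, Field and Value, which
        # overlay the defaults above; for example (values are illustrative):
        #   Field       | Value
        #   OPERATION   | UPSERT
        #   EXT_ID      | EOSId__c
        #   BATCH_SIZE  | 500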

        # xlsx_file = xlrd.open_workbook("./resources/sf_update_files/" + selected_file)
        # sheets = xlsx_file.sheet_names()
        # data_sheet = xlsx_file.sheet_by_index(0)
        # data_sheet_name = sheets[0]
        # settings_sheet = xlsx_file.sheet_by_name("Settings") if "Settings" in sheets else None

        # headers = [str(v) for v in data_sheet.row_values(0)]
        # for row_num in range(1,data_sheet.nrows):
        #     new_row = dict()
        #     src_row = data_sheet.row_values(row_num)
        #     for col_num in range(0,len(headers)):
        #         new_row[headers[col_num]] = src_row[col_num]
        #     rows.append(new_row)

        # if settings_sheet is not None:
        #     for row_num in range(0, settings_sheet.nrows):
        #         src_row = settings_sheet.row_values(row_num)

        #         settings[src_row[0]] = True if src_row[1] == 1 else False if src_row[1] == 0 else src_row[1]
    else:
        print("No file")

    # Handle for deprecated DO_UPSERT setting
    if settings.DO_UPSERT is True and settings.OPERATION is None:
        settings.OPERATION = 'UPSERT'

    operation = str(
        settings.OPERATION).lower() if settings.OPERATION is not None else None

    # Try to detect Object name from record Ids in file
    # If no record Ids are present, try to use the name of the tab in the file we are loading
    # If no match is found, prompt the user for the object name
    rows_with_id = [r for r in rows if "Id" in r and r["Id"] != ""]
    source_field_names = set(rows[0].keys())
    if len(rows_with_id) > 0:
        object_name = session.get_object_name(rows_with_id[0]["Id"])
    else:
        all_object_names = [
            item["name"] for item in session.get_org_description()["sobjects"]
        ]
        object_names_in_file_name = [
            item for item in all_object_names
            if " " + item + " " in selected_file
        ]
        if data_sheet_name in all_object_names:
            object_name = data_sheet_name
        elif len(object_names_in_file_name) == 1:
            object_name = object_names_in_file_name[0]
        else:
            object_name = prompt(
                "\nWhat object are the records in this file for?")

    # lastSettings["objectName"] = object_name
    # with open(settingsLoc, 'w') as outfile:
    #     json.dump(lastSettings, outfile)

    object_desc = session.get_object_description(object_name)

    if operation is None or operation == 'upsert':
        source_fields_relationship_names = {
            f[0:f.find('.')]
            for f in source_field_names if '.' in f
        }

        upsert_matches = [{
            "field": item["name"]
        } for item in object_desc.fields if item["externalId"] == True
                          and item["name"] in source_field_names]

        possible_reference_upsert_matches = [
            {
                "referenceTo":
                item.referenceTo[0],
                "match_string":
                item.relationshipName + ".",
                "reference_object_descs": [
                    threading.new(session.get_object_description, r)
                    for r in item.referenceTo
                ]
            } for item in object_desc.fields if len(item.referenceTo) > 0
            and item.relationshipName in source_fields_relationship_names
        ]

        reference_upsert_matches = []

        for field in source_field_names:
            for match in possible_reference_upsert_matches:
                if field.startswith(match["match_string"]):
                    upsert_match_object_desc = session.get_object_description(
                        match["referenceTo"])
                    reference_upsert_matches.extend(
                        [{
                            "field": field,
                            "matching_object": match["referenceTo"],
                            "matching_field": item["name"]
                        } for item in upsert_match_object_desc["fields"]
                         if item["externalId"] == True
                         and item["name"] == field[field.find(".") + 1:]])

        if len(upsert_matches) > 0:
            print(
                "\nFound the following External ID references for this object: {}"
                .format(", ".join([item["field"] for item in upsert_matches])))
        if len(reference_upsert_matches) > 0:
            print(
                "Found the following External ID references for a lookup object: {}"
                .format(", ".join(
                    [item["field"] for item in reference_upsert_matches])))

        if len(upsert_matches) + len(
                reference_upsert_matches) > 0 and settings.OPERATION is None:
            if prompt("\nWould you like to upsert?", boolean=True):
                operation = 'upsert'

    if operation is None:
        # Rows that already have Ids cannot be inserted, and rows without Ids
        # cannot be updated; prompt only when neither option is ruled out.
        can_insert = len(rows_with_id) == 0
        can_update = len(rows_with_id) == len(rows)
        if can_insert and can_update:
            operation = prompt("\nWhat operation would you like to perform?",
                               options={
                                   'Insert': 'insert',
                                   'Update': 'update'
                               })

    # lastSettings["doUpsert"] = do_upsert
    # with open(settingsLoc, 'w') as outfile:
    #     json.dump(lastSettings, outfile)

    self_external_id = None
    if operation == 'upsert':
        upsert_matches.insert(0, {"field": "Id"})
        self_external_id = settings.EXT_ID

        if self_external_id is None:
            if len(upsert_matches) == 1:
                self_external_id = upsert_matches[0]["field"]
            else:
                self_external_id = prompt(
                    "\nWhat ID field would you like to use for upsert?",
                    options=[item['field'] for item in upsert_matches])
                # print("\nWhat ID field would you like to use for upsert?")
                # counter = 1
                # print_str = ""
                # for item in upsert_matches:
                #     print_str += "{}) {} \n".format(counter, item["field"])
                #     counter += 1
                # print(print_str)
                # self_external_id = upsert_matches[int(input())-1]["field"]

        if not any(item["field"] == self_external_id
                   for item in upsert_matches):
            raise Exception(
                "External ID field '{}' is not one of the available upsert fields for this file."
                .format(self_external_id))

    fields_to_update = [
        item["name"] for item in object_desc["fields"]
        if item["name"] in rows[0] and item["updateable"] == True
        and item["calculated"] == False and item["autoNumber"] == False
    ]
    fields_to_update.extend(
        [item["field"] for item in reference_upsert_matches])
    fields_to_ignore = [
        item for item in rows[0] if item not in fields_to_update
    ]

    rows_to_update = [{
        f: v
        for (f, v) in r.items() if f == "Id" or f in fields_to_update
        or f in [mat["field"] for mat in upsert_matches]
    } for r in rows]

    mode = 'bulk' if settings.BULK_API_MODE == True else 'simple'
    if settings.BYPASS_AUTOMATION is None:
        settings.BYPASS_AUTOMATION = prompt(
            f"\nDo you need to bypass automation for this {operation}?",
            boolean=True)

    print("Selected file:     {}".format(selected_file))
    print("Operation:         {}".format(operation.title()) +
          (" (on {})".format(self_external_id
                             ) if self_external_id is not None else ""))
    print("Table:             {}".format(object_name))
    print("Bypass automation: {}".format(settings.BYPASS_AUTOMATION))
    print("Fields to update:  {}".format(", ".join(fields_to_update)))
    print("Fields to ignore:  {}".format(", ".join(fields_to_ignore)))
    do_operation_confirmation = prompt(
        f"\nWill {operation} {len(rows)} records. Are you sure?", boolean=True)

    # Now that all settings have been determined, perform the insert/update/delete in ALL sessions that were passed into the process
    # It is assumed that the system metadata that was queried for the 1st session is the same in the other sessions
    concurrency = "Parallel" if settings.PARALLEL_CONCURRENCY else "Serial"
    if do_operation_confirmation:

        def perform_crud_operation(session):
            if settings.BYPASS_AUTOMATION:
                session.add_bypass_settings()
            else:
                session.remove_bypass_settings()

            if operation == "insert":
                job_result = session.insert_records(object_name,
                                                    rows_to_update,
                                                    concurrency=concurrency)
            elif operation == "update":
                job_result = session.update_records(rows_to_update,
                                                    concurrency=concurrency)
            elif operation == "upsert":
                job_result = session.upsert_records(object_name,
                                                    rows_to_update,
                                                    self_external_id,
                                                    concurrency=concurrency,
                                                    mode=mode)
            else:
                pass

            if job_result is not None and "status" in job_result and job_result[
                    "status"]["numberRecordsFailed"] != "0":
                print("{} records failed.".format(
                    job_result["status"]["numberRecordsFailed"]))
                # results = session.get_job_results(job_result)
                # session.write_file("./resources/sf_update_files/error_logs/error_{}".format(selected_file.replace(".xlsx", ".csv")), results)

            if settings.BYPASS_AUTOMATION:
                session.remove_bypass_settings()

        for session in sessions:
            threading.new(perform_crud_operation, session)
        threading.wait()

        print("\nOperation complete!")
    else:
        print("\nTerminated.")
