Example #1
def process_args(api, args):
    if args.input:
        df = pd.concat(
            [
                lpt.read_input(input_file, dtype=str).fillna("")
                for input_file in args.input
            ],
            ignore_index=True,
            sort=False,
        )

        results = [batch_upsert_quotes(api, args.scope, c)
                   for c in lpt.chunk(df, 2000)]

        # Build a dict of the LUSID quote-upsert failures
        failures = {
            k: v
            for i in results if i.is_right
            for k, v in i.right.content.failed.items()
        }
        # Update return df with the errors from LUSID
        updated_failures = update_failures(failures, df)

        # Check that no batch raised an API exception
        for f in results:
            if f.is_left:
                return f.left

        # If there were any LUSID failures, return the df
        if len(failures) > 0:
            return Either.Right(updated_failures)

    return Either.Right("Success")
Example #2
    def transactions(r=None):
        if args.transactions:
            df = lpt.read_input(args.transactions)
            if args.map:
                mi.map_instruments(api, df, "instrument_uid")

            def load_transactions(portfolio, txns):
                return api.call.upsert_transactions(
                    args.scope,
                    portfolio,
                    transactions=api.from_df(txns, api.models.TransactionRequest),
                )

            if args.portfolio.lower().startswith("col:"):
                # Multiple portfolios contained in the file. Read the ID from the columns

                portfolio_column = args.portfolio[4:]

                def load_groups(iterator):
                    try:
                        portfolio, df = next(iterator)
                        print("Transactions: {}".format(portfolio))
                        return load_transactions(
                            portfolio, df.drop(portfolio_column, axis=1)
                        ).bind(lambda r: load_groups(iterator))
                    except StopIteration:
                        return Either.Right(None)

                return load_groups(iter(df.groupby(portfolio_column)))
            else:
                # one-off load. The portfolio id is provided
                return load_transactions(args.portfolio, df)

        return Either.Right(None)
Example #3
def load_groups(iterator):
    try:
        portfolio, df = next(iterator)
        print("Holdings: {}".format(portfolio))
        return set_holdings(
            portfolio,
            df).bind(lambda r: load_groups(iterator))
    except StopIteration:
        return Either.Right(None)
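
This recursive pattern makes one bind-chained call per portfolio group and terminates on StopIteration, so a file with very many portfolios could in principle approach Python's recursion limit. A plain loop over the same Either API behaves equivalently; a hedged alternative sketch, not what the library ships:

def load_groups_iterative(groups):
    # groups is the (portfolio, df) iterable produced by df.groupby(...)
    for portfolio, group_df in groups:
        print("Holdings: {}".format(portfolio))
        result = set_holdings(portfolio, group_df)
        if result.is_left:
            return result  # stop at the first failure, as bind would
    return Either.Right(None)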
Example #4
def load_groups(iterator):
    try:
        portfolio, df = next(iterator)
        print("Transactions: {}".format(portfolio))
        return load_transactions(
            portfolio, df.drop(portfolio_column, axis=1)
        ).bind(lambda r: load_groups(iterator))
    except StopIteration:
        return Either.Right(None)
Example #5
    def batch_query(instr_type, prefix, outstanding):
        if len(outstanding) > 0:
            batch = outstanding[:500]  # records to process now
            remainder = outstanding[500:]  # remaining records

            # called if the get_instruments() succeeds
            def get_success(result):
                get_found = result.content.values
                get_failed = result.content.failed

                # Update successfully found items
                update_mappings(get_found, prefix)

                if len(get_failed) > 0:
                    if instr_type == "ClientInternal":
                        # For un-mapped internal codes, we will try to add (upsert)

                        # called if the upsert_instruments() succeeds
                        def add_success(result):
                            add_worked = result.content.values
                            add_failed = result.content.failed

                            if len(add_failed) > 0:
                                return Either.Left(
                                    "Failed to add internal instruments")

                            # Update successfully added items
                            update_mappings(add_worked, prefix)

                            # Kick off the next batch
                            return batch_query(instr_type, prefix, remainder)

                        # Create the upsert request from the failed items
                        request = {
                            k: api.models.InstrumentDefinition(
                                name=v.id,
                                identifiers={"ClientInternal": v.id})
                            for k, v in get_failed.items()
                        }

                        return api.call.upsert_instruments(request).bind(
                            add_success)
                    else:
                        # Instruments are not mapped. Nothing we can do.
                        return Either.Left(
                            "Failed to locate instruments of type {}".format(
                                instr_type))
                else:
                    # No failures, kick off the next batch
                    return batch_query(instr_type, prefix, remainder)

            return api.call.get_instruments(
                instr_type, list(batch[WORKING].values)).bind(get_success)
        else:
            # No records remaining. Return the now-enriched dataframe
            return Either.Right(df)
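
The recursion here is the whole control flow: take the first 500 outstanding records, issue the API call, and only from inside the success callback kick off the next batch, so a single Left aborts the remaining work. Stripped of the LUSID specifics, the skeleton looks roughly like this (call_api and handle are stand-ins, not library functions):

def process_in_batches(records, batch_size=500):
    # Hypothetical skeleton of the batch/remainder recursion above
    if not records:
        return Either.Right("done")
    batch, remainder = records[:batch_size], records[batch_size:]

    def on_success(result):
        handle(result)  # stand-in for update_mappings() etc.
        return process_in_batches(remainder, batch_size)

    return call_api(batch).bind(on_success)  # stand-in Either-returning call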
Example #6
def add_success(result):
    add_worked = result.content.values
    add_failed = result.content.failed

    if len(add_failed) > 0:
        return Either.Left(
            "Failed to add internal instruments")

    # Update successfully added items
    update_mappings(add_worked, prefix)

    # Kick off the next batch
    return batch_query(instr_type, prefix, remainder)
Example #7
def page_all_results(fetch_page, page_handler):
    results = []

    def got_page(result):
        results.append(page_handler(result))

        links = [l for l in result.content.links if l.relation == "NextPage"]

        if len(links) > 0:
            match = rexp.match(links[0].href)
            if match:
                return urllib.parse.unquote(match.group(1))
        return None

    page = Either(None)
    while True:
        page = fetch_page(page.right).bind(got_page)
        if page.is_left():
            return page
        if page.right is None:
            break

    return pd.concat(results, ignore_index=True, sort=False)
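
page_all_results drives an Either-returning fetcher: page_handler accumulates each page, got_page returns the NextPage token parsed from the response links (or None when there are no more pages), bind re-wraps that token as the next Right, and any Left from fetch_page falls straight out of the loop. A hedged usage sketch with stand-in names (some_paged_endpoint and the column list are illustrative, not lusidtools API):

def fetch_page(token):
    # token is None on the first call, then the parsed NextPage token
    return api.call.some_paged_endpoint(args.scope, page=token)  # hypothetical

def page_handler(result):
    # Convert one page of results into a DataFrame
    return lpt.to_df(result, ["id.code"])  # hypothetical helper/columns

df_or_left = page_all_results(fetch_page, page_handler)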
Example #8
def load_groups(iterator, group_dict):
    try:
        index, row = next(iterator)
        return api.call.create_portfolio_group(
            scope=args.scope,
            request=api.models.CreatePortfolioGroupRequest(
                code=row[GROUP_NAME],
                values=group_dict[row[GROUP_NAME]][PORTFOLIOS],
                sub_groups=group_dict[row[GROUP_NAME]][SUB_GROUPS],
                display_name=row[DISPLAY_NAME],
                description=row[DESCRIPTION],
            ),
        ).bind(lambda r: load_groups(iterator, group_dict))
    except StopIteration:
        return Either.Right(None)
Example #9
def create():
    if args.create:
        return api.call.create_portfolio(
            args.scope,
            transaction_portfolio=api.models.CreateTransactionPortfolioRequest(
                code=args.portfolio,
                display_name=args.create[0],
                base_currency=args.create[1],
                created=lpt.to_date(args.create[2]),
                accounting_method=map_method(args.accounting_method),
                sub_holding_keys=args.shk,
            ),
        )
    else:
        return Either.Right(None)
Example #10
            def get_success(result):
                get_found = result.content.values
                get_failed = result.content.failed

                # Update successfully found items
                update_mappings(get_found, prefix)

                if len(get_failed) > 0:
                    if instr_type == "ClientInternal":
                        # For un-mapped internal codes, we will try to add (upsert)

                        # called if the upsert_instruments() succeeds
                        def add_success(result):
                            add_worked = result.content.values
                            add_failed = result.content.failed

                            if len(add_failed) > 0:
                                return Either.Left(
                                    "Failed to add internal instruments")

                            # Update successfully added items
                            update_mappings(add_worked, prefix)

                            # Kick off the next batch
                            return batch_query(instr_type, prefix, remainder)

                        # Create the upsert request from the failed items
                        request = {
                            k: api.models.InstrumentDefinition(
                                name=v.id,
                                identifiers={"ClientInternal": v.id})
                            for k, v in get_failed.items()
                        }

                        return api.call.upsert_instruments(request).bind(
                            add_success)
                    else:
                        # Instruments are not mapped. Nothing we can do.
                        return Either.Left(
                            "Failed to locate instruments of type {}".format(
                                instr_type))
                else:
                    # No failures, kick off the next batch
                    return batch_query(instr_type, prefix, remainder)
Example #11
    def map_type(key, instr_type):
        prefix = key + ":"
        subset = df[df[column].str.startswith(prefix)]

        # See if there are any entries of this type
        if len(subset) > 0:
            width = len(prefix)
            uniques = subset[[column]].drop_duplicates(column)
            uniques[WORKING] = uniques[column].str.slice(width)

            def map_success(v):
                df.loc[subset.index, column] = subset[column].map(mapping_table)
                return Either.Right(df)

            return batch_query(instr_type, prefix, uniques).bind(map_success)
        else:
            # Nothing to be done, pass the full result back
            return Either.Right(df)
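
The string handling in map_type is worth seeing in isolation: rows whose identifier starts with a given prefix (e.g. Isin:) are selected, de-duplicated, and the prefix is sliced off into a working column that batch_query then resolves. A small hedged illustration with made-up identifiers (WORKING above is a column-name constant; "working" stands in for it here):

import pandas as pd

df = pd.DataFrame({"instrument_uid": ["Isin:GB0001", "Isin:GB0002", "Ticker:VOD"]})
column, prefix = "instrument_uid", "Isin:"

subset = df[df[column].str.startswith(prefix)]
uniques = subset[[column]].drop_duplicates(column)
uniques["working"] = uniques[column].str.slice(len(prefix))
print(uniques["working"].tolist())  # ['GB0001', 'GB0002']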
Example #12
    def reader(self, name: str, func: Callable, *args, **kwargs):
        """
        The responsibility of this function is to act as a wrapper around an API call and read the result of the
        call

        :param str name: The name of the API call e.g. BuildTransactions
        :param Callable func: The API call

        :return:
        """
        try:
            fn, result = self.calls.pop(0)
            assert fn == name
            return Either.Right(result)

        except Exception as e:
            print(f"ERROR READING FROM CACHE:{self.filename}\n"
                  "DELETE THE CACHE FILE AND RETRY")
            raise e
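
reader pops the next (name, result) pair off self.calls and asserts that it belongs to the call being replayed, so playback must follow the exact order in which calls were recorded. A hedged, self-contained sketch of exercising such a replay cache, reusing the Either sketch from Example #1 (ReplayCache is illustrative, not the library's class):

from typing import Callable

class ReplayCache:
    # Illustrative stand-in for the object that owns reader()
    def __init__(self, calls, filename="demo.cache"):
        self.calls = list(calls)  # [(name, result), ...] in recorded order
        self.filename = filename

    def reader(self, name: str, func: Callable, *args, **kwargs):
        try:
            fn, result = self.calls.pop(0)
            assert fn == name
            return Either.Right(result)
        except Exception as e:
            print(f"ERROR READING FROM CACHE: {self.filename}\n"
                  "DELETE THE CACHE FILE AND RETRY")
            raise e

cache = ReplayCache([("BuildTransactions", {"rows": 3})])
print(cache.reader("BuildTransactions", func=None).right)  # {'rows': 3}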
Example #13
def map_success(v):
    df.loc[subset.index, column] = subset[column].map(mapping_table)
    return Either.Right(df)
Example #14
def process_args(api, args):
    def map_method(method):
        translations = {
            "FIFO": "FirstInFirstOut",
            "LIFO": "LastInFirstOut",
            "HCF": "HighestCostFirst",
            "LCF": "LowestCostFirst",
        }
        return translations.get(method, method)

    def create():
        if args.create:
            return api.call.create_portfolio(
                args.scope,
                create_transaction_portfolio_request=api.models.CreateTransactionPortfolioRequest(
                    code=args.portfolio,
                    display_name=args.create[0],
                    base_currency=args.create[1],
                    created=lpt.to_date(args.create[2]),
                    accounting_method=map_method(args.accounting_method),
                    sub_holding_keys=args.shk,
                ),
            )
        else:
            return Either.Right(None)

    def transactions(r=None):
        if args.transactions:
            df = lpt.read_input(args.transactions)
            if args.map:
                mi.map_instruments(api, df, "instrument_uid")

            def load_transactions(portfolio, txns):
                return api.call.upsert_transactions(
                    args.scope,
                    portfolio,
                    transaction_request=api.from_df(
                        txns, api.models.TransactionRequest),
                )

            if args.portfolio.lower().startswith("col:"):
                # Multiple portfolios contained in the file. Read the ID from the columns

                portfolio_column = args.portfolio[4:]

                def load_groups(iterator):
                    try:
                        portfolio, df = next(iterator)
                        print("Transactions: {}".format(portfolio))
                        return load_transactions(
                            portfolio, df.drop(portfolio_column, axis=1)
                        ).bind(lambda r: load_groups(iterator))
                    except StopIteration:
                        return Either.Right(None)

                return load_groups(iter(df.groupby(portfolio_column)))
            else:
                # one-off load. The portfolio id is provided
                return load_transactions(args.portfolio, df)

        return Either.Right(None)

    def positions(r=None):
        if args.positions:
            taxlot_fields = [
                "units",
                "cost.amount",
                "cost.currency",
                "portfolio_cost",
                "price",
                "purchase_date",
                "settlement_date",
            ]

            df = lpt.read_input(args.positions[0])

            # Get unique key fields to group by
            keys = ["instrument_uid"
                    ] + [c for c in df.columns.values if c.startswith("SHK:")]
            # Fill down any blanks
            df[keys] = df[keys].ffill()

            def set_holdings(portfolio, holdings):
                # Group by the keys and build request for each group
                return api.call.set_holdings(
                    args.scope,
                    portfolio,
                    lpt.to_date(args.positions[1]),
                    adjust_holding_request=[
                        api.models.AdjustHoldingRequest(
                            instrument_identifiers=lpt.to_instrument_identifiers(
                                i if len(keys) == 1 else i[0]),
                            sub_holding_keys=lpt.perpetual_upsert(
                                api.models, hld_df, "SHK:"),
                            properties=lpt.perpetual_upsert(api.models, hld_df),
                            tax_lots=api.from_df(
                                hld_df[taxlot_fields],
                                api.models.TargetTaxLotRequest),
                        ) for i, hld_df in holdings.groupby(keys)
                    ],
                )

            if args.portfolio.lower().startswith("col:"):
                # Multiple portfolios contained in the file. Read the ID from the columns

                def load_groups(iterator):
                    try:
                        portfolio, df = next(iterator)
                        print("Holdings: {}".format(portfolio))
                        return set_holdings(
                            portfolio,
                            df).bind(lambda r: load_groups(iterator))
                    except StopIteration:
                        return Either.Right(None)

                return load_groups(iter(df.groupby(args.portfolio[4:])))
            else:
                # one-off load. The portfolio id is provided
                return set_holdings(args.portfolio, df)

        return Either.Right(None)

    return (create()
            .bind(positions)
            .bind(transactions)
            .bind(lambda r: Either.Right("Done!")))
Example #15
    def positions(r=None):
        if args.positions:
            taxlot_fields = [
                "units",
                "cost.amount",
                "cost.currency",
                "portfolio_cost",
                "price",
                "purchase_date",
                "settlement_date",
            ]

            df = lpt.read_input(args.positions[0])

            # Get unique key fields to group by
            keys = ["instrument_uid"
                    ] + [c for c in df.columns.values if c.startswith("SHK:")]
            # Fill down any blanks
            df[keys] = df[keys].ffill()

            def set_holdings(portfolio, holdings):
                # Group by the keys and build request for each group
                return api.call.set_holdings(
                    args.scope,
                    portfolio,
                    lpt.to_date(args.positions[1]),
                    adjust_holding_request=[
                        api.models.AdjustHoldingRequest(
                            instrument_identifiers=lpt.to_instrument_identifiers(
                                i if len(keys) == 1 else i[0]),
                            sub_holding_keys=lpt.perpetual_upsert(
                                api.models, hld_df, "SHK:"),
                            properties=lpt.perpetual_upsert(api.models, hld_df),
                            tax_lots=api.from_df(
                                hld_df[taxlot_fields],
                                api.models.TargetTaxLotRequest),
                        ) for i, hld_df in holdings.groupby(keys)
                    ],
                )

            if args.portfolio.lower().startswith("col:"):
                # Multiple portfolios contained in the file. Read the ID from the columns

                def load_groups(iterator):
                    try:
                        portfolio, df = next(iterator)
                        print("Holdings: {}".format(portfolio))
                        return set_holdings(
                            portfolio,
                            df).bind(lambda r: load_groups(iterator))
                    except StopIteration:
                        return Either.Right(None)

                return load_groups(iter(df.groupby(args.portfolio[4:])))
            else:
                # one-off load. The portfolio id is provided
                return set_holdings(args.portfolio, df)

        return Either.Right(None)
Example #16
def process_args(api, args):
    taxlot_fields = [
        "units",
        "cost.amount",
        "cost.currency",
        "portfolio_cost",
        "price",
        "purchase_date",
        "settlement_date",
    ]

    # Check whether the input is CSV or JSON
    if args.input.endswith(".csv"):
        df = lpt.read_input(args.input)
    elif args.input.endswith(".json"):
        # Convert from json to dataframe
        with open(args.input, "r") as myfile:
            data = json.load(myfile)
        df = json_normalize(data, max_level=1)
        df.columns = df.columns.str.replace("subHoldingKeys.", SHK)
        df.columns = df.columns.str.replace("properties.", P)
        columns_to_modify = [
            c for c in df.columns.values if c.startswith(SHK) or c.startswith(P)
        ]
        for col in columns_to_modify:
            df[col] = df[col].apply(lambda x: x.get("value", {}).get("labelValue", ""))
    else:
        raise ValueError(
            "The file provided: {} is not in .json or .csv format.".format(args.input)
        )

    # Check the schema
    for column in taxlot_fields:
        if column not in df.columns:
            df[column] = None

    keys = ["instrument_uid"] + [c for c in df.columns.values if c.startswith(SHK)]

    # Fill down any blanks
    df[keys] = df[keys].ffill()

    # Group by the keys and build request for each group
    return api.call.set_holdings(
        args.scope,
        args.portfolio,
        lpt.to_date(args.date),
        holding_adjustments=[
            api.models.AdjustHoldingRequest(
                instrument_identifiers=lpt.to_instrument_identifiers(
                    i if len(keys) == 1 else i[0]
                ),
                sub_holding_keys=lpt.perpetual_upsert(api.models, hld_df, SHK),
                properties=lpt.perpetual_upsert(api.models, hld_df),
                tax_lots=api.from_df(
                    hld_df[taxlot_fields], api.models.TargetTaxLotRequest
                ),
            )
            for i, hld_df in df.groupby(keys)
        ],
    )

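The JSON branch leans on json_normalize(data, max_level=1), which flattens exactly one level of nesting into dotted column names; that is why the subHoldingKeys. and properties. prefixes can be remapped onto the SHK/P column conventions and the remaining {'value': {'labelValue': ...}} dicts unpacked per cell. A small hedged illustration with made-up data:

from pandas import json_normalize

data = [{
    "instrument_uid": "inst1",
    "subHoldingKeys": {"Strategy": {"value": {"labelValue": "Growth"}}},
}]
df = json_normalize(data, max_level=1)
print(df.columns.tolist())  # ['instrument_uid', 'subHoldingKeys.Strategy']

df["subHoldingKeys.Strategy"] = df["subHoldingKeys.Strategy"].apply(
    lambda x: x.get("value", {}).get("labelValue", ""))
print(df.iloc[0].tolist())  # ['inst1', 'Growth']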
Example #17
def process_args(api, args):
    # Build the ResourceID variables
    def resource_id_vars(names):
        return [api.models.ResourceId(args.scope, code) for code in names]

    # Sort the dataframe so that sub-groups are created before the groups
    # that depend on them
    def sorted_group_df(df):

        df_sorted = df[df[SUB_GROUPS].isnull()]
        df_not_sorted = df[df[SUB_GROUPS].notnull()]

        while not df_not_sorted.empty:
            for i, row in df_not_sorted.iterrows():
                if row[SUB_GROUPS] in df_sorted[GROUP_NAME].to_list():
                    # DataFrame.append was removed in pandas 2.x
                    df_sorted = pd.concat([df_sorted, row.to_frame().T])
                    df_not_sorted = df_not_sorted.drop(i)
            if (not any(x in df_sorted[GROUP_NAME].to_list()
                        for x in df_not_sorted[SUB_GROUPS].to_list())
                    and not df_not_sorted.empty):
                raise ValueError(
                    "There is a circular dependency between the groups and "
                    "sub-groups, or one of the sub-groups does not exist: {}"
                    .format(df_not_sorted))

        return df_sorted

    def load_groups(iterator, group_dict):
        try:
            index, row = next(iterator)
            return api.call.create_portfolio_group(
                scope=args.scope,
                create_portfolio_group_request=api.models.CreatePortfolioGroupRequest(
                    code=row[GROUP_NAME],
                    created=row.get(CREATED_DATE, None),
                    values=group_dict[row[GROUP_NAME]][PORTFOLIOS],
                    sub_groups=group_dict[row[GROUP_NAME]][SUB_GROUPS],
                    display_name=row[DISPLAY_NAME],
                    description=row[DESCRIPTION],
                ),
            ).bind(lambda r: load_groups(iterator, group_dict))
        except StopIteration:
            return Either.Right(None)

    def create_groups():
        df = lpt.read_input(args.input[0])

        # Build the group dictionary:
        # {'group_name': {'Portfolios': ['portfolio1', 'portfolio2'],
        #                 'Sub-groups': ['sub-group1', 'sub-group2']}}
        group_dict = {
            group_name: {
                SUB_GROUPS:
                resource_id_vars(group_df[SUB_GROUPS].dropna().unique()),
                PORTFOLIOS:
                resource_id_vars(group_df[PORTFOLIOS].dropna().unique()),
            }
            for group_name, group_df in df.groupby(GROUP_NAME)
        }

        df = sorted_group_df(df)
        df.drop_duplicates(subset=[GROUP_NAME, DISPLAY_NAME],
                           keep="last",
                           inplace=True)
        return load_groups(df.iterrows(), group_dict)

    return create_groups().bind(lambda r: Either.Right("Done!"))
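
sorted_group_df is a hand-rolled topological sort: groups with no sub-group dependency go first, a group is appended only once its sub-group is already placed, and the ValueError catches cycles and dangling references. On Python 3.9+ the standard library's graphlib expresses the same ordering more directly; a hedged alternative sketch using the GROUP_NAME/SUB_GROUPS constants from above (it orders group names rather than rows, and a dangling sub-group would need an explicit existence check):

from graphlib import TopologicalSorter, CycleError
import pandas as pd

def group_creation_order(df):
    # Map each group to the sub-groups it depends on
    deps = {}
    for _, row in df.iterrows():
        deps.setdefault(row[GROUP_NAME], set())
        if pd.notnull(row[SUB_GROUPS]):
            deps[row[GROUP_NAME]].add(row[SUB_GROUPS])
    try:
        # Dependencies come out before their dependents
        return list(TopologicalSorter(deps).static_order())
    except CycleError as e:
        raise ValueError(
            "There is a circular dependency between the groups "
            "and sub-groups: {}".format(e)) from e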