Example No. 1
    def test_read_input_for_non_excel(self, _, input_path):
        """
        Tests that non excel file paths do not attempt to retrieve a sheet (which should not be
        viable) and are instead read as csv files into a dataframe.

        """
        with patch("pandas.read_csv") as read_csv_mock:
            lpt.read_input(path=input_path)
            read_csv_mock.assert_called_once_with(input_path)
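A minimal, self-contained sketch of how such a test could be parameterized is shown below; the class name, the lpt import, and the sample paths are assumptions rather than taken from the source.

# Sketch only: the lpt import path, class name and sample paths are assumptions.
from unittest import TestCase
from unittest.mock import patch

from parameterized import parameterized

import lpt  # assumed to expose read_input as used in the example above


class ReadInputNonExcelTests(TestCase):
    @parameterized.expand([
        ("relative_csv", "data/holdings.csv"),
        ("nested_csv", "data/2020/quotes.csv"),
    ])
    def test_read_input_for_non_excel(self, _, input_path):
        # Non-Excel paths should be passed straight through to pandas.read_csv.
        with patch("pandas.read_csv") as read_csv_mock:
            lpt.read_input(path=input_path)
            read_csv_mock.assert_called_once_with(input_path)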
Example No. 2
def process_args(api, args):
    taxlot_fields = [
        "units",
        "cost.amount",
        "cost.currency",
        "portfolio_cost",
        "price",
        "purchase_date",
        "settlement_date",
    ]

    # Check whether the input is JSON or CSV
    if "csv" in args.input:
        df = lpt.read_input(args.input)
    elif "json" in args.input:
        # Convert from json to dataframe
        with open(args.input, "r") as myfile:
            data = json.load(myfile)
        df = json_normalize(data, max_level=1)
        df.columns = df.columns.str.replace("subHoldingKeys.", SHK)
        df.columns = df.columns.str.replace("properties.", P)
        columns_to_modify = [
            c for c in df.columns.values
            if c.startswith(SHK) or c.startswith(P)
        ]
        for col in columns_to_modify:
            df[col] = df[col].apply(
                lambda x: x.get("value", {}).get("labelValue", ""))
    else:
        raise Exception(
            "The file provided: {} is not .json or .csv format.".format(
                args.input))

    # Check the schema
    for column in taxlot_fields:
        if column not in df.columns:
            df[column] = None

    keys = ["instrument_uid"
            ] + [c for c in df.columns.values if c.startswith(SHK)]

    # Fill down any blanks
    df[keys] = df[keys].ffill()

    # Group by the keys and build request for each group
    return api.call.set_holdings(
        args.scope,
        args.portfolio,
        lpt.to_date(args.date),
        holding_adjustments=[
            api.models.AdjustHoldingRequest(
                instrument_identifiers=lpt.to_instrument_identifiers(
                    i if len(keys) == 1 else i[0]),
                sub_holding_keys=lpt.perpetual_upsert(api.models, hld_df, SHK),
                properties=lpt.perpetual_upsert(api.models, hld_df),
                tax_lots=api.from_df(hld_df[taxlot_fields],
                                     api.models.TargetTaxLotRequest),
            ) for i, hld_df in df.groupby(keys)
        ],
    )
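To make the JSON branch above concrete, the following self-contained sketch uses fabricated data and assumed SHK/P prefix constants to show how nested sub-holding keys are flattened by json_normalize and then reduced to their label values:

# Illustrative only: fabricated input and assumed prefix constants (SHK, P).
from pandas import json_normalize

SHK = "SHK:"  # assumed sub-holding-key column prefix
P = "P:"      # assumed property column prefix

data = [{
    "instrument_uid": "LUID_00003D4X",
    "units": 100,
    "subHoldingKeys": {
        "Transaction/MyScope/Strategy": {"value": {"labelValue": "Growth"}}
    },
}]

df = json_normalize(data, max_level=1)
df.columns = df.columns.str.replace("subHoldingKeys.", SHK, regex=False)
df.columns = df.columns.str.replace("properties.", P, regex=False)

nested_cols = [c for c in df.columns if c.startswith((SHK, P))]
for col in nested_cols:
    df[col] = df[col].apply(lambda x: x.get("value", {}).get("labelValue", ""))

print(df.to_dict("records"))
# [{'instrument_uid': 'LUID_00003D4X', 'units': 100,
#   'SHK:Transaction/MyScope/Strategy': 'Growth'}]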
Example No. 3
def process_args(api, args):
    if args.input:
        df = pd.concat(
            [
                lpt.read_input(input_file, dtype=str).fillna("")
                for input_file in args.input
            ],
            ignore_index=True,
            sort=False,
        )

        # Materialize the per-batch results into a list so they can be scanned more than once
        results = [
            batch_upsert_quotes(api, args.scope, c) for c in lpt.chunk(df, 2000)
        ]

        # Create a dict of LUSID upsert-quotes failures
        failures = {
            k: v
            for i in results if i.is_right
            for k, v in i.right.content.failed.items()
        }
        # Update the returned df with the errors from LUSID
        updated_failures = update_failures(failures, df)

        # Check that no batch raised an API exception
        for f in results:
            if f.is_left:
                return f.left

        # If there were any LUSID failures, return the df
        if len(failures) > 0:
            return Either.Right(updated_failures)

    return Either.Right("Success")
Example No. 4
    def test_read_input_for_excel(
        self, _, input_path, expected_path, expected_sheet_name
    ):
        """
        Tests that read_input can support both windows and unix specific file paths. In either case need to ensure that
        paths both with and without a suffixed sheet name are correctly parsed and then read as excel files into a
        dataframe.

        :param expected_path: expected path of the excel sheet after the input_path has been parsed
        :param expected_sheet_name: expected sheet of the excel file
        """
        with patch("pandas.read_excel") as read_excel_mock:
            lpt.read_input(path=input_path)
            read_excel_mock.assert_called_once_with(
                expected_path, sheet_name=expected_sheet_name
            )
Example No. 5
    def transactions(r=None):
        if args.transactions:
            df = lpt.read_input(args.transactions)
            if args.map:
                mi.map_instruments(api, df, "instrument_uid")

            def load_transactions(portfolio, txns):
                return api.call.upsert_transactions(
                    args.scope,
                    portfolio,
                    transactions=api.from_df(txns, api.models.TransactionRequest),
                )

            if args.portfolio.lower().startswith("col:"):
                # Multiple portfolios are contained in the file; read the ID from the named column

                portfolio_column = args.portfolio[4:]

                def load_groups(iterator):
                    try:
                        portfolio, df = next(iterator)
                        print("Transactions: {}".format(portfolio))
                        return load_transactions(
                            portfolio, df.drop(portfolio_column, axis=1)
                        ).bind(lambda r: load_groups(iterator))
                    except StopIteration:
                        return Either.Right(None)

                return load_groups(iter(df.groupby(portfolio_column)))
            else:
                # One-off load: the portfolio id is provided directly
                return load_transactions(args.portfolio, df)

        return Either.Right(None)
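The "col:" convention above means the portfolio identifier is read from a column of the input file rather than passed as a single value. A toy sketch of that grouping, with fabricated data and an illustrative column name:

# Fabricated data; "portfolio_id" is an illustrative column name.
import pandas as pd

df = pd.DataFrame({
    "portfolio_id": ["UK-EQUITY", "UK-EQUITY", "US-EQUITY"],
    "instrument_uid": ["LUID_AAA", "LUID_BBB", "LUID_CCC"],
    "units": [100, 250, 75],
})

portfolio_column = "portfolio_id"
for portfolio, txns in df.groupby(portfolio_column):
    # Each portfolio's transactions are loaded separately, with the
    # portfolio column dropped from the payload first.
    print(portfolio, txns.drop(portfolio_column, axis=1).to_dict("records"))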
Example No. 6
    def create_groups():
        df = lpt.read_input(args.input[0])

        # Build the group dictionaries: {'group_name': {'Portfolios': ['portfolio1', 'portfolio2'], 'Sub-groups': ['sub-group1', 'sub-group2']}}
        group_dict = {
            group_name: {
                SUB_GROUPS: resource_id_vars(group_df[SUB_GROUPS].dropna().unique()),
                PORTFOLIOS: resource_id_vars(group_df[PORTFOLIOS].dropna().unique()),
            }
            for group_name, group_df in df.groupby(GROUP_NAME)
        }

        df = sorted_group_df(df)
        df.drop_duplicates(subset=[GROUP_NAME, DISPLAY_NAME],
                           keep="last",
                           inplace=True)
        return load_groups(df.iterrows(), group_dict)
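The shape of group_dict is easier to see with toy data. In the sketch below the column-name constants are assumed values and a plain list() stands in for resource_id_vars():

# Toy data; GROUP_NAME/SUB_GROUPS/PORTFOLIOS values are assumed column names
# and list() stands in for resource_id_vars().
import pandas as pd

GROUP_NAME, SUB_GROUPS, PORTFOLIOS = "group_name", "sub_groups", "portfolios"

df = pd.DataFrame({
    GROUP_NAME: ["Global", "Global", "EMEA"],
    SUB_GROUPS: ["EMEA", None, None],
    PORTFOLIOS: ["portfolio1", "portfolio2", "portfolio3"],
})

group_dict = {
    name: {
        SUB_GROUPS: list(grp[SUB_GROUPS].dropna().unique()),
        PORTFOLIOS: list(grp[PORTFOLIOS].dropna().unique()),
    }
    for name, grp in df.groupby(GROUP_NAME)
}
print(group_dict)
# {'EMEA': {'sub_groups': [], 'portfolios': ['portfolio3']},
#  'Global': {'sub_groups': ['EMEA'], 'portfolios': ['portfolio1', 'portfolio2']}}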
Example No. 7
    def positions(r=None):
        if args.positions:
            taxlot_fields = [
                "units",
                "cost.amount",
                "cost.currency",
                "portfolio_cost",
                "price",
                "purchase_date",
                "settlement_date",
            ]

            df = lpt.read_input(args.positions[0])

            # Get unique key fields to group by
            keys = ["instrument_uid"
                    ] + [c for c in df.columns.values if c.startswith("SHK:")]
            # Fill down any blanks
            df[keys] = df[keys].ffill()

            def set_holdings(portfolio, holdings):
                # Group by the keys and build a request for each group
                return api.call.set_holdings(
                    args.scope,
                    portfolio,
                    lpt.to_date(args.positions[1]),
                    adjust_holding_request=[
                        api.models.AdjustHoldingRequest(
                            instrument_identifiers=lpt.to_instrument_identifiers(
                                i if len(keys) == 1 else i[0]
                            ),
                            sub_holding_keys=lpt.perpetual_upsert(
                                api.models, hld_df, "SHK:"
                            ),
                            properties=lpt.perpetual_upsert(api.models, hld_df),
                            tax_lots=api.from_df(
                                hld_df[taxlot_fields], api.models.TargetTaxLotRequest
                            ),
                        )
                        for i, hld_df in holdings.groupby(keys)
                    ],
                )

            if args.portfolio.lower().startswith("col:"):
                # Multiple portfolios are contained in the file; read the ID from the named column

                def load_groups(iterator):
                    try:
                        portfolio, df = next(iterator)
                        print("Holdings: {}".format(portfolio))
                        return set_holdings(portfolio, df).bind(
                            lambda r: load_groups(iterator)
                        )
                    except StopIteration:
                        return Either.Right(None)

                return load_groups(iter(df.groupby(args.portfolio[4:])))
            else:
                # One-off load: the portfolio id is provided directly
                return set_holdings(args.portfolio, df)

        return Either.Right(None)
Example No. 8
def process_args(api, args):
    aliases = {
        "CINT": "ClientInternal",
        "FIGI": "Figi",
        "RIC": "P:Instrument/default/RIC",
        "TICKER": "P:Instrument/default/Ticker",
        "ISIN": "P:Instrument/default/Isin",
    }

    if args.input:
        df = pd.concat(
            [
                lpt.read_input(input_file, dtype=str)
                for input_file in args.input
            ],
            ignore_index=True,
            sort=False,
        )

        if args.mappings:
            df.rename(
                columns={
                    s[1]: aliases.get(s[0], s[0])
                    for s in (m.split("=") for m in args.mappings)
                },
                inplace=True,
            )

        prop_keys = [col for col in df.columns.values if col.startswith("P:")]

        identifiers = [
            col for col in df.columns.values if col in args.identifiers
        ]

        # Identifiers have to be unique
        df = df.drop_duplicates(identifiers)

        def make_identifiers(row):
            return {
                identifier: api.models.InstrumentIdValue(row[identifier])
                for identifier in identifiers if pd.notna(row[identifier])
            }

        def make_properties(row):
            return [
                api.models.ModelProperty(key[2:],
                                         api.models.PropertyValue(row[key]))
                for key in prop_keys if pd.notna(row[key])
            ]

        def success(r):
            df = lpt.to_df([err[1] for err in r.content.failed.items()],
                           ["id", "detail"])
            df.columns = ["FAILED-INSTRUMENT", "ERROR"]
            return lpt.trim_df(df, args.limit, sort="FAILED-INSTRUMENT")

        has_lookthrough = LT_SCOPE in df.columns.values

        requests = [
            api.models.InstrumentDefinition(
                row["name"],
                make_identifiers(row),
                make_properties(row),
                api.models.ResourceId(row[LT_SCOPE], row[LT_CODE]) if
                (has_lookthrough and pd.notna(row[LT_SCOPE])) else None,
            ) for idx, row in df.iterrows()
        ]

        # Convert valid requests to dictionary
        def make_key(r):
            sec_id = list(r.identifiers.items())[0]
            return "{}:{}".format(sec_id[0], sec_id[1].value)

        requests = {
            make_key(r): r
            for r in requests if len(r.identifiers.keys()) > 0
        }

        if args.test:
            lpt.display_df(df[identifiers + prop_keys + ["name"]])
            print(requests)
            exit()

        return api.call.upsert_instruments(instruments=requests).bind(success)
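The --mappings handling above turns "ALIAS=source_column" pairs into a column-rename dict, resolving known aliases through the table defined at the top of the function. A standalone sketch with fabricated mapping strings:

# Fabricated mapping strings; the alias table mirrors the one defined above.
aliases = {
    "CINT": "ClientInternal",
    "FIGI": "Figi",
    "RIC": "P:Instrument/default/RIC",
    "TICKER": "P:Instrument/default/Ticker",
    "ISIN": "P:Instrument/default/Isin",
}

mappings = ["FIGI=figi_code", "ISIN=isin_code", "P:Custom/default/Rating=rating"]
rename = {
    source: aliases.get(target, target)
    for target, source in (m.split("=") for m in mappings)
}
print(rename)
# {'figi_code': 'Figi', 'isin_code': 'P:Instrument/default/Isin',
#  'rating': 'P:Custom/default/Rating'}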