async def test_select_reflects_archive_events(sandbox):
    """Verify ACS queries made inside on-created callbacks reflect archive events.

    Publishes ``notification_count`` OperatorNotification contracts, archives
    each as it appears, and checks that the per-callback ACS snapshots sum to
    ``notification_count ** 2`` while the final ACS is empty.
    """
    notification_count = 3

    # we expect that, upon each on_created notification of an OperatorNotification contract,
    # when we query the ACS, we get precisely the same number of contracts.
    expected_select_count = notification_count * notification_count
    actual_select_count = 0

    def on_notification_contract(event):
        nonlocal actual_select_count
        actual_select_count += len(event.acs_find_active(OperatorNotification))

    async with async_network(url=sandbox, dars=Simple) as network:
        client = network.aio_new_party()
        client.add_ledger_ready(
            lambda e: create(OperatorRole, {'operator': client.party}))
        client.add_ledger_created(
            OperatorRole,
            # was a hard-coded 3; keep in sync with notification_count so the
            # expected_select_count formula above remains valid if it changes
            lambda e: exercise(e.cid, 'PublishMany',
                               dict(count=notification_count)))
        client.add_ledger_created(OperatorNotification,
                                  lambda e: exercise(e.cid, 'Archive'))
        client.add_ledger_created(OperatorNotification,
                                  on_notification_contract)

        network.start()

        final_select_count = len(client.find_active(OperatorNotification))

    # Every notification was archived, so the closing ACS must be empty.
    assert actual_select_count == expected_select_count
    assert 0 == final_select_count
# Example #2
 async def archive_stale_messages(event):  # pylint: disable=unused-variable
     """Archive the user's Message contracts older than their configured threshold.

     Triggered by an ArchiveMessagesRequest contract; the request itself is
     always archived, even when message archival fails (best-effort).
     """
     logging.info(f'On {CHAT.ArchiveMessagesRequest} created!')
     try:
         (_,
          settings_cdata) = await client.find_one(CHAT.UserSettings,
                                                  {'user': client.party})
         # Messages posted before this wall-clock threshold count as stale.
         time_thresh = datetime.datetime.now() - datetime.timedelta(
             days=settings_cdata['archiveMessagesAfter'])
         thresh_seconds = (time_thresh - EPOCH).total_seconds()
         logging.info(
             f"time_thresh: {time_thresh}, thresh_seconds: {thresh_seconds}"
         )
         user_messages = client.find_active(CHAT.Message,
                                            {'sender': client.party})
         commands = [
             exercise(cid, 'Archive')
             for (cid, cdata) in user_messages.items()
             if int(cdata['postedAt']) < thresh_seconds
         ]
         logging.info(f"Will archive {len(commands)} message(s)")
         commands.append(exercise(event.cid, 'Archive'))
         await client.submit(commands)
     except Exception:  # was a bare except: let SystemExit/KeyboardInterrupt propagate
         # Log with traceback, then still consume the request contract.
         logging.exception("Could not archive stale messages")
         await client.submit_exercise(event.cid, 'Archive')
# Example #3
    async def handle_execution_report(event):
        """Turn an execution report into Order_Fill exercises for both sides.

        Archives the report contract and fills both the taker's and the
        maker's orders with the executed quantity/price.
        """
        report = event.cdata

        taker_cid, taker = await client.find_one(
            MARKETPLACE.Order, {'orderId': report['takerMpOrderId']})
        maker_cid, maker = await client.find_one(
            MARKETPLACE.Order, {'orderId': report['makerMpOrderId']})

        def fill(cid, counter_order):
            # Build one Order_Fill exercise against the order at `cid`,
            # cross-referencing the opposite side's order.
            return exercise(cid, 'Order_Fill', {
                'fillQty': report['executedQuantity'],
                'fillPrice': report['executedPrice'],
                'counterOrderId': counter_order['orderId'],
                'counterParty': counter_order['exchParticipant'],
                'timestamp': report['eventTimestamp'],
            })

        return [
            exercise(event.cid, 'Archive', {}),
            fill(taker_cid, maker),
            fill(maker_cid, taker),
        ]
 async def on_account_request(event):
     """Create an account numbered by the Counter contract, then bump the counter."""
     counter_cid, counter_cdata = await event.acs_find_one(Counter)
     account_cmd = exercise(event.cid, "CreateAccount",
                            dict(accountId=counter_cdata["value"]))
     increment_cmd = exercise(counter_cid, "Increment")
     return [account_cmd, increment_cmd]
# Example #5
    def on_initiate_micro_deposits_request(event):
        """Initiate Dwolla micro-deposits for the funding source on the request.

        Exercises the ACCEPT choice with the created deposit details on
        success, or the REJECT choice if Dwolla returns an error status.
        """
        # presumably cdata['fundingSourceId'] is a Dwolla funding-source id — TODO confirm
        funding_source_url = f"{dwolla_hostname}/funding-sources/{event.cdata['fundingSourceId']}"
        # Side effect: asks Dwolla to send micro-deposits to the account.
        new_micro_deposits_resp = app_token.post(
            f"{funding_source_url}/micro-deposits")
        LOG.debug(
            f"new_micro_deposits_resp status: {new_micro_deposits_resp.status}, "
            f"headers: {new_micro_deposits_resp.headers}, "
            f"body: {new_micro_deposits_resp.body}")

        if new_micro_deposits_resp.status >= 400:
            LOG.error(
                f"Could not initiate Micro Deposits. response: {new_micro_deposits_resp}"
            )
            return exercise(event.cid,
                            const.C_INITIATE_MICRO_DEPOSITS_REQUEST_REJECT, {})

        # Re-fetch the micro-deposits resource to get its creation time/status.
        micro_deposits_resp = app_token.get(
            f"{funding_source_url}/micro-deposits")
        LOG.debug(f"micro_deposits_resp status: {micro_deposits_resp.status}, "
                  f"headers: {micro_deposits_resp.headers}, "
                  f"body: {micro_deposits_resp.body}")

        micro_deposits = micro_deposits_resp.body

        return exercise(
            event.cid, const.C_INITIATE_MICRO_DEPOSITS_REQUEST_ACCEPT, {
                'created':
                # timestamp parsed as ISO-8601 with fractional seconds and 'Z'
                datetime.strptime(micro_deposits['created'],
                                  "%Y-%m-%dT%H:%M:%S.%fZ"),
                'status':
                micro_deposits['status'],
                'optFailure':
                # a missing 'failure' key maps to None (optional field)
                micro_deposits.get('failure', None)
            })
 async def on_account_request(event):
     """Allocate the next account id from the Counter, then increment it."""
     counter_cid, counter_cdata = await event.acs_find_one(Counter)
     next_id = counter_cdata['value']
     return [
         exercise(event.cid, 'CreateAccount', dict(accountId=next_id)),
         exercise(counter_cid, 'Increment'),
     ]
# Example #7
    def on_funding_source_verification_request(event):
        """Verify a Dwolla funding source by submitting two micro-deposit amounts.

        Exercises the ACCEPT choice when Dwolla confirms the amounts, or the
        REJECT choice otherwise.
        """
        cdata = event.cdata
        LOG.debug(f"FUNDING_SOURCE_VERIFICATION_REQUEST cdata: {cdata}")
        # Dwolla expects each amount as a {value, currency} pair.
        request_body = {
            'amount1': {
                'value': cdata['amount1']['value'],
                'currency': cdata['amount1']['currency']
            },
            'amount2': {
                'value': cdata['amount2']['value'],
                'currency': cdata['amount2']['currency']
            }
        }

        funding_source_url = f"{dwolla_hostname}/funding-sources/{cdata['fundingSourceId']}"
        # Side effect: posts the amounts for verification against the pending deposits.
        verify_micro_deposits_resp = app_token.post(
            '%s/micro-deposits' % funding_source_url, request_body)
        LOG.debug(
            f"verify_micro_deposits_resp status: {verify_micro_deposits_resp.status}, "
            f"headers: {verify_micro_deposits_resp.headers}, "
            f"body: {verify_micro_deposits_resp.body}")

        # NOTE(review): only 200 counts as success here, while sibling handlers
        # treat anything < 400 as success — confirm this difference is intended.
        if verify_micro_deposits_resp.status != 200:
            LOG.error(
                f"Could not verify micro deposits. response: {verify_micro_deposits_resp}"
            )
            return exercise(event.cid,
                            const.C_FUNDING_SOURCE_VERIFICATION_REQUEST_REJECT,
                            {})

        return exercise(event.cid,
                        const.C_FUNDING_SOURCE_VERIFICATION_REQUEST_ACCEPT, {})
    def test_select_reflects_archive_events(self):
        """ACS queries made inside on-created callbacks must reflect archives."""
        notification_count = 3

        # Upon each on_created notification of an OperatorNotification
        # contract, querying the ACS should yield exactly that many contracts.
        expected_select_count = notification_count * notification_count
        actual_select_count = 0

        def on_notification_contract(event):
            nonlocal actual_select_count
            active = event.acs_find_active(OperatorNotification)
            actual_select_count += len(active)

        with sandbox(Simple) as proc:
            network = Network()
            network.set_config(url=proc.url)

            party_client = network.aio_party(PARTY)
            party_client.add_ledger_ready(
                lambda e: create(OperatorRole, {'operator': PARTY}))
            party_client.add_ledger_created(
                OperatorRole,
                lambda e: exercise(e.cid, 'PublishMany', dict(count=3)))
            party_client.add_ledger_created(
                OperatorNotification,
                lambda e: exercise(e.cid, 'Archive'))
            party_client.add_ledger_created(
                OperatorNotification, on_notification_contract)
            network.run_until_complete()

            final_select_count = len(
                party_client.find_active(OperatorNotification))

        # Every notification was archived, so the final ACS must be empty.
        self.assertEqual(actual_select_count, expected_select_count)
        self.assertEqual(0, final_select_count)
    def test_select_reflects_archive_events(self):
        """ACS selects made inside on_created callbacks must reflect archives."""
        notification_count = 3

        # Upon each on_created notification of an OperatorNotification
        # contract, selecting from the ACS should yield exactly that many
        # contracts.
        expected_select_count = notification_count * notification_count
        actual_select_count = 0

        def on_notification_contract(_, __):
            nonlocal actual_select_count
            active = party_client.select(OperatorNotification)
            actual_select_count += len(active)

        with sandbox(DAML_FILE) as proc:
            with create_client(participant_url=proc.url,
                               parties=[PARTY]) as client:
                party_client = client.client(PARTY)
                party_client.on_ready(lambda *args, **kwargs: create(
                    OperatorRole, {'operator': PARTY}))
                party_client.on_created(
                    OperatorRole,
                    lambda cid, cdata: exercise(cid, 'PublishMany',
                                                dict(count=3)))
                party_client.on_created(
                    OperatorNotification,
                    lambda cid, _: exercise(cid, 'Archive'))
                party_client.on_created(OperatorNotification,
                                        on_notification_contract)
                client.run_until_complete()

                final_select_count = len(
                    party_client.select(OperatorNotification))

        # Every notification was archived, so the final ACS must be empty.
        self.assertEqual(actual_select_count, expected_select_count)
        self.assertEqual(0, final_select_count)
    async def handle_new_order_success(event):
        """Acknowledge the originating (cleared or regular) order request and
        archive the success event."""
        logging.info(f"Handling NewOrderSuccess")

        sid = event.cdata['sid']
        order = sid_to_order.pop(sid)

        # Remember clearing status for later handlers keyed by this sid.
        sid_is_cleared[sid] = order['isCleared']

        query = {'order': order}
        archive_cmd = exercise(event.cid, 'Archive', {})

        if order['isCleared']:
            logging.info(f"Acknowledging Cleared Order Success")
            req_cid, _ = await client.find_one(
                MARKETPLACE.ClearedOrderRequest, query)
            ack_cmd = exercise(req_cid, 'ClearedOrderRequest_Ack',
                               {'orderId': sid})
        else:
            logging.info(f"Acknowledging Order Success")
            req_cid, _ = await client.find_one(MARKETPLACE.OrderRequest, query)
            ack_cmd = exercise(req_cid, 'OrderRequest_Ack', {'orderId': sid})

        return [ack_cmd, archive_cmd]
# Example #11
    async def handle_create_instrument_request(event):
        """Create an Exberry instrument from a CreateInstrumentRequest contract.

        Falls back to the exchange's first calendar when the requested
        calendarId does not exist, then reports the outcome back by
        exercising the Success/Failure choice on the request contract.
        """
        LOG.info(f"{EXBERRY.CreateInstrumentRequest} created!")
        instrument = event.cdata

        LOG.info('Creating instrument...')
        data_dict = Endpoints.make_create_instrument_req(instrument)

        # check if calendarId exists
        requested_calendar_id = data_dict['calendarId']
        calendar_resp = await integration.get_admin(
            {}, f"{Endpoints.Calendars}/{requested_calendar_id}")
        if 'code' in calendar_resp and calendar_resp[
                'code'] == 10005:  # calendarId not found error
            LOG.info('calendarId not found, using default...')
            # Fall back to the first calendar returned by the admin API.
            calendars = await integration.get_admin({}, Endpoints.Calendars)
            data_dict['calendarId'] = calendars[0]['id']

        json_resp = await integration.post_admin(data_dict,
                                                 Endpoints.Instruments)
        # presumably a 'data' key marks an error payload and 'id' marks a
        # created instrument — TODO confirm against the Exberry admin API
        if 'data' in json_resp:
            LOG.error(f'Error in instrument creation: {json_resp}')
            return exercise(
                event.cid, 'CreateInstrumentRequest_Failure', {
                    'message': json_resp['message'],
                    'name': json_resp['data'],
                    'code': json_resp['code']
                })
        elif 'id' in json_resp:
            # resubscribe so the integration picks up the new instrument — TODO confirm
            await integration.resubscribe()
            return exercise(event.cid, 'CreateInstrumentRequest_Success',
                            {'instrumentId': json_resp['id']})
        else:
            # Implicitly returns None: no command is issued for unknown responses.
            LOG.warning(f"Unknown response ¯\\_(ツ)_/¯ : {json_resp}")
# Example #12
    async def handle_new_order_failure(event):
        """Reject the originating order request and archive the failure event."""
        order = sid_to_order.pop(event.cdata['sid'])

        req_cid, _ = await client.find_one(MARKETPLACE.OrderRequest,
                                           {'order': order})

        return [
            exercise(req_cid, 'OrderRequest_Reject', {}),
            exercise(event.cid, 'Archive', {}),
        ]
# Example #13
    def on_verified_customer_request(event):
        """Create a Dwolla verified personal customer from the request contract.

        Exercises the ACCEPT choice with the new customer's id/created/status
        on success, or the REJECT choice when Dwolla returns an error status.
        """
        cdata = event.cdata
        LOG.debug(f"VERIFIED_CUSTOMER_REQUEST cdata: {cdata}")
        request_body = {
            'firstName':
            cdata['firstName'],
            'lastName':
            cdata['lastName'],
            'email':
            cdata['email'],
            'type':
            'personal',
            'address1':
            cdata['address1'],
            'address2':
            # address2 is optional on the contract; Dwolla gets '' when absent
            cdata['optAddress2'] if cdata['optAddress2'] is not None else '',
            'city':
            cdata['city'],
            'state':
            cdata['state'],
            'postalCode':
            cdata['postalCode'],
            'dateOfBirth':
            # contract stores a date object; Dwolla expects YYYY-MM-DD
            datetime.strftime(cdata['dateOfBirth'], "%Y-%m-%d"),
            'ssn':
            cdata['ssn']
        }

        # Side effect: creates the customer; its URL comes back in 'location'.
        new_customer_resp = app_token.post('customers', request_body)
        LOG.debug(f"new_customer_resp status: {new_customer_resp.status}, "
                  f"headers: {new_customer_resp.headers}, "
                  f"body: {new_customer_resp.body}")

        if new_customer_resp.status >= 400:
            LOG.error(
                f"Could not create Verified Customer. response: {new_customer_resp}"
            )
            return exercise(event.cid,
                            const.C_VERIFIED_CUSTOMER_REQUEST_REJECT, {})

        # Fetch the created customer to report its id/created/status back.
        customer_resp = app_token.get(new_customer_resp.headers['location'])
        LOG.debug(f"customer status: {customer_resp.status}, "
                  f"headers: {customer_resp.headers}, "
                  f"body: {customer_resp.body}")

        customer = customer_resp.body

        return exercise(
            event.cid, const.C_VERIFIED_CUSTOMER_REQUEST_ACCEPT, {
                'customerId':
                customer['id'],
                'created':
                # timestamp parsed as ISO-8601 with fractional seconds and 'Z'
                datetime.strptime(customer['created'],
                                  "%Y-%m-%dT%H:%M:%S.%fZ"),
                'status':
                customer['status']
            })
def _create_complicated_notifications(e) -> list:
    """Exercise PublishFormula once per representative formula shape."""
    formulas = (
        {'Tautology': {}},
        {'Contradiction': {}},
        {'Proposition': 'something'},
        {'Conjunction': [{'Proposition': 'something_else'}]},
    )
    return [
        exercise(e.cid, 'PublishFormula', dict(formula=f)) for f in formulas
    ]
# Example #15
def _create_complicated_notifications(e) -> list:
    """Exercise PublishFormula once for each representative formula shape."""
    formulas = (
        {"Tautology": {}},
        {"Contradiction": {}},
        {"Proposition": "something"},
        {"Conjunction": [{"Proposition": "something_else"}]},
    )
    return [exercise(e.cid, "PublishFormula", dict(formula=f))
            for f in formulas]
 def __(event):
     """Invite a fixed set of parties to participate."""
     commands = []
     for party in ('Party A', 'Party B', 'Party C'):
         commands.append(exercise(event.cid, 'InviteParticipant', {
             'party': party,
             'address': 'whatevs',
         }))
     return commands
    def test_threadsafe_methods(self):
        """Smoke-test the blocking (thread-safe) client API end to end."""
        with sandbox(DAML_FILE) as proc:
            with simple_client(proc.url, PARTY) as client:
                client.ready()
                client.submit_create(OperatorRole, {'operator': PARTY})

                operator_cid, _ = client.find_one(OperatorRole)

                client.submit_exercise(operator_cid, 'PublishMany',
                                       dict(count=5))

                notifications = client.find_nonempty(OperatorNotification,
                                                     {'operator': PARTY},
                                                     min_count=5)
                # Archive the notifications whose text is 3 or lower.
                stale = [cid for cid, cdata in notifications.items()
                         if int(cdata['text']) <= 3]

                client.submit([exercise(cid, 'Archive') for cid in stale])

                client.submit_exercise(operator_cid, 'PublishMany',
                                       dict(count=3))

                print(client.find_active('*'))
def test_threadsafe_methods(sandbox):
    """Exercise the blocking client API end to end against a sandbox."""
    party = blocking_setup(sandbox, Simple)

    with simple_client(url=sandbox, party=party) as client:
        client.ready()
        client.submit_create(OperatorRole, {"operator": party})

        operator_cid, _ = client.find_one(OperatorRole)

        client.submit_exercise(operator_cid, "PublishMany", dict(count=5))

        notifications = client.find_nonempty(OperatorNotification,
                                             {"operator": party},
                                             min_count=5)
        # Archive the notifications whose text is 3 or lower.
        stale_cids = [cid for cid, cdata in notifications.items()
                      if int(cdata["text"]) <= 3]

        client.submit([exercise(cid, "Archive") for cid in stale_cids])

        client.submit_exercise(operator_cid, "PublishMany", dict(count=3))

        print(client.find_active("*"))
# Example #19
async def async_test_case(client: AIOPartyClient):
    """Exercise the async client API, deliberately not awaiting submissions.

    Submissions are fired via ensure_future so that find_one/find_nonempty
    are exercised while commands are still in flight.
    """
    await client.ready()

    # deliberately not awaited: find_one below must cope with the in-flight create
    ensure_future(
        client.submit_create(OperatorRole, {'operator': client.party}))

    operator_cid, _ = await client.find_one(OperatorRole)

    ensure_future(
        client.submit_exercise(operator_cid, 'PublishMany', dict(count=5)))

    # this should actually be a no-op; we're just making sure that calls to ready() that are
    # "too late" are not treated strangely
    await wait_for(client.ready(), timeout=0.1)

    # blocks until at least five notifications are visible in the ACS
    notifications = await client.find_nonempty(OperatorNotification,
                                               {'operator': client.party},
                                               min_count=5)
    contracts_to_delete = []
    for cid, cdata in notifications.items():
        if int(cdata['text']) <= 3:
            contracts_to_delete.append(cid)

    ensure_future(
        client.submit(
            [exercise(cid, 'Archive') for cid in contracts_to_delete]))

    ensure_future(
        client.submit_exercise(operator_cid, 'PublishMany', dict(count=3)))
    async def main(self):
        """Run both client managers and verify that ledger time set by the
        first client is observed by the second before it submits."""
        # begin both clients
        m1 = ensure_future(self.manager1.aio_run())
        m2 = ensure_future(self.manager2.aio_run())

        # wait for the first manager to shut itself down; this will have happened in
        # the _handle_postman_role_1 callback
        await gather(m1, self.manager2.aio_party(PARTY).ready())

        # now ensure that the second client's time is in sync with the Sandbox
        await self.manager2.aio_global().get_time()
        # TODO: Come up with a better signal to be ABSOLUTELY sure that the second client is
        #  "caught up" with the current time
        from asyncio import sleep
        await sleep(1.0)

        # this call can only succeed if the second client knows of the time as it was set by the
        # first party
        await self.manager2.aio_party(PARTY).submit(
            exercise(self.postman_cid, 'InviteParticipant',
                     dict(party=PARTY, address='something')))

        # shut down the second client
        self.manager2.shutdown()

        # wait for it to stop cleanly
        await m2
# Example #21
 def operator_role_created(event):
     """Invite each of the three known parties to participate."""
     invitees = (party_a_client.party, party_b_client.party, party_c_party)
     return [
         exercise(event.cid, 'InviteParticipant',
                  {'party': party, 'address': 'whatevs'})
         for party in invitees
     ]
def run_test(url, keepalive=False):
    """Drive the postman/member letter-delivery workflow against a ledger.

    Args:
        url: ledger connection URL.
        keepalive: when True, run the network forever instead of stopping
            once all outstanding work completes.
    """
    start_time = datetime.now()

    members = [dict(party=f'Member {i}', address=address(i)) for i in range(0, MEMBER_PARTY_COUNT)]

    network = Network()
    network.set_config(url=url, server_port=8105)

    postman_client = network.aio_party(POSTMAN_PARTY)
    member_clients = [network.aio_party(m['party']) for m in members]

    post_office = PostOffice()

    # The postman bootstraps the workflow and routes letters as they appear.
    postman_client.add_ledger_ready(lambda event: create('Main.PostmanRole', dict(postman=POSTMAN_PARTY)))
    postman_client.add_ledger_created('Main.PostmanRole', lambda event: [exercise(event.cid, 'InviteParticipant', m) for m in members])
    postman_client.add_ledger_created('Main.UnsortedLetter', post_office.sort_and_deliver_letter)
    postman_client.add_ledger_created('Main.ReceiverRole', post_office.register_address)
    postman_client.add_ledger_created('Main.SortedLetter', lambda event: exercise(event.cid, 'Deliver'))

    @postman_client.ledger_exercised('Main.PostmanRole', 'InviteParticipant')
    def log(event):
        LOG.info('Observing the exercise of an InviteParticipant: %s', event)

    for member_client in member_clients:
        bot = PartyBot(member_client.party)
        # every member automatically accepts
        member_client.add_ledger_created('Main.InviteAuthorRole', lambda event: exercise(event.cid, 'AcceptInviteAuthorRole'))
        member_client.add_ledger_created('Main.InviteReceiverRole', lambda event: exercise(event.cid, 'AcceptInviteReceiverRole'))
        # every member, upon joining, sends messages to five of their best friends
        member_client.add_ledger_created('Main.AuthorRole', bot.send_to_five_friends)

    try:
        if not keepalive:
            network.run_until_complete()
        else:
            network.run_forever()
        ledger_run_failed = False
    except Exception:  # was a bare except: let KeyboardInterrupt/SystemExit propagate
        LOG.exception('The run failed!')
        ledger_run_failed = True

    # Display everything, including archived contracts if we terminated for an unexpected reason.
    write_acs(sys.stdout, network, include_archived=ledger_run_failed)

    # NOTE(review): reaches into PostOffice's private counter — consider a public accessor.
    LOG.info('%s letters were received before the recipients were known.', post_office._unsorted_letter_count)
    LOG.info('Test finished in %s.', datetime.now() - start_time)
# Example #23
 async def handle_cancel_order_success(event):
     """Acknowledge the matching cancel request, then archive this event."""
     ack = exercise_by_key(MARKETPLACE.CancelOrderRequest,
                           {'_1': client.party, '_2': event.cdata['sid']},
                           'AcknowledgeCancel', {})
     return [ack, exercise(event.cid, 'Archive', {})]
# Example #24
    def on_unverified_funding_source_request(event):
        """Create an unverified Dwolla funding source for a customer.

        Exercises the ACCEPT choice with the new funding source's details on
        success, or the REJECT choice when Dwolla returns an error status.
        """
        cdata = event.cdata
        LOG.debug(f"UNVERIFIED_FUNDING_SOURCE_REQUEST cdata: {cdata}")
        request_body = {
            'routingNumber': cdata['routingNumber'],
            'accountNumber': cdata['accountNumber'],
            'bankAccountType': cdata['bankAccountType'],
            'name': cdata['name']
        }

        customer_url = f"{dwolla_hostname}/customers/{cdata['customerId']}"
        # Side effect: creates the funding source; its URL comes back in 'location'.
        new_funding_source_resp = app_token.post(
            f"{customer_url}/funding-sources", request_body)
        LOG.debug(
            f"new_funding_source_resp status: {new_funding_source_resp.status}, "
            f"headers: {new_funding_source_resp.headers}, "
            f"body: {new_funding_source_resp.body}")

        if new_funding_source_resp.status >= 400:
            LOG.error(
                f"Could not create Unverified Funding Source. response: {new_funding_source_resp}"
            )
            return exercise(event.cid,
                            const.C_UNVERIFIED_FUNDING_SOURCE_REQUEST_REJECT,
                            {})

        # Fetch the created funding source to report its details back.
        funding_source_resp = app_token.get(
            new_funding_source_resp.headers['location'])
        LOG.debug(f"funding_source_resp status: {funding_source_resp.status}, "
                  f"headers: {funding_source_resp.headers}, "
                  f"body: {funding_source_resp.body}")

        funding_source = funding_source_resp.body

        return exercise(
            event.cid, const.C_UNVERIFIED_FUNDING_SOURCE_REQUEST_ACCEPT, {
                'fundingSourceId':
                funding_source['id'],
                'created':
                # timestamp parsed as ISO-8601 with fractional seconds and 'Z'
                datetime.strptime(funding_source['created'],
                                  "%Y-%m-%dT%H:%M:%S.%fZ"),
                'channels':
                funding_source['channels'],
                'bankName':
                funding_source['bankName']
            })
# Example #25
async def transition_to_ticket_transactions_in_progress(cid, cdata):
    """Record the workflow contract and move it into the
    ticket-transactions-in-progress state."""
    # Wait until both role contracts are known to the store.
    await contract_store.find('ticketSellerRole1')
    await contract_store.find('ticketBuyerRole1')
    contract_store.save('workflowTicketTransactionsInProgress', cid, cdata)
    return [exercise(cid, 'TicketTransactionsInProgress', {})]
# Example #26
 def say_hello(event):
     """Adopt the latest ExberrySID as the current SID and ack all SID contracts."""
     global SID
     logging.info("DA Marketplace <> Exberry adapter is ready!")
     sids = client.find_active(MARKETPLACE.ExberrySID)
     for item in sids.values():
         SID = item['sid']
         logging.info(f'Changed current SID to {SID}')
     return [exercise(cid, 'ExberrySID_Ack') for cid in sids.keys()]
# Example #27
    def invite_users(event):  # pylint: disable=unused-variable
        """Acknowledge every pending UserSession contract."""
        logging.info(f'On PollApp created!')
        user_sessions = client.find_active("Main.UserSession")
        logging.info(f'found {len(user_sessions)} UserSession contracts')

        return [exercise(cid, 'UserSessionAck')
                for cid in user_sessions]
 async def _handle_postman_role(event):
     """Rewind ledger time to a known epoch, then invite a participant."""
     try:
         LOG.info('Received the postman role contract.')
         await network.aio_global().set_time(datetime(1980, 1, 1))
         return exercise(event.cid, 'InviteParticipant',
                         dict(party=PARTY, address='something'))
     except BaseException as ex:
         # Deliberately broad: surface any failure in this callback in the log.
         LOG.exception(ex)
 def operator_role_created(event):
     """Invite each of the three known parties to participate."""
     invitees = (party_a_client.party, party_b_client.party, party_c_party)
     return [
         exercise(event.cid, "InviteParticipant",
                  {"party": party, "address": "whatevs"})
         for party in invitees
     ]
# Example #30
    def invite_users(event):  # pylint: disable=unused-variable
        """Acknowledge every pending chat UserSession contract."""
        logging.info(f'On {CHAT.Operator} created!')
        user_sessions = client.find_active(CHAT.UserSession)
        logging.info(
            f'found {len(user_sessions)} {CHAT.UserSession} contracts')

        return [exercise(cid, 'UserSessionAck')
                for cid in user_sessions]