    def test_parameter_error(self):
        # Parameter error
        args = self.parser.parse_args([])

        f = StringIO()
        with capture_logs():
            with redirect_stdout(f):
                execute(args)

        # Split the printed output into a list of lines
        output = f.getvalue().strip().splitlines()

        self.assertEqual(
            'The command expects raw_tx or hash and url as parameters',
            output[0])

    def test_struct_error(self):
        # Struct error
        tx_hex = self.tx.get_struct().hex()
        params = ['--raw_tx', tx_hex + 'aa']
        args = self.parser.parse_args(params)

        f = StringIO()
        with capture_logs():
            with redirect_stdout(f):
                execute(args)

        # Split the printed output into a list of lines
        output = f.getvalue().strip().splitlines()

        self.assertEqual('Error getting transaction from bytes', output[0])
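The two tests above rely on the same plumbing that recurs in the Hathor CLI examples further down: capture_logs() keeps log output out of the way while redirect_stdout collects what the command prints. A minimal sketch of just that pattern, with execute and args standing in for any CLI entry point (the helper name run_cli is assumed, not part of the project):

from contextlib import redirect_stdout
from io import StringIO

from structlog.testing import capture_logs


def run_cli(execute, args):
    """Run a CLI entry point, discarding log noise and returning printed lines."""
    f = StringIO()
    with capture_logs():  # swallow log events emitted during the run
        with redirect_stdout(f):  # collect print() output instead of displaying it
            execute(args)
    return f.getvalue().strip().splitlines()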
Example #3
def test_post_pubsub_token_unknown_key(dbsession, anon_client,
                                       test_pubsub_settings):
    """An error is returned if the jwt 'kid' doesn't refer to a known cert."""
    with patch("ctms.app.get_claim_from_pubsub_token") as mock_get:
        mock_get.side_effect = ValueError(
            "Certificate for key id the_kid_value not found.")
        with capture_logs() as caplog:
            resp = anon_client.post(
                "/stripe_from_pubsub?pubsub_client=a_shared_secret",
                headers={"Authorization": "Bearer a_fake_token"},
                json=pubsub_wrap(stripe_customer_data()),
            )
    assert resp.status_code == 401
    assert resp.json() == {"detail": "Could not validate credentials"}
    assert len(caplog) == 1
    assert caplog[0]["auth_fail"] == "Unknown key"
Example #4
def test_put_create_with_trace(client, dbsession):
    """PUT traces new contacts by email address"""
    email_id = str(uuid4())
    data = {
        "email": {
            "email_id": email_id,
            "primary_email": "*****@*****.**",
        }
    }
    with capture_logs() as caplogs:
        resp = client.put(f"/ctms/{email_id}", json=data)
    assert resp.status_code == 201
    assert len(caplogs) == 1
    assert caplogs[0]["trace"] == "*****@*****.**"
    assert caplogs[0]["trace_json"] == data
Example #5
    def up_with_logs(self) -> list[str]:
        try:
            all_logs = []
            for reconcile_key in self.reconcile_order:
                if reconcile_key in self.outpost.config.kubernetes_disabled_components:
                    all_logs += [f"{reconcile_key.title()}: Disabled"]
                    continue
                # Capture each reconciler's log events so they can be
                # returned to the caller as human-readable status lines.
                with capture_logs() as logs:
                    reconciler = self.reconcilers[reconcile_key](self)
                    reconciler.up()
                all_logs += [
                    f"{reconcile_key.title()}: {x['event']}" for x in logs
                ]
            return all_logs
        except (OpenApiException, HTTPError, ServiceConnectionInvalid) as exc:
            raise ControllerException(str(exc)) from exc
Example #6
def test_post_pubsub_token_fail_ssl_fetch(dbsession, anon_client,
                                          test_pubsub_settings):
    """An error is returned if there is an issue fetching certs."""
    with patch("ctms.app.get_claim_from_pubsub_token") as mock_get:
        mock_get.side_effect = TransportError(
            "Could not fetch certificates at https://example.com")
        with capture_logs() as caplog:
            resp = anon_client.post(
                "/stripe_from_pubsub?pubsub_client=a_shared_secret",
                headers={"Authorization": "Bearer a_fake_token"},
                json=pubsub_wrap(stripe_customer_data()),
            )
    assert resp.status_code == 401
    assert resp.json() == {"detail": "Could not validate credentials"}
    assert len(caplog) == 1
    assert caplog[0]["auth_fail"] == "Google authentication failure"
def test_configuration_without_consumers(
    db: MockFixture,
    setup: tuple[User, User, list[ExternalMessage]],
) -> None:
    user1, user2, messages_should_be_consumed = setup
    configuration = ConfigurationFactory(
        consumers=(ConsumerModelFactory(), ),
        users=(user1, user2),
    )
    with capture_logs() as cap_logs:
        configuration.run()
        assert {
            'configuration_id': configuration.id,
            'event': 'No producers or consumers, skipping...',
            'log_level': 'info',
        } in cap_logs
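Because capture_logs() collects plain dicts, the membership check above compares by value; a self-contained illustration of the same idiom (assuming capture_logs is structlog.testing.capture_logs, as in the other examples):

from structlog import get_logger
from structlog.testing import capture_logs

with capture_logs() as cap_logs:
    get_logger().info("No producers or consumers, skipping...", configuration_id=1)
assert {
    "configuration_id": 1,
    "event": "No producers or consumers, skipping...",
    "log_level": "info",
} in cap_logs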
Example #8
def test_post_pubsub_token(dbsession, anon_client, test_pubsub_settings):
    """A PubSub client can authenticate."""

    with patch("ctms.app.get_claim_from_pubsub_token") as mock_get:
        mock_get.return_value = SAMPLE_GCP_JWT_CLAIM
        with capture_logs() as caplog:
            resp = anon_client.post(
                "/stripe_from_pubsub?pubsub_client=a_shared_secret",
                headers={"Authorization": "Bearer gcp_generated_token"},
                json=pubsub_wrap(stripe_customer_data()),
            )
    assert resp.status_code == 200, resp.json()
    assert len(caplog) == 1
    log = caplog[0]
    assert "auth_fail" not in log
    assert log["client_allowed"]
    assert log["pubsub_email"] == "*****@*****.**"
Example #9
def test_get_ctms_with_expired_token_fails(example_contact, anon_client,
                                           test_token_settings,
                                           client_id_and_secret):
    """Calling an authenticated API with an expired token is an error"""
    yesterday = datetime.now(timezone.utc) - timedelta(days=1)
    client_id = client_id_and_secret[0]
    token = create_access_token({"sub": f"api_client:{client_id}"},
                                **test_token_settings,
                                now=yesterday)
    with capture_logs() as caplog:
        resp = anon_client.get(
            f"/ctms/{example_contact.email.email_id}",
            headers={"Authorization": f"Bearer {token}"},
        )
    assert resp.status_code == 401
    assert resp.json() == {"detail": "Could not validate credentials"}
    assert caplog[0]["auth_fail"] == "No or bad token"
def test_fill_fields_and_timeseries_from_column():
    # Timeseries in existing_df and new_df are merged together.
    existing_df = pd.read_csv(
        StringIO(
            "fips,state,aggregate_level,county,cnt,date,foo\n"
            "55005,ZZ,county,North County,1,2020-05-01,ab\n"
            "55005,ZZ,county,North County,2,2020-05-02,cd\n"
            "55005,ZZ,county,North County,,2020-05-03,ef\n"
            "55006,ZZ,county,South County,4,2020-05-04,gh\n"
            "55,ZZ,state,Grand State,41,2020-05-01,ij\n"
            "55,ZZ,state,Grand State,43,2020-05-03,kl\n"
        )
    )
    new_df = pd.read_csv(
        StringIO(
            "fips,state,aggregate_level,county,cnt,date\n"
            "55006,ZZ,county,South County,44,2020-05-04\n"
            "55007,ZZ,county,West County,28,2020-05-03\n"
            "55005,ZZ,county,North County,3,2020-05-03\n"
            "55,ZZ,state,Grand State,42,2020-05-02\n"
        )
    )

    with testing.capture_logs() as logs:
        log = get_logger()
        result = fill_fields_and_timeseries_from_column(
            log, existing_df, new_df, "fips state aggregate_level county".split(), "date", "cnt",
        )
    expected = pd.read_csv(
        StringIO(
            "fips,state,aggregate_level,county,cnt,date,foo\n"
            "55005,ZZ,county,North County,,2020-05-01,ab\n"
            "55005,ZZ,county,North County,,2020-05-02,cd\n"
            "55005,ZZ,county,North County,3,2020-05-03,ef\n"
            "55006,ZZ,county,South County,44,2020-05-04,gh\n"
            "55007,ZZ,county,West County,28,2020-05-03,\n"
            "55,ZZ,state,Grand State,,2020-05-01,ij\n"
            "55,ZZ,state,Grand State,42,2020-05-02,\n"
            "55,ZZ,state,Grand State,,2020-05-03,kl\n"
        )
    )
    assert to_dict(["fips", "date"], result) == to_dict(["fips", "date"], expected)
    assert one(logs)["event"] == "Duplicate timeseries data"
    assert one(logs)["log_level"] == "error"
    assert "55006" in repr(one(logs)["common_labels"])
    assert "55007" not in repr(one(logs)["common_labels"])
Example #11
def test_get_ctms_with_invalid_token_fails(example_contact, anon_client,
                                           test_token_settings,
                                           client_id_and_secret):
    """Calling an authenticated API with an invalid token is an error"""
    client_id = client_id_and_secret[0]
    token = create_access_token(
        {"sub": f"api_client:{client_id}"},
        secret_key="secret_key_from_other_deploy",
        expires_delta=test_token_settings["expires_delta"],
    )
    with capture_logs() as caplog:
        resp = anon_client.get(
            f"/ctms/{example_contact.email.email_id}",
            headers={"Authorization": f"Bearer {token}"},
        )
    assert resp.status_code == 401
    assert resp.json() == {"detail": "Could not validate credentials"}
    assert caplog[0]["auth_fail"] == "No or bad token"
Example #12
def test_token_request_log(anon_client, client_id_and_secret):
    """A token request log has omitted headers."""
    client_id, client_secret = client_id_and_secret
    with capture_logs() as cap_logs:
        resp = anon_client.post(
            "/token",
            {"grant_type": "client_credentials"},
            auth=HTTPBasicAuth(client_id, client_secret),
            cookies={"csrftoken": "0WzT-base64-string"},
        )
    assert resp.status_code == 200
    assert len(cap_logs) == 1
    log = cap_logs[0]
    assert log["client_id"] == client_id
    assert log["token_creds_from"] == "header"
    assert log["headers"]["authorization"] == "[OMITTED]"
    assert log["headers"]["content-length"] == "29"
    assert log["headers"]["cookie"] == "[OMITTED]"
Example #13
def test_api_post_conflicting_fxa_id(dbsession, client,
                                     contact_with_stripe_customer):
    """An existing customer with an FxA ID conflict is deleted."""
    data = stripe_customer_data()
    old_id = data["id"]
    new_id = old_id + "_new"
    data["id"] = new_id
    with capture_logs() as caplog:
        resp = client.post("/stripe", json=data)
    assert resp.status_code == 200
    assert resp.json() == {"status": "OK"}
    assert len(caplog) == 1
    log = caplog[0]
    assert log["ingest_actions"] == {
        "created": [f"customer:{new_id}"],
        "deleted": [f"customer:{old_id}"],
    }
    assert log["fxa_id_conflict"] == data["description"]
def test_fill_fields_with_data_source_timeseries():
    # Timeseries in existing_df and new_df are merged together.
    existing_df = pd.read_csv(
        StringIO(
            "fips,state,aggregate_level,county,cnt,date,foo\n"
            "55005,ZZ,county,North County,1,2020-05-01,ab\n"
            "55005,ZZ,county,North County,2,2020-05-02,cd\n"
            "55005,ZZ,county,North County,,2020-05-03,ef\n"
            "55006,ZZ,county,South County,4,2020-05-04,gh\n"
            "55,ZZ,state,Grand State,41,2020-05-01,ij\n"
            "55,ZZ,state,Grand State,43,2020-05-03,kl\n"
        )
    )
    new_df = pd.read_csv(
        StringIO(
            "fips,state,aggregate_level,county,cnt,date\n"
            "55006,ZZ,county,South County,44,2020-05-04\n"
            "55007,ZZ,county,West County,28,2020-05-03\n"
            "55005,ZZ,county,North County,3,2020-05-03\n"
            "55,ZZ,state,Grand State,42,2020-05-02\n"
        )
    )

    with testing.capture_logs() as logs:
        log = get_logger()
        result = fill_fields_with_data_source(
            log, existing_df, new_df, "fips state aggregate_level county date".split(), ["cnt"],
        )
    expected = pd.read_csv(
        StringIO(
            "fips,state,aggregate_level,county,cnt,date,foo\n"
            "55005,ZZ,county,North County,1,2020-05-01,ab\n"
            "55005,ZZ,county,North County,2,2020-05-02,cd\n"
            "55005,ZZ,county,North County,3,2020-05-03,ef\n"
            "55006,ZZ,county,South County,44,2020-05-04,gh\n"
            "55007,ZZ,county,West County,28,2020-05-03,\n"
            "55,ZZ,state,Grand State,41,2020-05-01,ij\n"
            "55,ZZ,state,Grand State,42,2020-05-02,\n"
            "55,ZZ,state,Grand State,43,2020-05-03,kl\n"
        )
    )

    assert to_dict(["fips", "date"], result) == to_dict(["fips", "date"], expected)
    assert logs == []
Example #15
def test_get_ctms_with_tracing(client, dbsession):
    """The log parameter trace is set when a traced email is requested."""
    email_id = uuid4()
    email = "*****@*****.**"
    record = Email(
        email_id=email_id,
        primary_email=email,
        double_opt_in=False,
        email_format="T",
        has_opted_out_of_email=False,
    )
    dbsession.add(record)
    dbsession.commit()
    with capture_logs() as caplog:
        resp = client.get(f"/ctms/{email_id}")
    assert resp.status_code == 200
    assert len(caplog) == 1
    assert caplog[0]["trace"] == email
    assert "trace_json" not in caplog[0]
Example #16
def test_api_post_pubsub_deadlock_is_409(dbsession, pubsub_client):
    """A deadlock is turned into a 409 Conflict"""
    data = stripe_customer_data()
    err = OperationalError("INSERT INTO...", {"stripe_id": data["id"]},
                           "Deadlock")
    with capture_logs() as caplog, mock.patch.object(dbsession,
                                                     "commit",
                                                     side_effect=err):
        resp = pubsub_client.post("/stripe_from_pubsub",
                                  json=pubsub_wrap(data))
    assert resp.status_code == 409
    assert resp.json() == {"detail": "Deadlock or other issue, try again"}
    assert len(caplog) == 2
    assert caplog[0] == {
        "exc_info": True,
        "event": "OperationalError converted to 409",
        "log_level": "error",
    }
    assert caplog[1]["status_code"] == 409
Example #17
def test_log_omits_emails(client, maximal_contact):
    """The logger omits emails from query params."""
    email_id = str(maximal_contact.email.email_id)
    email = maximal_contact.email.primary_email
    fxa_email = maximal_contact.fxa.primary_email
    url = (
        f"/ctms?primary_email={email}&fxa_primary_email={fxa_email}"
        f"&email_id={email_id}"
    )
    with capture_logs() as cap_logs:
        resp = client.get(url)
    assert resp.status_code == 200
    assert len(cap_logs) == 1
    log = cap_logs[0]
    assert log["query"] == {
        "email_id": email_id,
        "fxa_primary_email": "[OMITTED]",
        "primary_email": "[OMITTED]",
    }
Example #18
def test_get_ctms_with_disabled_client_fails(dbsession, example_contact,
                                             anon_client, test_token_settings,
                                             client_id_and_secret):
    """Calling an authenticated API with a valid token for an expired client is an error."""
    client_id = client_id_and_secret[0]
    token = create_access_token({"sub": f"api_client:{client_id}"},
                                **test_token_settings)
    api_client = get_api_client_by_id(dbsession, client_id)
    api_client.enabled = False
    dbsession.commit()

    with capture_logs() as caplog:
        resp = anon_client.get(
            f"/ctms/{example_contact.email.email_id}",
            headers={"Authorization": f"Bearer {token}"},
        )
    assert resp.status_code == 400
    assert resp.json() == {"detail": "API Client has been disabled"}
    assert caplog[0]["auth_fail"] == "Client disabled"
Example #19
    def test_twin_different(self):
        server = run_server()

        # Unlock wallet to start mining
        request_server('wallet/unlock', 'POST', data={'passphrase': '123'})

        # Mining
        execute_mining(count=2)

        # Generating txs
        execute_tx_gen(count=4)

        response = request_server('transaction', 'GET', data={b'count': 4, b'type': 'tx'})
        tx = response['transactions'][-1]

        response = request_server('transaction', 'GET', data={b'id': tx['tx_id']})
        tx = response['tx']

        # Twin different weight and parents
        host = 'http://localhost:8085/{}/'.format(settings.API_VERSION_PREFIX)
        params = ['--url', host, '--hash', tx['hash'], '--parents', '--weight', '14']
        args = self.parser.parse_args(params)

        f = StringIO()
        with capture_logs():
            with redirect_stdout(f):
                execute(args)

        # Split the printed output into a list of lines
        output = f.getvalue().strip().splitlines()

        twin_tx = Transaction.create_from_struct(bytes.fromhex(output[0]))
        # Parents are different
        self.assertNotEqual(twin_tx.parents[0], tx['parents'][0])
        self.assertNotEqual(twin_tx.parents[0], tx['parents'][1])
        self.assertNotEqual(twin_tx.parents[1], tx['parents'][0])
        self.assertNotEqual(twin_tx.parents[1], tx['parents'][1])

        self.assertNotEqual(twin_tx.weight, tx['weight'])
        self.assertEqual(twin_tx.weight, 14.0)

        server.terminate()
Example #20
def test_post_pubsub_token_fail_unverified_email(dbsession, anon_client,
                                                 test_pubsub_settings):
    """An error is returned if the claim email isn't verified."""
    claim = SAMPLE_GCP_JWT_CLAIM.copy()
    del claim["email_verified"]
    with patch("ctms.app.get_claim_from_pubsub_token") as mock_get:
        mock_get.return_value = claim
        with capture_logs() as caplog:
            resp = anon_client.post(
                "/stripe_from_pubsub?pubsub_client=a_shared_secret",
                headers={"Authorization": "Bearer a_fake_token"},
                json=pubsub_wrap(stripe_customer_data()),
            )
    assert resp.status_code == 401
    assert resp.json() == {"detail": "Could not validate credentials"}
    assert len(caplog) == 1
    log = caplog[0]
    assert log["auth_fail"] == "Email not verified"
    assert log["pubsub_email"] == "*****@*****.**"
    assert "pubsub_email_verified" not in log
Example #21
def test_get_multiple_with_tracing(client, dbsession):
    """Multiple traced emails are comma-joined."""
    email_id1 = uuid4()
    email1 = "*****@*****.**"
    dbsession.add(
        Email(
            email_id=email_id1,
            primary_email=email1,
            double_opt_in=False,
            email_format="T",
            has_opted_out_of_email=False,
        ))
    dbsession.add(
        AmoAccount(email_id=email_id1,
                   user_id="amo123",
                   email_opt_in=False,
                   user=True))
    email_id2 = uuid4()
    email2 = "*****@*****.**"
    dbsession.add(
        Email(
            email_id=email_id2,
            primary_email=email2,
            double_opt_in=False,
            email_format="T",
            has_opted_out_of_email=False,
        ))
    dbsession.add(
        AmoAccount(email_id=email_id2,
                   user_id="amo123",
                   email_opt_in=True,
                   user=True))
    dbsession.commit()
    with capture_logs() as caplog:
        resp = client.get("/ctms", params={"amo_user_id": "amo123"})
    assert resp.status_code == 200
    data = resp.json()
    assert len(data) == 2
    assert len(caplog) == 1
    assert caplog[0]["trace"] == f"{email1},{email2}"
    assert "trace_json" not in caplog[0]
Example #22
def test_ctms_to_acoustic_traced_email(
    base_ctms_acoustic_service,
    example_contact,
):
    """A contact requesting tracing is traced in the logs."""
    email = "*****@*****.**"
    example_contact.email.primary_email = email
    acoustic_mock: MagicMock = MagicMock()
    base_ctms_acoustic_service.acoustic = acoustic_mock
    _main, _newsletter, _product = base_ctms_acoustic_service.convert_ctms_to_acoustic(
        example_contact
    )  # Used below as the expected inputs to downstream methods
    assert _main is not None
    assert _newsletter is not None
    assert len(_product) == 0
    with capture_logs() as caplog:
        results = base_ctms_acoustic_service.attempt_to_upload_ctms_contact(
            example_contact)
    assert results  # success

    acoustic_mock.add_recipient.assert_called_once_with(
        list_id=CTMS_ACOUSTIC_MAIN_TABLE_ID,
        created_from=3,
        update_if_found="TRUE",
        allow_html=False,
        sync_fields={"email_id": _main["email_id"]},
        columns=_main,
    )
    acoustic_mock.insert_update_relational_table.assert_not_called()
    acoustic_mock.insert_update_product_table.assert_not_called()

    assert len(caplog) == 1
    expected_log = EXPECTED_LOG.copy()
    expected_log.update({
        "email_id": "332de237-cab7-4461-bcc3-48e68f42bd5c",
        "newsletters_skipped": ["firefox-welcome", "mozilla-welcome"],
        "trace": email,
    })
    assert caplog[0] == expected_log
Example #23
def test_ctms_to_acoustic_mocked(
    base_ctms_acoustic_service,
    maximal_contact,
):
    acoustic_mock: MagicMock = MagicMock()
    base_ctms_acoustic_service.acoustic = acoustic_mock
    _main, _newsletter, _product = base_ctms_acoustic_service.convert_ctms_to_acoustic(
        maximal_contact
    )  # Used below as the expected inputs to downstream methods
    assert _main is not None
    assert _newsletter is not None
    assert len(_product) == 0
    with capture_logs() as caplog:
        results = base_ctms_acoustic_service.attempt_to_upload_ctms_contact(
            maximal_contact)
    assert results  # success
    acoustic_mock.add_recipient.assert_called()
    acoustic_mock.insert_update_relational_table.assert_called()

    acoustic_mock.add_recipient.assert_called_with(
        list_id=CTMS_ACOUSTIC_MAIN_TABLE_ID,
        created_from=3,
        update_if_found="TRUE",
        allow_html=False,
        sync_fields={"email_id": _main["email_id"]},
        columns=_main,
    )

    acoustic_mock.insert_update_relational_table.assert_called_with(
        table_id=CTMS_ACOUSTIC_NEWSLETTER_TABLE_ID, rows=_newsletter)

    acoustic_mock.insert_update_product_table.assert_not_called()

    assert len(caplog) == 1
    expected_log = EXPECTED_LOG.copy()
    expected_log.update({
        "email_id": "67e52c77-950f-4f28-accb-bb3ea1a2c51a",
        "newsletter_count": 5,
        "newsletters_skipped": ["ambassadors", "firefox-os"],
    })
    assert caplog[0] == expected_log
Example #24
def test_faculty_registration_email(client, requests_mock):
    email = "*****@*****.**"

    requests_mock.post("https://api.sparkpost.com/api/v1/transmissions", {},
                       reason="OK")

    with capture_logs() as cap_logs:
        response = client.post("/api/send-faculty-verification-email",
                               json={"email": email})
        assert len(cap_logs) >= 1

        # Should log token id
        assert any("token_id" in log for log in cap_logs)

    assert response.json["email"] == email

    verification_email_id = response.json["id"]

    verification_email = VerificationEmail.query.get(verification_email_id)

    assert verification_email.email == email
Example #25
def test_main_old_update(tmp_path):
    """healthcheck_sync fails on old update."""
    health_path = tmp_path / "healthcheck"
    old_date = datetime.now(tz=timezone.utc) - timedelta(seconds=120)
    old_date_iso = old_date.isoformat()
    with open(health_path, "w", encoding="utf8") as health_file:
        health_file.write(old_date_iso)
    settings = Settings(
        background_healthcheck_path=str(health_path),
        background_healthcheck_age_s=30,
    )
    with capture_logs() as caplogs:
        exit_code = main(settings)
    assert exit_code == 1
    assert len(caplogs) == 1
    log = caplogs[0]
    assert log == {
        "event": "Healthcheck failed",
        "exc_info": True,
        "log_level": "error",
    }
Example #26
def test_fill_fields_with_data_source_empty_input():
    existing_df = pd.DataFrame()
    new_df = pd.read_csv(
        StringIO("fips,state,aggregate_level,county,current_icu\n" "55,ZZ,state,Grand State,64\n")
    )
    with testing.capture_logs() as logs:
        result = fill_fields_with_data_source(
            get_logger(),
            existing_df,
            new_df,
            "fips state aggregate_level county".split(),
            ["current_icu"],
        )

    expected = pd.read_csv(
        StringIO(
            "fips,state,aggregate_level,county,current_icu,preserved\n"
            "55,ZZ,state,Grand State,64,\n"
        )
    )
    assert to_dict(["fips"], result) == to_dict(["fips"], expected)
    assert logs == []
Example #27
    def test_captures_logs(self):
        """
        Log entries are captured and retain their structure.
        """
        with testing.capture_logs() as logs:
            get_logger().bind(x="y").info("hello", answer=42)
            get_logger().bind(a="b").info("goodbye", foo={"bar": "baz"})
        assert [
            {
                "event": "hello",
                "log_level": "info",
                "x": "y",
                "answer": 42,
            },
            {
                "a": "b",
                "event": "goodbye",
                "log_level": "info",
                "foo": {"bar": "baz"},
            },
        ] == logs
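Since the captured entries are ordinary dicts, they can be filtered like any list; a small sketch building on Example #27:

from structlog import get_logger
from structlog.testing import capture_logs

with capture_logs() as logs:
    get_logger().info("starting")
    get_logger().error("boom", code=500)
errors = [entry for entry in logs if entry["log_level"] == "error"]
assert errors == [{"event": "boom", "log_level": "error", "code": 500}]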
Example #28
    def test_twin_human(self):
        # Twin in human form
        params = ['--raw_tx', self.tx.get_struct().hex(), '--human']
        args = self.parser.parse_args(params)

        f = StringIO()
        with capture_logs():
            with redirect_stdout(f):
                execute(args)

        # Split the printed output into a list of lines
        output = f.getvalue().strip().splitlines()

        human = output[0].replace("'", '"')
        tx_data = json.loads(human)

        self.assertTrue(isinstance(tx_data, dict))
        self.assertTrue('hash' in tx_data)
        self.assertTrue('timestamp' in tx_data)

        self.assertEqual(tx_data['parents'][0], self.tx.parents[1].hex())
        self.assertEqual(tx_data['parents'][1], self.tx.parents[0].hex())
        self.assertEqual(tx_data['weight'], self.tx.weight)
Example #29
    def test_twin(self):
        # Normal twin
        params = ['--raw_tx', self.tx.get_struct().hex()]
        args = self.parser.parse_args(params)

        f = StringIO()
        with capture_logs():
            with redirect_stdout(f):
                execute(args)

        # Split the printed output into a list of lines
        output = f.getvalue().strip().splitlines()

        twin_tx = Transaction.create_from_struct(bytes.fromhex(output[0]))
        # Parents are the same but in different order
        self.assertEqual(twin_tx.parents[0], self.tx.parents[1])
        self.assertEqual(twin_tx.parents[1], self.tx.parents[0])

        # Testing metadata creation from json
        meta_before_conflict = self.tx.get_metadata()
        meta_before_conflict_json = meta_before_conflict.to_json()
        del meta_before_conflict_json['conflict_with']
        del meta_before_conflict_json['voided_by']
        del meta_before_conflict_json['twins']
        new_meta = TransactionMetadata.create_from_json(
            meta_before_conflict_json)
        self.assertEqual(meta_before_conflict, new_meta)

        self.manager.propagate_tx(twin_tx)

        # Validate they are twins
        meta = self.tx.get_metadata(force_reload=True)
        self.assertEqual(meta.twins, [twin_tx.hash])

        meta2 = twin_tx.get_metadata()
        self.assertNotEqual(meta, meta2)
Example #30
    def test_spend_multisig(self):
        # Adding funds to the wallet
        # XXX: note further down the test, 20.00 HTR will be used, block_count must yield at least that amount
        block_count = 3  # 3 * 8.00 -> 24.00 HTR is enough
        blocks = add_new_blocks(self.manager, block_count, advance_clock=15)
        add_blocks_unlock_reward(self.manager)
        blocks_tokens = [sum(txout.value for txout in blk.outputs) for blk in blocks]
        available_tokens = sum(blocks_tokens)
        self.assertEqual(self.manager.wallet.balance[settings.HATHOR_TOKEN_UID], WalletBalance(0, available_tokens))

        # First we send tokens to a multisig address
        block_reward = blocks_tokens[0]
        outputs = [WalletOutputInfo(address=self.multisig_address, value=block_reward, timelock=None)]

        tx1 = self.manager.wallet.prepare_transaction_compute_inputs(Transaction, outputs, self.manager.tx_storage)
        tx1.weight = 10
        tx1.parents = self.manager.get_new_tx_parents()
        tx1.timestamp = int(self.clock.seconds())
        tx1.resolve()
        self.manager.propagate_tx(tx1)
        self.clock.advance(10)

        wallet_balance = WalletBalance(0, available_tokens - block_reward)
        self.assertEqual(self.manager.wallet.balance[settings.HATHOR_TOKEN_UID], wallet_balance)

        # Then we create a new tx that spends these tokens from the multisig wallet
        tx = Transaction.create_from_struct(tx1.get_struct())
        tx.weight = 10
        tx.parents = self.manager.get_new_tx_parents()
        tx.timestamp = int(self.clock.seconds())

        multisig_script = create_output_script(self.multisig_address)

        multisig_output = TxOutput(200, multisig_script)
        wallet_output = TxOutput(300, create_output_script(self.address))
        outside_output = TxOutput(block_reward - 200 - 300, create_output_script(self.outside_address))

        tx.outputs = [multisig_output, wallet_output, outside_output]

        tx_input = TxInput(tx1.hash, 0, b'')
        tx.inputs = [tx_input]

        signatures = []
        for private_key_hex in self.private_keys:
            signature = generate_signature(tx, bytes.fromhex(private_key_hex), password=b'1234')
            signatures.append(signature)

        parser = create_parser()
        # Generate spend tx
        args = parser.parse_args([
            tx.get_struct().hex(), '{},{}'.format(signatures[0].hex(), signatures[1].hex()),
            self.redeem_script.hex()
        ])
        f = StringIO()
        with capture_logs():
            with redirect_stdout(f):
                execute(args)
        # Split the printed output into a list of lines
        output = f.getvalue().strip().splitlines()

        tx_raw = output[0].split(':')[1].strip()

        tx = Transaction.create_from_struct(bytes.fromhex(tx_raw))
        self.assertTrue(self.manager.propagate_tx(tx, False))