Example #1
    def test_get_bulk_import_job_results(self, m):

        results_tbl = Table([['BulkUploadDataID', 'ULFileID', 'PrimaryKey',
                              'PrimaryKeyType', 'MailingAddress_3581'],
                             ['1', '1983', '101596008', 'VanID', 'Processed']])

        bulk_import_job = {'id': 92,
                           'status': 'Completed',
                           'resourceType': 'Contacts',
                           'webhookUrl': None,
                           'resultFileSizeLimitKb': 5000,
                           'errors': [],
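                           # Table.to_csv() writes the table to a temp file and
                           # returns its local path, which stands in here for a
                           # real download URL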
                           'resultFiles': [{
                               'url': Table.to_csv(results_tbl),
                               'dateExpired': '2020-09-04T22:07:04.0770295-04:00'
                           }]
                           }

        m.get(self.van.connection.uri +
              'bulkImportJobs/53407', json=bulk_import_job)
        assert_matching_tables(
            self.van.get_bulk_import_job_results(53407), results_tbl)
Example #2
    def test_table_sync_full_read_chunk(self):
        dbsync = DBSync(self.fake_source,
                        self.fake_destination,
                        read_chunk_size=2)
        source_data = Table([
            {
                'id': 1,
                'value': 11
            },
            {
                'id': 2,
                'value': 121142
            },
            {
                'id': 3,
                'value': 111
            },
            {
                'id': 4,
                'value': 12211
            },
            {
                'id': 5,
                'value': 1231
            },
        ])
        self.fake_source.setup_table('source', source_data)

        dbsync.table_sync_full('source', 'destination')

        destination = self.fake_destination.table('destination')

        # Make sure the data came through
        assert_matching_tables(source_data, destination.data)

        # Make sure copy was called the expected number of times
        # reading 5 rows in chunks of 2 should result in 3 copy calls
        self.assertEqual(len(self.fake_destination.copy_call_args), 3,
                         self.fake_destination.copy_call_args)
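The expected count follows from the chunk arithmetic; a standalone sketch of the same calculation (not part of the test suite above):

import math

rows, read_chunk_size = 5, 2
expected_copy_calls = math.ceil(rows / read_chunk_size)  # 5 rows in chunks of 2 -> 3 calls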
Example #3
    def setUp(self):

        self.temp_schema = TEMP_SCHEMA
        self.pg = Postgres()

        self.tbl = Table([['ID', 'Name'], [1, 'Jim'], [2, 'John'],
                          [3, 'Sarah']])

        # Create a schema, create a table, create a view
        setup_sql = f"""
                    drop schema if exists {self.temp_schema} cascade;
                    create schema {self.temp_schema};
                    """

        other_sql = f"""
                    create table {self.temp_schema}.test (id smallint, name varchar(5));
                    create view {self.temp_schema}.test_view as (select * from {self.temp_schema}.test);
                    """  # noqa: E501

        self.pg.query(setup_sql)

        self.pg.query(other_sql)
Example #4
    def test_get_standard_object(self, m):

        processed_people_emails = Table([
            {'id': 78757050, 'emails_category': 'work', 'emails_email': '*****@*****.**'},
            {'id': 78477076, 'emails_category': 'work', 'emails_email': '*****@*****.**'},
            {'id': 78839154, 'emails_category': 'work',
             'emails_email': '*****@*****.**'}
        ])

        m.post(
            self.cp.uri + '/people/search',
            json=self.paginate_callback,
            headers={"filename": "people_search.txt"})

        # Object-specific get_ functions are just wrappers for get_standard_object()
        # So the following line is the only difference from test_get_people()
        processed_blob = self.cp.get_standard_object("people")
        blob_people = [f for f in processed_blob if f['name'] == "people"][0]['tbl']
        blob_people_emails = [f for f in processed_blob if f['name'] == "people_emails"][0]['tbl']

        assert_matching_tables(self.processed_people, blob_people)
        assert_matching_tables(processed_people_emails, blob_people_emails)
Example #5
def test_group_by_and_count():
    input = [
        {
            "outreach_id": 10591961,
            "created_date": "2021-04-22",
            "target_names": "Jack Reed, Sheldon Whitehouse, David Cicilline",
            "name": "Matthew Mellea",
            "phone": ["(650) 946-7412"],
        },
        {
            "outreach_id": 10592577,
            "created_date": "2021-04-22",
            "target_names": "Jack Reed, Sheldon Whitehouse, David Cicilline",
            "name": "Matthew Mellea",
            "phone": ["(650) 946-7412"],
        },
        {
            "outreach_id": 10613970,
            "created_date": "2021-04-23",
            "target_names":
            "Kirsten E. Gillibrand, Charles E. Schumer, Yvette D. Clarke",
            "name": "Aracely Jimenez-Hudis",
            "phone": ["(347) 204-7223"],
        },
    ]

    output = Table([
        {
            "created_date": "2021-04-22",
            "num_calls": 2
        },
        {
            "created_date": "2021-04-23",
            "num_calls": 1
        },
    ])

    assert outreach_analytics.group_by_and_count(input,
                                                 "created_date") == output
Example #6
    def process_custom_fields(self, json_blob):
        # Internal method to convert custom fields responses into a list of Parsons tables

        # Original table & columns
        custom_fields = Table(json_blob)

        # Available On
        available_on = custom_fields.long_table(['id'], 'available_on')

        # Options
        options = custom_fields.long_table(['id', 'name'], 'options')

        return [{
            'name': 'custom_fields',
            'tbl': custom_fields
        }, {
            'name': 'custom_fields_available',
            'tbl': available_on
        }, {
            'name': 'custom_fields_options',
            'tbl': options
        }]
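For context, Table.long_table() explodes a column of nested lists into its own long-format table, carrying the listed key columns along; an illustrative example with made-up data:

from parsons import Table

tbl = Table([{'id': 1, 'name': 'Color',
              'options': [{'value': 'red'}, {'value': 'blue'}]}])
options = tbl.long_table(['id', 'name'], 'options')
# one row per element of 'options', keyed by id and name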
Example #7
    def get_activity_types(self):
        """
        Get activity types

        `Returns:`
            List of dicts of Parsons Tables:
                * activity_types
        """

        logger.info("Retrieving activity types.")

        response = self.paginate_request('/activity_types/', req_type='GET')
        orig_table = Table(response)
        at_user = orig_table.long_table([], 'user', prepend=False)
        at_sys = orig_table.long_table([], 'system', prepend=False)
        Table.concat(at_sys, at_user)

        return [{'name': 'activity_types', 'tbl': at_sys}]
Example #8
    def get_leaderboard(self,
                        start_date=None,
                        end_date=None,
                        list_ids=None,
                        account_ids=None):
        """
        Return leaderboard data.

        `Args:`
            start_date: str
                Filter to the earliest date at which a post could be posted.
                The time is formatted as UTC (e.g. ``yyyy-mm-ddThh:mm:ss``).
            end_date: str
                Filter to the latest date at which a post could be posted.
                The time is formatted as UTC (e.g. ``yyyy-mm-ddThh:mm:ss``).
            list_ids: list
                Filter to the ids of lists or saved searches to retrieve.
            account_ids: list
                A list of CrowdTangle accountIds to retrieve leaderboard data for.
                This and ``list_ids`` are mutually exclusive; if both are sent, the
                ``account_ids`` value will be used.
        `Returns:`
            Parsons Table
                See :ref:`parsons-table` for output options.
        """

        args = {
            'startDate': start_date,
            'endDate': end_date,
            'listIds': self.list_to_string(list_ids),
            'accountIds': self.list_to_string(account_ids)
        }

        pt = Table(self.base_request('leaderboard', args=args))
        logger.info(f'Retrieved {pt.num_rows} records from the leaderboard.')
        self.unpack(pt)
        return pt
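A hedged usage sketch (assumes a CrowdTangle connector with a valid token; the key and list id below are placeholders):

from parsons import CrowdTangle

ct = CrowdTangle(api_key='MY_API_TOKEN')  # placeholder credentials
leaderboard = ct.get_leaderboard(start_date='2021-01-01T00:00:00',
                                 end_date='2021-01-31T23:59:59',
                                 list_ids=[12345])  # placeholder list id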
Example #9
    def get_advocates(self, state=None, campaign_id=None, updated_since=None):
        """
        Return advocates (person records).

        `Args:`
            state: str
                Filter by US postal abbreviation for a state
                or territory, e.g., "CA", "NY", or "DC"
            campaign_id: int
                Filter to specific campaign
            updated_since: str
                Fetch all advocates updated since the UTC date time
                provided, e.g. '2014-01-05 23:59:43'
        `Returns:`
            A dict of parsons tables:
                * emails
                * phones
                * memberships
                * tags
                * ids
                * fields
                * advocates
        """

        url = self.uri + 'advocates'

        args = {
            'state': state,
            'campaignid': campaign_id,
            'updatedSince': updated_since
        }

        logger.info('Retrieving advocates...')
        json = self._paginate_request(url, args=args)

        return self._advocates_tables(Table(json))
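A hedged usage sketch (assumes a Phone2Action connector with valid credentials; all values below are placeholders):

from parsons import Phone2Action

p2a = Phone2Action(app_id='APP_ID', app_key='APP_KEY')  # placeholder credentials
tables = p2a.get_advocates(state='CA', updated_since='2014-01-05 23:59:43')
advocates = tables['advocates']  # each key above holds its own Parsons Table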
Example #10
    def test_transaction_search(self, m):
        m.post(
            'https://api.braintreegateway.com:443'
            '/merchants/abcd1234abcd1234/transactions/advanced_search_ids',
            text="""
               <search-results>
                  <page-size type="integer">50</page-size>
                  <ids type="array"><item>1234abcd</item> <item>0987asdf</item> </ids>
               </search-results>
        """)
        table = self.braintree.get_transactions(
            disbursement_start_date="2020-01-01",
            disbursement_end_date="2020-01-02",
            just_ids=True)
        assert_matching_tables(table,
                               Table([['id'], ['1234abcd'], ['0987asdf']]))
        m.post(
            'https://api.braintreegateway.com:443'
            '/merchants/abcd1234abcd1234/transactions/advanced_search',
            text=open(f'{_dir}/test_data/transaction_example.xml').read())
        full_table = self.braintree.get_transactions(
            disbursement_start_date="2020-01-01",
            disbursement_end_date="2020-01-02",
            table_of_ids=table)
        self.assertEqual(len(table.table), 3)
        self.assertEqual(len(full_table.table), 3)
        self.assertEqual(table[0]['id'], '1234abcd')
        self.assertEqual(table[1]['id'], '0987asdf')
        self.assertEqual(len(table[0].keys()), 1)
        self.assertEqual(len(full_table[0].keys()), 64)

        self.assertEqual(full_table[0]['disbursement_date'],
                         datetime.date(2019, 12, 30))
        self.assertEqual(full_table[0]['credit_card_bin'], '789234')
        self.assertEqual(full_table[0]['disbursement_success'], True)
        self.assertEqual(full_table[0]['amount'], decimal.Decimal('150.00'))
Example #11
    def test_get_contact_types(self, m):

        processed_ct = Table([
            {'id': 501947, 'name': 'Potential Customer'},
            {'id': 501948, 'name': 'Current Customer'},
            {'id': 501949, 'name': 'Uncategorized'},
            {'id': 501950, 'name': 'Group Leader'},
            {'id': 540331, 'name': 'Partner'},
            {'id': 540333, 'name': 'Funder'},
            {'id': 540334, 'name': 'Potential Funder'},
            {'id': 540335, 'name': 'Other'},
            {'id': 547508, 'name': 'Local Group'},
            {'id': 575833, 'name': 'Group Member'},
            {'id': 744795, 'name': 'Hill Contact'},
            {'id': 967249, 'name': 'State Leg Contact'}
        ])

        m.get(
            self.cp.uri + '/contact_types/',
            json=self.paginate_callback,
            headers={"filename": "contact_types_list.json"})

        processed_blob = self.cp.get_contact_types()
        assert_matching_tables(processed_ct, processed_blob)
Example #12
    def test_convert_columns_to_str(self):
        # Test that all columns are string
        mixed_raw = [{
            'col1': 1,
            'col2': 2,
            'col3': 3
        }, {
            'col1': 'one',
            'col2': 2,
            'col3': [3, 'three', 3.0]
        }, {
            'col1': {
                'one': 1,
                "two": 2.0
            },
            'col2': None,
            "col3": 'three'
        }]
        tbl = Table(mixed_raw)
        tbl.convert_columns_to_str()

        cols = tbl.get_columns_type_stats()
        type_set = {i for x in cols for i in x['type']}
        self.assertTrue('str' in type_set and len(type_set) == 1)
Example #13
    def test_table_split(self):
        test1 = Table([('x', 'y', 'z'), ('a', 'b', ''), ('1', '', '3'),
                       ('4', '', '6')])
        tables = self.actionkit._split_tables_no_empties(test1, True, [])
        self.assertEqual(len(tables), 2)
        assert_matching_tables(tables[0], Table([('x', 'y'), ('a', 'b')]))
        assert_matching_tables(tables[1],
                               Table([('x', 'z'), ('1', '3'), ('4', '6')]))

        test2 = Table([('x', 'y', 'z'), ('a', 'b', 'c'), ('1', '2', '3'),
                       ('4', '5', '6')])
        tables2 = self.actionkit._split_tables_no_empties(test2, True, [])
        self.assertEqual(len(tables2), 1)
        assert_matching_tables(tables2[0], test2)

        test3 = Table([('x', 'y', 'z'), ('a', 'b', ''), ('1', '2', '3'),
                       ('4', '5', '6')])
        tables3 = self.actionkit._split_tables_no_empties(test3, False, ['z'])
        self.assertEqual(len(tables3), 2)
        assert_matching_tables(tables3[0], Table([('x', 'y'), ('a', 'b')]))
        assert_matching_tables(
            tables3[1],
            Table([('x', 'y', 'z'), ('1', '2', '3'), ('4', '5', '6')]))
Example #14
    def _request(self,
                 url,
                 req_type='GET',
                 post_data=None,
                 args=None,
                 limit=None):
        # Make sure to have a current token before we make another request
        now = datetime.now(timezone.utc)
        if now > self.session_exp:
            self._get_session_token()

        # Based on PDI docs
        # https://api.bluevote.com/docs/index
        LIMIT_MAX = 2000

        headers = {
            "Content-Type": "application/json",
            "Authorization": f"Bearer {self.session_token}",
        }

        request_fn = {
            "GET": requests.get,
            "POST": requests.post,
            "PUT": requests.put,
            "DELETE": requests.delete,
        }

        if limit and limit <= LIMIT_MAX:
            args = args or {}
            args["limit"] = limit

        args = self._clean_dict(args) if args else args
        post_data = self._clean_dict(post_data) if post_data else post_data

        res = request_fn[req_type](url,
                                   headers=headers,
                                   json=post_data,
                                   params=args)

        logger.debug(f"{res.url} - {res.status_code}")
        logger.debug(res.request.body)

        res.raise_for_status()

        if not res.text:
            return None

        logger.debug(res.text)

        try:
            res_json = res.json()
        except JSONDecodeError:
            res_json = None

        if "data" not in res_json:
            return res_json

        total_count = res_json.get("totalCount", 0)
        data = res_json["data"]

        if not limit:
            # We don't have a limit, so let's get everything
            # Start at page 2 since we already got page 1
            cursor = 2
            while len(data) < total_count:
                args = args or {}
                args["cursor"] = cursor
                args["limit"] = LIMIT_MAX
                res = request_fn[req_type](url,
                                           headers=headers,
                                           json=post_data,
                                           params=args)

                data.extend(res.json()["data"])

                cursor += 1

            return Table(data)

        else:
            total_need = min(limit, total_count)

            cursor = 2
            while len(data) < total_need:
                args = args or {}
                args["cursor"] = cursor
                args["limit"] = min(LIMIT_MAX, total_need - len(data))
                res = request_fn[req_type](url,
                                           headers=headers,
                                           json=post_data,
                                           params=args)

                data.extend(res.json()["data"])

                cursor += 1

            return Table(data)
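The two pagination branches differ only in their stopping target, so they could share one loop; a hedged refactor sketch (the helper name is illustrative, not part of the source):

    def _fetch_remaining(self, request_fn, url, headers, post_data, args,
                         data, target, limit_max=2000):
        # Pull pages starting at cursor 2 until `target` rows are collected
        cursor = 2
        while len(data) < target:
            args = args or {}
            args["cursor"] = cursor
            args["limit"] = min(limit_max, target - len(data))
            res = request_fn(url, headers=headers, json=post_data, params=args)
            data.extend(res.json()["data"])
            cursor += 1
        return data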
Example #15
    def get_posts(self,
                  start_date=None,
                  end_date=None,
                  language=None,
                  list_ids=None,
                  min_interactions=None,
                  search_term=None,
                  types=None):
        """
        Return posts.

        `Args:`
            start_date: str
                Filter to the earliest date at which a post could be posted.
                The time is formatted as UTC (e.g. ``yyyy-mm-ddThh:mm:ss``).
            end_date: str
                Filter to the latest date at which a post could be posted.
                The time is formatted as UTC (e.g. ``yyyy-mm-ddThh:mm:ss``).
            language: str
                Filter to 2-character Locale code. Some languages require more
                than two characters: Chinese (Simplified) is zh-CN and
                Chinese (Traditional) is zh-TW.
            list_ids: list
                Filter to the ids of lists or saved searches to retrieve.
            min_interactions: int
                Filter to posts with total interactions above this threshold.
            search_term: str
                Returns only posts that match this search term. For multiple terms, separate
                with commas for OR, use quotes for phrases.
            types: list
                Filter to post types including:
                * ``episode``
                * ``extra_clip``
                * ``link``
                * ``live_video``
                * ``live_video_complete``
                * ``live_video_scheduled``
                * ``native_video``
                * ``photo``
                * ``status``
                * ``trailer``
                * ``tweet``
                * ``vimeo``
                * ``vine``
                * ``youtube``

                If you want all live videos (whether currently or formerly live),
                include both the ``live_video`` and ``live_video_complete``
                types.

                The ``video`` type does not mean all videos; it refers to videos
                that are not ``native_video``, ``youtube``, or ``vine``.

        `Returns:`
            Parsons Table
                See :ref:`parsons-table` for output options.
        """

        args = {
            'startDate': start_date,
            'endDate': end_date,
            'language': language,
            'listIds': self.list_to_string(list_ids),
            'minInteractions': min_interactions,
            'searchTerm': search_term,
            'types': types
        }

        logger.info("Retrieving posts.")
        pt = Table(self.base_request('posts', args=args))
        logger.info(f'Retrieved {pt.num_rows} posts.')
        self.unpack(pt)
        return pt
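A hedged usage sketch mirroring the live-video note above (placeholder credentials, as in the leaderboard example):

from parsons import CrowdTangle

ct = CrowdTangle(api_key='MY_API_TOKEN')  # placeholder credentials
posts = ct.get_posts(start_date='2021-01-01T00:00:00',
                     types=['live_video', 'live_video_complete'],
                     min_interactions=100)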
Example #16
 def test_get_people(self, m):
     m.get(f"{self.api_url}/people?page=1&per_page=25", text=json.dumps(self.fake_people_list_1))
     m.get(f"{self.api_url}/people?page=2&per_page=25", text=json.dumps(self.fake_people_list_2))
     m.get(f"{self.api_url}/people?page=3&per_page=25",
           text=json.dumps({'_embedded': {"osdi:people": []}}))
     assert_matching_tables(self.an.get_people(), Table(self.fake_people_list))
Example #17
loaded = [['id', 'voterbase_id', 'date_updated']]  # column names for log table

source_table = 'schema.table'  # this is the table with the information I'm pushing to ActionKit
log_table = 'schema.table'  # this is where we will log every user id that gets marked with a voterbase_id

logger.info("Running query to get matches...")
query = f'''
    select distinct id, voterbase_id
    from {source_table}
    left join {log_table} using (id, voterbase_id)
    where voterbase_id is not null and date_updated is null
    '''

source_data = rs.query(query)

if source_data.num_rows > 0:
    logger.info(f"Will be updating voterbase_id for {source_data.num_rows}...")
    for row in source_data:
        user = ak.get_user(user_id=row['id'])
        user_dict = {"fields": {"vb_voterbase_id": row['voterbase_id']}}
        ak.update_user(user_id=row['id'], **user_dict)
        user = ak.get_user(user_id=row['id'])
        if user['fields']['vb_voterbase_id'] == row['voterbase_id']:
            loaded.append([row['id'], row['voterbase_id'], timestamp])

    logger.info("Done with loop! Loading into log table...")
    Table(loaded).to_redshift(log_table, if_exists='append')

else:
    logger.info(f"No one to update today...")
Example #18
 def test_get_users_by_email(self, m):
     email = '*****@*****.**'
     mock_users = [{'email': '*****@*****.**', 'id': 2}]
     m.get(f'{self.auth0.base_url}/api/v2/users-by-email?email={email}', json=mock_users)
     assert_matching_tables(self.auth0.get_users_by_email(email), Table(mock_users), True)
Example #19
    def test_get_organizations(self, m):

        m.get(HUSTLE_URI + 'organizations', json=expected_json.organizations)
        orgs = self.hustle.get_organizations()
        assert_matching_tables(orgs,
                               Table(expected_json.organizations['items']))
Example #20
    def test_get_tags(self, m):

        m.get(HUSTLE_URI + 'organizations/LePEoKzD3/tags',
              json=expected_json.tags)
        tags = self.hustle.get_tags(organization_id='LePEoKzD3')
        assert_matching_tables(tags, Table(expected_json.tags['items']))
Example #21
 def test_get_tags(self, m):
     m.get(f"{self.api_url}/tags?page=1&per_page=25", text=json.dumps(self.fake_tag_list))
     m.get(f"{self.api_url}/tags?page=2&per_page=25",
           text=json.dumps({'_embedded': {"osdi:tags": []}}))
     assert_matching_tables(self.an.get_tags(),
                            Table(self.fake_tag_list['_embedded']['osdi:tags']))
Example #22
 def test_from_empty_list(self):
     # Just ensure this doesn't throw an error
     Table()
     Table([])
     Table([[]])
Example #23
    for contact_vanid in contact_vanids:

        try:
            person = ea.get_person(
                contact_vanid,
                id_type="vanid",
                expand_fields=["reported_demographics", "custom_fields"]
            )
            transformed_person = transform_person_for_redshift(person)
            extra_fields.append(transformed_person)
        except HTTPError as e:
            logger.error(e)
            error = {
                "vanid": contact_vanid,
                "error": str(e)[:999]
            }
            errors.append(error)
        
    logger.info(f'Found {len(extra_fields)} new contacts to add to contacts_extra_fields')
    logger.info(f'Identified {len(errors)} errors. Appending to errors table.')

    # convert to Parsons table
    tbl = Table(extra_fields)
    errors_tbl = Table(errors)

    tbl.to_csv('extra_fields_test.csv')
    errors_tbl.to_csv('extra_fields_errors.csv')

    # copy Table into Redshift, append new rows
    rs.copy(tbl, 'sunrise.contacts_extra_fields', if_exists='append',
            distkey='vanid', sortkey=None, alter_table=True)
    rs.copy(errors_tbl, 'sunrise.get_extra_fields_errors', if_exists='append',
            distkey='vanid', sortkey=None, alter_table=True)
Example #24
 def test_to_dicts(self):
     self.assertEqual(self.lst, Table(self.lst).to_dicts())
     self.assertEqual(self.lst_dicts, self.tbl.to_dicts())
Example #25
    def test_get_agents(self, m):

        m.get(HUSTLE_URI + 'groups/Qqp6o90SiE/agents',
              json=expected_json.agents)
        agents = self.hustle.get_agents(group_id='Qqp6o90SiE')
        assert_matching_tables(agents, Table(expected_json.agents['items']))
Example #26
    def test_materialize_to_file(self):
        # Simple test that materializing doesn't change the table
        tbl_materialized = Table(self.lst_dicts)
        _ = tbl_materialized.materialize_to_file()

        assert_matching_tables(self.tbl, tbl_materialized)
Example #27
    def test_get_groups(self, m):

        m.get(HUSTLE_URI + 'organizations/LePEoKzD3/groups',
              json=expected_json.groups)
        groups = self.hustle.get_groups('LePEoKzD3')
        assert_matching_tables(groups, Table(expected_json.groups['items']))
Example #28
    def test_bool(self):
        empty = Table()
        not_empty = Table([{'one': 1, 'two': 2}])

        self.assertEqual(not empty, True)
        self.assertEqual(not not_empty, False)
Example #29
class TestTargets(unittest.TestCase):

    mock_data = (
        '12827,Volunteer Recruitment Tiers,Tier,109957740\n'
        '12827,Volunteer Recruitment Tiers,Tier,109957754')
    mock_result = Table([
        ('12827', 'Volunteer Recruitment Tiers', 'Tier', '109957740'),
        ('12827', 'Volunteer Recruitment Tiers', 'Tier', '109957754')])

    def setUp(self):

        self.van = VAN(os.environ['VAN_API_KEY'], db="MyVoters", raise_for_status=False)

    def tearDown(self):

        pass

    @requests_mock.Mocker()
    def test_get_targets(self, m):

        # Create response
        json = {'count': 2, 'items':
                [{'targetId': 12827,
                  'type': 'TEST CODE',
                  'name': 'TEST CODE',
                  'description': None,
                  'points': 20,
                  'areSubgroupsSticky': False,
                  'status': 'Active',
                  'subgroups': None,
                  'markedSubgroup': None}],
                'nextPageLink': None}

        m.get(self.van.connection.uri + 'targets', json=json)

        # Expected Structure
        expected = ['targetId', 'type', 'name', 'description',
                    'points', 'areSubgroupsSticky', 'status', 'subgroups', 'markedSubgroup']

        # Assert response is expected structure
        self.assertTrue(validate_list(expected, self.van.get_targets()))

        # To Do: Test what happens when it doesn't find any targets

    @requests_mock.Mocker()
    def test_get_target(self, m):

        # Create response
        json = {'targetId': 15723,
                'name': 'Mail_VR_Chase',
                'type': 'Dynamic',
                'description': None,
                'points': 15,
                'areSubgroupsSticky': False,
                'status': 'Active',
                'subgroups':
                [{'targetId': 12827,
                  'fullName': 'April_VR_Chase Calls',
                  'name': 'April_Chase_20',
                  'subgroupId': 46803,
                  'isAssociatedWithBadges': True}],
                'markedSubgroup': None}

        m.get(self.van.connection.uri + 'targets/15723', json=json)

        self.assertEqual(json, self.van.get_target(15723))

    @requests_mock.Mocker()
    def test_create_target_export(self, m):

        export_job_id = '{"exportJobId": "455961790"}'
        target_id = 12827

        m.post(self.van.connection.uri + 'targetExportJobs', json=export_job_id, status_code=204)

        # Test that it doesn't throw an error
        r = self.van.create_target_export(target_id, webhook_url=None)

        self.assertEqual(r, export_job_id)

    @requests_mock.Mocker()
    def test_get_target_export(self, m):

        export_job_id = 455961790
        json = [{
            "targetId": 12827,
            "file": {
                "downloadUrl": (
                    "https://ngpvan.blob.core.windows.net/"
                    "target-export-files/TargetExport_455961790.csv"),
                "dateExpired": "null",
                "recordCount": 1016883},
            "webhookUrl": "null",
            "exportJobId": 455961790,
            "jobStatus": "Complete"}]

        download_url = (
            'https://ngpvan.blob.core.windows.net/target-export-files/TargetExport_455961790.csv')

        m.post(self.van.connection.uri + 'targetExportJobs', json=export_job_id, status_code=204)
        m.get(self.van.connection.uri + 'targetExportJobs/455961790', json=json)
        m.get(download_url, text=self.mock_data)
        assert_matching_tables(self.van.get_target_export(export_job_id),
                               self.mock_result)
Example #30
import unittest
import os

from parsons import FacebookAds, Table

users_table = Table([
    {
        "first": "Bob",
        "middle": "J",
        "last": "Smith",
        "phone": "1234567890",
        "cell": None,
        "vb_voterbase_dob": "19820413"
    },
    {
        "first": "Sue",
        "middle": "Lucy",
        "last": "Doe",
        "phone": None,
        "cell": "2345678901",
        "vb_voterbase_dob": None
    },
])


@unittest.skipIf(not os.environ.get('LIVE_TEST'),
                 'Skipping because not running live test')
class TestFacebookAdsIntegration(unittest.TestCase):
    def setUp(self):

        self.fb_ads = FacebookAds()