def test_convert_sf_types(sf_types, json_type, with_raise):
    """Each Salesforce field type either raises TypeSalesforceException or maps onto the expected JSON type."""
    for salesforce_type in sf_types:
        field = {"type": salesforce_type}
        if with_raise:
            with pytest.raises(TypeSalesforceException):
                Salesforce.field_to_property_schema(field)
        else:
            property_schema = Salesforce.field_to_property_schema(field)
            assert json_type in property_schema["type"]
def _stream_api(stream_config):
    """Build a Salesforce client with authentication and describe() stubbed out.

    The returned object never touches the network: login/access_token are Mocks,
    and describe() reports a single LastModifiedDate string field.
    """
    client = Salesforce(**stream_config)
    client.login = Mock()
    client.access_token = Mock()
    client.instance_url = "https://fase-account.salesforce.com"
    stub_fields = {"fields": [{"name": "LastModifiedDate", "type": "string"}]}
    client.describe = Mock(return_value=stub_fields)
    return client
def test_discover_only_queryable(stream_config):
    """Discovery must drop sobjects whose `queryable` flag is False."""
    client = Salesforce(**stream_config)
    client.login = Mock()
    client.access_token = Mock()
    client.instance_url = "https://fase-account.salesforce.com"
    client.describe = Mock(
        return_value={
            "sobjects": [
                {"name": "Account", "queryable": True},
                {"name": "Leads", "queryable": False},
            ]
        }
    )
    validated = client.get_validated_streams(config=stream_config)
    # Only the queryable "Account" stream should survive filtering.
    assert list(validated.keys()) == ["Account"]
def test_discover_with_streams_criteria_param(streams_criteria, predicted_filtered_streams, stream_rest_config):
    """The `streams_criteria` config option must filter the discovered stream set as predicted."""
    updated_config = dict(stream_rest_config)
    updated_config["streams_criteria"] = streams_criteria

    client = Salesforce(**stream_rest_config)
    client.login = Mock()
    client.access_token = Mock()
    client.instance_url = "https://fase-account.salesforce.com"
    client.describe = Mock(
        return_value={
            "sobjects": [
                {"name": "Account"},
                {"name": "AIApplications"},
                {"name": "Leads"},
                {"name": "LeadHistory"},
                {"name": "Orders"},
                {"name": "OrderHistory"},
                {"name": "CustomStream"},
                {"name": "CustomStreamHistory"},
            ]
        }
    )
    validated = client.get_validated_streams(config=updated_config)
    assert sorted(validated) == sorted(predicted_filtered_streams)
def test_parallel_discover(input_sandbox_config):
    """Parallel schema discovery must produce the same schemas as consecutive discovery, faster.

    Times the old one-stream-at-a-time `generate_schema` loop against the
    parallel `generate_schemas`, then checks both speed and equality of results.
    """
    sf = Salesforce(**input_sandbox_config)
    sf.login()
    stream_objects = sf.get_validated_streams(config=input_sandbox_config)

    # Load every schema with the old consecutive logic and time it.
    consecutive_schemas = {}
    start_time = datetime.now()
    for stream_name, sobject_options in stream_objects.items():
        consecutive_schemas[stream_name] = sf.generate_schema(stream_name, sobject_options)
    consecutive_loading_time = (datetime.now() - start_time).total_seconds()

    # Time the parallel implementation over the same stream set.
    start_time = datetime.now()
    parallel_schemas = sf.generate_schemas(stream_objects)
    parallel_loading_time = (datetime.now() - start_time).total_seconds()

    # Guard the speedup report: parallel_loading_time can round to 0.0 on a
    # fast run, which previously raised ZeroDivisionError before any assertion.
    if parallel_loading_time > 0:
        print(
            f"\nparallel discover ~ {round(consecutive_loading_time / parallel_loading_time, 1)}x faster over traditional.\n"
        )

    # NOTE(review): the original failure message claimed "more than 10x faster"
    # while the condition only checks strictly faster; message now matches.
    assert parallel_loading_time < consecutive_loading_time, "parallel discover should be faster than consecutive"
    assert set(consecutive_schemas.keys()) == set(parallel_schemas.keys())
    for stream_name, schema in consecutive_schemas.items():
        assert schema == parallel_schemas[stream_name]
def sf(input_sandbox_config):
    """Fixture: a logged-in Salesforce client built from the sandbox config."""
    client = Salesforce(**input_sandbox_config)
    client.login()
    return client