import os.path

from artefact import FIXTURES_DIR
from artefact.utils.harness import TestHelper


h = TestHelper(os.path.join(FIXTURES_DIR, 'smart', 'clicks', 'tests'))

lines = h.r('smart_raw.txt')
outputs = h.je('smart_lines.json')

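# Pair each raw click line with its expected parsed output; the [1:-1] slice
# presumably trims a wrapping delimiter character from each stripped line.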
TEST_CASES = [(val.strip()[1:-1], outputs[idx]) for idx, val in enumerate(lines)]
import os.path

from artefact import FIXTURES_DIR
from artefact.utils.harness import TestHelper


h = TestHelper(os.path.join(FIXTURES_DIR, 'bluekai', 'sdt', 'line'))

lines = h.r('line_raw.txt')
outputs = h.je('line.json')

TEST_CASES = [(val.strip(), outputs[idx]) for idx, val in enumerate(lines)]
import os.path
import unittest

import mock

from artefact import FIXTURES_DIR
from artefact.utils.harness import TestHelper
from artefact.connectors.bluekai.api.client import Client

from ...tests import common
from ..connector import ReferenceConnector, Taxonomy

h = TestHelper(os.path.join(FIXTURES_DIR, 'dataiku', 'bluekai', 'reference', 'taxonomy'))

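# nodes.json presumably mirrors a raw taxonomy API payload, and
# nodes_with_paths.json the same nodes once their full paths are resolved.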
NODE_LIST = h.je('nodes.json')
EXPECTED_NODES_WITH_PATHS = h.je('nodes_with_paths.json')


def cache_bypass(key, fetcher):
    """Cache stand-in that ignores the key and always invokes the fetcher."""
    return fetcher()


class TaxonomyTest(unittest.TestCase):

    def test_read_schema(self):
        common.test_read_schema_structure(self, ReferenceConnector.build_schema(Taxonomy.schema))

    @mock.patch.object(Taxonomy, 'nodes', autospec=True)
    def test_size(self, mocked_nodes):
        mocked_nodes.return_value = NODE_LIST['nodeList']
        taxonomy = Taxonomy({})
        # Assumed assertion, inferred from the test name: the taxonomy size
        # should match the mocked node list (the exact API is hypothetical).
        self.assertEqual(taxonomy.size(), len(NODE_LIST['nodeList']))
import os.path
import json

from artefact import FIXTURES_DIR
from artefact.utils.harness import TestHelper, MockObject

from ..report_field import ReportField

h = TestHelper(os.path.join(FIXTURES_DIR, 'adwords', 'api', 'report'))

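# h.e presumably resolves a fixture filename to its absolute path, while h.je
# loads and parses a JSON fixture.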
SAMPLE_REPORT_FILE_PATH = h.e('sample_report.csv.gz')
SAMPLE_SUBSET_REPORT_FILE_PATH = h.e('sample_report_subset.csv.gz')
SAMPLE_REPORT_SCHEMA_PATH = h.e('reportDefinition.xsd')
with open(SAMPLE_REPORT_SCHEMA_PATH) as schema_file:
    SAMPLE_REPORT_SCHEMA = schema_file.read()
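# Wrap the raw field dicts as attribute-style mocks, then adapt them into
# ReportField instances, presumably as the API layer does.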
SAMPLE_REPORT_TYPE_FIELDS_RAW = [MockObject(field) for field in h.je('sample_report_type_fields.json')]
SAMPLE_REPORT_TYPE_FIELDS = [ReportField(field) for field in SAMPLE_REPORT_TYPE_FIELDS_RAW]


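# Row 10 of the sample report presumably exercises an awkward parsing case;
# expected_tricky_row.json captures its expected decoded form.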
TRICKY_ROW_NUMBER = 10
EXPECTED_TRICKY_ROW = h.je('expected_tricky_row.json')

EXPECTED_LENGTH = 14
EXPECTED_HEADERS = set(h.je('expected_headers.json'))
EXPECTED_SUBSET_HEADERS = set(h.je('expected_subset_headers.json'))
EXPECTED_FIRST_ROW = h.je('expected_first_row.json')
EXPECTED_SUBSET_FIRST_ROW = h.je('expected_subset_first_row.json')
EXPECTED_REPORT_TYPES = h.je('expected_report_types.json')
EXPECTED_DATE_TYPES = h.je('expected_date_types.json')
EXPECTED_REPORT_REQUEST = h.je('expected_request.json')

FAKE_CREDENTIALS = {
    # Hypothetical placeholder entries; typical AdWords OAuth fields assumed.
    'developer_token': 'fake-developer-token',
    'client_id': 'fake-client-id',
    'client_secret': 'fake-client-secret',
    'refresh_token': 'fake-refresh-token',
}
import os.path

from artefact import FIXTURES_DIR
from artefact.utils.harness import TestHelper


h = TestHelper(os.path.join(FIXTURES_DIR, 'aam', 'cdf', 'tests'))

lines = h.r('aam_raw.txt')
outputs = h.je('aam_lines.json')

TEST_CASES = [(val.strip()[1:-1], outputs[idx]) for idx, val in enumerate(lines)]
import os.path
from artefact import FIXTURES_DIR
from artefact.utils.harness import TestHelper

h = TestHelper(os.path.join(FIXTURES_DIR, 'dataiku', 'adwords', 'api', 'report'))

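# Expected connector-level outputs, with one first-row fixture per test
# account.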
EXPECTED_SCHEMA = h.je('expected_schema.json')
EXPECTED_FIRST_ROW_FIRST_ACCOUNT = h.je('expected_first_row_first_account.json')
EXPECTED_FIRST_ROW_SECOND_ACCOUNT = h.je('expected_first_row_second_account.json')

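# Regression fixture: a report that presumably reproduced a unicode decoding
# bug in an earlier version of the reader.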
SAMPLE_UNICODE_ISSUE_REPORT_FILE_PATH = h.e('sample_report_unicode_issue.csv.gz')
import os.path

from artefact import FIXTURES_DIR
from artefact.utils.harness import TestHelper


h = TestHelper(os.path.join(FIXTURES_DIR, 'smart', 'impressions', 'tests'))

lines = h.r('smart_impressions_raw.txt')
outputs = h.je('smart_impressions_lines.json')

TEST_CASES = [(val.strip(), outputs[idx]) for idx, val in enumerate(lines)]