Example No. 1
def test_mocked_merge_error(m):
    datadir = tempfile.mkdtemp()
    setdatadir(datadir)
    mock_oauth(m)
    mock_login(m)

    # test bigger part of merge flow but don't check output
    m.post(url='https://fake-host.salesforce.com/services/Soap/c/37.0',
           text=MERGE_HTTP_FAULT_RESP,
           status_code=201)
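    # write the merge input rows to mergedata.csv inside the temp data dir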
    with open(os.path.join(datadir, 'mergedata.csv'), 'w') as mergedata_f:
        csv_writer = CsvWriter(mergedata_f, False)
        csv_writer.write_csv([BULK_DATA_IN.fields])
        csv_writer.write_csv(BULK_DATA_IN.rows)

    config_filename = 'test-config.ini'
    endpoint_names = {'dst': 'test', 'src': 'test'}
    with open(config_filename) as config_f:
        with open('tests/sql/merge.sql') as job_f:
            try:
                run_job_from_file(config_f, job_f, endpoint_names, {}, False,
                                  False)
                # expected to fail
                assert 0
            except SoapException:
                pass
def test_empty_query_res(mock_docall, m):
    # mock setup    
    mock_oauth(m)
    mock_login(m)
    sf_bulk_connector.JOB_CHECK_TIMER = 0
    mockers.mock_empty_query_res(mock_docall, m)
    
    setdatadir(tempfile.mkdtemp())
    with open(config_file) as conf_file:
        with open('tests/sql/empty_query_res.sql') as job_file:
            run_job_from_file(conf_file, job_file,
                              {'src':'test', 'dst':'test'}, {}, None, None)
def test_merge_bad_ascii_error(m):
    mock_oauth(m)
    mock_login(m)
    setdatadir(tempfile.mkdtemp())
    with open(config_file) as conf_file:
        with open('tests/sql/merge_bad_ascii.sql') as job_file:
            try:
                run_job_from_file(conf_file, job_file,
                                  {'src':'test', 'dst':'test'}, {}, None, None)
                # it should fail
                assert 0
            except UnicodeEncodeError:
                pass 
def test_merge_required_columns_error(m):
    mock_oauth(m)
    mock_login(m)
    setdatadir(tempfile.mkdtemp())
    with open(config_file) as conf_file:
        with open('tests/sql/merge_required_columns_error.sql') as job_file:
            try:
                run_job_from_file(conf_file, job_file,
                                  {'src':'test', 'dst':'test'}, {}, None, None)
                # it should fail
                assert 0
            except SystemExit:
                pass 
def test_upsert_unsupported(m):
    mock_oauth(m)
    mock_login(m)
    setdatadir(tempfile.mkdtemp())
    with open(config_file) as conf_file:
        with open('tests/sql/upsert_unsupported.sql') as job_file:
            try:
                run_job_from_file(conf_file, job_file,
                                  {'src':'test', 'dst':'test'}, {}, None, None)
                # it should fail
                assert 0
            except SystemExit:
                pass
Example No. 6
def test_sqlite_csv_import():
    setdatadir(tempfile.mkdtemp())
    table_name = 'test2'
    # export csv data
    export_script = EXPORT_SCRIPT_FMT.format(name='foo')
    tmp_data = create_table_get_csv_data('foo', export_script)
    with open(table_name + '.csv', 'w') as csv_file:
        csv_file.write(tmp_data)
    # import csv data
    input_csv_stream = BytesIO(CSV_DATA)
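    # build the import config from the first CSV line (the header row)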
    import_script = csv_import_config(table_name, input_csv_stream.readline())
    input_csv_stream.close()
    csv_data = create_table_get_csv_data(table_name, import_script)
    assert csv_data == CSV_DATA
Example No. 7
def test_graph():
    setdatadir(tempfile.mkdtemp())
    run_mriya(datadir())
    sqlsdir = './tests'
    print "sqlsdir", sqlsdir
    relative_path = relpath(datadir(), sqlsdir)
    print "relpath", relative_path
    datapath = join(sqlsdir, relative_path)
    print "datapath", datapath
    res = aggregate_csvs(sqlscripts=SQL_SCRIPTS, csvdirpath=datapath)
    print res
    assert res['aggregation_test'] == 10001
    assert res['aggregation_test2'] == 10001
    assert res['test_fields_table'] == 1
def test_delete_syntax(m):
    # this delete operation should fail anyway but improves coverage
    mock_oauth(m)
    mock_login(m)
    setdatadir(tempfile.mkdtemp())
    with open(config_file) as conf_file:
        with open('tests/sql/delete_fake.sql') as job_file:
            try:
                run_job_from_file(conf_file, job_file,
                                  {'src':'test', 'dst':'test'}, {}, None, None)
                # it should fail
                assert 0
            except:
                pass
Example No. 9
def test_job_controller(mock_docall, m):
    # mock setup
    sf_bulk_connector.JOB_CHECK_TIMER = 0
    mockers.mock_job_controller(mock_docall, m)
    # test itself
    setdatadir(tempfile.mkdtemp())
    # test debug coverage
    mriya.log.INITIALIZED_LOGGERS = {}
    mriya.log.LOGGING_LEVEL = logging.DEBUG
    loginit(__name__)
    print "test_job_controller"

    # prepare test_csv.csv
    test_csv = ['"Alexa__c"', '"hello\n\n2"']
    with open(SqlExecutor.csv_name('test_csv'), "w") as test_csv_f:
        test_csv_f.write('"Alexa__c"\n"hello\n\n2"\n')

    notch = 'test1234567'
    with open('tests/sql/complicated.sql') as sql_f:
        lines = sql_f.readlines()
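    # parse the extended job syntax from the script lines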
    job_syntax = JobSyntaxExtended(lines)
    with open(config_filename) as conf_file:
        job_controller = JobController(conf_file.name, endpoint_names,
                                       job_syntax, {}, False)
    job_controller.run_job()
    del job_controller
    # check resulted data
    with open(SqlExecutor.csv_name('some_data_staging')) as resulted_file:
        csv_data = get_bulk_data_from_csv_stream(resulted_file)
        name_idx = csv_data.fields.index('Name')
        assert 1 == len(csv_data.rows)
        assert csv_data.rows[0][name_idx] == notch
    with open(SqlExecutor.csv_name('newids')) as newids_file:
        csv_data = get_bulk_data_from_csv_stream(newids_file)
        id_idx = csv_data.fields.index('Id')
        try:
            assert 1 == len(csv_data.rows)
        except:
            print "len(csv_data.rows)", len(csv_data.rows)
            raise
        for row in csv_data.rows:
            assert len(row[id_idx]) >= 15

    assert open(SqlExecutor.csv_name('test_csv')).read() \
        == open(SqlExecutor.csv_name('test_csv_2')).read()
    # run another test using created data
    run_test_graph(datadir(), "tests/sql/complicated.sql")
Example No. 10
def fast_merge_mock(m, response_list):
    datadir = tempfile.mkdtemp()
    setdatadir(datadir)
    mock_oauth(m)
    mock_login(m)

    # test smaller part of merge flow but checking output
    # Also test how merge splits payload to sequence of chunks
    loginit(STDERR)
    fake_bulk_connector = mock.Mock()
    m.register_uri(method='POST',
                   url='https://fake-localhost/services/Soap/c/37.0',
                   response_list=response_list)
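    # drive the soap merge through a mocked bulk connector pointing at the fake host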
    sm = SfSoapMergeWrapper(fake_bulk_connector, 'Account', BULK_DATA_IN, 2)
    sm.sf_bulk_connector.bulk.sessionid = 'fake-sessionid'
    sm.sf_bulk_connector.instance_url = 'https://fake-localhost'
    res = sm.validate()
    assert res is not None
    print res
    bulk_data = sm.run_merge()
    print bulk_data
    assert BULK_DATA_OUT == bulk_data
Example No. 11
def test_var_csv(mock_docall, m):
    # mock setup
    sf_bulk_connector.JOB_CHECK_TIMER = 0
    mockers.mock_var_csv(mock_docall, m)
    # test itself
    setdatadir(tempfile.mkdtemp())
    loginit(__name__)
    print "test_var_csv"
    macro_lines = [
        'SELECT i from csv.ints10000 WHERE i>=CAST(10 as INTEGER) \
LIMIT 2; => batch_begin:i:NESTED', 'SELECT {NESTED}; => var:foo2',
        '=> batch_end:NESTED', "SELECT '{STATIC_VAR}'; => var:static_var"
    ]

    lines = [
        'SELECT 1; => var:one',
        "SELECT 'csv.ints10000'; => var:CSV_INTS10000 => const:",
        "SELECT * FROM {CSV_INTS10000} LIMIT 1; => var:VAR_0",
        'SELECT "9+0"; => var:nine',
        'SELECT Id FROM src.Account LIMIT 1 => csv:sfvar',
        'SELECT * FROM csv.sfvar => var:sfvar',
        'SELECT {one} as f1, {nine}+1 as f2; => csv:one_ten',
        "SELECT 'one_nine_ten' => var:ONENINETEN",
        'SELECT f1, {nine} as f9, (SELECT f2 FROM csv.one_ten) as f10 \
FROM csv.one_ten; => csv:{ONENINETEN}',
        'SELECT i from csv.ints10000 WHERE i>=2 LIMIT 2; \
=> batch_begin:i:PARAM', 'SELECT {PARAM}; => var:foo',
        '=> macro:macro_test_batch:STATIC_VAR:something', '=> batch_end:PARAM',
        'SELECT {PARAM}; => var:final_test'
    ]

    expected = [{
        'from': 'csv',
        'query': 'SELECT 1;',
        'var': 'one'
    }, {
        'from': 'csv',
        'query': "SELECT 'csv.ints10000';",
        'var': 'CSV_INTS10000',
        'const': ''
    }, {
        'from': 'csv',
        'query': "SELECT * FROM {CSV_INTS10000} LIMIT 1;",
        'var': 'VAR_0'
    }, {
        'from': 'csv',
        'query': 'SELECT "9+0";',
        'var': 'nine'
    }, {
        'objname': 'Account',
        'query': 'SELECT Id FROM Account LIMIT 1',
        'csv': 'sfvar',
        'from': 'src'
    }, {
        'query': 'SELECT * FROM sfvar',
        'var': 'sfvar',
        'csvlist': ['sfvar'],
        'from': 'csv'
    }, {
        'query': 'SELECT {one} as f1, {nine}+1 as f2;',
        'csv': 'one_ten'
    }, {
        'from': 'csv',
        'query': "SELECT 'one_nine_ten'",
        'var': 'ONENINETEN'
    }, {
        'query': 'SELECT f1, {nine} as f9, (SELECT f2 FROM \
one_ten) as f10 FROM one_ten;',
        'csvlist': ['one_ten'],
        'csv': '{ONENINETEN}',
        'from': 'csv'
    }, {
        'batch': [{
            'line': 'SELECT {PARAM}; => var:foo',
            'query': 'SELECT {PARAM};',
            'var': 'foo',
            'from': 'csv'
        }, {
            'batch': [{
                'line': 'SELECT {NESTED}; => var:foo2',
                'query': 'SELECT {NESTED};',
                'var': 'foo2',
                'from': 'csv'
            }],
            'batch_begin': ('i', 'NESTED'),
            'csvlist': ['ints10000'],
            'from': 'csv',
            'line': 'SELECT i from csv.ints10000 WHERE i>=CAST(10 as INTEGER) LIMIT 2; => batch_begin:i:NESTED',
            'query': 'SELECT i from ints10000 WHERE i>=CAST(10 as INTEGER) LIMIT 2;'
        }, {
            'line': "SELECT 'something'; => var:static_var",
            'query': "SELECT 'something';",
            'var': 'static_var',
            'from': 'csv'
        }],
        'batch_begin': ('i', 'PARAM'),
        'csvlist': ['ints10000'],
        'from': 'csv',
        'query': 'SELECT i from ints10000 WHERE i>=2 LIMIT 2;'
    }, {
        'from': 'csv',
        'query': 'SELECT {PARAM};',
        'var': 'final_test'
    }]

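    # parse the job together with its macro definition and compare to the expected items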
    job_syntax_extended = JobSyntaxExtended(lines,
                                            {'macro_test_batch': macro_lines})
    assert_job_syntax_lines(job_syntax_extended.items(), expected)
    try:
        os.remove(SqlExecutor.csv_name('one_nine_ten'))
    except:
        pass
    with open(config_filename) as conf_file:
        job_controller = JobController(conf_file.name, endpoint_names,
                                       job_syntax_extended, {}, False)
    job_controller.run_job()
    res_batch_params = job_controller.variables[BATCH_PARAMS_KEY]
    assert res_batch_params == ['2', '3']
    sfvar = job_controller.variables['sfvar']
    assert len(sfvar) >= 15
    final_param = job_controller.variables['final_test']
    assert final_param == '3'
    del job_controller
    with open(SqlExecutor.csv_name('one_nine_ten')) as resulted_file:
        assert resulted_file.read() == 'f1,f9,f10\n1,9,10\n'
Example No. 12
def test_job_syntax():
    setdatadir(tempfile.mkdtemp())
    loginit(__name__)
    lines = [
        '--something',  # comment line; it will not be added to the parsed values
        'SELECT 1 => csv:const1',
        'SELECT 1 => var:MIN',
        'SELECT f1, (SELECT f2 FROM csv.one_ten) as f10 FROM \
csv.one_ten, 9; => csv:final:cache => dst:insert:foo:1:res => type:sequential',
        'SELECT 1 as bacth1 from csv.some_csv; \
=> batch_begin:batch1:BATCH',
        'SELECT 1 from dst.some_object WHERE b=a \
=> csv:some_csv => batch_end:BATCH',
        '=> batch_end:BATCH',
        'SELECT 1 as test, 2 as test2; => csv:foo:cache \
=> dst:insert:test_table:1:new_ids',
        'SELECT 1 as test, 2 as test2; => csv:foo \
=> dst:insert:test_table:1:res'
    ]
    expected = [{
        'query': 'SELECT 1',
        'csv': 'const1'
    }, {
        'from': 'csv',
        'query': 'SELECT 1',
        'var': 'MIN'
    }, {
        'query': 'SELECT f1, (SELECT f2 FROM one_ten) as f10 FROM one_ten, 9;',
        'csv': 'final',
        'from': 'csv',
        'dst': 'foo',
        'op': 'insert',
        'type': 'sequential',
        'cache': '',
        'csvlist': ['one_ten'],
        'batch_size': '1',
        'new_ids_table': 'res'
    }, {
        'query': 'SELECT 1 as bacth1 from some_csv;',
        'batch_begin': ('batch1', 'BATCH'),
        'from': 'csv',
        'csvlist': ['some_csv']
    }, {
        'query': 'SELECT 1 from some_object WHERE b=a',
        'csv': 'some_csv',
        'from': 'dst',
        'objname': 'some_object',
        'batch_end': 'BATCH'
    }, {
        'query': '',
        'batch_end': 'BATCH'
    }, {
        'query': 'SELECT 1 as test, 2 as test2;',
        'op': 'insert',
        'dst': 'test_table',
        'csv': 'foo',
        'cache': '',
        'new_ids_table': 'new_ids',
        'batch_size': '1'
    }, {
        'query': 'SELECT 1 as test, 2 as test2;',
        'csv': 'foo',
        'op': 'insert',
        'dst': 'test_table',
        'batch_size': '1',
        'new_ids_table': 'res'
    }]

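    # parse the lines and verify they expand to the expected structures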
    job_syntax = JobSyntax(lines)
    assert_job_syntax_lines(job_syntax.items(), expected)
Example No. 13
def test_sqlite_csv_export():
    setdatadir(tempfile.mkdtemp())
    export_script = EXPORT_SCRIPT_FMT.format(name='test')
    create_table_get_csv_data('test', export_script)
Example No. 14
    config.read_file(args.conf_file)

    if variables:
        print "Recognize variables", variables

    # Get log file name
    if args.logdir:
        logdirname = args.logdir
    else:
        logdirname = config[DEFAULT_SETTINGS_SECTION][LOGDIR_SETTING]
    if args.datadir:
        datadirname = args.datadir
    else:
        datadirname = config[DEFAULT_SETTINGS_SECTION][DATADIR_SETTING]
    # update data path
    sql_executor.setdatadir(datadirname)
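    # create the data directory if it does not exist yet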
    try:
        os.makedirs(datadirname)
    except OSError, e:
        if e.errno != errno.EEXIST:
            raise

    loginit(STDOUT)
    loginit(STDERR)

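    # process each job file, deriving its log path from the input file name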
    for input_file in jobs:
        getLogger(STDOUT).info('Starting %s' % input_file.name)
        # prepare log path
        logpath = os.path.join(logdirname,
                               os.path.basename(input_file.name).split('.')[0])
        try: