Example #1
def main():
    config = experiments.JOB_FULL
    join_spec = join_utils.get_join_spec(config)
    prepare_utils.prepare(join_spec)
    loaded_tables = []
    for t in join_spec.join_tables:
        print('Loading', t)
        table = datasets.LoadImdb(t, use_cols=config["use_cols"])
        table.data.info()
        loaded_tables.append(table)

    t_start = time.time()
    join_iter_dataset = FactorizedSamplerIterDataset(
        loaded_tables,
        join_spec,
        sample_batch_size=1000 * 100,
        disambiguate_column_names=True)

    table = common.ConcatTables(loaded_tables,
                                join_spec.join_keys,
                                sample_from_join_dataset=join_iter_dataset)

    # Wrap the join sampler so large-domain columns (and fanout columns) are
    # factorized into smaller sub-columns; word_size_bits controls their width.
    join_iter_dataset = common.FactorizedSampleFromJoinIterDataset(
        join_iter_dataset,
        base_table=table,
        factorize_blacklist=[],
        word_size_bits=10,
        factorize_fanouts=True)
    t_end = time.time()
    log.info(f"> Initialization took {t_end - t_start} seconds.")

    join_iter_dataset.join_iter_dataset._sample_batch()
    print('-' * 60)
    print("Done")
Example #2
def generate_title_movie_companies(p):
    table2alias = {'title': 't', 'movie_companies': 'mc', 'company_name': 'cn'}
    join_tables = ['title', 'movie_companies', 'company_name']
    join_keys = {'title': ['id'], 'movie_companies': ['movie_id', 'company_id'], 'company_name': ['id']}
    join_clauses = {'title': 'title.id=movie_companies.movie_id',
                    'company_name': 'company_name.id=movie_companies.company_id'}
    # all_cols = {
    #         'title': [
    #             'title','kind_id','production_year','id2', 'id'
    #         ],
    #         'movie_companies': [
    #             'company_type_id', 'company_id', 'movie_id'
    #         ],
    #         'company_name': ['name', 'country_code', 'id'],
    #     }

    config = JOB_jintao
    p = p + ['movie_companies']
    key = '_'.join(sorted([table2alias[x] for x in p]))
    join_spec = join_utils.get_join_spec(config)
    prepare_utils.prepare(join_spec)
    loaded_tables = []
    for t in join_spec.join_tables:
        print('Loading', t)
        table = datasets.LoadImdb(t, use_cols=config["use_cols"])
        table.data.info()
        loaded_tables.append(table)
    t_start = time.time()
    join_iter_dataset = FactorizedSamplerIterDataset(
        loaded_tables,
        join_spec,
        sample_batch_size=51000 * 100,
        disambiguate_column_names=True)
    table = common.ConcatTables(loaded_tables,
                                join_spec.join_keys,
                                sample_from_join_dataset=join_iter_dataset)

    join_iter_dataset = common.FactorizedSampleFromJoinIterDataset(
        join_iter_dataset,
        base_table=table,
        factorize_blacklist=[],
        word_size_bits=10,
        factorize_fanouts=True)
    t_end = time.time()
    log.info(f"> Initialization took {t_end - t_start} seconds.")
    print(join_iter_dataset.join_iter_dataset.combined_columns)
    samples = []
    for i in tqdm(range(5000000)):
        samples.append(next(join_iter_dataset.join_iter_dataset))
    df = pd.DataFrame(data=pd.concat(samples, axis=1)).T
    df.to_csv('/home/jintao/{}.csv'.format(key), index=False)
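
A hypothetical invocation of generate_title_movie_companies (a sketch; the function appends 'movie_companies' to the table list itself, so only the remaining tables are passed in):

if __name__ == '__main__':
    # Samples the title ⋈ movie_companies ⋈ company_name join and writes
    # /home/jintao/cn_mc_t.csv (key = sorted table aliases joined with '_').
    generate_title_movie_companies(['title', 'company_name'])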
Example #3
    def MakeSamplerDatasetLoader(self, loaded_tables):
        assert self.sampler in ['fair_sampler',
                                'factorized_sampler'], self.sampler
        join_spec = join_utils.get_join_spec(self.__dict__)
        if self.sampler == 'fair_sampler':
            klass = fair_sampler.FairSamplerIterDataset
        else:
            klass = factorized_sampler.FactorizedSamplerIterDataset
        join_iter_dataset = klass(
            loaded_tables,
            join_spec,
            sample_batch_size=self.sampler_batch_size,
            disambiguate_column_names=True,
            # Only initialize the sampler if training.
            initialize_sampler=self.checkpoint_to_load is None,
            save_samples=self._save_samples,
            load_samples=self._load_samples)

        table = common.ConcatTables(loaded_tables,
                                    self.join_keys,
                                    sample_from_join_dataset=join_iter_dataset)

        if self.factorize:
            join_iter_dataset = common.FactorizedSampleFromJoinIterDataset(
                join_iter_dataset,
                base_table=table,
                factorize_blacklist=self.dmol_cols if self.num_dmol else
                self.factorize_blacklist if self.factorize_blacklist else [],
                word_size_bits=self.word_size_bits,
                factorize_fanouts=self.factorize_fanouts)

        loader = data.DataLoader(
            join_iter_dataset,
            batch_size=self.bs,
            num_workers=self.loader_workers,
            # Re-seed NumPy in each worker so workers don't draw identical samples.
            worker_init_fn=lambda worker_id: np.random.seed(
                np.random.get_state()[1][0] + worker_id),
            pin_memory=True)
        return join_spec, join_iter_dataset, loader, table
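
If the inline lambda passed as worker_init_fn reads awkwardly, an equivalent named function is a simple alternative (same behavior: each DataLoader worker derives its NumPy seed from the parent RNG state plus its worker id):

import numpy as np

def _seed_numpy_worker(worker_id):
    # Same seeding scheme as the inline lambda above.
    base_seed = np.random.get_state()[1][0]
    np.random.seed(base_seed + worker_id)

# loader = data.DataLoader(..., worker_init_fn=_seed_numpy_worker, ...)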
Example #4
def test_job_m():
    print("==== JOB-M example ====")
    config = experiments.JOB_M
    join_spec = join_utils.get_join_spec(config)
    test_sampler(join_spec)
Example #5
def test_job_light():
    print("==== Single equivalence class example (JOB-Light) ====")
    config = experiments.JOB_LIGHT_BASE
    join_spec = join_utils.get_join_spec(config)
    test_sampler(join_spec)
Example #6
def MakeQueries(spark, cursor, num_queries, tables_in_templates, table_names,
                join_keys, rng):
    """Sample a tuple from actual join result then place filters."""
    spark.catalog.clearCache()

    # TODO: this assumes single equiv class.
    join_items = list(join_keys.items())
    lhs = join_items[0][1]
    join_clauses_list = []
    for rhs in join_items[1:]:
        rhs = rhs[1]
        join_clauses_list.append('{} = {}'.format(lhs, rhs))
        lhs = rhs
    join_clauses = '\n AND '.join(join_clauses_list)

    # Take only the content columns.
    content_cols = []
    categoricals = []
    numericals = []
    for table_name in table_names:
        categorical_cols = datasets.TPC_DS.CATEGORICAL_COLUMNS[table_name]
        for c in categorical_cols:
            disambiguated_name = common.JoinTableAndColumnNames(table_name,
                                                                c,
                                                                sep='.')
            content_cols.append(disambiguated_name)
            categoricals.append(disambiguated_name)

        range_cols = datasets.TPC_DS.RANGE_COLUMNS[table_name]
        for c in range_cols:
            disambiguated_name = common.JoinTableAndColumnNames(table_name,
                                                                c,
                                                                sep='.')
            content_cols.append(disambiguated_name)
            numericals.append(disambiguated_name)

    # Build a concat table representing the join result schema.
    join_keys_list = [join_keys[n] for n in table_names]
    join_spec = join_utils.get_join_spec({
        "join_tables":
        table_names,
        "join_keys":
        dict(zip(table_names, [[k.split(".")[1]] for k in join_keys_list])),
        "join_root":
        "item",
        "join_how":
        "inner",
    })
    ds = FactorizedSamplerIterDataset(tables_in_templates,
                                      join_spec,
                                      sample_batch_size=num_queries,
                                      disambiguate_column_names=False,
                                      add_full_join_indicators=False,
                                      add_full_join_fanouts=False)
    concat_table = common.ConcatTables(tables_in_templates,
                                       join_keys_list,
                                       sample_from_join_dataset=ds)

    template_for_execution = template.Template(
        textwrap.dedent("""
        SELECT COUNT(*)
        FROM ${', '.join(table_names)}
        WHERE ${join_clauses}
        AND ${filter_clauses};
    """).strip())

    true_inner_join_card = ds.sampler.join_card
    true_full_join_card = TDS_LIGHT_OUTER_CARDINALITY
    print('True inner join card', true_inner_join_card, 'true full',
          true_full_join_card)

    ncols = len(content_cols)
    queries = []
    filter_strings = []
    sql_queries = []  # To get true cardinalities.

    while len(queries) < num_queries:
        sampled_df = ds.sampler.run()[content_cols]

        for r in sampled_df.iterrows():
            tup = r[1]
            num_filters = rng.randint(FLAGS.min_filters,
                                      max(ncols // 2, FLAGS.max_filters))

            # Positions where the values are non-null.
            non_null_indices = np.argwhere(~pd.isnull(tup).values).reshape(
                -1, )
            if len(non_null_indices) < num_filters:
                continue
            print('{} filters out of {} content cols'.format(
                num_filters, ncols))

            # Place {'<=', '>=', '='} on numericals and '=' on categoricals.
            idxs = rng.choice(non_null_indices,
                              replace=False,
                              size=num_filters)
            vals = tup[idxs].values
            cols = np.take(content_cols, idxs)
            ops = rng.choice(['<=', '>=', '='], size=num_filters)
            sensible_to_do_range = [c in numericals for c in cols]
            ops = np.where(sensible_to_do_range, ops, '=')

            print('cols', cols, 'ops', ops, 'vals', vals)

            queries.append((cols, ops, vals))
            filter_strings.append(','.join([
                ','.join((c, o, str(v))) for c, o, v in zip(cols, ops, vals)
            ]))

            # Quote string literals & leave other literals alone.
            filter_clauses = '\n AND '.join([
                '{} {} {}'.format(col, op, val) if concat_table[col].data.dtype
                in [np.int64, np.float64] else '{} {} \'{}\''.format(
                    col, op, val) for col, op, val in zip(cols, ops, vals)
            ])

            sql = template_for_execution.render(table_names=table_names,
                                                join_clauses=join_clauses,
                                                filter_clauses=filter_clauses)
            sql_queries.append(sql)

            if len(queries) >= num_queries:
                break

    true_cards = []

    for i, sql_query in enumerate(sql_queries):
        DropBufferCache()
        spark.catalog.clearCache()

        print('  Query',
              i,
              'out of',
              len(sql_queries),
              '[{}]'.format(filter_strings[i]),
              end='')

        t1 = time.time()

        true_card = ExecuteSql(spark, sql_query)[0][0]

        # cursor.execute(sql_query)
        # result = cursor.fetchall()
        # true_card = result[0][0]

        dur = time.time() - t1

        true_cards.append(true_card)
        print(
            '...done: {} (inner join sel {}; full sel {}; inner join {}); dur {:.1f}s'
            .format(true_card, true_card / true_inner_join_card,
                    true_card / true_full_join_card, true_inner_join_card,
                    dur))

        # if i > 0 and i % 1 == 0:
        #     spark = StartSpark(spark)

    df = pd.DataFrame({
        'tables': [','.join(table_names)] * len(true_cards),
        'join_conds':
        [','.join(map(lambda s: s.replace(' ', ''), join_clauses_list))] *
        len(true_cards),
        'filters':
        filter_strings,
        'true_cards':
        true_cards,
    })
    df.to_csv(FLAGS.output_csv, sep='#', mode='a', index=False, header=False)
    print('Template done.')
    return queries, true_cards
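
The ${...} expressions and the keyword-argument render(...) call above match Mako's templating API. A standalone sketch of that rendering step, assuming `template` is mako.template and using hypothetical TPC-DS filter values:

import textwrap
from mako.template import Template

sql_template = Template(
    textwrap.dedent("""
    SELECT COUNT(*)
    FROM ${', '.join(table_names)}
    WHERE ${join_clauses}
    AND ${filter_clauses};
    """).strip())

print(sql_template.render(
    table_names=['store_sales', 'item'],
    join_clauses='store_sales.ss_item_sk = item.i_item_sk',
    filter_clauses="item.i_category = 'Books'"))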
Example #7
def main(argv):
    del argv  # Unused.

    # conn = pg.connect(FLAGS.db)
    # conn.set_session(autocommit=True)
    # cursor = conn.cursor()
    cursor = None

    tables = datasets.LoadImdb(use_cols=None)

    # Load all templates in original JOB-light.
    queries = utils.JobToQuery(FLAGS.tds_light_csv, use_alias_keys=False)
    tables_to_join_keys = {}
    for query in queries:
        key = MakeTablesKey(query[0])
        if key not in tables_to_join_keys:
            join_dict = query[1]
            # Disambiguate: title->id changed to title->title.id.
            for table_name in join_dict.keys():
                # TODO: only support a single join key
                join_key = next(iter(join_dict[table_name]))
                join_dict[table_name] = common.JoinTableAndColumnNames(
                    table_name, join_key, sep='.')
            tables_to_join_keys[key] = join_dict

    num_templates = len(tables_to_join_keys)
    num_queries_per_template = FLAGS.num_queries // num_templates
    logging.info('%d join templates', num_templates)

    rng = np.random.RandomState(1234)
    queries = []  # [(cols, ops, vals)]

    # Disambiguate to not prune away stuff during join sampling.
    for table_name, table in tables.items():
        for col in table.columns:
            col.name = common.JoinTableAndColumnNames(table.name,
                                                      col.name,
                                                      sep='.')
        table.data.columns = [col.name for col in table.columns]

    if FLAGS.print_sel:
        # Print true selectivities.
        df = pd.read_csv(FLAGS.output_csv, sep='#', header=None)
        assert len(df) == FLAGS.num_queries, (len(df), FLAGS.num_queries)

        inner = []
        true_inner_card_cache = {}

        for row in df.iterrows():
            vs = row[1]
            table_names, join_clauses, true_card = vs[0], vs[1], vs[3]
            table_names = table_names.split(',')
            print('Template: {}\tTrue card: {}'.format(table_names, true_card))

            # JOB-light: contains 'full_name alias'.
            # JOB-light-ranges: just 'full_name'.
            if ' ' in table_names[0]:
                table_names = [n.split(' ')[0] for n in table_names]

            tables_in_templates = [tables[n] for n in table_names]
            key = MakeTablesKey(table_names)
            # Align the key strings with table_names (the stored value is a
            # dict of table -> 'table.key', as built in the loop above).
            join_keys_list = [tables_to_join_keys[key][n] for n in table_names]

            if key not in true_inner_card_cache:
                join_spec = join_utils.get_join_spec({
                    "join_tables":
                    table_names,
                    "join_keys":
                    dict(
                        zip(table_names,
                            [[k.split(".")[1]] for k in join_keys_list])),
                    "join_root":
                    "item",
                    "join_how":
                    "inner",
                })
                ds = FactorizedSamplerIterDataset(
                    tables_in_templates,
                    join_spec,
                    sample_batch_size=FLAGS.num_queries,
                    disambiguate_column_names=False,
                    add_full_join_indicators=False,
                    add_full_join_fanouts=False)
                true_inner_card_cache[key] = ds.sampler.join_card
            inner.append(true_inner_card_cache[key])

        pd.DataFrame({
            'true_cards': df[3],
            'true_inner': inner,
            'inner_sel': df[3] * 1.0 / inner,
            'outer_sel': df[3] * 1.0 / TDS_LIGHT_OUTER_CARDINALITY
        }).to_csv(FLAGS.output_csv + '.sel', index=False)
        print('Done:', FLAGS.output_csv + '.sel')

    else:
        # Generate queries.
        last_run_queries = file_len(FLAGS.output_csv) if os.path.exists(
            FLAGS.output_csv) else 0
        next_template_idx = last_run_queries // num_queries_per_template
        print('next_template_idx', next_template_idx)
        print(tables_to_join_keys.items())

        spark = StartSpark()
        for i, (tables_to_join,
                join_keys) in enumerate(tables_to_join_keys.items()):

            if i < next_template_idx:
                print('Skipping template:', tables_to_join)
                continue
            print('Template:', tables_to_join)

            if i == num_templates - 1:
                num_queries_per_template += FLAGS.num_queries % num_templates

            # Generate num_queries_per_template.
            table_names = tables_to_join.split('-')

            tables_in_templates = [tables[n] for n in table_names]

            # MakeQueries returns (queries, true_cards); keep only the queries.
            template_queries, _ = MakeQueries(spark, cursor,
                                              num_queries_per_template,
                                              tables_in_templates, table_names,
                                              join_keys, rng)
            queries.extend(template_queries)
Example #8
def main():
    table2alias = {'title': 't', 'cast_info': 'ci', 'movie_companies': 'mc', 'movie_info': 'mi',
                   'movie_info_idx': 'mi_idx', 'movie_keyword': 'mk'}
    join_tables = ['title', 'cast_info', 'movie_companies', 'movie_info', 'movie_info_idx', 'movie_keyword']
    join_keys = {'title': ['id'], 'cast_info': ['movie_id'], 'movie_companies': ['movie_id'],
                 'movie_info': ['movie_id'], 'movie_info_idx': ['movie_id'], 'movie_keyword': ['movie_id']}
    join_clauses = {'cast_info': 'title.id=cast_info.movie_id', 'movie_companies': 'title.id=movie_companies.movie_id',
                    'movie_info': 'title.id=movie_info.movie_id', 'movie_info_idx': 'title.id=movie_info_idx.movie_id',
                    'movie_keyword': 'title.id=movie_keyword.movie_id'}
    all_cols = {
        'title': [
            'kind_id', 'production_year', 'episode_nr', 'imdb_index', 'phonetic_code', 'season_nr', 'series_years'
        ],
        'cast_info': [
            'nr_order', 'role_id'
        ],
        'movie_companies': [
            'company_type_id'
        ],
        'movie_info_idx': ['info_type_id'],
        'movie_info': ['info_type_id'],
        'movie_keyword': ['keyword_id']
    }

    tables = ['cast_info', 'movie_companies', 'movie_info', 'movie_info_idx', 'movie_keyword']
    for num in range(1, 6):
        for p in combinations(tables, num):
            config = JOB_MY
            config['join_clauses'] = []
            p = [x for x in p]
            for t in p:
                config['join_clauses'].append(join_clauses[t])
            p = p + ['title']
            key = '_'.join(sorted([table2alias[x] for x in p]))
            config['join_tables'] = p
            config['join_keys'] = {}
            for t in p:
                config['join_keys'][t] = join_keys[t]
            col_num = 0
            for t in p:
                col_num += len(all_cols[t])
            join_spec = join_utils.get_join_spec(config)
            prepare_utils.prepare(join_spec)
            loaded_tables = []
            for t in join_spec.join_tables:
                print('Loading', t)
                table = datasets.LoadImdb(t, use_cols=config["use_cols"])
                table.data.info()
                loaded_tables.append(table)

            t_start = time.time()
            join_iter_dataset = FactorizedSamplerIterDataset(
                loaded_tables,
                join_spec,
                sample_batch_size=1000 * 100,
                disambiguate_column_names=True)

            table = common.ConcatTables(loaded_tables,
                                        join_spec.join_keys,
                                        sample_from_join_dataset=join_iter_dataset)

            join_iter_dataset = common.FactorizedSampleFromJoinIterDataset(
                join_iter_dataset,
                base_table=table,
                factorize_blacklist=[],
                word_size_bits=10,
                factorize_fanouts=True)
            t_end = time.time()
            log.info(f"> Initialization took {t_end - t_start} seconds.")
            print(join_iter_dataset.join_iter_dataset.combined_columns)
            samples = []
            for i in tqdm(range(1000000)):
                samples.append(next(join_iter_dataset.join_iter_dataset))
            df = pd.DataFrame(data=pd.concat(samples, axis=1)).T.iloc[:, :col_num]
            df.to_csv('../train-test-data/join_samples/{}.csv'.format(key), index=False)
            # join_iter_dataset.join_iter_dataset._sample_batch()
            print('-' * 60)
            print("Done {}".format(key))