def test_read(self):
        with patch('os.path.exists', new=MagicMock(return_value=False)):
            assert JSONFile.read(self.filename) is None

        with patch('os.path.exists', new=MagicMock(return_value=True)):
            with patch('__builtin__.open', mock_open(read_data='[]')):
                assert JSONFile.read(self.filename) == []
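
For reference, JSONFile is a thin wrapper around the standard json module. Below is a minimal sketch consistent with the contract exercised by this test (missing file -> None, otherwise the parsed JSON) and with the JSONFile.write(data, filename) calls used throughout these examples; the implementation details are an assumption:

import json
import os


class JSONFile(object):

    @staticmethod
    def read(filename):
        # Contract from the test above: missing file -> None, else parsed JSON.
        if not os.path.exists(filename):
            return None
        with open(filename) as f:
            return json.load(f)

    @staticmethod
    def write(data, filename):
        # Signature inferred from calls like JSONFile.write(points, path).
        with open(filename, 'w') as f:
            json.dump(data, f)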
Example no. 2
    def top_users_papers_selecting_categories(cls,
                                              year,
                                              month,
                                              top_categories=10,
                                              different_papers=20):
        """
        Selects only users and papers in the top_categories based on the data generated by the
        previous function.
        :param year:
        :param month:
        :param top_categories:
        :param different_papers
        :return: [ {'paper': (int) number of times seen},
            {'user': {'stats': ((int) # entries, (int) # different papers in the top_n papers),
                      'diff_papers': [str]
                }
            }
        ]
        """
        categories = JSONFile.read(
            cls._name_file_categories(year=year, month=month))
        papers_cat = pd.DataFrame.from_records([categories]).transpose()

        users_cg, user_cat = cls.assign_categories_to_users(year, month)
        user_cat = pd.DataFrame.from_records([user_cat]).transpose()

        pap_1 = set(user_cat[0].sort_values().index.values[-top_categories:])
        pap_2 = set(papers_cat[0].value_counts().sort_values().index.
                    values[-top_categories:])

        top_cat = pap_1.intersection(pap_2)

        full_data = JSONFile.read(cls._name_file_final(year=year, month=month))

        papers_or = full_data[0]
        papers_new = {}
        for paper in papers_or:
            cat = categories[paper]
            if cat in top_cat:
                papers_new[paper] = papers_or[paper]

        users_new = {}
        for user in full_data[1]:
            paper_user = []
            for paper in full_data[1][user]['diff_papers']:
                cat = categories[paper]
                if cat in top_cat:
                    paper_user.append(paper)
            if len(paper_user) > different_papers:
                users_new[user] = full_data[1][user]
                users_new[user]['diff_papers'] = paper_user

        file_name = cls._name_file_final_categ(year=year, month=month)
        JSONFile.write([papers_new, users_new], file_name)

        logger.info('Number of papers is %d' % len(papers_new))
        logger.info('Number of users is %d' % len(users_new))

        return [papers_new, users_new]
Example no. 3
    def plot_histograms_papers_categories(cls, year, month):
        data = JSONFile.read(cls._name_file_categories(year=year, month=month))
        df = pd.DataFrame.from_records([data])
        df = df.transpose()
        hist = df[0].value_counts().plot(kind='bar')
        fig = hist.get_figure()
        hist_file = cls._histogram_papers(year=year, month=month)
        fig.savefig(hist_file)
Example no. 4
    def set_data_from_file(self):
        data = JSONFile.read(self.file_path)

        if data is None:
            return

        self.evaluated_points = data['evaluated_points']
        self.objective_values = data['objective_values']
        self.model_objective_values = data['model_objective_values']
        self.standard_deviation_evaluations = data[
            'standard_deviation_evaluations']
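
A hypothetical example of the JSON layout this method expects, inferred only from the keys read above (the values are made up for illustration):

example_data = {
    'evaluated_points': [[0.1, 2.0], [0.5, 1.0]],
    'objective_values': [3.2, 2.7],
    'model_objective_values': [3.1, 2.8],
    'standard_deviation_evaluations': [0.05, 0.04],
}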
    def assign_categories_date_year(cls, year, month):
        """
        :param year: (str)
        :param month: (str) e.g. '1', '12'
        :return:
        """

        file_name = cls._name_file_final(year=year, month=month)
        data = JSONFile.read(file_name)
        papers = data[0].keys()
        papers = cls.assign_categories(papers, year, month)
        return papers
Example no. 6
    def get_training_data(cls, year, month, random_seed=1):
        """
        Creates a file with the training data:
            [[user_id, paper_id, rating]], where rating is 1 if the paper wasn't seen by the user,
            or 2 otherwise.

        :param year: str
        :param month: str (e.g. '1', '12')
        :param random_seed: int

        """
        random.seed(random_seed)
        file_name = cls._name_file_final_categ(year=year, month=month)
        data = JSONFile.read(file_name)

        papers = data[0].keys()

        users_data = data[1]
        users = users_data.keys()

        training_data = []

        key_paper = {}
        for i, paper in enumerate(papers):
            key_paper[paper] = i + 1

        for i, user in enumerate(users):
            for paper in users_data[user]['diff_papers']:
                training_data.append([i + 1, key_paper[paper], 2])

            other_papers = list(
                set(papers) - set(users_data[user]['diff_papers']))
            index_papers = range(len(other_papers))
            random.shuffle(index_papers)
            seen_papers = len(set(users_data[user]['diff_papers']))

            dislike_papers = np.random.randint(
                int(0.5 * seen_papers),
                min(int(1.8 * seen_papers), len(index_papers)), 1)

            index = dislike_papers[0]

            keep_index_papers = index_papers[0:index]
            for index in keep_index_papers:
                training_data.append(
                    [i + 1, key_paper[other_papers[index]], 1])

        file_name = cls._name_training_data(year=year, month=month)

        logger.info('There are %d training points' % len(training_data))
        JSONFile.write(training_data, file_name)
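
The negative sampling above draws, for each user, a number of "disliked" (rating 1) papers between 0.5x and 1.8x the number of seen papers. A compact sketch of the same idea for a single user; the helper name and arguments are hypothetical:

import random

import numpy as np


def sample_training_rows(user_id, seen, all_papers, key_paper):
    # Seen papers get rating 2.
    rows = [[user_id, key_paper[p], 2] for p in seen]
    # Sample unseen papers as rating-1 ("dislike") examples.
    unseen = list(set(all_papers) - set(seen))
    random.shuffle(unseen)
    n_dislike = np.random.randint(
        int(0.5 * len(seen)), min(int(1.8 * len(seen)), len(unseen)))
    rows += [[user_id, key_paper[p], 1] for p in unseen[:n_dislike]]
    return rows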
Example no. 7
    def assign_categories_to_users(cls, year, month):
        file_name = cls._name_file_final(year=year, month=month)
        full_data = JSONFile.read(file_name)
        users = full_data[1]

        paper_cat = JSONFile.read(
            cls._name_file_categories(year=year, month=month))

        users_cg = {}
        for user in users:
            diff_papers = users[user]['diff_papers']
            papers_cat = []
            for paper in diff_papers:
                papers_cat.append(paper_cat[paper])
            users_cg[user] = papers_cat

        JSONFile.write(users_cg,
                       cls._name_file_categories_users(year=year, month=month))

        user_cat = {}
        for user in users_cg:
            papers = users_cg[user]
            cat_us = {}
            for cat in papers:
                if cat not in cat_us:
                    cat_us[cat] = 0
                cat_us[cat] += 1
            for cat in cat_us:
                if cat_us[cat] >= 0.10 * len(papers):
                    if cat not in user_cat:
                        user_cat[cat] = 0
                    user_cat[cat] += 1
        JSONFile.write(
            user_cat,
            cls._name_file_categories_users_hist(year=year, month=month))
        return users_cg, user_cat
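
The user_cat aggregation above (a category counts for a user when it covers at least 10% of that user's papers) can be written more compactly with collections.Counter; a behaviour-equivalent sketch:

from collections import Counter


def categories_histogram(users_cg):
    user_cat = Counter()
    for user, cats in users_cg.items():
        for cat, n in Counter(cats).items():
            if n >= 0.10 * len(cats):
                user_cat[cat] += 1
    return dict(user_cat)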
    def get_points_domain(cls,
                          n_training,
                          bounds_domain,
                          random_seed,
                          training_name,
                          problem_name,
                          type_bounds=None,
                          simplex_domain=None):
        """
        Get random points in the domain.

        :param n_training: (int) Number of points
        :param bounds_domain: [([float, float] or [float])], the first case is when the bounds are
            lower or upper bound of the respective entry; in the second case, it's a list of finite
            points representing the domain of that entry.
        :param random_seed: (int)
        :param training_name: (str), prefix used to save the training data.
        :param problem_name: str
        :param type_bounds: [0 or 1], 0 if the bounds are lower or upper bound of the respective
            entry, 1 if the bounds are all the finite options for that entry.
        :return: [[float]]
        """

        file_name = cls._filename_domain(
            problem_name=problem_name,
            training_name=training_name,
            n_points=n_training,
            random_seed=random_seed,
        )

        training_dir = path.join(PROBLEM_DIR, problem_name, 'data')
        training_path = path.join(training_dir, file_name)

        points = JSONFile.read(training_path)
        if points is not None:
            return points

        points = DomainService.get_points_domain(n_training,
                                                 bounds_domain,
                                                 type_bounds=type_bounds,
                                                 random_seed=random_seed,
                                                 simplex_domain=simplex_domain)
        print(points)
        JSONFile.write(points, training_path)

        return points
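
This read-if-cached, otherwise compute-and-write pattern recurs in most of the examples below. Factored out, it is roughly the following sketch, assuming the JSONFile semantics shown earlier:

def read_or_compute(path, compute, write=True):
    # Return the cached JSON if present; otherwise compute, persist, and return.
    cached = JSONFile.read(path)
    if cached is not None:
        return cached
    result = compute()
    if write:
        JSONFile.write(result, path)
    return result

# e.g.
# points = read_or_compute(
#     training_path,
#     lambda: DomainService.get_points_domain(n_training, bounds_domain))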
Example no. 9
    def load_discretization(cls, problem_name, bounds_domain_x,
                            number_points_each_dimension_x):
        """
        Try to load the discretization for problem_name from file. If the file doesn't exist,
        it generates the discretization and stores it.

        :param problem_name: (str)
        :param bounds_domain_x: ([BoundsEntity])
        :param number_points_each_dimension_x: ([int])

        :return: [[float]]
        """

        bounds_str = BoundsEntity.get_bounds_as_lists(bounds_domain_x)

        filename = cls._disc_x_filename(
            name=problem_name,
            bounds=bounds_str,
            number_points_each_dimension=number_points_each_dimension_x)

        if not os.path.exists(path.join(PROBLEM_DIR, problem_name)):
            os.mkdir(path.join(PROBLEM_DIR, problem_name))

        domain_dir = path.join(PROBLEM_DIR, problem_name, DOMAIN_DIR)

        if not os.path.exists(domain_dir):
            os.mkdir(domain_dir)

        domain_path = path.join(domain_dir, filename)

        discretization_data = JSONFile.read(domain_path)
        if discretization_data is not None:
            return discretization_data

        logger.info('Generating discretization of domain_x')
        discretization_data = DomainEntity.discretize_domain(
            bounds_domain_x, number_points_each_dimension_x)
        logger.info('Generated discretization of domain_x')

        JSONFile.write(discretization_data, domain_path)

        return discretization_data
Example no. 10
    def cv_data_sets(cls, year, month, n_folds=5, random_seed=1):
        """
        Creates n_folds files with pairs of datasets: (training_data, validation_data).

        :param year: str
        :param month: str (e.g. '1', '12')
        :param n_folds: int
        :param random_seed: int

        """
        random.seed(random_seed)

        file_name = cls._name_training_data(year=year, month=month)
        data = JSONFile.read(file_name)

        indexes_data = range(len(data))
        random.shuffle(indexes_data)

        n_batch = len(indexes_data) / n_folds
        random_indexes = [
            indexes_data[i * n_batch:n_batch + i * n_batch]
            for i in xrange(n_folds)
        ]

        extra = 0
        for j in xrange(len(indexes_data) % n_folds):
            random_indexes[j].append(indexes_data[n_batch + extra +
                                                  (n_folds - 1) * n_batch])
            extra += 1

        file_name = cls._name_fold_indexes(year=year, month=month)
        JSONFile.write(random_indexes, file_name)

        for i in xrange(n_folds):
            validation = [data[index] for index in random_indexes[i]]

            training_indexes = []
            for j in xrange(n_folds):
                if j != i:
                    training_indexes += random_indexes[j]

            training = [data[index] for index in training_indexes]

            file_name = cls._name_fold_data_training(year=year,
                                                     month=month,
                                                     fold=i)
            JSONFile.write(training, file_name)

            file_name = cls._name_fold_data_training_matlab(year=year,
                                                            month=month,
                                                            fold=i)
            sio.savemat(file_name, {'training': training})

            file_name = cls._name_fold_data_validation(year=year,
                                                       month=month,
                                                       fold=i)
            JSONFile.write(validation, file_name)

            file_name = cls._name_fold_data_validation_matlab(year=year,
                                                              month=month,
                                                              fold=i)
            sio.savemat(file_name, {'validation': validation})
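
Note the snippet above relies on Python 2 semantics (xrange, a list-returning range, integer /). For reference, a Python 3 rendering of the fold-index construction, with the same spreading of the remainder over the first folds:

import random


def fold_indexes(n_data, n_folds, random_seed=1):
    random.seed(random_seed)
    indexes = list(range(n_data))
    random.shuffle(indexes)
    n_batch = n_data // n_folds
    # n_folds chunks of size n_batch, then the remainder goes to the first folds.
    folds = [indexes[i * n_batch:(i + 1) * n_batch] for i in range(n_folds)]
    for j in range(n_data % n_folds):
        folds[j].append(indexes[n_folds * n_batch + j])
    return folds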
Example no. 11
    def top_users_papers(cls,
                         year,
                         month,
                         n_entries=100,
                         different_papers=20,
                         top_n=5000,
                         n_users=None,
                         only_assign_categories=True):
        """
        Returns the users that accessed at least n_entries papers, where at least different_papers
        of them were distinct and among the top_n papers of that month of the year.

        Returns the top_n papers based on how many times they were seen.

        :param year: (str)
        :param month: (str) e.g. '1', '12'
        :param n_entries: (int)
        :param different_papers: int
        :param top_n: int
        :param n_users: (int) Maximum number of users allowed
        :return: [ {'paper': (int) number of times seen},
            {'user': {'stats': ((int) # entries, (int) # different papers in the top_n papers),
                      'diff_papers': [str]
                }
            }
        ]
        """

        file_name = cls._name_file_(year=year, month=month)
        data = JSONFile.read(file_name)

        users = data[0]
        papers = data[1]

        n_papers = []
        paper_ls = []
        for paper in papers:
            paper_ls.append(paper)
            n_papers.append(papers[paper]['views'])
        index_top_papers = sorted(range(len(n_papers)),
                                  key=lambda k: n_papers[k])
        index_top_papers = index_top_papers[-top_n:]

        rank_papers = {}
        for index in index_top_papers:
            rank_papers[paper_ls[index]] = n_papers[index]

        paper_ls = rank_papers.keys()

        cls.assign_categories(paper_ls)

        if only_assign_categories:
            return

        rank_user = {}

        users_ls = []
        n_entries_ls = []

        for user in users:
            users_ls.append(user)
            n_entries_ls.append(sum(users[user].values()))

        index_top_users = sorted(range(len(n_entries_ls)),
                                 key=lambda k: n_entries_ls[k])
        users_ls = [users_ls[i] for i in index_top_users]
        n_entries_ls = [n_entries_ls[i] for i in index_top_users]
        ind_bis = bisect_left(n_entries_ls, n_entries)

        users_ls = users_ls[ind_bis:]
        n_entries_ls = n_entries_ls[ind_bis:]

        final_users = []
        metric_users = []
        for user, n in zip(users_ls, n_entries_ls):
            diff_papers = set(users[user].keys()).intersection(set(paper_ls))
            n_diff = len(diff_papers)
            if n_diff < different_papers:
                continue
            final_users.append(user)
            metric_users.append(n_diff)
            rank_user[user] = {
                'stats': (n, n_diff),
                'diff_papers': diff_papers
            }

        index_top_users = sorted(range(len(final_users)),
                                 key=lambda k: metric_users[k])

        if n_users is not None and len(index_top_users) > n_users:
            index_top_users = index_top_users[-n_users:]

            rank_user_final = {}
            for ind in index_top_users:
                rank_user_final[final_users[ind]] = rank_user[final_users[ind]]
            rank_user = rank_user_final

        file_name = cls._name_file_final(year=year, month=month)
        JSONFile.write([rank_papers, rank_user], file_name)

        logger.info('Number of papers is %d' % len(rank_papers))
        logger.info('Number of users is %d' % len(rank_user))

        return [rank_papers, rank_user]
Example no. 12
from __future__ import absolute_import

from stratified_bayesian_optimization.util.json_file import JSONFile
import numpy as np

read = JSONFile.read(
    "problems/test_simulated_gp/simulated_function_with_1000_5")
points = read['points']
function = read['function']


def find_point_in_domain(x, array=np.array(points)):
    """
    Find the index of the closest point in array to x
    :param x: float
    :param array: np.array(float)
    :return: int
    """
    idx = np.abs(array - x).argmin()
    return idx


def toy_example(x):
    """

    :param x: [float, int]
    :return: [float]
    """
    idx = find_point_in_domain(x[0])

    return [function[str(int(x[1]))][idx]]
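
A hypothetical call, assuming the JSON file above holds a grid of 'points' and one sampled function per task index:

# x[0] is a continuous coordinate, x[1] a task index; the result is the sampled
# function evaluated at the grid point closest to x[0].
value = toy_example([0.37, 2])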
Example no. 13
    optimal_value = float(args.optimal_value)
    only_plot = bool(int(args.only_plot))
    n_iterations_plot = int(args.n_iterations_plot)

    np.random.seed(random_seed)

    method_ = method
    if method == 'lipschitz' or method == 'approx_lipschitz':
        method_ = 'real_gradient'

    name_model = 'std_%f_rs_%d_lb_%f_ub_%f_lr_%f_%s' % (std, random_seed, lb,
                                                        ub, lr, method_)

    dir_data = 'data/multi_start/' + problem_name + '/training_results/'

    data = JSONFile.read(dir_data + name_model)

    name_model = 'std_%f_rs_%d_lb_%f_ub_%f_lr_%f_%s' % (std, random_seed, lb,
                                                        ub, lr, method)

    if method == 'real_gradient':
        data['gradients'] = [-1.0 * np.array(t) for t in data['gradients']]
    elif method == 'grad_epoch':
        new_grads = {}
        for t in data['gradients']:
            new_grads[int(t)] = -1.0 * np.array(data['gradients'][t])
        data['gradients'] = new_grads

    data['values'] = [-1.0 * np.array(t) for t in data['values']]

    def get_values(i):
def create_model(args, n_training=3, n_epochs=100, burning=True, point=None):

    rs = int(args['rs'])
    lb = [float(t) for t in args['lb']]
    ub = [float(t) for t in args['ub']]
    std = float(args['std'])
    lr = float(args['lr'])
    method = args['method']
    problem_name = args['problem_name']

    # TODO: ADD THIS AS A PARAMETER
    lipschitz_cte = 2.0

    method_ = method
    if method == 'lipschitz' or method == 'approx_lipschitz':
        method_ = 'real_gradient'

    if point is None:
        name_model = 'std_%f_rs_%d_lb_%f_ub_%f_lr_%f_%s' % (std, rs, lb[0], ub[0], lr, method_)
    else:
        name_model = 'std_%f_rs_%d_lb_%f_ub_%f_lr_%f_%s_point_%d' % (std, rs, lb[0], ub[0], lr, method_, point)
    dir_data = 'data/multi_start/' + problem_name + '/' + 'training_results/'

    data = JSONFile.read(dir_data + name_model)

    if point is None:
        name_model = 'std_%f_rs_%d_lb_%f_ub_%f_lr_%f_%s' % (std, rs, lb[0], ub[0], lr, method)
    else:
        name_model = 'std_%f_rs_%d_lb_%f_ub_%f_lr_%f_%s_point_%d' % (std, rs, lb[0], ub[0], lr, method, point)

    if method == 'real_gradient':
        data['gradients'] = [-1.0 * np.array(t) for t in data['gradients']]
    elif method == 'grad_epoch':
        new_grads = {}
        for t in data['gradients']:
            new_grads[int(t)] = -1.0 * np.array(data['gradients'][t])
        data['gradients'] = new_grads

    data['stochastic_gradients'] = [-1.0 * np.array(t) for t in data['stochastic_gradients']]

    data['values'] = [-1.0 * np.array(t) for t in data['values']]
    data['points'] = [np.array(t) for t in data['points']]

    training_data = {'points': data['points'][0:n_training],
                     'values': data['values'][0:n_training], 'gradients': [],
                     'stochastic_gradients': data['stochastic_gradients'][0:n_training]}
    if method == 'real_gradient':
        training_data['gradients'] = data['gradients'][0:n_training]
    elif method == 'grad_epoch':
        training_data['gradients'] = {}
        for j in range(n_training):
            if j in data['gradients']:
                training_data['gradients'][j] = data['gradients'][j]

    points_domain = data['points'][0: n_training]
    best_results = np.max(training_data['values'])
    functions_get_value = get_values
    kwargs = {'data': data, 'method': method}

    n_burning = 50

    n_batches = 1
    total_iterations = n_epochs * n_batches

    if method == 'approx_lipschitz' or method == 'lipschitz':
        if method == 'approx_lipschitz':
            lipschitz_cte = None
        model = StatModelLipschitz(
            training_data, best_results, n_training, functions_get_value,
            points_domain[-1], 0,
            n_training, specifications=name_model, problem_name=problem_name,
            max_iterations=total_iterations, parametric_mean=False, lower=None, upper=None,
            n_burning=n_burning, total_batches=n_batches, type_model=method, lipschitz=lipschitz_cte,
            n_thinning=10, kwargs_get_value_next_iteration=kwargs, burning=burning)
    else:
        model = StatModel(
            training_data, best_results, n_training, functions_get_value,
            points_domain[-1], 0,
            n_training, specifications=name_model, problem_name=problem_name,
            max_iterations=total_iterations, parametric_mean=False, lower=None, upper=None,
            n_burning=n_burning, total_batches=n_batches, model_gradient=method,
            n_thinning=10, kwargs_get_value_next_iteration=kwargs, burning=burning)
    return model
    def collect_multi_spec_results(cls,
                                   multiple_spec,
                                   total_iterations=None,
                                   sign=True,
                                   sqr=False,
                                   same_random_seeds=False,
                                   rs_lw=0,
                                   rs_up=None):
        """
        Writes the files with the aggregated results
        :param multiple_spec:
        :param total_iterations: (int) Collect results until this iteration
        :param sign: (boolean) If true, we multiply the results by -1
        :param sqr: (boolean) If true, we take the square root of the results
        :param same_random_seeds: (boolean) If true, we use the same random seeds for both problems
        :return:
        """

        if total_iterations is None:
            total_iterations = 10000

        n_specs = len(multiple_spec.get('random_seeds'))

        results_dict = {}

        if sign:
            sign = -1.0
        else:
            sign = 1.0

        if sqr:
            f = lambda x: x**0.5
        else:
            f = lambda x: x

        if rs_up is not None:
            same_random_seeds = True

        if same_random_seeds:
            random_seeds = {}
            for method in set(multiple_spec.get('method_optimizations')):
                random_seeds[method] = []
            for i in xrange(n_specs):
                problem_name = multiple_spec.get('problem_names')[i]
                dir = path.join(PROBLEM_DIR, problem_name, PARTIAL_RESULTS)

                if not os.path.exists(dir):
                    continue

                training_name = multiple_spec.get('training_names')[i]
                n_training = multiple_spec.get('n_trainings')[i]
                random_seed = multiple_spec.get('random_seeds')[i]
                method = multiple_spec.get('method_optimizations')[i]
                n_samples_parameters = multiple_spec.get(
                    'n_samples_parameterss')[i]
                n_iterations = multiple_spec.get('n_iterationss')[i]

                file_name = cls._filename_results(
                    problem_name=problem_name,
                    training_name=training_name,
                    n_points=n_training,
                    random_seed=random_seed,
                    method=method,
                    n_samples_parameters=n_samples_parameters,
                )

                file_path = path.join(dir, file_name)
                if not os.path.exists(file_path):
                    continue
                random_seeds[method].append(random_seed)

            methods = list(set(multiple_spec.get('method_optimizations')))
            random_seeds_check = set(random_seeds[methods[0]])
            for i in xrange(1, len(methods)):
                random_seeds_check = random_seeds_check.intersection(
                    random_seeds[methods[i]])

            if rs_up is not None:
                random_seeds_check = random_seeds_check.intersection(
                    range(rs_lw, rs_up))

        for i in xrange(n_specs):
            problem_name = multiple_spec.get('problem_names')[i]
            dir = path.join(PROBLEM_DIR, problem_name, PARTIAL_RESULTS)

            if not os.path.exists(dir):
                continue

            training_name = multiple_spec.get('training_names')[i]
            n_training = multiple_spec.get('n_trainings')[i]
            random_seed = multiple_spec.get('random_seeds')[i]
            method = multiple_spec.get('method_optimizations')[i]
            n_samples_parameters = multiple_spec.get(
                'n_samples_parameterss')[i]
            n_iterations = multiple_spec.get('n_iterationss')[i]

            if same_random_seeds and random_seed not in random_seeds_check:
                continue

            file_name = cls._filename_results(
                problem_name=problem_name,
                training_name=training_name,
                n_points=n_training,
                random_seed=random_seed,
                method=method,
                n_samples_parameters=n_samples_parameters,
            )

            file_path = path.join(dir, file_name)

            if not os.path.exists(file_path):
                continue

            results = JSONFile.read(file_path)
            results = results['objective_values']

            key_dict = (problem_name, training_name, n_training, method)
            if key_dict not in results_dict:
                results_dict[key_dict] = \
                    [[] for _ in range(min(n_iterations + 1, total_iterations))]

            for iteration in range(
                    min(total_iterations, n_iterations + 1, len(results))):
                results_dict[key_dict][iteration].append(
                    f(sign * results[iteration]))

        problem_names = list(set(multiple_spec.get('problem_names')))
        training_names = set(multiple_spec.get('training_names'))
        n_trainings = set(multiple_spec.get('n_trainings'))
        methods = set(multiple_spec.get('method_optimizations'))

        aggregated_results = {}

        for problem in problem_names:
            for training in training_names:
                for n_training in n_trainings:
                    for method in methods:

                        key = (problem, training, n_training, method)
                        aggregated_results[key] = {}

                        if key not in results_dict:
                            continue

                        results = results_dict[key]

                        for iteration in xrange(
                                min(len(results), total_iterations)):
                            if len(results[iteration]) > 0:
                                values = results[iteration]
                                mean = np.mean(values)
                                std = np.std(values)
                                n_samples = len(results[iteration])
                                ci_low = mean - 1.96 * std / np.sqrt(n_samples)
                                ci_up = mean + 1.96 * std / np.sqrt(n_samples)

                                aggregated_results[key][iteration] = {}
                                aggregated_results[key][iteration][
                                    'mean'] = mean
                                aggregated_results[key][iteration]['std'] = std
                                aggregated_results[key][iteration][
                                    'n_samples'] = n_samples
                                aggregated_results[key][iteration][
                                    'ci_low'] = ci_low
                                aggregated_results[key][iteration][
                                    'ci_up'] = ci_up
                            else:
                                break

                        if len(aggregated_results[key]) > 0:
                            dir = path.join(PROBLEM_DIR, problem,
                                            AGGREGATED_RESULTS)

                            if not os.path.exists(dir):
                                os.mkdir(dir)

                            file_name = cls._aggregated_results(
                                problem_name=problem,
                                training_name=training,
                                n_points=n_training,
                                method=method,
                            )

                            file_path = path.join(dir, file_name)
                            JSONFile.write(aggregated_results[key], file_path)
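
The confidence bounds computed above are the usual normal-approximation interval, mean +/- 1.96 * std / sqrt(n). As a standalone helper (a sketch):

import numpy as np


def mean_ci(values, z=1.96):
    # 95% normal-approximation confidence interval for the mean.
    values = np.asarray(values, dtype=float)
    mean = values.mean()
    half_width = z * values.std() / np.sqrt(len(values))
    return mean - half_width, mean, mean + half_width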
Example no. 16
    def get_gp(cls,
               name_model,
               problem_name,
               type_kernel,
               dimensions,
               bounds_domain,
               type_bounds=None,
               n_training=0,
               noise=False,
               training_data=None,
               points=None,
               training_name=None,
               mle=True,
               thinning=0,
               n_burning=0,
               max_steps_out=1,
               n_samples=None,
               random_seed=DEFAULT_RANDOM_SEED,
               kernel_values=None,
               mean_value=None,
               var_noise_value=None,
               cache=True,
               same_correlation=False,
               use_only_training_points=True,
               optimization_method=None,
               n_samples_parameters=0,
               parallel_training=True,
               simplex_domain=None,
               objective_function=None,
               define_samplers=True):
        """
        Fetch a GP model from file if it exists, otherwise train a new model and save it locally.

        :param name_model: str
        :param problem_name: str
        :param type_kernel: [(str)] Must be in possible_kernels. If it's a product of kernels it
            should be a list as: [PRODUCT_KERNELS_SEPARABLE, NAME_1_KERNEL, NAME_2_KERNEL]
        :param dimensions: [int]. It has only the n_tasks for the task_kernels, and for the
            PRODUCT_KERNELS_SEPARABLE contains the dimensions of every kernel in the product
        :param bounds_domain: [([float, float] or [float])], the first case is when the bounds are
            lower or upper bound of the respective entry; in the second case, it's a list of finite
            points representing the domain of that entry.
        :param type_bounds: [0 or 1], 0 if the bounds are lower or upper bound of the respective
            entry, 1 if the bounds are all the finite options for that entry.
        :param n_training: int
        :param noise: (boolean) If true, we get noisy evaluations.
        :param training_data: {'points': [[float]], 'evaluations': [float],
            'var_noise': [float] or None}
        :param points: [[float]]. If training_data is None, we can evaluate the objective
            function in these points.
        :param training_name: (str), prefix used to save the training data.
        :param mle: (boolean) If true, fits the GP by MLE.
        :param thinning: (int)
        :param n_burning: (int) Number of burnings samples for the MCMC.
        :param max_steps_out: (int) Maximum number of steps out for the stepping-out or
                doubling procedure in slice sampling.
        :param n_samples: (int) If the objective is noisy, we take n_samples of the function to
            estimate its value.
        :param random_seed: (int)
        :param kernel_values: [float], contains the default values of the parameters of the kernel
        :param mean_value: [float], It contains the value of the mean parameter.
        :param var_noise_value: [float], It contains the variance of the noise of the model
        :param cache: (boolean) Try to get model from cache
        :param same_correlation: (boolean) If true, it uses the same correlations for the task
            kernel.
        :param use_only_training_points: (boolean) If the model is read and this param is true,
            it uses only the training points in data. Otherwise, it also includes new points
            previously computed.
        :param optimization_method: (str)
        :param n_samples_parameters: (int)
        :param parallel_training: (boolean)
        :param define_samplers: (boolean) If False, samplers for the hyperparameters are not
            defined.

        :return: (GPFittingGaussian) - An instance of GPFittingGaussian
        """
        model_type = cls._model_map[name_model]

        if training_name is None:
            training_name = 'default_training_data_%d_points_rs_%d' % (
                n_training, random_seed)

        if use_only_training_points:
            f_name = cls._get_filename(model_type, problem_name, type_kernel,
                                       training_name)
            f_name_cache = cls._get_filename_modified(model_type, problem_name,
                                                      type_kernel,
                                                      training_name,
                                                      optimization_method,
                                                      n_samples_parameters)
        else:
            f_name = cls._get_filename_modified(model_type, problem_name,
                                                type_kernel, training_name,
                                                optimization_method,
                                                n_samples_parameters)

        if not os.path.exists('data'):
            os.mkdir('data')

        if not os.path.exists(GP_DIR):
            os.mkdir(GP_DIR)

        gp_dir = path.join(GP_DIR, problem_name)

        if not os.path.exists(gp_dir):
            os.mkdir(gp_dir)

        gp_path = path.join(gp_dir, f_name)

        gp_path_cache = path.join(gp_dir, f_name_cache)

        if cache:
            data = JSONFile.read(gp_path)
        else:
            data = None

        if data is not None:
            return model_type.deserialize(
                data, use_only_training_points=use_only_training_points)

        if training_data is None or training_data == {}:
            training_data = TrainingDataService.get_training_data(
                problem_name,
                training_name,
                bounds_domain,
                n_training=n_training,
                points=points,
                noise=noise,
                n_samples=n_samples,
                random_seed=random_seed,
                type_bounds=type_bounds,
                cache=cache,
                parallel=parallel_training,
                gp_path_cache=gp_path_cache,
                simplex_domain=simplex_domain,
                objective_function=objective_function)

        logger.info("Training %s" % model_type.__name__)

        gp_model = model_type.train(type_kernel,
                                    dimensions,
                                    mle,
                                    training_data,
                                    bounds_domain,
                                    thinning=thinning,
                                    n_burning=n_burning,
                                    max_steps_out=max_steps_out,
                                    random_seed=random_seed,
                                    type_bounds=type_bounds,
                                    training_name=training_name,
                                    problem_name=problem_name,
                                    kernel_values=kernel_values,
                                    mean_value=mean_value,
                                    var_noise_value=var_noise_value,
                                    same_correlation=same_correlation,
                                    simplex_domain=simplex_domain,
                                    define_samplers=define_samplers)

        JSONFile.write(gp_model.serialize(), gp_path)

        return gp_model
    # parser.add_argument('method_2', help='real_gradient')

    args_ = parser.parse_args()
    n_iterations = int(args_.n_iterations)
    method = args_.method
    problem = args_.problem_name
    animation = bool(int(args_.animation))

    file_1 = 'data/multi_start/' + problem + '/' + 'greedy_policy/' + method + '.json'
    file_2 = 'data/multi_start/' + problem + '/' + 'uniform_policy/' + method + '.json'

    n_restarts = int(args_.n_starting_points)
    n_training = 3
    n = n_iterations

    data = JSONFile.read(file_1)
    data_2 = JSONFile.read(file_2)

    data_list = [data, data_2]

    best_values = {}
    data_dict = {}
    type_1 = 'greedy'  #+ method
    types = [type_1, 'equal_allocation']

    data_dict[type_1] = data
    data_dict['equal_allocation'] = data_2

    for t in types:
        best_values[t] = get_best_values(data_dict[t], n_restarts, n_training)
# num_user = 4815
num_item = 2018
num_user = 2752
# there are 263238 observations
total_obs = 263238
num_batches = int((n_folds - 1) * (float(total_obs) / float(n_folds)) / 500.0)

# num_item = 326
# num_user = 507
# there are 90271 observations

train = []
validate = []

file_name = TrainingData._name_fold_indexes(year=year, month=month)
random_indexes = JSONFile.read(file_name)

# file_name = TrainingData._name_training_data(year=year, month=month)
# training_data = JSONFile.read(file_name)

for i in range(n_folds):
    file_name = TrainingData._name_fold_data_training(year=year, month=month, fold=i)
    training = JSONFile.read(file_name)
    train.append(np.array(training))

    file_name = TrainingData._name_fold_data_validation(year=year, month=month, fold=i)
    validation = JSONFile.read(file_name)
    validate.append(np.array(validation))

    def get_training_data(cls,
                          problem_name,
                          training_name,
                          bounds_domain,
                          n_training=5,
                          points=None,
                          noise=False,
                          n_samples=None,
                          random_seed=DEFAULT_RANDOM_SEED,
                          parallel=True,
                          type_bounds=None,
                          cache=True,
                          gp_path_cache=None,
                          simplex_domain=None,
                          objective_function=None):
        """

        :param problem_name: str
        :param training_name: (str), prefix used to save the training data.
        :param bounds_domain: [([float, float] or [float])], the first case is when the bounds are
            lower or upper bound of the respective entry; in the second case, it's a list of finite
            points representing the domain of that entry.
        :param n_training: (int), number of training points if points is None
        :param points: [[float]]
        :param noise: boolean, true if the evaluations are noisy
        :param n_samples: int. If noise is true, we take n_samples of the function to estimate its
            value.
        :param random_seed: int
        :param parallel: (boolean) Train in parallel if it's True.
        :param type_bounds: [0 or 1], 0 if the bounds are lower or upper bound of the respective
            entry, 1 if the bounds are all the finite options for that entry.
        :param cache: (boolean) Try to get model from cache
        :return: {'points': [[float]], 'evaluations': [float], 'var_noise': [float] or []}
        """

        if cache and gp_path_cache is not None:
            data = JSONFile.read(gp_path_cache)
            if data is not None:
                return data['data']

        logger.info("Getting training data")

        rs = random_seed
        if points is not None and len(points) > 0:
            n_training = len(points)
            rs = 0

        file_name = cls._filename(
            problem_name=problem_name,
            training_name=training_name,
            n_points=n_training,
            random_seed=rs,
        )

        if not os.path.exists(PROBLEM_DIR):
            os.mkdir(PROBLEM_DIR)

        training_dir = path.join(PROBLEM_DIR, problem_name, 'data')

        if not os.path.exists(path.join(PROBLEM_DIR, problem_name)):
            os.mkdir(path.join(PROBLEM_DIR, problem_name))

        if not os.path.exists(training_dir):
            os.mkdir(training_dir)

        training_path = path.join(training_dir, file_name)

        if cache:
            training_data = JSONFile.read(training_path)
        else:
            training_data = None

        if training_data is not None:
            return training_data

        if n_training == 0:
            return {'points': [], 'evaluations': [], 'var_noise': []}

        np.random.seed(random_seed)

        if points is None or len(points) == 0:
            points = cls.get_points_domain(n_training,
                                           bounds_domain,
                                           random_seed,
                                           training_name,
                                           problem_name,
                                           type_bounds,
                                           simplex_domain=simplex_domain)

        if objective_function is None:
            name_module = cls.get_name_module(problem_name)
            module = __import__(name_module, globals(), locals(), -1)
        else:
            name_module = None
            module = None

        training_data = {}
        training_data['points'] = points
        training_data['evaluations'] = []
        training_data['var_noise'] = []

        if not parallel:
            for point in points:
                if noise:
                    if module is not None:
                        evaluation = cls.evaluate_function(
                            module, point, n_samples)
                    else:
                        evaluation = objective_function(point, n_samples)
                    training_data['var_noise'].append(evaluation[1])
                else:
                    if module is not None:
                        evaluation = cls.evaluate_function(module, point)
                    else:
                        evaluation = objective_function(point)
                training_data['evaluations'].append(evaluation[0])
                JSONFile.write(training_data, training_path)
            JSONFile.write(training_data, training_path)
            return training_data

        arguments = convert_list_to_dictionary(points)

        if name_module is not None:
            kwargs = {
                'name_module': name_module,
                'cls_': cls,
                'n_samples': n_samples
            }
        else:
            kwargs = {
                'name_module': None,
                'cls_': cls,
                'n_samples': n_samples,
                'objective_function': objective_function
            }

        training_points = Parallel.run_function_different_arguments_parallel(
            wrapper_evaluate_objective_function, arguments, **kwargs)

        training_points = convert_dictionary_to_list(training_points)

        training_data['evaluations'] = [value[0] for value in training_points]

        if noise:
            training_data['var_noise'] = [
                value[1] for value in training_points
            ]

        if cache:
            JSONFile.write(training_data, training_path)

        return training_data
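
A hypothetical call, assuming a registered problem module whose objective is noiseless and one-dimensional:

training = TrainingDataService.get_training_data(
    'test_problem', 'default_training', bounds_domain=[[0.0, 1.0]],
    n_training=5, noise=False)
# -> {'points': [[...], ...], 'evaluations': [...], 'var_noise': []}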
def plot_aggregate_results(multiple_spec,
                           negative=True,
                           square=True,
                           title_plot=None,
                           y_label=None,
                           n_iterations=None,
                           repeat_ei=1):
    """

    :param multiple_spec: (multiple_spec entity) Name of the files with the aggregate results
    :return:
    """

    problem_names = list(set(multiple_spec.get('problem_names')))
    training_names = set(multiple_spec.get('training_names'))
    n_trainings = set(multiple_spec.get('n_trainings'))
    methods = set(multiple_spec.get('method_optimizations'))

    results = {}
    file_path_plot = None
    for problem in problem_names:
        dir = path.join(PROBLEM_DIR, problem, AGGREGATED_RESULTS)
        if not os.path.exists(dir):
            continue
        for training in training_names:
            for n_training in n_trainings:
                file_name = _aggregated_results_plot(
                    problem_name=problem,
                    training_name=training,
                    n_points=n_training,
                )

                if file_path_plot is None:
                    logger.info('aggregated results dir is: %s' % dir)
                    file_path_plot = path.join(dir, file_name)
                for method in methods:
                    if method in results:
                        continue

                    file_name = _aggregated_results(
                        problem_name=problem,
                        training_name=training,
                        n_points=n_training,
                        method=method,
                    )

                    file_path = path.join(dir, file_name)

                    if not os.path.exists(file_path):
                        continue

                    data = JSONFile.read(file_path)

                    x_axis = list(data.keys())
                    x_axis = [int(i) for i in x_axis]
                    x_axis.sort()

                    if repeat_ei > 1 and method == EI_METHOD:
                        new_x = []
                        for i in x_axis:
                            new_x += range(i * repeat_ei, (i + 1) * repeat_ei)
                        x_axis = new_x

                    if n_iterations is not None:
                        x_axis = x_axis[0:n_iterations]

                    y_values = []
                    ci_u = []
                    ci_l = []

                    for i in x_axis:
                        if repeat_ei > 1 and method == EI_METHOD:
                            j = i / repeat_ei
                        else:
                            j = i
                        y_values.append(data[str(j)]['mean'])
                        ci_u.append(data[str(j)]['ci_up'])
                        ci_l.append(data[str(j)]['ci_low'])

                    results[method] = [x_axis, y_values, ci_u, ci_l]

    colors = ['b', 'r', 'g']

    plt.figure()

    for id, method in enumerate(results):
        label = str(method)
        if label == SBO_METHOD:
            label = 'ibo'
        x_axis = results[method][0]
        y_values = results[method][1]
        ci_u = results[method][2]
        ci_l = results[method][3]
        col = colors[id]
        plt.plot(x_axis, y_values, color=col, linewidth=2.0, label=label)
        plt.plot(x_axis, ci_u, '--', color=col, label="95% CI")
        plt.plot(x_axis, ci_l, '--', color=col)

    if title_plot is None:
        title_plot = problem_names[0]

    if y_label is None:
        y_label = 'Cross Validation Error'

    plt.xlabel('Number of Samples', fontsize=22)
    plt.ylabel(y_label, fontsize=22)
    plt.legend(loc=3, ncol=2, mode="expand", borderaxespad=0.)
    plt.title(title_plot, fontsize=22)
    plt.subplots_adjust(left=0.13, right=0.99, top=0.92, bottom=0.12)
    plt.savefig(file_path_plot)
import argparse
import os

import numpy as np

from stratified_bayesian_optimization.util.json_file import JSONFile
from stratified_bayesian_optimization.initializers.log import SBOLog

logger = SBOLog(__name__)

if __name__ == '__main__':
    # Example usage:
    # python -m problems.cnn_cifar10.scripts.maximum_runs 500 600

    parser = argparse.ArgumentParser()
    parser.add_argument('min_rs', help='e.g. 500')
    parser.add_argument('max_rs', help='e.g. 600')

    args = parser.parse_args()
    min_rs = int(args.min_rs)
    max_rs = int(args.max_rs)

    max_values = []
    for i in xrange(min_rs, max_rs):
        file_name = 'problems/cnn_cifar10/runs_random_seeds/' + 'rs_%d' % i + '.json'
        if not os.path.exists(file_name):
            continue
        data = JSONFile.read(file_name)
        max_values.append(data['test_error_images'])

    max_value = np.max(max_values)
    min_value = np.min(max_values)

    logger.info('max is: %f' % max_value)
    logger.info('min is: %f' % min_value)
Example no. 22
    def generate_evaluations(self,
                             problem_name,
                             model_type,
                             training_name,
                             n_training,
                             random_seed,
                             iteration,
                             n_points_by_dimension=None,
                             n_tasks=0):
        """
        Generates evaluations of SBO and writes them to the debug directory.

        :param problem_name: (str)
        :param model_type: (str)
        :param training_name: (str)
        :param n_training: (int)
        :param random_seed: (int)
        :param iteration: (int)
        :param n_points_by_dimension: [int] Number of points by dimension
        :param n_tasks: (int) n_tasks > 0 if the last element of the domain is a task

        """

        if not os.path.exists(DEBUGGING_DIR):
            os.mkdir(DEBUGGING_DIR)

        debug_dir = path.join(DEBUGGING_DIR, problem_name)

        if not os.path.exists(debug_dir):
            os.mkdir(debug_dir)

        kernel_name = ''
        for kernel in self.gp.type_kernel:
            kernel_name += kernel + '_'
        kernel_name = kernel_name[0:-1]

        f_name = self._filename_points_ei_evaluations(
            model_type=model_type,
            problem_name=problem_name,
            type_kernel=kernel_name,
            training_name=training_name,
            n_training=n_training,
            random_seed=random_seed)

        debug_path = path.join(debug_dir, f_name)

        vectors = JSONFile.read(debug_path)

        if vectors is None:
            bounds = self.gp.bounds
            n_points = n_points_by_dimension
            if n_points is None:
                n_points = (bounds[0][1] - bounds[0][0]) * 10

            if n_tasks > 0:
                bounds_x = [bounds[i] for i in xrange(len(bounds) - 1)]
                n_points_x = [n_points[i] for i in xrange(len(n_points))]
            else:
                n_points_x = n_points
                bounds_x = bounds

            points = []
            for bound, number_points in zip(bounds_x, n_points_x):
                points.append(np.linspace(bound[0], bound[1], number_points))

            vectors = []
            for point in itertools.product(*points):
                vectors.append(point)

            JSONFile.write(vectors, debug_path)

        n = len(vectors)
        points_ = deepcopy(vectors)

        vectors = np.array(vectors)

        if n_tasks > 0:
            vectors_ = None
            for i in xrange(n_tasks):
                task_vector = np.zeros(n) + i
                task_vector = task_vector.reshape((n, 1))
                points_ = np.concatenate((vectors, task_vector), axis=1)

                if vectors_ is not None:
                    vectors_ = np.concatenate((vectors_, points_), axis=0)
                else:
                    vectors_ = points_
            vectors = vectors_

        # TODO: extend to the case where w can be continuous

        n = vectors.shape[0]

        points = {}
        for i in xrange(n):
            points[i] = vectors[i, :]

        args = (
            False,
            None,
            False,
            0,
            self,
        )
        val = Parallel.run_function_different_arguments_parallel(
            wrapper_objective_acquisition_function, points, *args)

        values = np.zeros(n)
        for i in xrange(n):
            values[i] = val.get(i)

        f_name = self._filename_ei_evaluations(iteration=iteration,
                                               model_type=model_type,
                                               problem_name=problem_name,
                                               type_kernel=kernel_name,
                                               training_name=training_name,
                                               n_training=n_training,
                                               random_seed=random_seed)

        debug_path = path.join(debug_dir, f_name)

        JSONFile.write({'points': points_, 'evaluations': values}, debug_path)

        return values
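
For clarity, the grid/task cross product built above repeats every grid point once per task and appends the task index as the last column; a small numpy sketch:

import numpy as np

grid = np.array([[0.0], [0.5], [1.0]])
n_tasks = 2
expanded = np.vstack([
    np.hstack([grid, np.full((len(grid), 1), float(task))])
    for task in range(n_tasks)
])
# expanded: [[0.0, 0.0], [0.5, 0.0], [1.0, 0.0], [0.0, 1.0], [0.5, 1.0], [1.0, 1.0]]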
    lower_random_seed = int(args_.lower_random_seed)
    upper_random_seed = int(args_.upper_random_seed)


    prefix_file_1 = 'data/multi_start/' + problem + '/' + 'greedy_policy/' + method + '_random_seed_'
    prefix_file_2 = 'data/multi_start/' + problem + '/' + 'uniform_policy/' + method + '_random_seed_'
    # prefix_file_3 = 'data/multi_start/' + problem + '/' + 'random_policy/' + method + '_random_seed_'
    prefix_file_3 = 'data/multi_start/' + problem + '/' + 'swersky_greedy_policy/' + 'swersky' + '_random_seed_'

    data = {}
    data_2 = {}
    data_3 = {}
    for i in range(lower_random_seed, upper_random_seed):
        file_1 = prefix_file_1 + str(i) + '_n_restarts_' + str(n_restarts) + '.json'
        try:
            data[i] = JSONFile.read(file_1)
        except Exception as e:
            data[i] = None

        file_2 = prefix_file_2 + str(i) + '_n_restarts_' + str(n_restarts) + '.json'
        try:
            data_2[i] = JSONFile.read(file_2)
        except Exception as e:
            data_2[i] = None

        file_3 = prefix_file_3 + str(i) + '_n_restarts_' + str(n_restarts) + '.json'

        try:
            data_3[i] = JSONFile.read(file_3)
        except Exception as e:
            data_3[i] = None
    parser = argparse.ArgumentParser()
    parser.add_argument('starting_point', help='e.g. 0')

    args = parser.parse_args()

    starting_point_index = int(args.starting_point)

    dir_data = 'data/multi_start/neural_networks/training_results/'

    n_epochs = 20
    n_batches = 60
    total_iterations = n_epochs * n_batches

    cnn_data = {}
    cnn_data[starting_point_index] = JSONFile.read(dir_data +
                                                   str(starting_point_index))

    for j in cnn_data[starting_point_index]:
        cnn_data[starting_point_index][j] = [
            t / 100.0 for t in cnn_data[starting_point_index][j]
        ]

    def get_values(i, index):
        data = cnn_data[index]
        return data[str(i / (n_batches + 1) + 1)][(i - 1) % n_batches]

    training_data = {}
    best_results = {}
    functions_get_value = {}
    arguments = {}
    n_training = 3
    def from_spec(cls, spec):
        """
        Construct BGO instance from spec
        :param spec: RunSpecEntity

        :return: BGO
        # TO DO: It now only returns domain
        """

        random_seed = spec.get('random_seed')
        method_optimization = spec.get('method_optimization')

        logger.info("Training GP model")
        logger.info("Random seed is: %d" % random_seed)
        logger.info("Algorithm used is:")
        logger.info(method_optimization)

        gp_model = GPFittingService.from_dict(spec)
        noise = spec.get('noise')
        quadrature = None
        acquisition_function = None

        domain = DomainService.from_dict(spec)

        if method_optimization not in cls._possible_optimization_methods:
            raise Exception("Incorrect BGO method")

        if method_optimization == SBO_METHOD:
            x_domain = spec.get('x_domain')
            distribution = spec.get('distribution')
            parameters_distribution = spec.get('parameters_distribution')
            quadrature = BayesianQuadrature(
                gp_model,
                x_domain,
                distribution,
                parameters_distribution=parameters_distribution)

            acquisition_function = SBO(
                quadrature, np.array(domain.discretization_domain_x))
        elif method_optimization == MULTI_TASK_METHOD:
            x_domain = spec.get('x_domain')
            distribution = spec.get('distribution')
            parameters_distribution = spec.get('parameters_distribution')
            quadrature = BayesianQuadrature(
                gp_model,
                x_domain,
                distribution,
                parameters_distribution=parameters_distribution,
                model_only_x=True)
            acquisition_function = MultiTasks(
                quadrature, quadrature.parameters_distribution.get(TASKS))
        elif method_optimization == EI_METHOD:
            acquisition_function = EI(gp_model, noisy_evaluations=noise)
        elif method_optimization == SDE_METHOD:
            x_domain = len(spec.get('x_domain'))
            parameters_distribution = spec.get('parameters_distribution')
            domain_random = np.array(parameters_distribution['domain_random'])
            weights = np.array(parameters_distribution['weights'])
            acquisition_function = SDE(gp_model, domain_random, x_domain,
                                       weights)

        problem_name = spec.get('problem_name')
        training_name = spec.get('training_name')
        n_samples = spec.get('n_samples')
        minimize = spec.get('minimize')
        n_iterations = spec.get('n_iterations')
        name_model = spec.get('name_model')
        parallel = spec.get('parallel')
        n_training = spec.get('n_training')
        number_points_each_dimension_debug = spec.get(
            'number_points_each_dimension_debug')
        n_samples_parameters = spec.get('n_samples_parameters', 0)
        use_only_training_points = spec.get('use_only_training_points', True)

        n_iterations = n_iterations - (
            len(gp_model.training_data['evaluations']) - n_training)

        bgo = cls(acquisition_function,
                  gp_model,
                  n_iterations,
                  problem_name,
                  training_name,
                  random_seed,
                  n_training,
                  name_model,
                  method_optimization,
                  minimize=minimize,
                  n_samples=n_samples,
                  noise=noise,
                  quadrature=quadrature,
                  parallel=parallel,
                  number_points_each_dimension_debug=
                  number_points_each_dimension_debug,
                  n_samples_parameters=n_samples_parameters,
                  use_only_training_points=use_only_training_points)

        if n_training < len(bgo.gp_model.training_data['evaluations']):
            extra_iterations = len(
                bgo.gp_model.training_data['evaluations']) - n_training
            data = JSONFile.read(bgo.objective.file_path)
            bgo.objective.evaluated_points = data['evaluated_points'][
                0:extra_iterations]
            bgo.objective.objective_values = data['objective_values'][
                0:extra_iterations]
            bgo.objective.model_objective_values = \
                data['model_objective_values'][0:extra_iterations]
            bgo.objective.standard_deviation_evaluations = data[
                'standard_deviation_evaluations']

        return bgo