from problems.arxiv.process_raw_data import ProcessRawData
import argparse

from stratified_bayesian_optimization.initializers.log import SBOLog

logger = SBOLog(__name__)


if __name__ == '__main__':
    # Usage: python -m problems.arxiv.scripts.run_year_data '1'
    parser = argparse.ArgumentParser()
    parser.add_argument('month', help='month of 2016 to process, e.g. 1')
    args = parser.parse_args()

    month = args.month
    files = ProcessRawData.generate_filenames_month(2016, int(month))

    logger.info("Files to be processed: ")
    logger.info(files)

    ProcessRawData.get_click_data(
        files, "problems/arxiv/data/2016_%s_processed_data.json" % month)
from __future__ import absolute_import

import numpy as np
import os

from scipy.stats import norm
from scipy.stats import foldnorm

from stratified_bayesian_optimization.initializers.log import SBOLog
from stratified_bayesian_optimization.util.json_file import JSONFile

logger = SBOLog(__name__)


class RandomPolicy(object):

    def __init__(self, dict_stat_models, name_model, problem_name, type_model='grad_epoch',
                 n_epochs=1, stop_iteration_per_point=100, random_seed=None, n_restarts=None):
        self.dict_stat_models = dict_stat_models
        # list() keeps this a concrete index list under Python 3 as well as Python 2
        self.points_index = list(range(len(self.dict_stat_models)))
        self.current_index = 0
        self.type_model = type_model
        self.problem_name = problem_name
        self.name_model = name_model
        self.n_epochs = n_epochs
        self.chosen_points = {}
        self.evaluations_obj = {}
        self.stop_iteration_per_point = stop_iteration_per_point
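
# Minimal usage sketch. The per-starting-point statistical models are whatever
# objects the surrounding experiment code builds; the None placeholders below
# are hypothetical, and only the constructor arguments shown above are assumed.
if __name__ == '__main__':
    stat_models = {0: None, 1: None}  # placeholders for real stat-model objects
    policy = RandomPolicy(
        dict_stat_models=stat_models,
        name_model='sgd',
        problem_name='toy_problem',
        n_epochs=5,
        stop_iteration_per_point=100,
    )
    logger.info('Policy tracks %d starting points' % len(policy.points_index))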
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as datasets
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
import torch.nn.functional as F

import math
import numpy as np

from stratified_bayesian_optimization.initializers.log import SBOLog
from stratified_bayesian_optimization.util.json_file import JSONFile

logger = SBOLog(__name__)


class Net(nn.Module):

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        # self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2(x), 2))
        x = x.view(-1, 320)
        # Assumed completion of the forward pass for the layers defined above:
        # two fully connected layers with a log-softmax output.
        x = F.relu(self.fc1(x))
        x = self.fc2(x)
        return F.log_softmax(x, dim=1)
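
# Quick shape check (illustrative only). With 1x28x28 inputs (e.g. MNIST),
# two 5x5 convolutions each followed by 2x2 max-pooling leave 20 channels of
# 4x4, i.e. 20 * 4 * 4 = 320 features, matching fc1 above.
if __name__ == '__main__':
    dummy = torch.randn(2, 1, 28, 28)
    print(Net()(dummy).size())  # torch.Size([2, 10])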
from __future__ import absolute_import

import argparse
import os

import numpy as np

from stratified_bayesian_optimization.util.json_file import JSONFile
from stratified_bayesian_optimization.initializers.log import SBOLog

logger = SBOLog(__name__)


if __name__ == '__main__':
    # Example usage:
    # python -m problems.cnn_cifar10.scripts.maximum_runs 500 600
    parser = argparse.ArgumentParser()
    parser.add_argument('min_rs', help='e.g. 500')
    parser.add_argument('max_rs', help='e.g. 600')
    args = parser.parse_args()

    min_rs = int(args.min_rs)
    max_rs = int(args.max_rs)

    max_values = []
    # range (not xrange) keeps the script runnable under Python 3 as well
    for i in range(min_rs, max_rs):
        file_name = 'problems/cnn_cifar10/runs_random_seeds/' + 'rs_%d' % i + '.json'
        if not os.path.exists(file_name):
            continue
        data = JSONFile.read(file_name)
        max_values.append(data['test_error_images'])
from __future__ import absolute_import

import numpy as np
import os
import argparse

from stratified_bayesian_optimization.util.json_file import JSONFile
from stratified_bayesian_optimization.initializers.log import SBOLog
from multi_start.parametric_functions import ParametricFunctions

logger = SBOLog(__name__)


def SGD(start, gradient, n, function, exact_gradient=None, args=(), kwargs={}, bounds=None,
        learning_rate=0.1, momentum=0.0, maxepoch=250, adam=True, betas=None, eps=1e-8,
        simplex_domain=None, name_model='1', method='real_gradient', n_epochs=1,
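
# Illustrative, self-contained sketch of the plain momentum update that the
# signature above suggests (learning_rate, momentum, maxepoch). It is not the
# body of SGD() itself, and toy_sgd/grad are hypothetical names.
def toy_sgd(grad, x0, learning_rate=0.1, momentum=0.0, maxepoch=250):
    x = np.array(x0, dtype=float)
    v = np.zeros_like(x)
    for _ in range(maxepoch):
        v = momentum * v - learning_rate * grad(x)
        x = x + v
    return x


# e.g. minimizing f(x) = ||x||^2, whose gradient is 2x:
# toy_sgd(lambda x: 2 * x, [3.0, -1.0])  # converges toward [0, 0]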
from __future__ import absolute_import

from stratified_bayesian_optimization.initializers.log import SBOLog
from stratified_bayesian_optimization.services.validate_gp_model import ValidateGPService
from stratified_bayesian_optimization.lib.constant import (
    PRODUCT_KERNELS_SEPARABLE,
    MATERN52_NAME,
    TASKS_KERNEL_NAME,
    SAME_CORRELATION,
)

logger = SBOLog(__name__)


if __name__ == '__main__':
    # Example:
    # python -m scripts.run_validate_gp_model

    type_kernel = [PRODUCT_KERNELS_SEPARABLE, MATERN52_NAME, TASKS_KERNEL_NAME]
    n_training = 200
    problem_name = "arxiv"
    bounds_domain = [[0.01, 1.01], [0.1, 2.1], [1, 21], [1, 201], [0, 1, 2, 3, 4]]
    type_bounds = [0, 0, 0, 0, 1]
    dimensions = [5, 4, 5]
    thinning = 5
    n_burning = 100
    max_steps_out = 1000
    random_seed = 5
    training_name = None
    points = None
    noise = False
    n_samples = 0
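
    # Reading the domain encoding above: type_bounds[i] == 0 appears to mark
    # bounds_domain[i] as a continuous [lower, upper] interval, while 1 marks
    # an explicit list of discrete values (here the five task indices for the
    # tasks kernel). This is an interpretation of these settings, not
    # documented behavior.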
import unittest

from stratified_bayesian_optimization.initializers.log import SBOLog


class TestLog(unittest.TestCase):  # assumed wrapper; only test_info is original

    def test_info(self):
        logger = SBOLog(__name__)
        logger.info('testing')