def action_get_balance(self, **kwargs):
    endpoint = '/phone/balance'
    # Note: the trailing comma makes this a one-element tuple, as the plural
    # name suggests; without it, config_params would just be the bare string.
    config_params = ('appIdVersion',)
    response = self._make_request(endpoint, config_params=config_params)

    msg = 'Plan balance information:\n\n'
    for plan in response:
        # only report the main voice plan(s)
        if plan.get('voicePlanType') != 'MAIN':
            continue
        msg += 'Plan "%s":\n' % plan['name']
        msg += '* Price: %.2f\n' % plan['price']
        if plan.get('unlimitedVoice', False):
            msg += '* Unlimited calls\n'
        else:
            msg += ('* Calls: %d min (%d min remaining)\n'
                    % (utils.s_to_m(plan['baseSeconds']),
                       plan['balance']['remainingMinutes']))
        if plan.get('unlimitedData', False):
            msg += '* Unlimited data\n'
        else:
            msg += ('* Data: %d MB remaining\n'
                    % utils.b_to_mb(plan['balance']['remainingData']))
        if 'baseSMS' in plan:
            msg += ('* SMS: %d (%d remaining)\n'
                    % (plan['baseSMS'], plan['balance']['remainingSMS']))
        msg += '\n'
    return msg
def action_get_phone_account_info(self, **kwargs):
    endpoint = '/phone/account/info'
    response = self._make_request(endpoint)

    msg = 'Account information:\n\n'
    msg += 'First Name: %s\n' % response['firstName']
    msg += 'Last Name: %s\n' % response['lastName']
    msg += 'e-mail: %s\n' % response['email']
    msg += 'Plan: "%s"\n' % response['voiceplan']['name']
    msg += 'Price: %.2f\n' % response['voiceplan']['price']
    if response['voiceplan']['unlimitedVoice']:
        msg += 'Unlimited calls\n'
    else:
        msg += ('Base minutes: %d\n'
                % utils.s_to_m(response['voiceplan']['baseSeconds']))
    if response['voiceplan']['unlimitedText']:
        msg += 'Unlimited SMS\n'
    else:
        msg += 'Base SMS: %d\n' % response['voiceplan']['baseSMS']
    msg += ('\nDays until billing date: %d\n'
            % response['daysUntilBillingDate'])
    for sim in response['accounts']:
        msg += '\nSIM Id: %s\n' % sim['accountId']
        msg += 'SIM name: %s\n' % sim['accountName']
        msg += 'Phone number: %s\n' % sim['phoneNumber']
    msg += ('\nAccount is %s\n'
            % response['accountStatusAndMessage']['status'])
    msg += '"%s"\n' % response['accountStatusAndMessage']['message']
    return msg
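# Hedged illustration (not taken from any API documentation): the parsing in
# action_get_phone_account_info implies the /phone/account/info response
# carries at least the fields below. Only the key names come from the code
# above; all values here are made up for illustration.
_EXAMPLE_ACCOUNT_INFO_RESPONSE = {
    'firstName': 'Jane',
    'lastName': 'Doe',
    'email': 'jane@example.com',
    'voiceplan': {'name': 'Basic', 'price': 9.99, 'unlimitedVoice': True,
                  'unlimitedText': False, 'baseSMS': 100, 'baseSeconds': 6000},
    'daysUntilBillingDate': 12,
    'accounts': [{'accountId': 'SIM-1', 'accountName': 'Main SIM',
                  'phoneNumber': '+1 555 0100'}],
    'accountStatusAndMessage': {'status': 'ACTIVE',
                                'message': 'Account in good standing'},
}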
def hyperloop_finite(model, resource_type, params, min_units, max_units,
                     runtime, director, data, test,
                     rng=np.random.RandomState(1234), eta=4., budget=0,
                     n_hyperloops=1, s_run=None, doubling=False,
                     problem='cont', verbose=False):
    """Hyperband with finite horizon.

    :param model: object with subroutines to generate arms and train models
    :param resource_type: type of resource to be allocated
    :param params: hyperparameter search space
    :param min_units: minimum units of resources that can be allocated to one configuration
    :param max_units: maximum units of resources that can be allocated to one configuration
    :param runtime: runtime patience (in minutes)
    :param director: path to the directory where outputs are stored
    :param data: dataset to use
    :param test: test set
    :param rng: random state
    :param eta: elimination proportion
    :param budget: total budget for one bracket
    :param n_hyperloops: maximum number of hyperloops to run
    :param s_run: option to repeat a specific bracket
    :param doubling: whether to double the per-bracket budget in the outer loop
    :param problem: type of problem (classification or regression)
    :param verbose: verbose option
    :return: None
    """
    start_time = timeit.default_timer()

    # result storage
    results = {}
    durations = []

    # outer loop
    k = 0
    while utils.s_to_m(start_time, timeit.default_timer()) < runtime and k < n_hyperloops:
        # initialize the budget according to whether we use the doubling trick or not
        if budget == 0:
            if not doubling:
                budget = int(np.floor(utils.log_eta(max_units / min_units, eta)) + 1) * max_units
            else:
                budget = int((2 ** k) * max_units)
        k += 1
        print('\nBudget B = %i' % budget)
        print('##################')

        big_r = float(max_units)
        r = float(min_units)
        s_max = int(min(budget / big_r - 1,
                        int(np.floor(utils.log_eta(big_r / r, eta)))))
        s = s_max
        best_val = 1.
        track_valid = np.array([1.])
        track_test = np.array([1.])
        print('s_max = %i' % s_max)

        # inner loop over brackets
        while s >= 0 and utils.s_to_m(start_time, timeit.default_timer()) < runtime:
            # specify the number of configurations
            n = int(budget / big_r * eta ** s / (s + 1.))
            if n > 0:
                i = 0
                while n * big_r * (i + 1.) * eta ** (-i) > budget:
                    i += 1
                if s_run is None or i == s_run:
                    print('s = %d, n = %d' % (i, n))
                    arms, result, track_valid, track_test = \
                        ttts(model, resource_type, params, n, i, budget,
                             director, rng=rng, data=data, test=test,
                             track_valid=track_valid, track_test=track_test,
                             problem=problem, verbose=False)
                    results[(k, s)] = arms

                    if resource_type == 'epochs':
                        if verbose:
                            print("k = " + str(k) + ", lscale = " + str(s) +
                                  ", validation error = " + str(result[2]) +
                                  ", test error = " + str(result[3]) +
                                  ", best arm dir: " + result[0]['dir'])
                        if result[2] < best_val:
                            best_val = result[2]
                            # best_n = n
                            # best_i = i
                            # best_arm = result[0]
                    elif resource_type == 'iterations':
                        if verbose:
                            print("k = " + str(k) + ", lscale = " + str(s) +
                                  ", validation error = " + str(result[1]) +
                                  ", best arm dir: " + result[0]['dir'])
                        if result[1] < best_val:
                            best_val = result[1]

                    durations.append([utils.s_to_m(start_time, timeit.default_timer()),
                                      result])
                    print("time elapsed: " +
                          str(utils.s_to_m(start_time, timeit.default_timer())))

                    if s_run is None:
                        cPickle.dump([durations, results, track_valid, track_test],
                                     open(director + '/results.pkl', 'wb'))
                    else:
                        cPickle.dump([durations, results, track_valid, track_test],
                                     open(director + '/results_' + str(s_run) + '.pkl', 'wb'))
                    # print(track_test)
            s -= 1
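# Hedged sketch: a dry run of the bracket arithmetic used in hyperloop_finite,
# with no model training involved. utils.log_eta is assumed here to be a
# logarithm with base eta (as the budget formula above suggests) and is
# replaced by np.log ratios; the concrete numbers (min_units=1, max_units=81,
# eta=4) are illustrative only.
def _print_finite_horizon_schedule(min_units=1, max_units=81, eta=4.):
    budget = int(np.floor(np.log(max_units / min_units) / np.log(eta)) + 1) * max_units
    big_r = float(max_units)
    s_max = int(min(budget / big_r - 1,
                    int(np.floor(np.log(big_r / float(min_units)) / np.log(eta)))))
    print('B = %d, s_max = %d' % (budget, s_max))
    for s in range(s_max, -1, -1):
        # number of configurations proposed for this value of s
        n = int(budget / big_r * eta ** s / (s + 1.))
        if n == 0:
            continue
        i = 0
        # shrink the bracket index until the bracket fits the budget
        while n * big_r * (i + 1.) * eta ** (-i) > budget:
            i += 1
        print('s = %d -> n = %d configurations, bracket index i = %d' % (s, n, i))

# Example (commented out so importing this module stays side-effect free):
# _print_finite_horizon_schedule()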
def ttts(model, resource_type, params, n, i, budget, director, data, test,
         frac=0.5, dist='Bernoulli', rng=np.random.RandomState(12345),
         track_valid=np.array([1.]), track_test=np.array([1.]),
         problem='cont', verbose=False):
    """Top-Two Thompson Sampling.

    :param model: model to be trained
    :param resource_type: type of resource to be allocated
    :param params: hyperparameter search space
    :param n: number of configurations in this ttts phase
    :param i: the number of the bracket
    :param budget: number of resources
    :param director: where we store the results
    :param data: dataset
    :param test: test set
    :param frac: threshold in ttts
    :param dist: type of prior distribution
    :param rng: random state
    :param track_valid: initial track vector
    :param track_test: initial track vector
    :param problem: type of problem (classification or regression)
    :param verbose: verbose option
    :return: the dictionary of arms, the stored results, and the vectors of
        validation and test errors
    """
    arms = model.generate_arms(n, director, params)

    remaining_arms = []
    if resource_type == 'epochs':
        remaining_arms = [list(a) for a in zip(arms.keys(),
                                               [0] * len(arms.keys()),
                                               [0] * len(arms.keys()),
                                               [0] * len(arms.keys()))]
    elif resource_type == 'iterations':
        remaining_arms = [list(a) for a in zip(arms.keys(),
                                               [0] * len(arms.keys()),
                                               [0] * len(arms.keys()))]

    current_track_valid = np.copy(track_valid)
    current_track_test = np.copy(track_test)

    succ = np.zeros(n)
    fail = np.zeros(n)
    num_pulls = np.zeros(n)
    rewards = np.zeros(n)
    # means = np.zeros(n)
    start_time = timeit.default_timer()

    # for a in range(n):
    #     arm_key = remaining_arms[a][0]
    #     num_pulls[a] = 1
    #     if resource_type == 'epochs':
    #         train_loss, val_err, test_err, current_track_valid, current_track_test = \
    #             model.run_solver(1, arms[arm_key], data, rng=rng,
    #                              track_valid=current_track_valid,
    #                              track_test=current_track_test, verbose=verbose)
    #         rewards[a] = val_err
    #     elif resource_type == 'iterations':
    #         val_err, avg_loss, current_track_valid, current_track_test = \
    #             model.run_solver(1, arms[arm_key], data, rng=rng,
    #                              track_valid=current_track_valid,
    #                              track_test=current_track_test,
    #                              problem=problem, verbose=verbose)
    #         rewards[a] = 1 + avg_loss
    # best = 0

    for _ in range(int(budget)):
        # means = rewards / num_pulls
        # best = np.random.choice(np.flatnonzero(means == means.max()))
        # print(rewards)

        # draw one Thompson sample per arm from its Beta posterior
        ts = np.zeros(n)
        for a in range(n):
            if dist == 'Bernoulli':
                alpha_prior = 1
                beta_prior = 1
                ts[a] = beta.rvs(alpha_prior + succ[a],
                                 beta_prior + fail[a], size=1)[0]
        idx_i = np.argmax(ts)
        # print(idx_i)
        # print("\n" + str(ts[idx_i]) + "\n")

        if np.random.rand() > frac:
            # top-two step: resample until a different arm comes out on top
            idx_j = idx_i
            threshold = 10000
            count = 0
            while idx_i == idx_j and count < threshold:
                ts = np.zeros(n)
                if dist == 'Bernoulli':
                    alpha_prior = 1
                    beta_prior = 1
                for a in range(n):
                    # if rewards[a] >= 1 or rewards[a] <= 0:
                    #     trial = bernoulli.rvs(0.5)
                    # else:
                    #     trial = bernoulli.rvs(rewards[a])
                    # if trial == 1:
                    #     succ[a] += 1
                    # else:
                    #     fail[a] += 1
                    ts[a] = beta.rvs(alpha_prior + succ[a],
                                     beta_prior + fail[a], size=1)[0]
                idx_j = np.argmax(ts)
                count += 1
                # print(str(idx_j) + ": " + str(ts[idx_j]))
            if idx_i != idx_j:
                idx_i = idx_j
            else:
                _, idx_j = utils.second_largest(list(ts))
                idx_i = idx_j

        # map the last observed reward to a Bernoulli trial and update the posterior counts
        if rewards[idx_i] >= 1 or rewards[idx_i] <= 0:
            trial = bernoulli.rvs(0.5)
        else:
            trial = bernoulli.rvs(rewards[idx_i])
        if trial == 1:
            succ[idx_i] += 1
        else:
            fail[idx_i] += 1

        if resource_type == 'epochs':
            arm_key = remaining_arms[int(idx_i)][0]
            classifier = cPickle.load(
                open(arms[arm_key]['dir'] + '/best_model.pkl', 'rb'))
            train_loss, val_err, test_err, current_track_valid, current_track_test = \
                model.run_solver(1, arms[arm_key], data, rng=rng,
                                 classifier=classifier,
                                 track_valid=current_track_valid,
                                 track_test=current_track_test,
                                 verbose=verbose)
            rewards[idx_i] = val_err
            num_pulls[idx_i] += 1
            if verbose:
                print(arm_key, train_loss, val_err, test_err,
                      utils.s_to_m(start_time, timeit.default_timer()))
            arms[arm_key]['results'].append(
                [num_pulls[idx_i], train_loss, val_err, test_err])
            remaining_arms[int(idx_i)][1] = train_loss
            remaining_arms[int(idx_i)][2] = val_err
            remaining_arms[int(idx_i)][3] = test_err
        elif resource_type == 'iterations':
            arm_key = remaining_arms[int(idx_i)][0]
            val_err, avg_loss, current_track_valid, current_track_test = \
                model.run_solver(1, arms[arm_key], data, test, rng=rng,
                                 track_valid=current_track_valid,
                                 track_test=current_track_test,
                                 problem=problem, verbose=verbose)
            rewards[idx_i] = 1 + avg_loss
            num_pulls[idx_i] += 1
            if verbose:
                print(arm_key, val_err,
                      utils.s_to_m(start_time, timeit.default_timer()))
            arms[arm_key]['results'].append(
                [num_pulls[idx_i], val_err, avg_loss])
            remaining_arms[int(idx_i)][1] = val_err
            remaining_arms[int(idx_i)][2] = avg_loss

    # rank arms by the metric stored in slot 2 of each entry
    if resource_type == 'epochs':
        remaining_arms = sorted(remaining_arms, key=lambda a: a[2])
    elif resource_type == 'iterations':
        remaining_arms = sorted(remaining_arms, key=lambda a: a[2])

    # TODO: this best arm output is wrong in the 'iterations' case,
    # but it does not affect the final output figure
    best_arm = arms[remaining_arms[0][0]]
    result = []
    if resource_type == 'epochs':
        result = [best_arm, remaining_arms[0][1],
                  remaining_arms[0][2], remaining_arms[0][3]]
    elif resource_type == 'iterations':
        result = [best_arm, remaining_arms[0][1], remaining_arms[0][2]]

    return arms, result, current_track_valid, current_track_test
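# Hedged sketch of the posterior bookkeeping in ttts, detached from any model:
# each arm keeps Beta(1 + succ, 1 + fail) counts, one Thompson sample per arm
# picks the arm to pull, and the observed reward is turned into a Bernoulli
# success/failure. This shows only the core Thompson step (without the
# top-two resampling twist); the per-arm "true" probabilities are made up.
def _toy_thompson_demo(true_probs=(0.2, 0.5, 0.8), budget=200, seed=0):
    rng_local = np.random.RandomState(seed)
    n_arms = len(true_probs)
    succ_cnt = np.zeros(n_arms)
    fail_cnt = np.zeros(n_arms)
    for _ in range(budget):
        # one Beta posterior sample per arm, then pull the arm with the largest draw
        ts_draw = rng_local.beta(1 + succ_cnt, 1 + fail_cnt)
        arm = int(np.argmax(ts_draw))
        # Bernoulli "reward" stands in for the val_err-based trial above
        if rng_local.rand() < true_probs[arm]:
            succ_cnt[arm] += 1
        else:
            fail_cnt[arm] += 1
    print('pull counts per arm:', (succ_cnt + fail_cnt).astype(int))

# Example (commented out so importing this module stays side-effect free):
# _toy_thompson_demo()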
def sh_finite(model, resource_type, params, n, i, eta, big_r, director, data,
              test, rng=np.random.RandomState(12345),
              track_valid=np.array([1.]), track_test=np.array([1.]),
              problem='cont', verbose=False):
    """Successive halving.

    :param model: model to be trained
    :param resource_type: type of resource to be allocated
    :param params: hyperparameter search space
    :param n: number of configurations in this successive halving phase
    :param i: the number of the bracket
    :param eta: elimination proportion
    :param big_r: number of resources
    :param director: where we store the results
    :param data: dataset
    :param test: test set
    :param rng: random state
    :param track_valid: initial track vector
    :param track_test: initial track vector
    :param problem: type of problem (classification or regression)
    :param verbose: verbose option
    :return: the dictionary of arms, the stored results, and the vectors of
        validation and test errors
    """
    arms = model.generate_arms(n, director, params)

    remaining_arms = []
    if resource_type == 'epochs':
        remaining_arms = [list(a) for a in zip(arms.keys(),
                                               [0] * len(arms.keys()),
                                               [0] * len(arms.keys()),
                                               [0] * len(arms.keys()))]
    elif resource_type == 'iterations':
        remaining_arms = [list(a) for a in zip(arms.keys(),
                                               [0] * len(arms.keys()),
                                               [0] * len(arms.keys()))]

    current_track_valid = np.copy(track_valid)
    current_track_test = np.copy(track_test)

    for l in range(i + 1):
        num_pulls = int(big_r * eta ** (l - i))
        num_arms = int(n * eta ** (-l))
        print('%d\t%d' % (num_arms, num_pulls))

        for a in range(len(remaining_arms)):
            start_time = timeit.default_timer()
            arm_key = remaining_arms[a][0]
            if verbose:
                print(arms[arm_key])

            if resource_type == 'epochs':
                if not os.path.exists(arms[arm_key]['dir'] + '/best_model.pkl'):
                    train_loss, val_err, test_err, current_track_valid, current_track_test = \
                        model.run_solver(num_pulls, arms[arm_key], data, rng=rng,
                                         track_valid=current_track_valid,
                                         track_test=current_track_test,
                                         verbose=verbose)
                else:
                    # resume from the previously saved model for this arm
                    classifier = cPickle.load(
                        open(arms[arm_key]['dir'] + '/best_model.pkl', 'rb'))
                    train_loss, val_err, test_err, current_track_valid, current_track_test = \
                        model.run_solver(num_pulls, arms[arm_key], data, rng=rng,
                                         classifier=classifier,
                                         track_valid=current_track_valid,
                                         track_test=current_track_test,
                                         verbose=verbose)
                if verbose:
                    print(arm_key, train_loss, val_err, test_err,
                          utils.s_to_m(start_time, timeit.default_timer()))
                arms[arm_key]['results'].append(
                    [num_pulls, train_loss, val_err, test_err])
                remaining_arms[a][1] = train_loss
                remaining_arms[a][2] = val_err
                remaining_arms[a][3] = test_err
            elif resource_type == 'iterations':
                val_err, avg_loss, current_track_valid, current_track_test = \
                    model.run_solver(num_pulls, arms[arm_key], data, test,
                                     rng=rng,
                                     track_valid=current_track_valid,
                                     track_test=current_track_test,
                                     problem=problem, verbose=verbose)
                if verbose:
                    print(arm_key, val_err,
                          utils.s_to_m(start_time, timeit.default_timer()))
                arms[arm_key]['results'].append([num_pulls, val_err, avg_loss])
                remaining_arms[a][1] = val_err
                remaining_arms[a][2] = avg_loss
                # print(avg_loss)

        # rank arms by the metric stored in slot 2 of each entry
        if resource_type == 'epochs':
            remaining_arms = sorted(remaining_arms, key=lambda a: a[2])
        elif resource_type == 'iterations':
            remaining_arms = sorted(remaining_arms, key=lambda a: a[2])

        # keep only the top n_k1 arms for the next round
        n_k1 = int(n * eta ** (-l - 1))
        if i - l - 1 >= 0:
            # for k in range(n_k1, len(remaining_arms)):
            #     arm_dir = arms[remaining_arms[k][0]]['dir']
            #     files = os.listdir(arm_dir)
            remaining_arms = remaining_arms[0:n_k1]

    best_arm = arms[remaining_arms[0][0]]
    result = []
    if resource_type == 'epochs':
        result = [best_arm, remaining_arms[0][1],
                  remaining_arms[0][2], remaining_arms[0][3]]
    elif resource_type == 'iterations':
        result = [best_arm, remaining_arms[0][1], remaining_arms[0][2]]

    return arms, result, current_track_valid, current_track_test
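# Hedged sketch: the elimination schedule computed inside sh_finite, printed
# for illustrative values only (no arms are trained). It mirrors the
# num_pulls / num_arms / n_k1 arithmetic above; the defaults (n=64, big_r=81,
# eta=4, i=3) are arbitrary example numbers.
def _print_sh_schedule(n=64, big_r=81, eta=4., i=3):
    survivors = n
    for l in range(i + 1):
        num_pulls = int(big_r * eta ** (l - i))   # resources per surviving arm this round
        num_arms = int(n * eta ** (-l))           # arms expected to be alive this round
        print('round %d: %d arms, %d pulls each' % (l, num_arms, num_pulls))
        n_k1 = int(n * eta ** (-l - 1))
        if i - l - 1 >= 0:
            survivors = n_k1                      # only the top n_k1 arms advance
    print('final survivors kept: %d' % survivors)

# Example (commented out so importing this module stays side-effect free):
# _print_sh_schedule()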