# Shared imports for the test_api variants below (each was originally a
# standalone test module; they all use the same dependencies).
from multiprocessing import Pool
import random

import numpy

import test_utils


def test_api(assert_200=True, num_objects=5, desired_dimension=2,
             total_pulls_per_client=4, num_experiments=1, num_clients=6):
    app_id = 'Tests'
    pool = Pool(processes=num_clients)
    supported_alg_ids = ['TestAlg']

    alg_list = []
    for idx, alg_id in enumerate(supported_alg_ids):
        alg_item = {}
        alg_item['alg_id'] = alg_id
        alg_item['alg_label'] = alg_id
        alg_list.append(alg_item)

    params = []
    for algorithm in alg_list:
        params.append({'alg_label': algorithm['alg_label'],
                       'proportion': 1. / len(alg_list)})
    algorithm_management_settings = {}
    algorithm_management_settings['mode'] = 'fixed_proportions'
    algorithm_management_settings['params'] = params

    # Test POST Experiment
    initExp_args_dict = {}
    initExp_args_dict['app_id'] = app_id
    initExp_args_dict['args'] = {}
    initExp_args_dict['args']['participant_to_algorithm_management'] = 'one_to_many'
    initExp_args_dict['args']['algorithm_management_settings'] = algorithm_management_settings
    initExp_args_dict['args']['alg_list'] = alg_list
    initExp_args_dict['args']['instructions'] = 'You want instructions, here are your test instructions'
    initExp_args_dict['args']['debrief'] = 'You want a debrief, here is your test debrief'
    initExp_args_dict['args']['targets'] = {}
    initExp_args_dict['args']['targets']['n'] = num_objects

    exp_info = []
    for ell in range(num_experiments):
        initExp_response_dict, _ = test_utils.initExp(initExp_args_dict)
        exp_uid = initExp_response_dict['exp_uid']
        # Store one dict per experiment so the random choice below can be
        # indexed by 'exp_uid' (the original stored bare uids, which broke
        # that lookup).
        exp_info.append({'exp_uid': exp_uid})

    # Generate participants
    participants = []
    pool_args = []
    for i in range(num_clients):
        participant_uid = '%030x' % random.randrange(16**30)
        participants.append(participant_uid)
        experiment = numpy.random.choice(exp_info)
        exp_uid = experiment['exp_uid']
        pool_args.append((exp_uid, participant_uid,
                          total_pulls_per_client, assert_200))

    results = pool.map(simulate_one_client, pool_args)
    for result in results:
        print result

    test_utils.getModel(exp_uid, app_id, supported_alg_ids, alg_list)
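# Every test_api in this section fans its clients out to simulate_one_client
# via pool.map, but that worker is not shown here. The sketch below is a
# minimal, hypothetical reconstruction for the 'Tests' variant above: it
# assumes test_utils exposes getQuery/processAnswer helpers (an assumption;
# only initExp/getExp/getModel appear in this section) and that the worker
# receives the 4-tuple built in pool_args above. The other variants pass a
# 5-tuple that also carries ground-truth parameters.
def simulate_one_client(args):
    exp_uid, participant_uid, total_pulls, assert_200 = args
    for pull in range(total_pulls):
        # Ask the experiment for the next query for this participant...
        getQuery_args = {'exp_uid': exp_uid,
                         'args': {'participant_uid': participant_uid}}
        query = test_utils.getQuery(getQuery_args)  # hypothetical helper
        # ...and report back an (arbitrary) answer for that query.
        processAnswer_args = {'exp_uid': exp_uid,
                              'args': {'query_uid': query['query_uid'],
                                       'target_reward': 1}}
        test_utils.processAnswer(processAnswer_args)  # hypothetical helper
    return 'participant %s finished %d pulls' % (participant_uid, total_pulls)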
def test_api(assert_200=True, num_objects=6, total_pulls_per_client=5,
             num_experiments=1, num_clients=2):
    # ASSUMPTION: the original test referenced `app_id` and `true_weights`
    # without defining them. They are defined here, modeled on the binary
    # classification test further below, so the function is runnable.
    app_id = 'Tests'
    true_weights = numpy.zeros(6)
    true_weights[0] = 1.

    pool = Pool(processes=num_clients)
    supported_alg_ids = ['RoundRobin']

    alg_list = []
    for idx, alg_id in enumerate(supported_alg_ids):
        alg_item = {}
        alg_item['alg_id'] = alg_id
        if idx == 0:
            alg_item['alg_label'] = 'Test'
        else:
            alg_item['alg_label'] = alg_id
        alg_item['test_alg_label'] = 'Test'
        alg_list.append(alg_item)

    params = []
    for algorithm in alg_list:
        params.append({'alg_label': algorithm['alg_label'],
                       'proportion': 1. / len(alg_list)})
    algorithm_management_settings = {}
    algorithm_management_settings['mode'] = 'fixed_proportions'
    algorithm_management_settings['params'] = params

    # Each target carries its feature vector in its metadata.
    targetset = []
    for i in range(num_objects):
        features = list(numpy.random.randn(6))
        targetset.append({'primary_description': str(features),
                          'primary_type': 'text',
                          'alt_description': '%d' % (i),
                          'alt_type': 'text',
                          'target_id': str(i),
                          'meta': {'features': features}})

    # Test POST Experiment
    print '\n' * 2 + 'Testing POST initExp...'
    initExp_args_dict = {}
    initExp_args_dict['app_id'] = app_id
    initExp_args_dict['args'] = {}
    initExp_args_dict['args']['participant_to_algorithm_management'] = 'one_to_many'  # or 'one_to_one'; optional field
    initExp_args_dict['args']['algorithm_management_settings'] = algorithm_management_settings  # optional field
    initExp_args_dict['args']['alg_list'] = alg_list  # optional field
    initExp_args_dict['args']['instructions'] = 'You want instructions, here are your test instructions'
    initExp_args_dict['args']['debrief'] = 'You want a debrief, here is your test debrief'
    initExp_args_dict['args']['targets'] = {'targetset': targetset}

    exp_info = []
    for ell in range(num_experiments):
        initExp_response_dict, _ = test_utils.initExp(initExp_args_dict)
        exp_uid = initExp_response_dict['exp_uid']
        # Store dicts only; the original appended both the raw return value
        # and a dict, which broke the 'exp_uid' lookup below.
        exp_info.append({'exp_uid': exp_uid})

    # Test GET Experiment
    initExp_response_dict = test_utils.getExp(exp_uid)

    # Generate participants
    participants = []
    pool_args = []
    for i in range(num_clients):
        participant_uid = '%030x' % random.randrange(16**30)
        participants.append(participant_uid)
        experiment = numpy.random.choice(exp_info)
        exp_uid = experiment['exp_uid']
        pool_args.append((exp_uid, participant_uid, total_pulls_per_client,
                          true_weights, assert_200))
    print "participants are", participants

    results = pool.map(simulate_one_client, pool_args)
    for result in results:
        print result

    test_utils.getModel(exp_uid, app_id, supported_alg_ids, alg_list)
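# The targetset above keys each target's features under 'meta' and tags it
# with a 'target_id'. A simulated client can recover the features for a
# queried target by that id. Hypothetical helper, assuming only the
# targetset layout defined in the test above.
def features_for_target(targetset, target_id):
    for target in targetset:
        if target['target_id'] == target_id:
            return numpy.array(target['meta']['features'])
    return None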
def test_api(assert_200=True, num_arms=5, num_experiments=1,
             num_clients=10, total_pulls=5):
    app_id = 'CardinalBanditsPureExploration'
    true_means = numpy.array(range(num_arms)[::-1]) / float(num_arms)
    pool = Pool(processes=num_clients)

    # Input test parameters
    n = num_arms
    delta = 0.05
    supported_alg_ids = ['LilUCB', 'RoundRobin']
    labels = [{'label': 'bad', 'reward': 1.},
              {'label': 'neutral', 'reward': 2.},
              {'label': 'good', 'reward': 3.}]

    alg_list = []
    for i, alg_id in enumerate(supported_alg_ids):
        alg_item = {}
        alg_item['alg_id'] = alg_id
        alg_item['alg_label'] = alg_id + '_' + str(i)
        alg_list.append(alg_item)

    params = []
    for algorithm in alg_list:
        params.append({'alg_label': algorithm['alg_label'],
                       'proportion': 1. / len(alg_list)})
    algorithm_management_settings = {}
    algorithm_management_settings['mode'] = 'fixed_proportions'
    algorithm_management_settings['params'] = params
    print "algorithm management settings", algorithm_management_settings

    # Test POST Experiment
    initExp_args_dict = {}
    initExp_args_dict['args'] = {}
    initExp_args_dict['args']['targets'] = {'n': n}
    initExp_args_dict['args']['failure_probability'] = delta
    initExp_args_dict['args']['participant_to_algorithm_management'] = 'one_to_many'  # or 'one_to_one'; optional field
    initExp_args_dict['args']['algorithm_management_settings'] = algorithm_management_settings  # optional field
    initExp_args_dict['args']['alg_list'] = alg_list  # optional field
    initExp_args_dict['args']['instructions'] = 'You want instructions, here are your test instructions'
    initExp_args_dict['args']['debrief'] = 'You want a debrief, here is your test debrief'
    initExp_args_dict['args']['context_type'] = 'text'
    initExp_args_dict['args']['context'] = 'This is a context'
    initExp_args_dict['args']['rating_scale'] = {'labels': labels}
    initExp_args_dict['app_id'] = app_id

    exp_info = []
    for ell in range(num_experiments):
        initExp_response_dict, _ = test_utils.initExp(initExp_args_dict)
        exp_uid = initExp_response_dict['exp_uid']
        # Store dicts only; the original also appended the bare uid, which
        # broke the 'exp_uid' lookup below.
        exp_info.append({'exp_uid': exp_uid})

    # Test GET Experiment
    initExp_response_dict = test_utils.getExp(exp_uid)

    # Generate participants
    participants = []
    pool_args = []
    for i in range(num_clients):
        participant_uid = '%030x' % random.randrange(16**30)
        participants.append(participant_uid)
        experiment = numpy.random.choice(exp_info)
        exp_uid = experiment['exp_uid']
        pool_args.append((exp_uid, participant_uid, total_pulls,
                          true_means, assert_200))

    results = pool.map(simulate_one_client, pool_args)
    for result in results:
        print result

    test_utils.getModel(exp_uid, app_id, supported_alg_ids, alg_list)
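# Under 'fixed_proportions', work is routed to algorithms according to the
# proportions built above, so with two algorithms at proportion 0.5 and
# 10 clients making 5 pulls each, every algorithm should see roughly 25
# pulls in expectation. A quick sanity check of that arithmetic (assuming
# pulls are routed i.i.d. according to the proportions):
def expected_pulls_per_alg(params, num_clients, pulls_per_client):
    # params is the same list built above:
    # [{'alg_label': ..., 'proportion': ...}, ...]
    total = num_clients * pulls_per_client
    return {p['alg_label']: total * p['proportion'] for p in params}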
def test_api(assert_200=True, num_arms=5, num_clients=8, delta=0.05,
             total_pulls_per_client=5, num_experiments=1,
             params={'num_tries': 5}):
    app_id = 'DuelingBanditsPureExploration'
    true_means = numpy.array(range(num_arms)[::-1]) / float(num_arms)
    pool = Pool(processes=num_clients)
    supported_alg_ids = ['BR_LilUCB', 'BR_Random',
                         'ValidationSampling', 'BR_KLUCB']

    alg_list = []
    for i, alg_id in enumerate(supported_alg_ids):
        alg_item = {}
        if alg_id == 'ValidationSampling':
            alg_item['params'] = params
        alg_item['alg_id'] = alg_id
        alg_item['alg_label'] = alg_id + '_' + str(i)
        alg_list.append(alg_item)

    # Note: this shadows the `params` keyword argument, which has already
    # been consumed by ValidationSampling above.
    params = []
    for algorithm in alg_list:
        params.append({'alg_label': algorithm['alg_label'],
                       'proportion': 1. / len(alg_list)})
    algorithm_management_settings = {}
    algorithm_management_settings['mode'] = 'fixed_proportions'
    algorithm_management_settings['params'] = params
    print algorithm_management_settings

    # Test POST Experiment
    initExp_args_dict = {}
    initExp_args_dict['args'] = {'alg_list': alg_list,
                                 'algorithm_management_settings': algorithm_management_settings,
                                 'context': 'Context for Dueling Bandits',
                                 'context_type': 'text',
                                 'debrief': 'Test debrief.',
                                 'failure_probability': delta,
                                 'instructions': 'Test instructions.',
                                 'participant_to_algorithm_management': 'one_to_many',
                                 'targets': {'n': num_arms}}
    initExp_args_dict['app_id'] = app_id
    initExp_args_dict['site_id'] = 'replace this with working site id'
    initExp_args_dict['site_key'] = 'replace this with working site key'

    exp_info = []
    for ell in range(num_experiments):
        initExp_response_dict, _ = test_utils.initExp(initExp_args_dict)
        exp_info.append({'exp_uid': initExp_response_dict['exp_uid']})

    # Generate participants
    participants = []
    pool_args = []
    for i in range(num_clients):
        participant_uid = '%030x' % random.randrange(16**30)
        participants.append(participant_uid)
        experiment = numpy.random.choice(exp_info)
        exp_uid = experiment['exp_uid']
        pool_args.append((exp_uid, participant_uid, total_pulls_per_client,
                          true_means, assert_200))

    results = pool.map(simulate_one_client, pool_args)
    for result in results:
        print result

    test_utils.getModel(exp_uid, app_id, supported_alg_ids, alg_list)
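# How a simulated client might answer a duel, given the true means defined
# above: the query names two arms, and the client declares the left arm the
# winner with a probability from a simple linear win-probability model. A
# hypothetical sketch (the real simulate_one_client is not shown in this
# section); only `true_means` comes from the test above.
def simulate_duel_answer(left, right, true_means):
    # true_means lies in [0, 1], so p_left lies in [0, 1] as well.
    p_left = 0.5 + (true_means[left] - true_means[right]) / 2.
    return left if random.random() < p_left else right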
def test_api(assert_200=True, num_objects=5, desired_dimension=2,
             total_pulls_per_client=4, num_experiments=1, num_clients=6):
    app_id = 'PoolBasedTripletMDS'
    # The ground-truth embedding: num_objects points along the diagonal.
    x = numpy.linspace(0, 1, num_objects)
    X_true = numpy.vstack([x, x]).transpose()

    pool = Pool(processes=num_clients)
    supported_alg_ids = ['CrowdKernel', 'RandomSampling',
                         'UncertaintySampling', 'ValidationSampling', 'STE']

    alg_list = []
    for idx, alg_id in enumerate(supported_alg_ids):
        alg_item = {}
        alg_item['alg_id'] = alg_id
        if alg_id == 'ValidationSampling':
            alg_item['alg_label'] = 'Test'
            alg_item['params'] = {'query_list': [[q1, q2, q3]
                                                 for q1 in [0, 1, 2]
                                                 for q2 in [0, 1, 2]
                                                 for q3 in [0, 1, 2]]}
        else:
            alg_item['alg_label'] = alg_id
        alg_item['test_alg_label'] = 'Test'
        alg_list.append(alg_item)

    params = []
    for algorithm in alg_list:
        params.append({'alg_label': algorithm['alg_label'],
                       'proportion': 1. / len(alg_list)})
    algorithm_management_settings = {}
    algorithm_management_settings['mode'] = 'fixed_proportions'
    algorithm_management_settings['params'] = params

    # Test POST Experiment
    initExp_args_dict = {}
    initExp_args_dict['app_id'] = app_id
    initExp_args_dict['args'] = {}
    initExp_args_dict['args']['d'] = desired_dimension
    initExp_args_dict['args']['failure_probability'] = 0.01
    initExp_args_dict['args']['participant_to_algorithm_management'] = 'one_to_many'  # or 'one_to_one'; optional field
    initExp_args_dict['args']['algorithm_management_settings'] = algorithm_management_settings  # optional field
    initExp_args_dict['args']['alg_list'] = alg_list  # optional field
    initExp_args_dict['args']['instructions'] = 'You want instructions, here are your test instructions'
    initExp_args_dict['args']['debrief'] = 'You want a debrief, here is your test debrief'
    initExp_args_dict['args']['targets'] = {}
    initExp_args_dict['args']['targets']['n'] = num_objects

    exp_info = []
    for ell in range(num_experiments):
        initExp_response_dict, _ = test_utils.initExp(initExp_args_dict)
        # Store dicts so the random choice below can be indexed by 'exp_uid'.
        exp_info.append({'exp_uid': initExp_response_dict['exp_uid']})

    # Generate participants
    participants = []
    pool_args = []
    for i in range(num_clients):
        participant_uid = '%030x' % random.randrange(16**30)
        participants.append(participant_uid)
        experiment = numpy.random.choice(exp_info)
        exp_uid = experiment['exp_uid']
        pool_args.append((exp_uid, participant_uid, total_pulls_per_client,
                          X_true, assert_200))

    results = pool.map(simulate_one_client, pool_args)
    for result in results:
        print result

    test_utils.getModel(exp_uid, app_id, supported_alg_ids, alg_list)
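# How a simulated client might answer a triplet query against X_true: the
# query names a center object and two alternatives, and the client picks
# whichever alternative is closer to the center in the ground-truth
# embedding. A hypothetical sketch; only X_true comes from the test above.
def simulate_triplet_answer(center, left, right, X_true):
    d_left = numpy.linalg.norm(X_true[center] - X_true[left])
    d_right = numpy.linalg.norm(X_true[center] - X_true[right])
    return left if d_left < d_right else right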
def test_api(assert_200=True, num_objects=4, desired_dimension=1,
             total_pulls_per_client=5, num_experiments=1, num_clients=7):
    app_id = 'PoolBasedBinaryClassification'
    # Ground-truth weight vector: all mass on the first coordinate.
    true_weights = numpy.zeros(desired_dimension)
    true_weights[0] = 1.

    pool = Pool(processes=num_clients)
    supported_alg_ids = ['RandomSamplingLinearLeastSquares',
                         'RandomSamplingLinearLeastSquares']

    alg_list = []
    for idx, alg_id in enumerate(supported_alg_ids):
        alg_item = {}
        alg_item['alg_id'] = alg_id
        if idx == 0:
            alg_item['alg_label'] = 'Test'
        else:
            alg_item['alg_label'] = alg_id
        alg_item['test_alg_label'] = 'Test'
        alg_list.append(alg_item)

    params = []
    for algorithm in alg_list:
        params.append({'alg_label': algorithm['alg_label'],
                       'proportion': 1. / len(alg_list)})
    algorithm_management_settings = {}
    algorithm_management_settings['mode'] = 'fixed_proportions'
    algorithm_management_settings['params'] = params

    # Each target carries its feature vector in its metadata.
    targetset = []
    for i in range(num_objects):
        features = list(numpy.random.randn(desired_dimension))
        targetset.append({'primary_description': str(features),
                          'primary_type': 'text',
                          'alt_description': '%d' % (i),
                          'alt_type': 'text',
                          'meta': {'features': features}})

    # Test POST Experiment
    print '\n' * 2 + 'Testing POST initExp...'
    initExp_args_dict = {}
    initExp_args_dict['app_id'] = app_id
    initExp_args_dict['args'] = {}
    initExp_args_dict['args']['failure_probability'] = 0.01
    initExp_args_dict['args']['participant_to_algorithm_management'] = 'one_to_many'  # or 'one_to_one'; optional field
    initExp_args_dict['args']['algorithm_management_settings'] = algorithm_management_settings  # optional field
    initExp_args_dict['args']['alg_list'] = alg_list  # optional field
    initExp_args_dict['args']['instructions'] = 'You want instructions, here are your test instructions'
    initExp_args_dict['args']['debrief'] = 'You want a debrief, here is your test debrief'
    initExp_args_dict['args']['targets'] = {'targetset': targetset}

    exp_info = []
    for ell in range(num_experiments):
        initExp_response_dict, _ = test_utils.initExp(initExp_args_dict)
        exp_uid = initExp_response_dict['exp_uid']
        # Store dicts only; the original also appended the raw return value,
        # which broke the 'exp_uid' lookup below.
        exp_info.append({'exp_uid': exp_uid})

    # Test GET Experiment
    initExp_response_dict = test_utils.getExp(exp_uid)

    # Generate participants
    participants = []
    pool_args = []
    for i in range(num_clients):
        participant_uid = '%030x' % random.randrange(16**30)
        participants.append(participant_uid)
        experiment = numpy.random.choice(exp_info)
        exp_uid = experiment['exp_uid']
        pool_args.append((exp_uid, participant_uid, total_pulls_per_client,
                          true_weights, assert_200))
    print "participants are", participants

    results = pool.map(simulate_one_client, pool_args)
    for result in results:
        print result

    test_utils.getModel(exp_uid, app_id, supported_alg_ids, alg_list)
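# How a simulated client might label a target against true_weights: take the
# sign of the inner product between the target's feature vector and the
# ground-truth weights. A hypothetical sketch; only true_weights and the
# 'meta' feature layout come from the test above.
def simulate_label(target, true_weights):
    features = numpy.array(target['meta']['features'])
    return 1 if numpy.dot(features, true_weights) >= 0 else -1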
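# A minimal entry point. Each test_api above was originally a standalone
# test module with its own runner; invoking test_api() here runs whichever
# definition is in scope.
if __name__ == '__main__':
    test_api()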