Example 1
def test_api(assert_200=True, num_objects=5, desired_dimension=2,
             total_pulls_per_client=4, num_experiments=1, num_clients=6):

    pool = Pool(processes=num_clients)
    supported_alg_ids = ['TestAlg']
    alg_list = []
    for idx, alg_id in enumerate(supported_alg_ids):
        alg_item = {}
        alg_item['alg_id'] = alg_id
        alg_item['alg_label'] = alg_id
        alg_list.append(alg_item)

    params = []
    for algorithm in alg_list:
        params.append({'alg_label': algorithm['alg_label'],
                       'proportion': 1./len(alg_list)})
    algorithm_management_settings = {}
    algorithm_management_settings['mode'] = 'fixed_proportions'
    algorithm_management_settings['params'] = params

    # Test POST Experiment
    initExp_args_dict = {}
    initExp_args_dict['app_id'] = 'Tests'
    initExp_args_dict['args'] = {}
    initExp_args_dict['args']['participant_to_algorithm_management'] = 'one_to_many'
    initExp_args_dict['args']['algorithm_management_settings'] = algorithm_management_settings
    initExp_args_dict['args']['alg_list'] = alg_list
    initExp_args_dict['args']['instructions'] = 'You want instructions, here are your test instructions'
    initExp_args_dict['args']['debrief'] = 'You want a debrief, here is your test debrief'
    initExp_args_dict['args']['targets'] = {}
    initExp_args_dict['args']['targets']['n'] = num_objects

    exp_info = []
    for ell in range(num_experiments):
        initExp_response_dict, exp_uid = test_utils.initExp(initExp_args_dict)
        exp_info += [exp_uid]

    # Generate participants
    participants = []
    pool_args = []
    for i in range(num_clients):
        participant_uid = '%030x' % random.randrange(16**30)
        participants.append(participant_uid)

        experiment = numpy.random.choice(exp_info)
        exp_uid = experiment['exp_uid']
        pool_args.append((exp_uid, participant_uid, total_pulls_per_client, assert_200))
    results = pool.map(simulate_one_client, pool_args)

    for result in results:
        print result

    test_utils.getModel(exp_uid, 'Tests', supported_alg_ids, alg_list)
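All of the examples on this page share the same scaffolding: a multiprocessing pool of simulated clients, a test_utils helper module (providing initExp, getExp, and getModel, as used above), and a simulate_one_client worker. None of the snippets carry their imports; a minimal sketch of that shared setup, with simulate_one_client reduced to a placeholder, might look like this:

from multiprocessing import Pool
import random

import numpy

import test_utils  # NEXT test helpers used above: initExp, getExp, getModel


def simulate_one_client(args):
    # Placeholder worker (assumption): a real implementation drives
    # query/answer traffic against the experiment and checks for HTTP 200
    # responses when assert_200 is set. The tuple layout varies per example;
    # only the first two fields are common to all of them.
    exp_uid, participant_uid = args[0], args[1]
    return 'participant %s finished simulating on experiment %s' % (
        participant_uid, exp_uid)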
Example 2
def test_api(assert_200=True,
             num_objects=6,
             total_pulls_per_client=5,
             num_experiments=1,
             num_clients=2):

    # NOTE: app_id and true_weights are module-level globals in the original
    # test file; this snippet does not define them.
    pool = Pool(processes=num_clients)
    supported_alg_ids = ['RoundRobin']
    alg_list = []
    for idx, alg_id in enumerate(supported_alg_ids):
        alg_item = {}
        alg_item['alg_id'] = alg_id
        if idx == 0:
            alg_item['alg_label'] = 'Test'
        else:
            alg_item['alg_label'] = alg_id
        alg_item['test_alg_label'] = 'Test'
        alg_list.append(alg_item)
    params = []
    for algorithm in alg_list:
        params.append({
            'alg_label': algorithm['alg_label'],
            'proportion': 1. / len(alg_list)
        })

    algorithm_management_settings = {}
    algorithm_management_settings['mode'] = 'fixed_proportions'
    algorithm_management_settings['params'] = params

    targetset = []
    for i in range(num_objects):
        features = list(numpy.random.randn(6))
        targetset.append({
            'primary_description': str(features),
            'primary_type': 'text',
            'alt_description': '%d' % (i),
            'alt_type': 'text',
            'target_id': str(i),
            'meta': {
                'features': features
            }
        })

    # Test POST Experiment
    print '\n' * 2 + 'Testing POST initExp...'
    initExp_args_dict = {}
    initExp_args_dict['app_id'] = app_id
    initExp_args_dict['args'] = {}
    initExp_args_dict['args']['participant_to_algorithm_management'] = 'one_to_many'  # optional; or 'one_to_one'
    initExp_args_dict['args']['algorithm_management_settings'] = algorithm_management_settings  # optional
    initExp_args_dict['args']['alg_list'] = alg_list  # optional
    initExp_args_dict['args']['instructions'] = 'You want instructions, here are your test instructions'
    initExp_args_dict['args']['debrief'] = 'You want a debrief, here is your test debrief'
    initExp_args_dict['args']['targets'] = {'targetset': targetset}

    exp_info = []
    for ell in range(num_experiments):
        # Store one entry per experiment (the original appended each
        # experiment twice, once as exp_info_ and once as a fresh dict).
        initExp_response_dict, exp_info_ = test_utils.initExp(
            initExp_args_dict)
        exp_info += [exp_info_]
        exp_uid = initExp_response_dict['exp_uid']

        # Test GET Experiment
        initExp_response_dict = test_utils.getExp(exp_uid)

    # Generate participants
    ###################################

    participants = []
    pool_args = []
    for i in range(num_clients):
        participant_uid = '%030x' % random.randrange(16**30)
        participants.append(participant_uid)

        experiment = numpy.random.choice(exp_info)
        exp_uid = experiment['exp_uid']
        pool_args.append((exp_uid, participant_uid, total_pulls_per_client,
                          true_weights, assert_200))

    print "participants are", participants
    results = pool.map(simulate_one_client, pool_args)

    for result in results:
        print result

    test_utils.getModel(exp_uid, app_id, supported_alg_ids, alg_list)
Example 3
def test_api(assert_200=True, num_arms=5,
             num_experiments=1, num_clients=10, total_pulls=5):
    app_id = 'CardinalBanditsPureExploration'
    true_means = numpy.array(range(num_arms)[::-1])/float(num_arms)

    pool = Pool(processes=num_clients)

    # input test parameters
    n = num_arms
    delta = 0.05
    supported_alg_ids = ['LilUCB', 'RoundRobin']

    labels = [{'label': 'bad', 'reward': 1.},
              {'label': 'neutral', 'reward': 2.},
              {'label': 'good', 'reward': 3.}]

    alg_list = []
    for i, alg_id in enumerate(supported_alg_ids):
        alg_item = {}
        alg_item['alg_id'] = alg_id
        alg_item['alg_label'] = alg_id+'_'+str(i)
        alg_list.append(alg_item)
    params = []
    for algorithm in alg_list:
        params.append({'alg_label': algorithm['alg_label'],
                       'proportion': 1./len(alg_list)})
    algorithm_management_settings = {}
    algorithm_management_settings['mode'] = 'fixed_proportions'
    algorithm_management_settings['params'] = params

    print "alg management settings", algorithm_management_settings


    # Test POST Experiment
    initExp_args_dict = {}
    initExp_args_dict['args'] = {}

    initExp_args_dict['args']['targets'] = {'n': n}
    initExp_args_dict['args']['failure_probability'] = delta
    initExp_args_dict['args']['participant_to_algorithm_management'] = 'one_to_many'  # optional; or 'one_to_one'
    initExp_args_dict['args']['algorithm_management_settings'] = algorithm_management_settings  # optional
    initExp_args_dict['args']['alg_list'] = alg_list  # optional
    initExp_args_dict['args']['instructions'] = 'You want instructions, here are your test instructions'
    initExp_args_dict['args']['debrief'] = 'You want a debrief, here is your test debrief'
    initExp_args_dict['args']['context_type'] = 'text'
    initExp_args_dict['args']['context'] = 'This is a context'
    initExp_args_dict['args']['rating_scale'] = {'labels': labels}
    initExp_args_dict['app_id'] = app_id

    exp_info = []
    for ell in range(num_experiments):
        # Store one entry per experiment (the original appended each
        # experiment twice under two different shapes).
        initExp_response_dict, exp_info_ = test_utils.initExp(initExp_args_dict)
        exp_info += [exp_info_]
        exp_uid = initExp_response_dict['exp_uid']

        # Test GET Experiment
        initExp_response_dict = test_utils.getExp(exp_uid)

    # Generate participants
    participants = []
    pool_args = []
    for i in range(num_clients):
        participant_uid = '%030x' % random.randrange(16**30)
        participants.append(participant_uid)

        experiment = numpy.random.choice(exp_info)
        exp_uid = experiment['exp_uid']
        pool_args.append((exp_uid, participant_uid, total_pulls, true_means, assert_200))

    results = pool.map(simulate_one_client, pool_args)

    for result in results:
        print result

    test_utils.getModel(exp_uid, app_id, supported_alg_ids, alg_list)
Example 4
def test_api(assert_200=True, num_arms=5, num_clients=8, delta=0.05,
             total_pulls_per_client=5, num_experiments=1,
             params={'num_tries': 5}):

    app_id = 'DuelingBanditsPureExploration'
    true_means = numpy.array(range(num_arms)[::-1])/float(num_arms)
    pool = Pool(processes=num_clients)
    supported_alg_ids = ['BR_LilUCB', 'BR_Random', 'ValidationSampling', 'BR_KLUCB']

    alg_list = []
    for i, alg_id in enumerate(supported_alg_ids):
        alg_item = {}
        if alg_id == 'ValidationSampling':
            alg_item['params'] = params
        alg_item['alg_id'] = alg_id
        alg_item['alg_label'] = alg_id+'_'+str(i)
        alg_list.append(alg_item)

    params = []
    for algorithm in alg_list:
        params.append({'alg_label': algorithm['alg_label'], 'proportion':1./len(alg_list)})
    algorithm_management_settings = {}
    algorithm_management_settings['mode'] = 'fixed_proportions'
    algorithm_management_settings['params'] = params

    print algorithm_management_settings

    #################################################
    # Test POST Experiment
    #################################################
    initExp_args_dict = {}
    initExp_args_dict['args'] = {'alg_list': alg_list,
                                 'algorithm_management_settings': algorithm_management_settings,
                                 'context': 'Context for Dueling Bandits',
                                 'context_type': 'text',
                                 'debrief': 'Test debrief.',
                                 'failure_probability': 0.05,
                                 'instructions': 'Test instructions.',
                                 'participant_to_algorithm_management': 'one_to_many',
                                 'targets': {'n': num_arms}}

    initExp_args_dict['app_id'] = app_id
    initExp_args_dict['site_id'] = 'replace this with working site id'
    initExp_args_dict['site_key'] = 'replace this with working site key'

    exp_info = []
    for ell in range(num_experiments):
        exp_info += [test_utils.initExp(initExp_args_dict)[1]]

    # Generate participants
    participants = []
    pool_args = []
    for i in range(num_clients):
        participant_uid = '%030x' % random.randrange(16**30)
        participants.append(participant_uid)

        experiment = numpy.random.choice(exp_info)
        exp_uid = experiment['exp_uid']
        pool_args.append((exp_uid, participant_uid, total_pulls_per_client,
                          true_means, assert_200))

    results = pool.map(simulate_one_client, pool_args)

    for result in results:
        print result

    test_utils.getModel(exp_uid, app_id, supported_alg_ids, alg_list)
Example 5
def test_api(assert_200=True,
             num_objects=5,
             desired_dimension=2,
             total_pulls_per_client=4,
             num_experiments=1,
             num_clients=6):
    x = numpy.linspace(0, 1, num_objects)
    X_true = numpy.vstack([x, x]).transpose()

    pool = Pool(processes=num_clients)
    supported_alg_ids = [
        'CrowdKernel', 'RandomSampling', 'UncertaintySampling',
        'ValidationSampling', 'STE'
    ]
    alg_list = []
    for idx, alg_id in enumerate(supported_alg_ids):
        alg_item = {}
        alg_item['alg_id'] = alg_id
        if alg_id == 'ValidationSampling':
            alg_item['alg_label'] = 'Test'
            alg_item['params'] = {
                'query_list': [[q1, q2, q3] for q1 in [0, 1, 2]
                               for q2 in [0, 1, 2] for q3 in [0, 1, 2]]
            }
        else:
            alg_item['alg_label'] = alg_id
        alg_item['test_alg_label'] = 'Test'
        alg_list.append(alg_item)

    params = []
    for algorithm in alg_list:
        params.append({
            'alg_label': algorithm['alg_label'],
            'proportion': 1. / len(alg_list)
        })
    algorithm_management_settings = {}
    algorithm_management_settings['mode'] = 'fixed_proportions'
    algorithm_management_settings['params'] = params

    # Test POST Experiment
    initExp_args_dict = {}
    initExp_args_dict['app_id'] = 'PoolBasedTripletMDS'
    initExp_args_dict['args'] = {}
    initExp_args_dict['args']['d'] = desired_dimension
    initExp_args_dict['args']['failure_probability'] = 0.01
    initExp_args_dict['args']['participant_to_algorithm_management'] = 'one_to_many'  # optional; or 'one_to_one'
    initExp_args_dict['args']['algorithm_management_settings'] = algorithm_management_settings  # optional
    initExp_args_dict['args']['alg_list'] = alg_list  # optional
    initExp_args_dict['args']['instructions'] = 'You want instructions, here are your test instructions'
    initExp_args_dict['args']['debrief'] = 'You want a debrief, here is your test debrief'
    initExp_args_dict['args']['targets'] = {}
    initExp_args_dict['args']['targets']['n'] = num_objects

    exp_info = []
    for ell in range(num_experiments):
        initExp_response_dict, exp_uid = test_utils.initExp(initExp_args_dict)
        exp_info += [exp_uid]

    # Generate participants
    participants = []
    pool_args = []
    for i in range(num_clients):
        participant_uid = '%030x' % random.randrange(16**30)
        participants.append(participant_uid)

        experiment = numpy.random.choice(exp_info)
        exp_uid = experiment['exp_uid']
        pool_args.append((exp_uid, participant_uid, total_pulls_per_client,
                          X_true, assert_200))
    results = pool.map(simulate_one_client, pool_args)

    for result in results:
        print result

    test_utils.getModel(exp_uid, 'PoolBasedTripletMDS', supported_alg_ids, alg_list)
Example 6
def test_api(assert_200=True,
             num_objects=5,
             desired_dimension=2,
             total_pulls_per_client=4,
             num_experiments=1,
             num_clients=6):

    pool = Pool(processes=num_clients)
    supported_alg_ids = ['TestAlg']
    alg_list = []
    for idx, alg_id in enumerate(supported_alg_ids):
        alg_item = {}
        alg_item['alg_id'] = alg_id
        alg_item['alg_label'] = alg_id
        alg_list.append(alg_item)

    params = []
    for algorithm in alg_list:
        params.append({
            'alg_label': algorithm['alg_label'],
            'proportion': 1. / len(alg_list)
        })
    algorithm_management_settings = {}
    algorithm_management_settings['mode'] = 'fixed_proportions'
    algorithm_management_settings['params'] = params

    # Test POST Experiment
    initExp_args_dict = {}
    initExp_args_dict['app_id'] = 'Tests'
    initExp_args_dict['args'] = {}
    initExp_args_dict['args']['participant_to_algorithm_management'] = 'one_to_many'
    initExp_args_dict['args']['algorithm_management_settings'] = algorithm_management_settings
    initExp_args_dict['args']['alg_list'] = alg_list
    initExp_args_dict['args']['instructions'] = 'You want instructions, here are your test instructions'
    initExp_args_dict['args']['debrief'] = 'You want a debrief, here is your test debrief'
    initExp_args_dict['args']['targets'] = {}
    initExp_args_dict['args']['targets']['n'] = num_objects

    exp_info = []
    for ell in range(num_experiments):
        initExp_response_dict, exp_uid = test_utils.initExp(initExp_args_dict)
        exp_info += [exp_uid]

    # Generate participants
    participants = []
    pool_args = []
    for i in range(num_clients):
        participant_uid = '%030x' % random.randrange(16**30)
        participants.append(participant_uid)

        experiment = numpy.random.choice(exp_info)
        exp_uid = experiment['exp_uid']
        pool_args.append(
            (exp_uid, participant_uid, total_pulls_per_client, assert_200))
    results = pool.map(simulate_one_client, pool_args)

    for result in results:
        print result

    test_utils.getModel(exp_uid, 'Tests', supported_alg_ids, alg_list)
Example 7
def test_api(assert_200=True, num_objects=4, desired_dimension=1,
             total_pulls_per_client=5, num_experiments=1,
             num_clients=7):
    true_weights = numpy.zeros(desired_dimension)
    true_weights[0] = 1.
    pool = Pool(processes=num_clients)
    supported_alg_ids = ['RandomSamplingLinearLeastSquares',
                         'RandomSamplingLinearLeastSquares']
    alg_list = []
    for idx, alg_id in enumerate(supported_alg_ids):
        alg_item = {}
        alg_item['alg_id'] = alg_id
        if idx == 0:
            alg_item['alg_label'] = 'Test'
        else:
            alg_item['alg_label'] = alg_id
        alg_item['test_alg_label'] = 'Test'
        alg_list.append(alg_item)
    params = []
    for algorithm in alg_list:
        params.append({'alg_label': algorithm['alg_label'],
                       'proportion': 1./len(alg_list)})

    algorithm_management_settings = {}
    algorithm_management_settings['mode'] = 'fixed_proportions'
    algorithm_management_settings['params'] = params

    targetset = []
    for i in range(num_objects):
        features = list(numpy.random.randn(desired_dimension))
        targetset.append({'primary_description': str(features),
                          'primary_type': 'text',
                          'alt_description': '%d' % (i),
                          'alt_type': 'text',
                          'meta': {'features': features}})

    # Test POST Experiment
    print '\n'*2 + 'Testing POST initExp...'
    initExp_args_dict = {}
    initExp_args_dict['app_id'] = 'PoolBasedBinaryClassification'
    initExp_args_dict['args'] = {}
    initExp_args_dict['args']['failure_probability'] = 0.01
    initExp_args_dict['args']['participant_to_algorithm_management'] = 'one_to_many'  # optional; or 'one_to_one'
    initExp_args_dict['args']['algorithm_management_settings'] = algorithm_management_settings  # optional
    initExp_args_dict['args']['alg_list'] = alg_list  # optional
    initExp_args_dict['args']['instructions'] = 'You want instructions, here are your test instructions'
    initExp_args_dict['args']['debrief'] = 'You want a debrief, here is your test debrief'

    initExp_args_dict['args']['targets'] = {'targetset': targetset}

    exp_info = []
    for ell in range(num_experiments):
        # Store one entry per experiment (the original appended each
        # experiment twice, once as exp_info_ and once as a fresh dict).
        initExp_response_dict, exp_info_ = test_utils.initExp(initExp_args_dict)
        exp_info += [exp_info_]
        exp_uid = initExp_response_dict['exp_uid']

        # Test GET Experiment
        initExp_response_dict = test_utils.getExp(exp_uid)

    # Generate participants
    ###################################

    participants = []
    pool_args = []
    for i in range(num_clients):
        participant_uid = '%030x' % random.randrange(16**30)
        participants.append(participant_uid)

        experiment = numpy.random.choice(exp_info)
        exp_uid = experiment['exp_uid']
        pool_args.append((exp_uid, participant_uid, total_pulls_per_client, true_weights, assert_200))

    print "participants are", participants
    results = pool.map(simulate_one_client, pool_args)

    for result in results:
        print result

    test_utils.getModel(exp_uid, 'PoolBasedBinaryClassification', supported_alg_ids, alg_list)
Example 8
def test_api(assert_200=True, num_experiments=1, num_clients=8):
    """
    method to test the app
    :param assert_200: boolean, default value True
    :param num_experiments: int, number of experiments to run
    :param num_clients: int, number of clients to simulate
    """
    # path to files (relative path, depends on where the test is called)
    target_file = './local/data/01_X/mol_img_dict.json'
    pretest_dist_fname = './local/data/02_TestDistribution/test_dist_LewisSF.csv'
    training_dist_fname = './local/data/03_TrainingPool/training_dist_LewisSF.csv'
    training_dataset_fname = './local/data/04_SampleDataset/training_dataset.csv'
    guard_dataset_fname = './local/data/04_SampleDataset/guard_dataset.csv'
    posttest_dist_fname = './local/data/02_TestDistribution/test_dist_LewisSF.csv'

    # keys
    pretest_dist_key = 'pretest_dist'
    training_data_key = 'training_data'
    posttest_dist_key = 'posttest_dist'
    guard_data_key = 'guard_data'
    alg_id_key = 'alg_id'
    alg_label_key = 'alg_label'
    time_required_key = 'time_required'
    monetary_gain_key = 'monetary_gain'

    # question count variables
    pretest_count = 2
    training_count = 6
    posttest_count = 4
    guard_gap = 5

    pool = Pool(processes=num_clients)
    supported_alg_ids = ['FixedTrainRandomTest', 'RandomTrainTest']
    alg_list = []

    # parameters for FixedTrainRandomTest
    alg_item = {}
    alg_item[alg_id_key] = supported_alg_ids[0]
    alg_item[alg_label_key] = supported_alg_ids[0]
    alg_item[pretest_dist_key] = read_csv_to_dictlist(pretest_dist_fname)
    alg_item[training_data_key] = read_csv_to_dictlist(training_dataset_fname)
    alg_item[posttest_dist_key] = read_csv_to_dictlist(posttest_dist_fname)
    alg_item[guard_data_key] = read_csv_to_dictlist(guard_dataset_fname)
    alg_item[time_required_key] = '5-10'
    alg_item[monetary_gain_key] = 'You will be entered in a lottery to win a $50 cash prize.'
    alg_list.append(alg_item)

    # parameters for RandomTrainTest
    alg_item = {}
    alg_item[alg_id_key] = supported_alg_ids[1]
    alg_item[alg_label_key] = supported_alg_ids[1]
    alg_item[pretest_dist_key] = read_csv_to_dictlist(pretest_dist_fname)
    alg_item[training_data_key] = read_csv_to_dictlist(training_dist_fname)
    alg_item[posttest_dist_key] = read_csv_to_dictlist(posttest_dist_fname)
    alg_item[guard_data_key] = read_csv_to_dictlist(guard_dataset_fname)
    alg_item[time_required_key] = '5-10'
    alg_item[monetary_gain_key] = 'You will be entered in a lottery to win a $50 cash prize.'
    alg_list.append(alg_item)

    params = []
    for algorithm in alg_list:
        params.append({
            'alg_label': algorithm['alg_label'],
            'proportion': 1. / len(alg_list)
        })
    algorithm_management_settings = {}
    algorithm_management_settings['mode'] = 'custom'  # switch between algorithms for each participant
    algorithm_management_settings['params'] = params

    # Test POST Experiment
    initExp_args_dict = {}
    initExp_args_dict['app_id'] = app_id  # app_id is a module-level global in the original test file
    initExp_args_dict['args'] = {}
    initExp_args_dict['args']['pretest_count'] = pretest_count
    initExp_args_dict['args']['training_count'] = training_count
    initExp_args_dict['args']['posttest_count'] = posttest_count
    initExp_args_dict['args']['guard_gap'] = guard_gap
    initExp_args_dict['args']['participant_to_algorithm_management'] = 'one_to_one'  # assign each participant to one condition only
    initExp_args_dict['args']['algorithm_management_settings'] = algorithm_management_settings
    initExp_args_dict['args']['alg_list'] = alg_list
    initExp_args_dict['args']['instructions'] = 'Answer the following question.'
    initExp_args_dict['args']['debrief'] = 'Thank you for your participation. Your response has been recorded.'
    # The total number of questions a participant sees is computed from
    # pretest_count, training_count, posttest_count, and the number of
    # instruction questions.
    initExp_args_dict['args']['num_tries'] = 1

    experiment = {}
    experiment['initExp'] = initExp_args_dict
    experiment['primary_type'] = 'json-urls'
    experiment['primary_target_file'] = target_file

    targets = generate_target_blob(
        prefix=str(datetime.date.today()),
        primary_file=experiment['primary_target_file'],
        primary_type=experiment['primary_type'],
        alt_file=experiment.get('alt_target_file', None),
        experiment=experiment,
        alt_type=experiment.get('alt_type', 'text'))
    initExp_args_dict['args']['targets'] = {'targetset': targets}

    exp_info = []
    for ell in range(num_experiments):
        initExp_response_dict, exp_uid = test_utils.initExp(initExp_args_dict)
        exp_info += [exp_uid]

    # Generate participants
    participants = []
    pool_args = []
    for i in range(num_clients):
        participant_uid = '%030x' % random.randrange(16**30)
        participants.append(participant_uid)

        experiment = numpy.random.choice(exp_info)
        exp_uid = experiment['exp_uid']
        pool_args.append((exp_uid, participant_uid, assert_200, pretest_count,
                          training_count, posttest_count, guard_gap))
    results = pool.map(simulate_one_client, pool_args)

    for result in results:
        print result
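Example 8 depends on two helpers that are not shown here, read_csv_to_dictlist and generate_target_blob, plus a datetime import. A minimal sketch of the first, assuming it simply loads each CSV row into a dict keyed by the header row:

import csv

def read_csv_to_dictlist(fname):
    # Assumed behavior: one dict per data row, keyed by the CSV header line.
    with open(fname) as f:
        return [dict(row) for row in csv.DictReader(f)]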
Example 9
def test_api(assert_200=True, num_objects=5, desired_dimension=2,
             total_pulls_per_client=4, num_experiments=1, num_clients=6):
    x = numpy.linspace(0, 1, num_objects)
    X_true = numpy.vstack([x, x]).transpose()

    pool = Pool(processes=num_clients)
    supported_alg_ids = ['CrowdKernel', 'RandomSampling',
                         'UncertaintySampling', 'ValidationSampling', 'STE']
    alg_list = []
    for idx, alg_id in enumerate(supported_alg_ids):
        alg_item = {}
        alg_item['alg_id'] = alg_id
        if alg_id == 'ValidationSampling':
            alg_item['alg_label'] = 'Test'
            alg_item['params'] = {'query_list': [[q1, q2, q3]
                                                 for q1 in [0, 1, 2]
                                                 for q2 in [0, 1, 2]
                                                 for q3 in [0, 1, 2]]}
        else:
            alg_item['alg_label'] = alg_id
        alg_item['test_alg_label'] = 'Test'
        alg_list.append(alg_item)

    params = []
    for algorithm in alg_list:
        params.append({'alg_label': algorithm['alg_label'],
                       'proportion': 1./len(alg_list)})
    algorithm_management_settings = {}
    algorithm_management_settings['mode'] = 'fixed_proportions'
    algorithm_management_settings['params'] = params

    # Test POST Experiment
    initExp_args_dict = {}
    initExp_args_dict['app_id'] = 'PoolBasedTripletMDS'
    initExp_args_dict['args'] = {}
    initExp_args_dict['args']['d'] = desired_dimension
    initExp_args_dict['args']['failure_probability'] = 0.01
    initExp_args_dict['args']['participant_to_algorithm_management'] = 'one_to_many'  # optional; or 'one_to_one'
    initExp_args_dict['args']['algorithm_management_settings'] = algorithm_management_settings  # optional
    initExp_args_dict['args']['alg_list'] = alg_list  # optional
    initExp_args_dict['args']['instructions'] = 'You want instructions, here are your test instructions'
    initExp_args_dict['args']['debrief'] = 'You want a debrief, here is your test debrief'
    initExp_args_dict['args']['targets'] = {}
    initExp_args_dict['args']['targets']['n'] = num_objects

    exp_info = []
    for ell in range(num_experiments):
        initExp_response_dict, exp_uid = test_utils.initExp(initExp_args_dict)
        exp_info += [exp_uid]

    # Generate participants
    participants = []
    pool_args = []
    for i in range(num_clients):
        participant_uid = '%030x' % random.randrange(16**30)
        participants.append(participant_uid)

        experiment = numpy.random.choice(exp_info)
        exp_uid = experiment['exp_uid']
        pool_args.append((exp_uid, participant_uid, total_pulls_per_client,
                          X_true, assert_200))
    results = pool.map(simulate_one_client, pool_args)

    for result in results:
        print result

    test_utils.getModel(exp_uid, 'PoolBasedTripletMDS', supported_alg_ids, alg_list)
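All nine variants share the same calling convention, so any one of them can be smoke-tested directly; a minimal driver (the small argument values are arbitrary):

if __name__ == '__main__':
    # Keep the load light for a quick smoke test; any keyword argument
    # in the signatures above can be overridden the same way.
    test_api(assert_200=True, num_experiments=1, num_clients=2)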