コード例 #1
0
def main(subject, session, settings, test=False, skip_calibration=False):
    """Run the calibration -> task -> payout pipeline for one subject/session.

    Parameters
    ----------
    subject, session :
        Identifiers used to build the output paths and passed to the sessions.
    settings :
        Settings profile handed to the experiment sessions.
    test : bool
        When True, skip acquisition and fit the curve on a canned test log.
    skip_calibration : bool
        When True (and not in test mode), reuse an existing calibration log
        instead of running a new calibration session.
    """
    output_dir, output_str = get_output_dir_str(subject, session, 'calibrate',
                                                None)

    if test:
        # Canned log lets the psychometric fit run without any acquisition.
        log_file = op.abspath(
            'logs/test_log/sub-test_ses-test_task-calibrate_events.tsv')
    else:
        if not skip_calibration:
            run_experiment(CalibrationSession,
                           task='calibrate',
                           subject=subject,
                           session=session,
                           settings=settings,
                           use_runs=False)

        # BUGFIX: derive the log path only in the non-test branch; previously
        # this ran unconditionally and clobbered the test log path set above.
        logging.warning(op.join(output_dir, output_str))
        log_file = op.join(output_dir, output_str + '_events.tsv')

    x_lower, x_upper = fit_psychometric_curve(log_file)
    logging.warning(f'Range: {x_lower}, {x_upper}')

    # In test mode build only a single trial to keep the run short.
    limit = 1 if test else None

    fn = make_trial_design(subject, x_lower, x_upper, limit=limit)
    logging.warning(fn)

    task_session = run_experiment(TaskSession,
                                  task='task',
                                  session=session,
                                  settings=settings,
                                  subject=subject)

    txt, payout = get_payout(subject)
    txt += '\nPlease remain seated until you get picked up for payment.'

    payout_folder = task_session.settings['output'].get('payout_folder')

    # Guard against a missing setting (None) as well as a non-existent folder;
    # op.exists(None) would raise TypeError.
    if payout_folder and op.exists(payout_folder):
        payout_fn = op.join(payout_folder, f'sub-{subject}_payout.txt')

        with open(payout_fn, 'w') as f:
            f.write(str(payout))

    payout_session = TextSession(txt=txt,
                                 output_str='txt',
                                 output_dir=output_dir,
                                 settings_file=task_session.settings_file)

    payout_session.run()
コード例 #2
0
# Collect every knob the training loop needs in a single dictionary.
training_options = {
    'aec_only_epochs': 3,
    'init_full_epochs': 15,
    'best_model_epochs': 500,
    'num_init_models': 20,
    'loss_fn': RelMSE(),
    'optimizer': keras.optimizers.Adam,
    'optimizer_opts': {},
    'batch_size': 32,
    'data_train_len': data_train_len,
    'loss_weights': loss_weights,
}

# ---------------------------------------------------------------
# Launch the experiment
# ---------------------------------------------------------------

# Seed for this run, drawn uniformly from [0, 10^10].
random_seed = r.randint(0, 10 ** 10)

# Custom objects Keras needs when the saved model is loaded back in.
custom_objs = {"RelMSE": RelMSE}

run_experiment(random_seed=random_seed,
               expt_name=expt_name,
               data_file_prefix=data_file_prefix,
               training_options=training_options,
               network_config=network_config,
               custom_objects=custom_objs)
コード例 #3
0
ファイル: rcv1-th.py プロジェクト: jbcdnr/memsgd-cpp
                 **common_params))

ks = [1, 5, 10]

# For each value of k, register one memory-enabled configuration with
# tau_no_delay and one with tau_delay (takeTop=False in both).
# BUGFIX: the enumerate index was unused; iterate the values directly.
for k in ks:
    params_with_tag.append(
        label_params("k={} no delay".format(k),
                     useMemory=True,
                     takeK=k,
                     takeTop=False,
                     tau=tau_no_delay,
                     **common_params))

    params_with_tag.append(
        label_params("k={}".format(k),
                     useMemory=True,
                     takeK=k,
                     takeTop=False,
                     tau=tau_delay,
                     **common_params))

# params_with_tag.append(label_params("top k=10", useMemory=True, takeK=10, takeTop=True, tau=tau_delay, **common_params))
# params_with_tag.append(label_params("top k=1", useMemory=True, takeK=1, takeTop=True, tau=tau_delay, **common_params))

#
# RUN THE EXPERIMENT
#

run_experiment(dataset_params, params_with_tag)
コード例 #4
0
from utils import run_experiment
from task import TaskSessionMRI

if __name__ == '__main__':
    session_cls = TaskSessionMRI
    task = 'task'

    # Four back-to-back runs (1..4) of the 7T session.
    for run_nr in range(1, 5):
        run_experiment(session_cls,
                       run=run_nr,
                       session='7t',
                       task=task,
                       settings='7t',
                       use_runs=True)
コード例 #5
0
            The part of the pie chart that is lightly colored indicates
            the probability of a lottery you will gain the amount of
            Swiss Francs represented by the pile.

            Your task is to either select the first lottery or
            the second lottery, by using your index or middle finger.
            Immediately after your choice, we ask how certain you were
            about your choice from a scale from 1 (very CERTAIN)
            to 4 (very UNCERTAIN).

            NOTE: if you are to late in responding, or you do not 
            respond. You will gain no money for that trial.

            Take some time to take a break, if you want to.

            Press any of your buttons to continue.

            """

        super().__init__(session=session,
                         trial_nr=trial_nr,
                         phase_durations=phase_durations,
                         txt=txt,
                         **kwargs)


if __name__ == '__main__':
    # Launch the MRI task session through the shared experiment runner.
    run_experiment(TaskSessionMRI, task='task')
コード例 #6
0
#!/usr/bin/env python

import utils
import sys

# Get experiment file from command-line argument
# Every command-line argument names an experiment file; run them in order.
# (Loop variable renamed so it no longer shadows the historical builtin.)
for expt_file in sys.argv[1:]:
    print("=== Running %s" % expt_file)
    utils.run_experiment(expt_file)
コード例 #7
0
from Agents.NPC import SWA, wSWA
from Agents.PC import CTO, DCTO
from Agents.Naive import Random, RoundRobin
from Environments.RottenBandits import NonParametricRottenBandit, ParametricRottenBandit
from utils import run_experiment
import numpy as np

# Experiment configuration: RNG seed, horizon T, and repeats per setting.
seed = 0
T = 30000
experiment_repeats = 100

#%% Naive agents < ~1m each experiment
# NOTE(review): the environments are constructed at call time after seeding
# NumPy's global RNG, so statement order below matters if the constructors
# or run_experiment draw from that RNG — do not reorder these calls.
np.random.seed(seed)
run_experiment(NonParametricRottenBandit(), Random, 'NP-Random', T,
               experiment_repeats)
run_experiment(NonParametricRottenBandit(), RoundRobin, 'NP-RR', T,
               experiment_repeats)

run_experiment(ParametricRottenBandit(seed=seed), Random, 'P-AV-Random', T,
               experiment_repeats)
run_experiment(ParametricRottenBandit(seed=seed), RoundRobin, 'P-AV-RR', T,
               experiment_repeats)

run_experiment(ParametricRottenBandit(ANV=True, seed=seed), Random,
               'P-ANV-Random', T, experiment_repeats)
run_experiment(ParametricRottenBandit(ANV=True, seed=seed), RoundRobin,
               'P-ANV-RR', T, experiment_repeats)

#%% SWA and wSWA ~ 1-2m each experiment
# Re-seed so this section is reproducible independently of the one above.
np.random.seed(seed)
run_experiment(NonParametricRottenBandit(), SWA, 'NP-SWA', T,
コード例 #8
0
            with tf.name_scope('weights'):
                nce_weights = tf.Variable(
                    tf.truncated_normal(
                        shape=[vocabulary_size, embedding_size],
                        stddev=1.0 / math.sqrt(embedding_size),
                    ))
            with tf.name_scope('biases'):
                nce_biases = tf.Variable(tf.zeros([vocabulary_size]))
        # Compute the cosine similarity between minibatch examples and all embeddings.
        norm = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
        normalized_embeddings = embeddings / norm
    return dict(
        embeddings=embeddings,
        nce_weights=nce_weights,
        nce_biases=nce_biases,
        norm=norm,
        normalized_embeddings=normalized_embeddings,
    )


if __name__ == '__main__':
    cli = argparse.ArgumentParser()
    cli.add_argument("--filename")
    cli.add_argument("--pfile")  # parameters file
    # NOTE(review): this is a plain string option, so ANY non-empty value
    # (even "0" or "false") enables grid search — confirm that is intended.
    cli.add_argument("--is_grid")
    parsed = cli.parse_args()

    # Dispatch: grid search over parameters, or a single experiment run.
    if parsed.is_grid:
        run_grid_search(parsed, main)
    else:
        run_experiment(parsed, main)
コード例 #9
0
# Append the MNIST training set to the dataset pool and wrap the
# concatenation in the project's ModifiedDataset adapter.
dtsets.append(mnist_dataset)
train_dataset = ModifiedDataset(ConcatDataset(dtsets))

test_dataset = ModifiedDataset(datasets.MNIST(root='./data/', train=False, transform=transforms.ToTensor()))

test_loader = torch.utils.data.DataLoader(dataset=test_dataset, batch_size=1000, shuffle=False)
criterion = nn.CrossEntropyLoss()


#Contains most of the parameters needed for an experiment
#CONFIGURABLE STUFF
NUM_TRIALS = 10
max_training_num = 5020
hyper_params = {"learning_rate": 0.001, "sampling_size": int(len(train_dataset)/6), "selection_size": 250, "max_training_num": max_training_num, "NUM_EPOCHS": 1, "bootstrap_samplesize": 20, "reset_model_per_selection": False}

# SECURITY NOTE(review): the Comet API key is hard-coded below; it should be
# moved to an environment variable or a config file before this is shared.
experiment = Experiment(api_key="Gncqbz3Rhfy3MZJBcX7xKVJoo", project_name="comp652", workspace="comp652")
experiment.log_parameters(hyper_params)

# Acquisition functions to compare; each gets a fresh model and optimizer.
myfunctions = [AcquisitionFunctions.Random, AcquisitionFunctions.Smallest_Margin, AcquisitionFunctions.Density_Max,AcquisitionFunctions.Density_Entropy, AcquisitionFunctions.SN_Entropy, AcquisitionFunctions.SN_BALD, AcquisitionFunctions.Variation_Ratios, AcquisitionFunctions.Mean_STD]
# One shared bootstrap sample so every acquisition function starts from
# the same initial labelled set.
random_bootstrap_samples = random.sample(range(0, len(train_dataset)), hyper_params["bootstrap_samplesize"])

for j in range(len(myfunctions)):
    print()
    print('-------------------------------------------------------')
    print("Processing function {}".format(j+1))
    print("Name = {}".format(myfunctions[j].name))
    # Fresh model/optimizer per acquisition function to keep runs comparable.
    model = convnet_mnist(10).to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr= hyper_params["learning_rate"])
    myselector = Selector(myfunctions[j](selection_size = hyper_params["selection_size"]))
    acc_random = run_experiment(train_dataset, test_dataset, test_loader,  model, hyper_params["sampling_size"], myselector, optimizer, criterion, myfunctions[j].name, experiment, max_training_num, NUM_TRIALS, hyper_params["NUM_EPOCHS"], hyper_params["learning_rate"], random_bootstrap_samples, hyper_params["reset_model_per_selection"])
コード例 #10
0
        n_repeats_stimulus = self.settings['mapper'].get('n_repeats_stimulus')

        colors = sample_isis(n_blocks * block_length * n_repeats_stimulus)

        for block in range(n_blocks):
            for trial_nr, n_dots in enumerate(design):
                trial_nr += block * block_length + 1

                color_ix = (trial_nr-1) * \
                    n_repeats_stimulus, trial_nr*n_repeats_stimulus
                self.trials.append(
                    self.Trial(
                        session=self,
                        trial_nr=trial_nr,
                        phase_durations=[],
                        n_dots=n_dots,
                        colors=colors[color_ix[0]:color_ix[1]],
                        verbose=True,
                    ))

        outro_trial = OutroTrial(session=self,
                                 trial_nr=n_blocks * len(design) + 1,
                                 phase_durations=[np.inf])
        self.trials.append(outro_trial)


if __name__ == '__main__':
    # Launch three consecutive runs of the mapper session.
    run_experiment(MapperSession, task='mapper', n_runs=3)
コード例 #11
0
ファイル: sampling_experiments.py プロジェクト: kklein/ttts
def run_t1_ttts(parameter):
    """Run one experiment using the ``select_arm_ttts`` arm-selection rule."""
    utils.run_experiment(parameter, select_arm_ttts)
コード例 #12
0
ファイル: sampling_experiments.py プロジェクト: kklein/ttts
def run_tm_tsus(parameter):
    """Run one experiment with ``select_arms_tsus`` bound to ``parameter.m``.

    PEP 8 (E731): an inner ``def`` replaces the lambda that was assigned
    to a name; behavior is unchanged.
    """
    def sampler(prior):
        # Bind parameter.m so the runner sees a single-argument sampler.
        return select_arms_tsus(prior, parameter.m)

    utils.run_experiment(parameter, sampler)
コード例 #13
0
ファイル: sampling_experiments.py プロジェクト: kklein/ttts
def run_tm_ts(parameter):
    """Run one experiment using the ``select_arms_ts`` arm-selection rule."""
    utils.run_experiment(parameter, select_arms_ts)
コード例 #14
0
ファイル: sampling_experiments.py プロジェクト: kklein/ttts
def run_tm_uniform(parameter):
    """Run one experiment using the ``select_arms_uniform`` selection rule."""
    utils.run_experiment(parameter, select_arms_uniform)
コード例 #15
0
from Agents.NPC import SWA, wSWA
from Agents.PC import CTO
from Environments.RottenBandits import NonParametricRottenBandit, ParametricRottenBandit
from utils import run_experiment
import numpy as np

# Fix the global RNG before any environment is constructed.
np.random.seed(5)
T = 30000
experiment_repeats = 100

# (environment, agent class, label) triples to evaluate.
experiments = [
    (ParametricRottenBandit(), wSWA, 'P-AV-wSWA'),
    (ParametricRottenBandit(), SWA, 'P-AV-SWA'),
]

for env, agent, label in experiments:
    run_experiment(env, agent, label,
                   T=T, experiment_repeats=experiment_repeats)