Example #1
0
# Experiment on the knowledge-renewal capability, modeled as a finite
# time window of memory.

# Reported in: Clark, J. (2016). When are Real-Time Feedback Loops Most Valuable? New Insights from Bandit Simulations of Decision Making in Turbulent Environments. Proceedings of HICSS-49.
# Its results are featured in figs. 7-9.
# For figs. 3-6, see clark_experiment_01.py

# Import statements: do not change

import random
import sys
from os import path
sys.path.insert(0, path.join(sys.path[0], "bandito"))
from bandito.banditexperiment import BanditExperiment

# Run using Python 3.4 on Windows 7, random seed 12345
random.seed(12345)

# Experimental design: memory is the key treatment here; only two latency
# levels are needed to compute the "cost of latency".
_treatments = {
    "strategy": [0.02, 0.25, 0.5, 0.75, 1],
    "turbulence": [0, 0.005, 0.01, 0.02, 0.04, 0.08, 0.16, 0.32],
    "latency": [0, 16],
    "memory": [50, 100, 150, 200, 250, 300, 350, 400, 450, 500],
    "replications": 1000,
    "experiment_name": "clark02",
}
BanditExperiment(**_treatments).run()
Example #2
0
# Fix: `sys` and `random` are used below (sys.path.insert, random.seed) but
# were never imported, so this script raised NameError on its first line of code.
import random
import sys
from os import path
sys.path.insert(0,path.join(sys.path[0],"bandito"))
from bandito.banditexperiment import BanditExperiment

# By setting a random number seed, you can try to guarantee that your results are repeatable.
# Making note of your operating system and Python versions is also advisable.
# Simply comment out the following two lines if you want new random results each time you run the program.

# test experiments used random seed 12345
random.seed(12345)

# Define the experiment:
# Experimental treatments are defined by assigning lists of values to the arguments 
# of the BanditExperiment function.  The simulation engine will iterate through the lists,
# running the full number of simulation replications (500 by default) for each value. For
# example, Posen & Levinthal's (2012) Figure #1 compared five levels of the strategy variable
# tau from 0.02 to 1.  To replicate their experiment, we assign strategy=[0.02,0.25,0.5,0.75,1]
# and leave all other arguments with their defaults.
#
# Some other arguments do not set up experimental treatments but control the simulation parameters:
# replications, arms, turns, debug, and experiment_name. The optional experiment_name determines the
# name of the output files.  If not assigned, the current datetime will be used.
#
# Default values are used for arguments not assigned values.  
# See bandito/banditexperiment.py to examine the default arguments.

BanditExperiment(strategy=[0.02,0.25,0.5,0.75,1], replications=100, experiment_name="sample").run()

# Run this file by typing something like: python sample_experiment.py
# Output files will be found in the 'output' directory.
Example #3
0
# Fix: `random` is used below (random.seed) but was never imported,
# so this script raised NameError before the experiment could start.
import random
import sys
from os import path
sys.path.insert(0,path.join(sys.path[0],"bandito"))
from bandito.banditexperiment import BanditExperiment

# By setting a random number seed, you can try to guarantee that your results are repeatable.
# Making note of your operating system and Python versions is also advisable.
# Simply comment out the following two lines if you want new random results each time you run the program.

random.seed(12345)

# Define the experiment:
# Experimental treatments are defined by assigning lists of values to the arguments 
# of the BanditExperiment function.  The simulation engine will iterate through the lists,
# running the full number of simulation replications (500 by default) for each value. For
# example, Posen & Levinthal's (2012) Figure #1 compared five levels of the strategy variable
# tau from 0.02 to 1.  To replicate their experiment, we assign strategy=[0.02,0.25,0.5,0.75,1]
# and leave all other arguments with their defaults.
#
# Some other arguments do not set up experimental treatments but control the simulation parameters:
# replications, arms, turns, debug, and experiment_name. The optional experiment_name determines the
# name of the output files.  If not assigned, the current datetime will be used.
#
# Default values are used for arguments not assigned values.  
# See bandito/banditexperiment.py to examine the default arguments.

BanditExperiment(strategy=[0.02,0.25,0.5,0.75,1], replications=25000, experiment_name="PLfig1").run()

# Run this file by typing something like: python sample_experiment.py
# Output files will be found in the 'output' directory.
Example #4
0
# Fix: `sys` and `random` are used below (sys.path.insert, random.seed) but
# were never imported, so this script raised NameError immediately.
import random
import sys
from os import path
sys.path.insert(0, path.join(sys.path[0], "bandito"))
from bandito.banditexperiment import BanditExperiment

# By setting a random number seed, you can try to guarantee that your results are repeatable.
# Making note of your operating system and Python versions is also advisable.
# Simply comment out the following two lines if you want new random results each time you run the program.

random.seed(12345)

# Variations on the simulation to test the robustness of P+L 2012's figure 3 to different parameters

# Short-horizon variant: 5 arms over 100 turns.
BanditExperiment(arms=[5],
                 turns=[100],
                 memory=[100],
                 strategy=[0.02, 0.25, 0.5, 0.75, 1],
                 turbulence=[0, 0.005, 0.01, 0.02, 0.04, 0.08, 0.16, 0.32],
                 replications=1000,
                 experiment_name="fig3_5arms100turns").run()
# Long-horizon variant: same 5 arms, but 2000 turns (memory scaled to match).
BanditExperiment(arms=[5],
                 turns=[2000],
                 memory=[2000],
                 strategy=[0.02, 0.25, 0.5, 0.75, 1],
                 turbulence=[0, 0.005, 0.01, 0.02, 0.04, 0.08, 0.16, 0.32],
                 replications=1000,
                 experiment_name="fig3_5arms2000turns").run()
BanditExperiment(arms=[20],
                 turns=[100],
                 memory=[100],
                 strategy=[0.02, 0.25, 0.5, 0.75, 1],
                 turbulence=[0, 0.005, 0.01, 0.02, 0.04, 0.08, 0.16, 0.32],
Example #5
0
# Replication of the experiment behind Posen & Levinthal (2012), figure 3.

# Import statements: do not change

import random
import sys
from os import path
sys.path.insert(0, path.join(sys.path[0], "bandito"))
from bandito.banditexperiment import BanditExperiment

# Seeding the RNG makes runs repeatable; also record your OS and Python
# versions.  Comment out the seed line below for fresh results each run.
random.seed(12345)

# Experimental design: five strategy (tau) levels crossed with eight
# turbulence levels, 25000 replications each.
_treatments = {
    "strategy": [0.02, 0.25, 0.5, 0.75, 1],
    "turbulence": [0, 0.005, 0.01, 0.02, 0.04, 0.08, 0.16, 0.32],
    "replications": 25000,
    "experiment_name": "PLfig3",
}
BanditExperiment(**_treatments).run()

# Warning: this took over 14 hours for my laptop to simulate.  While testing, you may want to decrease the number of replications to fewer than 25000!
Example #6
0
# Basic bandit simulation (as in P+L fig 1) run at several latency levels in
# the feedback loop and several turbulence levels, to show the "value of
# real-time feedback" — equivalently, the "cost of latency" — under
# different amounts of turbulence.

# Reported in: Clark, J. (2016). When are Real-Time Feedback Loops Most Valuable? New Insights from Bandit Simulations of Decision Making in Turbulent Environments. Proceedings of HICSS-49.
# Its results are featured in figs. 3-6.
# For figs. 7-9, see clark_experiment_02.py

# Import statements: do not change

import random
import sys
from os import path
sys.path.insert(0, path.join(sys.path[0], "bandito"))
from bandito.banditexperiment import BanditExperiment

# Run using Python 3.4 on Windows 7, random seed 12345
random.seed(12345)

# Experimental design: 1000 reps per treatment seems a little more reasonable
# than P+L's 25000.  240 treatments x 1000 replications; roughly 6 hours on
# my laptop.  Amended as clark01b with the initialization bias fix.
_treatments = {
    "strategy": [0.02, 0.25, 0.5, 0.75, 1],
    "turbulence": [0, 0.005, 0.01, 0.02, 0.04, 0.08, 0.16, 0.32],
    "latency": [0, 1, 2, 4, 8, 16],
    "replications": 1000,
    "experiment_name": "clark01",
}
BanditExperiment(**_treatments).run()