Example #1
    def __init__(self, runner):
        Experiment.__init__(self, runner)

        runner.paths = \
           [ ["B29", "B16", "B15", "B1"]
           , ["B29", "B17", "B15", "B1"]
           , ["B29", "B14", "B1"]
           , ["B29", "B12", "B1"]
           , ["B29", "B13", "B1"]
           , ["B29", "B11", "B1"]
           , ["B29", "B18", "B1"]
           , ["B29", "B28", "B26", "B1"]
           , ["B29", "B27", "B26", "B1"]
           , ["B29", "B20", "B19", "B1"]
           , ["B29", "B25", "B1"]
           , ["B29", "B10", "B8", "B1"]
           , ["B29", "B24", "B23", "B1"]
           , ["B29", "B22", "B21", "B1"]
           , ["B29", "B6", "B1"]
           , ["B29", "B4", "B2", "B1"]
           , ["B29", "B3", "B1"]
           , ["B29", "B7", "B5", "B1"]
           , ["B29", "B9", "B1"]
           ]

        runner.lineMap = \
            { "B29": 46
            , "B28": 47
            , "B27": 49
            , "B26": 52
            , "B25": 57
            , "B24": 59
            , "B23": 61
            , "B22": 63
            , "B21": 65
            , "B20": 67
            , "B19": 70
            , "B18": 79
            , "B17": 80
            , "B16": 81
            , "B15": 83
            , "B14": 86
            , "B13": 88
            , "B12": 96
            , "B11": 98
            , "B10": 100
            , "B9": 101
            , "B8": 103
            , "B7": 105
            , "B6": 106
            , "B5": 108
            , "B4": 110
            , "B3": 111
            , "B2": 113
            , "B1": 116
            , "B0": 122
            }

        self.minInput = -10
        self.maxInput = 10 
Example #2
    def menuButton5(self, control):
        exampleHead = Head()
        exampleHeadModel = HeadModel(exampleHead)
        exampleHead.setSamplingFrequency(256)
        exampleHead.addRegistrationSite([0, 0, 0])
        exampleExperiment = Experiment(exampleHead.getSamplingFrequency(), 100.0)
        
        # Randomizing stimuli times
        stimuli = []
        for i in range(100):
            stimuli.append(i + 0.2 + random() / 2)
        exampleExperiment.setStimulusTimes([stimuli])
        exampleStimulus = Stimulus('Stim', exampleHead)
        exampleStimulus.setStimulusTimes(exampleExperiment.getStimulusTimes()[0])

        # Creating many generators with random frequencies in the range 2-20 Hz and
        # random phases. Connecting some of them to the stimulus generator
        exampleGenerators = []
        exampleConnections = []
        for i in range(100):
            randomFrequency = 2.0 + random() * 18
            randomPhaseShift = random()
            exampleGenerators.append(GeneratorSine('Gen', exampleHead, frequency=randomFrequency, phaseShift=randomPhaseShift))
            if(random() > 0.75):
                exampleConnections.append(Connection('Con', exampleHead, exampleStimulus, exampleGenerators[i]))

        exampleExperiment.setRecording(exampleHead.runSimulation(exampleExperiment.getDuration()))
        exampleExperiment.plotRecording()
    def test_experiment_not_transformed_test(self):
        file_path = "../Datasets/HIV_37_Samples/MergedDataset.csv"
        loaded_data = FileLoader.load_file(file_path)
        data_manager = DataManager()
        data_manager.set_data(loaded_data)
        data_manager.split_data(test_split=0.19, train_split=0.62)
        learning_model = FakePredictionModel()
        exp = Experiment(data_manager, learning_model)

        exp.run_experiment()

        self.assertEqual(0, exp.get_r2(SplitTypes.Test))
    def test_experiment(self):
        output_filename_header = FileLoader.create_output_file()
        time.sleep(1)
        loaded_algorithm_combinations = FileLoader.read_csv_file("../Datasets/test.csv")
        file_path = "../Datasets/HIV_37_Samples/MergedDataset.csv"
        loaded_data = FileLoader.load_file(file_path)
        # feature_eliminator = SelectKBest(f_regression,k=k_value)

        print(loaded_algorithm_combinations[0])
        output_filename = FileLoader.create_output_file()

        for i in range(0, 80):
            normalizer = self.getnormalizer(loaded_algorithm_combinations[i][0])

            feature_eliminator = self.getfeature_eliminator(loaded_algorithm_combinations[i][1])
            the_model = self.get_model(loaded_algorithm_combinations[i][2])

            print "taking ", type(normalizer).__name__, "and feature selector ", type(
                feature_eliminator
            ).__name__, "model", type(the_model).__name__
            FileLoader.write_model_in_file(
                output_filename_header,
                type(normalizer).__name__,
                type(feature_eliminator).__name__,
                type(the_model).__name__,
                "",
                "",
                "",
                "",
                "",
            )

            the_data_manager = DataManager(feature_eliminator, normalizer=normalizer)
            the_data_manager.set_data(loaded_data)
            the_data_manager.split_data(test_split=0.15, train_split=0.70)
            exp = Experiment(the_data_manager, the_model)

            exp.run_experiment()
            # arr_selected = feature_eliminator.get_support(indices=True)

            # if(exp.get_r2(SplitTypes.Train) > 0 and exp.get_r2(SplitTypes.Valid) > 0 and exp.get_r2(SplitTypes.Test) >  0):
            FileLoader.write_model_in_file(
                output_filename,
                type(normalizer).__name__,
                type(feature_eliminator).__name__,
                type(the_model).__name__,
                "",
                exp.fitness_matrix[0],
                exp.get_r2(SplitTypes.Train),
                exp.get_r2(SplitTypes.Valid),
                exp.get_r2(SplitTypes.Test),
            )
    def test_experiment_all_zeros_r2_1(self):
        the_data_manager = DataManager()
        array_all_zeroes = np.zeros((37, 397))
        the_data_manager.set_data(array_all_zeroes)
        the_data_manager.split_data(test_split=0.19, train_split=0.62)

        the_model = svm.SVR()
        exp = Experiment(the_data_manager, the_model)
        exp.run_experiment()

        r2_train = exp.get_r2(SplitTypes.Train)
        expected = 1.0
        self.assertEqual(r2_train, expected)
    def test_experiment_svm_svr_37dataset_r2_train(self):
        file_path = "../Datasets/HIV_37_Samples/MergedDataset.csv"
        loaded_data = FileLoader.load_file(file_path)
        the_data_manager = DataManager()
        the_data_manager.set_data(loaded_data)
        the_data_manager.split_data(test_split=0.19, train_split=0.62)
        the_model = svm.SVR()
        exp = Experiment(the_data_manager, the_model)
        exp.run_experiment()

        r2_train = exp.get_r2(SplitTypes.Train)
        expected_svm_r2_value = 0.93994377385638073
        self.assertEqual(r2_train, expected_svm_r2_value)
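
    # Note: asserting exact float equality on an r2 score (as above) is brittle
    # across library versions; assertAlmostEqual, used in a later example here,
    # is the safer pattern, e.g.:
    #     self.assertAlmostEqual(r2_train, expected_svm_r2_value, places=7)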
Example #7
	def __init__(self, measurementSizeX, measurementSizeY, measurementSizeZ, psfSizeX, psfSizeY, psfSizeZ, homeDirectory):	
		Experiment.__init__(self,measurementSizeX, measurementSizeY, measurementSizeZ, psfSizeX, psfSizeY, psfSizeZ, homeDirectory)
	
		# parameters of the top sphere
		self.spherePositionX = self.objectSizeX / 2
		self.spherePositionY = self.objectSizeY / 2
		self.spherePositionZ = self.objectSizeZ / 2
		self.sphereRadius = 5
		self.sphereIntensity = 100

		self.background = 0.000001

		self.directory = homeDirectory + "/Sphere/"
    def test_experiment_sum_of_squares_zeros_test(self):
        the_data_manager = DataManager()
        an_array_of_all_ones = np.ones((37, 397))
        the_model = svm.SVR()
        the_data_manager.set_data(an_array_of_all_ones)
        the_data_manager.split_data(test_split=0.19, train_split=0.62)
        exp = Experiment(the_data_manager, the_model)

        exp.run_experiment()
        sum_of_squares_test = exp.get_sum_of_squares(SplitTypes.Test)

        expected = 0
        self.assertEqual(expected, sum_of_squares_test)
Example #9
    def menuButton1(self, control):
        exampleHead = Head()
        exampleHeadModel = HeadModel(exampleHead)
        exampleHead.setSamplingFrequency(10)
        exampleHead.addRegistrationSite([0, 0, 0])

        exampleStimulus = StimulusDummy('Stim', exampleHead)
        exampleGenerator = GeneratorDummy('Gen', exampleHead)
        exampleConnection = ConnectionDummy('Con', exampleHead, exampleStimulus, exampleGenerator)
        
        exampleExperiment = Experiment(exampleHead.getSamplingFrequency(), 1.0, exampleHead.runSimulation( 1.0 ))
        output = str(exampleExperiment.getRecording())
        self.log.SetValue(output)
        self.logWindow.Show()
    def test_experiment_svr_37dataset_r2_test(self):
        file_path = "../Datasets/HIV_37_Samples/MergedDataset.csv"
        loaded_data = FileLoader.load_file(file_path)
        the_data_manager = DataManager()
        the_data_manager.set_data(loaded_data)
        the_data_manager.split_data(test_split=0.19, train_split=0.62)
        the_model = svm.SVR()
        exp = Experiment(the_data_manager, the_model)

        exp.run_experiment()

        r2_test = exp.get_r2(SplitTypes.Test)
        expected_svm_r2_value = -0.33005242525900247
        self.assertEqual(r2_test, expected_svm_r2_value)
    def test_experiment_sum_of_squares_real37_test(self):
        file_path = "../Datasets/HIV_37_Samples/MergedDataset.csv"
        loaded_data = FileLoader.load_file(file_path)
        the_data_manager = DataManager()
        the_data_manager.set_data(loaded_data)
        the_model = svm.SVR()
        the_data_manager.split_data(test_split=0.19, train_split=0.62)
        exp = Experiment(the_data_manager, the_model)

        exp.run_experiment()
        sum_of_squares_test = exp.get_sum_of_squares(SplitTypes.Test)

        expected = 6.708898437500002

        self.assertAlmostEqual(expected, sum_of_squares_test)
def load_merged_traces_experiments_from_list(cells, verbose=True):
    """
    Load experiments where IDRest traces have been merged.
    This function will try to load an experiment with merged IDRest traces for all cells
    in the list and just skip the ones for which it is not found. If no experiments were
    found, None is returned.
    
    Params:
        - cells: List with cell names or dictionary where the keys are the cell names.
    
    See also:
    load_single_traces_experiments_from_list()
    """
    if type(cells) is dict:
        cell_names = list(cells.keys())
    else:
        cell_names = cells

    expms = []

    for i in range(len(cell_names)):
        current_expm_name = 'Experiment_Cell_' + cell_names[i] + '_merged_idrest_traces.pkl'
        current_expm_path = os.path.join(EXPM_PATH, current_expm_name)
        try:
            current_expm = Experiment.load(current_expm_path, verbose=verbose)
            expms.append(current_expm)
        except Exception:
            # Skip cells whose experiment file is missing or unreadable.
            pass

    if expms:
        return expms
    else:
        return None
def load_single_traces_experiments_from_list(cells, verbose=True):
    """
    Load experiments where traces have been added separately.
    
    Params:
        - cells: List with cell names or dictionary where the keys are the cell names.
        
    See also:
    load_merged_traces_experiments_from_list()
    """
    if type(cells) is dict:
        cell_names = list(cells.keys())
    else:
        cell_names = cells

    expms = []

    for i in range(len(cell_names)):
        current_expm_name = 'Experiment_Cell_' + cell_names[i] + '_single_traces.pkl'
        current_expm_path = os.path.join(EXPM_PATH, current_expm_name)
        try:
            current_expm = Experiment.load(current_expm_path, verbose=verbose)
            expms.append(current_expm)
        except Exception:
            # Skip cells whose experiment file is missing or unreadable.
            pass

    if expms:
        return expms
    else:
        return None
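
# A minimal usage sketch for the two loaders above; the cell names are
# hypothetical, and EXPM_PATH must point at a directory of saved Experiment pickles.
if __name__ == '__main__':
    expms = load_merged_traces_experiments_from_list(['A1', 'A2'], verbose=False)
    if expms is None:
        expms = load_single_traces_experiments_from_list(['A1', 'A2'], verbose=False)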
Example #14
    def menuButton4(self, control):
        exampleHead = Head()
        exampleHeadModel = HeadModel(exampleHead)
        exampleHead.setSamplingFrequency(128)
        exampleHead.addRegistrationSite([0, 0, 0])
        
        exampleExperiment = Experiment(exampleHead.getSamplingFrequency(), 10.0)
        exampleExperiment.setStimulusTimes([[0.3, 1.75, 2.16, 3.87, 4.31, 5.183, 6.34, 7.13]])

        exampleStimulus = Stimulus('Stim', exampleHead)
        exampleStimulus.setStimulusTimes(exampleExperiment.getStimulusTimes()[0])
        exampleGenerator = GeneratorSine('Gen', exampleHead)
        exampleConnection = Connection('Con', exampleHead, exampleStimulus, exampleGenerator)

        exampleExperiment.setRecording(exampleHead.runSimulation(exampleExperiment.getDuration()))
        exampleExperiment.plotRecording()
Example #15
    def __init__(self, runner):
        Experiment.__init__(self, runner)

        runner.paths = \
            [ [ 1 ]
            , [ 2, 3, 4 ]
            , [ 2, 3, 5 ]
            , [ 6 ]
            ]

        runner.lineMap = \
            { 1: 16
            , 2: 17
            , 3: 26
            , 4: 21
            , 5: 23
            , 6: 30
            }

        self.minInput = -1000
        self.maxInput = 1000
Example #16
	def __init__(self, measurementSizeX, measurementSizeY, measurementSizeZ, psfSizeX, psfSizeY, psfSizeZ, homeDirectory):	
		Experiment.__init__(self,measurementSizeX, measurementSizeY, measurementSizeZ, psfSizeX, psfSizeY, psfSizeZ, homeDirectory)
	
		# parameters of the top sphere
		self.spherePositionX = self.objectSizeX / 2
		self.spherePositionY = self.objectSizeY / 2
		self.spherePositionZ = self.objectSizeZ / 2 - 27
		self.sphereRadius = 6
		self.zRadius = 2
		self.sphereIntensity = 100

		# parameters of the bottom sphere
		self.spherePosition2X = self.objectSizeX / 2
		self.spherePosition2Y = self.objectSizeY / 2
		self.spherePosition2Z = self.objectSizeZ / 2
		self.sphereRadius2 = 6
		self.zRadius2 = 2
		self.sphereIntensity2 = 100

		self.background = 0.000001

		self.directory = homeDirectory + "/Tester2/"
Example #17
    def menuButton3(self, control):
        exampleHead = Head()
        exampleHeadModel = HeadModel(exampleHead)
        exampleHead.setSamplingFrequency(10)
        exampleHead.addRegistrationSite([0, 0, 0])

        exampleExperiment = Experiment(exampleHead.getSamplingFrequency(), 1.0)
        exampleExperiment.setStimulusTimes([[0.3, 0.6], [0.5]])

        exampleStimulus1 = Stimulus('Stim1', exampleHead)
        exampleStimulus2 = Stimulus('Stim2', exampleHead)
        exampleStimulus1.setStimulusTimes(exampleExperiment.getStimulusTimes()[0])
        exampleStimulus2.setStimulusTimes(exampleExperiment.getStimulusTimes()[1])

        exampleGenerator1 = GeneratorNumberIncrementing('Gen1', exampleHead)
        exampleGenerator2 = GeneratorNumberIncrementing('Gen2', exampleHead)
        exampleConnection1 = Connection('Con1', exampleHead, exampleStimulus1, exampleGenerator1)
        exampleConnection2 = Connection('Con2', exampleHead, exampleStimulus2, exampleGenerator2)

        exampleExperiment.setRecording(exampleHead.runSimulation(exampleExperiment.getDuration()))
        output = str(exampleExperiment.getRecording())
        self.log.SetValue(output)
        self.logWindow.Show()
Example #18
import numpy as np

from skopt import gp_minimize, dummy_minimize
from skopt.space import Real
from skopt.plots import plot_convergence
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import pickle
import seaborn as sns
import os

from Experiment import Experiment


def eggholder_function(x):
    """Returns eggholder function. Minimum -959.6407 at (512,404.23)"""
    return -(x[1] + 47) * np.sin(np.sqrt(np.abs(x[0] / 2 + (
        x[1] + 47)))) - x[0] * np.sin(np.sqrt(np.abs(x[0] - (x[1] + 47))))
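
# Sanity check against the documented optimum (well-known value):
#     eggholder_function([512, 404.2319])  # ~ -959.6407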


if __name__ == '__main__':
    search_space = list()
    search_space.append(Real(-513, 513, 'uniform', name='x'))
    search_space.append(Real(-513, 513, 'uniform', name='y'))
    experiment = Experiment(1,
                            eggholder_function,
                            search_space,
                            numberOfEpochs=10,
                            numberOfRepetitions=1,
                            numberOfRandom=10)
    experiment.run('EI')
    experiment.plot_convergence()
    plt.show()
Example #19
 def __init__(self, project, dataset, session):
     Experiment.__init__(self, project, dataset, session)
     self.kind = 'ActiveLearning'
     self.labeling_method = None
import os
import pickle

from LIF import LIF
from Experiment import Experiment

def open_filtered_list(filtername):
    with open('/home/andrej/Dropbox/Arbeit/MKP/gif_fitting/BlueBrain/' + filtername + '_infos.pkl', 'rb') as f:
        filtered_list = pickle.load(f)
    return filtered_list

soms = open_filtered_list('som_animals')
vips = open_filtered_list('vip_animals')
pvs = open_filtered_list('pv_animals')

cell_names = list(vips.keys())
expms = []
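# NOTE: expm_path is assumed to be defined elsewhere (the directory holding the saved Experiment pickles).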

for i in range(len(vips)):
    current_expm_name = 'Experiment_Cell_' + cell_names[i] + '_merged_idrest_traces.pkl'
    current_expm_path = os.path.join(expm_path,current_expm_name)
    try:
        current_expm = Experiment.load(current_expm_path)
        expms.append(current_expm)
    except Exception:
        # Skip cells whose experiment file cannot be loaded.
        pass

my_expm = expms[0]

mylif = LIF(my_expm.dt)

mylif.Tref = 4.0
my_expm.detectSpikes_cython()
mylif.fitVoltageReset(my_expm,4.0)
tr = my_expm.trainingset_traces[0]
Example #21
def geraLista(tam):
    lista = []
    for i in range(tam):
        lista.append(i)
    random.shuffle(lista)
    return lista


def geraListaReversa(tam):
    lista = []
    for i in range(tam):
        lista.append(i)
    lista.reverse()
    return lista


'''
vet = bubbleSort([7, 6, 5, 4, 3, 2, 1])
print(vet.vet)
#print(vet.stats.swaps)
vet.sort()
print(vet.vet)
#print(vet.stats.swaps)
'''
intervals = [100, 200, 300, 400, 500]
vectMelhor = bubbleSort(geraListaOrdenada(500), intervals)
vectPior = bubbleSort(geraListaReversa(500), intervals)
vect = bubbleSort(geraLista(500), intervals)
E1 = Experiment([vect, vectMelhor, vectPior], intervals, title="Bubble Sort")
E1.calculaTempos(bubbleSort)
E1.plotar()
x = 1
Example #22
from Experiment import Experiment

exper = Experiment(
    '../Benchmarks',
    boogiePath='../Tools/Ours/VarElim/Boogie/Binaries/Boogie.exe',
    proverPath='../Tools/z3.exe',
    learnerPath='../Tools/Ours/IDT4Inv/build/',
)

exper.cleanIntermediateResult()
exper.setRunMode(Experiment.RunMode.IDT)

exper.setExperiment(limitedTime=60, itemSleep=0, roundSleep=0)

result = exper.experimentAllBplSerially()

exper.GenXlsxFromDict('../Results', result, infoExtra='Elim', titleAdd='Elim')
Example #23
# the StimuliSet for the experiment (consisting of set_size/2 transformed samples of each)
set_transform_function = transform.stochastic_release  # The function to use when generating each transformed stimulus
set_transform_params = dict(  # The parameters with which to execute the specified transformation function
    release_duration=3,
    number_of_vesicles=20,
    stimulus_duration=stimulus_duration,
    release_probability=0.5)

# Append underscore to condition name

# %% Running and controlling the experiment
# Set up the experiment
experiment = Experiment(stimuli_creation_params=stimuli_creation_params,
                        model=model_params,
                        training_params=training_params,
                        origin_transform_function=origin_transform_function,
                        origin_transform_params=origin_transform_params,
                        set_transform_function=set_transform_function,
                        set_transform_params=set_transform_params,
                        repetitions=number_of_repetitions)
# Run the experiment
experiment.run()

# Saving all experiment data
experiment.save(report_folder, condition_name)

# Save backup copy of this file
with open(__file__, 'r') as file:
    this_file = file.read()

with open(path.join(report_folder, f'{condition_name}experiment_template.py'),
          'w') as template_file:
    template_file.write(this_file)
 def toJson(self):
     conf = Experiment.toJson(self)
     conf['__type__'] = 'ValidationExperiment'
     return conf
seed = 1337
tensorboard_dir = path_prefix + '/tensorboard/'
#Set initial seed
np.random.seed(seed)
#Load flat matrix
dataset = SIDLoader(dataset_dir,
                    patch_fn=None,
                    keep_raw=False,
                    keep_gt=True,
                    set_id='test')
#Set up experiments
expList = []
expList.append(
    Experiment(name='Sony',
               model_fn={'fn': ModelBuilder.build_loadable_cchen},
               device="/device:GPU:0",
               tensorboard_dir=tensorboard_dir,
               checkpoint_dir='../checkpoint',
               dataset=dataset))
#expList.append(Experiment(name='cchen_sony_noflip',model_fn={'fn':ModelBuilder.build_cchen_sony_exp},device="/device:GPU:0",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='unet_s_sony_noflip',model_fn={'fn':ModelBuilder.build_unet_s_sony_exp},device="/device:GPU:1",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='deep_isp_noflip',model_fn={'fn':ModelBuilder.build_deep_isp_exp},device="/device:GPU:2",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='cchen_resize_sony_noflip',model_fn={'fn':ModelBuilder.build_cchen_sony_exp_resize},device="/device:GPU:3",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='unet_s_resize_sony_noflip',model_fn={'fn':ModelBuilder.build_unet_s_sony_exp_resize},device="/device:GPU:4",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='cchen_sony_flip',model_fn={'fn':ModelBuilder.build_cchen_sony_exp},device="/device:GPU:0",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='unet_s_sony_flip',model_fn={'fn':ModelBuilder.build_unet_s_sony_exp},device="/device:GPU:1",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='deep_isp_flip',model_fn={'fn':ModelBuilder.build_deep_isp_exp},device="/device:GPU:2",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='cchen_resize_sony_flip',model_fn={'fn':ModelBuilder.build_cchen_sony_exp_resize},device="/device:GPU:3",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='unet_s_resize_sony_flip',model_fn={'fn':ModelBuilder.build_unet_s_sony_exp_resize},device="/device:GPU:4",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='unet_self_amp2',model_fn={'fn':ModelBuilder.build_unet_self_scale},device="/device:GPU:0",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
#expList.append(Experiment(name='unet_amp_infer2',model_fn={'fn':ModelBuilder.build_unet_amp_infer},device="/device:GPU:1",tensorboard_dir=tensorboard_dir,checkpoint_dir=checkpoint_dir,dataset=dataset))
epoch = 0
    parameters = json.load(open(arg_parameter_file))

    # Verify parameters
    supp_fig_file_types = [
        'ps', 'eps', 'pdf', 'pgf', 'png', 'raw', 'rgba', 'svg', 'svgz', 'jpg',
        'jpeg', 'tif', 'tiff'
    ]
    assert exp['algorithm'] in [
        'FORCE', 'RMHL', 'SUPERTREX'
    ], "algorithm must be FORCE, RMHL or SUPERTREX."
    assert exp['task_type'] in [1, 2, 3], "task_type must be 1, 2 or 3."
    assert exp['n_segs'] > 0, "n_seg must be greater than zero."
    assert len(exp['arm_len']) == exp['n_segs'], "arm_len size " + str(
        len(exp['arm_len'])) + " is not the same as n_seg."
    assert len(exp['arm_cost']) == exp['n_segs'], "arm_cost size " + str(
        len(exp['arm_cost'])) + " is not the same as n_seg."
    assert exp['display_plot'] in ['Yes',
                                   'No'], "display_plot must be Yes or No"
    assert exp[
        'plot_format'] in supp_fig_file_types, "plot_format must be a valid image format for savefig: " + str(
            supp_fig_file_types)
    assert parameters[
        'n_train_trials'] >= 5, "n_train_trials must be greater than 4."

    # Simulate experiment
    experiment = Experiment(exp, parameters)  # Initialise experiment
    experiment.run(
        exp)  # Comment if you want to replot previously saved results
    experiment.plot(exp)  # Plot results and saves figures

    os.system('say "your program has finished"')
Example #27
def getExperiment(appName, howManyRuns):
    durations = getDurationsForWorkloadName(appName, howManyRuns)
    if appName.startswith("numa-remote"):
        appName = appName.replace("numa-remote", "numaR")
    return Experiment(1, appName, durations)
Example #28
import sys, argparse
from Experiment import Experiment

if __name__ == '__main__':
    # Parse command line input parameters
    parser = argparse.ArgumentParser(description='Run one experiment once')
    parser.add_argument('outdir', type=str, help='Output folder for results')
    parser.add_argument(
        '--n_trials',
        default=400,
        type=int,
        help='Number of trials in the experiment (default 400)')
    args = parser.parse_args()

    # Build experiment
    exp = Experiment()

    # Tweaks go here:
    aversion = True
    exp.brain.striatum.w = .25
    exp.brain.striatum.J_intra = exp.brain.striatum.w * exp.brain.striatum.J_inter
    exp.brain.vta.DA_pars['A_minus'] = 0.

    # Run experiment
    exp.train_brain(n_trials=args.n_trials,
                    aversion=aversion,
                    save_dir=args.outdir)
Example #29
    def testExperimentDesign(self):
        check = 0
        try:
            generateCompatibleFormat(
                exp_path=os.path.abspath("tests/NTU_Experiment"),
                device="eyelink",
                stim_list_mode='NA',
                start='start_trial',
                stop='stop_trial',
                eye='B')
            check = 1
        finally:
            self.assertEqual(check, 1)

        check = 0
        try:
            exp = Experiment(json_file=os.path.abspath(
                "tests/NTU_Experiment/NTU_Experiment.json"))
            check = 1
        finally:
            self.assertEqual(check, 1)

        check = 0
        try:
            exp.metaMatrixInitialisation(standardise_flag=False,
                                         average_flag=False)
            check = 1
        finally:
            self.assertEqual(check, 1)

        check = 0
        try:
            exp.analyse(parameter_list={"all"},
                        between_factor_list=["Subject_type", "Gender"],
                        within_factor_list=["Stimuli_type"],
                        statistical_test="anova",
                        file_creation=True)

            exp.analyse(parameter_list={"all"},
                        statistical_test="anova",
                        file_creation=True)

            exp.analyse(parameter_list={"all"},
                        statistical_test="ttest",
                        file_creation=True)

            exp.analyse(parameter_list={"all"},
                        statistical_test="RM_anova",
                        file_creation=True)

            exp.analyse(statistical_test="None", file_creation=True)

            check = 1
        finally:
            self.assertEqual(check, 1)

        check = 0
        try:
            subject_name = "sub_222"
            stimulus_name = "Alpha1"

            single_meta = exp.getMetaData(sub=subject_name, stim=stimulus_name)

            agg_type_meta = exp.getMetaData(sub=subject_name, stim=None)
            check = 1
        finally:
            self.assertEqual(check, 1)
Example #30
def Training():
    args = argument.get_args()
    logger = get_logger()
    currentModel = -1 if args.overwrite else dataProcessor.getLatestNetworkID()
    trainWorker = NetworkTraining()
    replayBuffer = []
    Loss = []
    WinRate = []

    rollout0 = None
    balance0 = 0
    if args.rolloutMode == 'network':
        rollout0 = None
        balance0 = 0
    elif args.rolloutMode == 'minmax':
        rollout0 = minMaxRolloutFn(1)
        balance0 = 1
    elif args.rolloutMode == 'random':
        rollout0 = randomRolloutFn(20)
        balance0 = 1
    elif args.rolloutMode == 'mix_minmax':
        rollout0 = minMaxRolloutFn(1)
        balance0 = args.balance
    elif args.rolloutMode == 'mix_random':
        rollout0 = randomRolloutFn(30)
        balance0 = args.balance
    else:
        rollout0 = None
        balance0 = 1

    for rd in range(1, args.trainround + 1):
        logger.info("round:%d" % rd)
        if currentModel != -1:
            model = dataProcessor.loadNetwork(args, currentModel)
        else:
            model = PolicyValueFn(args).to(device=args.device)
        eta = math.log(args.trainround / rd) + 1
        file = os.path.join(args.data_folder, f"selfplay-{currentModel+1}.txt")
        #rollout =randomRolloutFn(cnt=7)
        agent1 = Agent.SelfplayAgent(args.numOfIterations,
                                     model,
                                     file,
                                     eta,
                                     rollout=rollout0,
                                     balance=balance0)

        b = Board.Board(args.size, args.numberForWin)
        g = Game.Game(agent0=agent1, agent1=agent1, simulator=b)

        for i in range(1, args.epochs + 1):
            logger.info("epoch %d" % i)
            TimeID = timer.startTime("play time")
            g.run()
            timer.endTime(TimeID)
            timer.showTime(TimeID)
            if i % args.n_save_step == 0:
                agent1.saveData()
            if args.openReplayBuffer and len(replayBuffer) > args.buffersize:
                buffer = []
                for _ in range(args.buffersize):  # avoid shadowing the epoch loop variable
                    buffer.append(random.choice(replayBuffer))
                trainWorker.train(args.miniTrainingEpochs,
                                  currentModel,
                                  buffer,
                                  update=False)
            #if args.openReplayBuffer and len(replayBuffer):
            #    trainWorker.train(args.miniTrainingEpochs, currentModel, replayBuffer, update=False)
        agent1.saveData()
        dataList = dataProcessor.retrieveData(file)
        replayBuffer = replayBuffer + dataList
        if len(replayBuffer) > args.maxBufferSize:
            replayBuffer = replayBuffer[-args.maxBufferSize:]
        currentModel += 1
        TimeID = timer.startTime("network training")
        Loss.append(trainWorker.train(args.trainepochs, currentModel,
                                      dataList))
        timer.endTime(TimeID)
        timer.showTime(TimeID)

        #if args.openReplayBuffer:
        #    TimeID = timer.startTime("update replay buffer")
        #    replayBuffer = trainWorker.getReplayData(currentModel, dataList)
        #    timer.endTime(TimeID)
        #    timer.showTime(TimeID)
        agentTest = Agent.IntelligentAgent(args.numOfIterations,
                                           dataProcessor.loadNetwork(args),
                                           rolloutFn=rollout0,
                                           balance=balance0)

        exp = Experiment()
        WinRate.append(exp.evaluationWithBaseLine(agentTest))
        logger.info("WinRate: %.3f" % WinRate[-1])
    return Loss, WinRate
Example #31
	def __init__(self, measurementSizeX, measurementSizeY, measurementSizeZ, psfSizeX, psfSizeY, psfSizeZ, homeDirectory):	
		Experiment.__init__(self,measurementSizeX, measurementSizeY, measurementSizeZ, psfSizeX, psfSizeY, psfSizeZ, homeDirectory)
		self.numpoints = 100
		self.background = 0.000001
		self.directory = homeDirectory + "/RandomPoints2/"
Example #32
def main():
    experiment = Experiment()
    experiment.gatherResults()
    experiment.plotHist(experiment.results)
Example #33
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from builders import ErdosRenyi, Barabasi
from Experiment import Experiment
from Estimator import MonteCarlo, Greedy, GreedyRestart, Annealing

config = {
    'graph_maker': ErdosRenyi(n=100, p=5.0/100),
    'num_trials': 10000,
    'filename': 'results/Erdos-Renyi-1e2.tsv',
    'estimator_types': [MonteCarlo, Greedy, GreedyRestart, Annealing],
    'budgets': [i + 1 for i in range(10)]
}
e = Experiment(**config)
e.run()

config = {
    'graph_maker': ErdosRenyi(n=1000, p=5.0/1000),
    'num_trials': 10000,
    'filename': 'results/Erdos-Renyi-1e3.tsv',
    'estimator_types': [MonteCarlo, Greedy, GreedyRestart, Annealing],
    'budgets': [10 * (i + 1) for i in range(10)]
}
e = Experiment(**config)
e.run()

config = {
    'graph_maker': ErdosRenyi(n=10000, p=5.0/10000),
    'num_trials': 10000,
    'filename': 'results/Erdos-Renyi-1e4.tsv',
Example #34
            }
            # batch_size_train = 100
            # learning_rate = 0.01
            seed = 42
            uid = randomString(stringLength=6)
            # batch_size_test = 1000
            # n_epochs = 100

            device = torch.device(
                "cuda:0" if torch.cuda.is_available() else "cpu")
            if torch.cuda.is_available():
                torch.cuda.manual_seed(seed)
            else:
                torch.manual_seed(seed)

            experiment = Experiment(device)

            if params_dict["type"] == "Pruning":
                pruning = Pruning(percentage=params_dict["percentage"])
            elif params_dict["type"] == "Growing":
                growing = Growing(percentage=params_dict["percentage"])
            elif params_dict["type"] == "Shifting":
                growing = Growing(percentage=params_dict["percentage2"])
                pruning = Pruning(percentage=params_dict["percentage"])

            model_dict = {
                "network": {
                    'input_layer': {
                        "units": 784,
                    },
                    'hidden_layer': [{
    #dataset = dataset.MNISTDataset(config)

    #config = config.ConfigLSTM()
    #dataset = dataset.SequenceLearningOneToOne()
    # model = model.LSTM(input_size=10, seq_length=1, num_layers=1,
    #                    out_size=10, hidden_size=10, batch_size=1, device=config.DEVICE)
    dataX = [[(i % 4) / 4, (i % 4) / 4.0] for i in range(0, 1000)]
    dataY = []
    for i in range(len(dataX)):
        if i % 4 == 3:
            dataY.append(0)
        elif i % 4 == 2:
            dataY.append(1)
        elif i % 4 == 1:
            dataY.append(2)
        else:
            dataY.append(3)
    print(np.asarray(dataX))
    print(np.asarray(dataY))
    dataset = dataset.MLPToyDataset(dataX=np.asarray(dataX),
                                    dataY=np.asarray(dataY))
    #model = model.LSTM(input_size=config.INPUT_SIZE, seq_length=config.SEQ_LEN, num_layers=2,
    #                      out_size=config.OUTPUT_SIZE, hidden_size=5, batch_size=config.TRAIN_BATCH_SIZE,
    #                      device=config.DEVICE)
    #
    model = model.MLP(input_size=2, output_size=4)
    #

    experiment = Experiment(config=config, model=model, dataset=dataset)
    experiment.run()
Example #36
# set up the backend for the plot
plt.rcdefaults()

# construct the arms and build the corresponding environment
env = Environment([Bernoulli(p) for p in [0.7, 0.3]])
n_arms = env.n_arms
print("### Environment\n" + str(env))

# prepare the algorithm
ucb_params = StructType()
ucb_params.scaling = 1.0
alg = UCB(n_arms, ucb_params)
print("### Algorithm\n" + str(alg))

# prepare the experiment
exp = Experiment(1000)

# execute the experiment and collect the results
res = execute(exp, env, alg)  # type : Result

# compute statistics
stats = res.compute_statistics()

# plot a chart with the cumulative regret
cum_exp_regret = np.cumsum(stats.exp_regret)
print(cum_exp_regret)

plt.plot(np.arange(exp.n_steps), cum_exp_regret, linewidth=2.5, linestyle="-")
plt.legend(loc='upper left', frameon=False)
plt.xlabel("steps")
plt.ylabel("cumulative regret")
plt.show()
Example #37
    def __init__( self ):

        ## source table, needed for adding singletons
        self.mTableNameSource = "nrdb40"

        ## mapping table, needed for mapping domains onto nrdb100
        self.mTableNameMapping = "pairsdb_100x40"
        
        ## destination tables
        self.mTableNameFamilies = None
        self.mTableNameDomains = None

        self.mTableNameMappedDomains = None
        self.mTableNameMappedFamilies = None
        self.mTableNameRepresentatives = None
        
        self.mWorkSpace = "temp"

        self.mDatabase = "pairsdb"
        
        ## whether to remove domains overlapping with repeats
        self.mFilterRepeats = None

        self.mShortOptions  += 'D:f:d:e:g:h:s:r:m:'
        self.mLongOptions   += ['Database=',
                                'families=','domains=',
                                'mapped_families=','mapped_domains=',                                
                                'workspace=', "representatives=",
                                'source=', 'repeats=', "mapping=",
                                'filter_repeats','combine_overlaps',
                                'min_domain_length=']

        self.mShortOptions  += 'i:M:'
        self.mLongOptions   += ['input=', "filename_map=" ]
        self.mFileNameMapNids = None

        self.mMinSingletonLength = 30
        self.mMinDomainLength = 20

        self.mCombineOverlaps = 0

        Experiment.__init__(self)

        dbhandle = Pairsdb.Pairsdb()

        dbhandle.Connect(dbname=self.mDatabase)

        self.dbhandle = dbhandle

        self.mFileNameFamilies = None
        self.mFileNameDomains = None

        # get suffix
        x = string.rfind( self.mTableNameDomains, "_") 
        if x >= 1:
            self.mGeneralSuffix = self.mTableNameDomains[x:] 
        else:
            self.mGeneralSuffix = ""

        if not self.mTableNameDomains:
            raise ValueError("no table domains specified")
        if not self.mTableNameFamilies:
            raise ValueError("no table families specified")

        if string.find( self.mTableNameFamilies, ".") == -1:
            self.mTableNameFamilies = self.mWorkSpace + "." + self.mTableNameFamilies

        if string.find( self.mTableNameDomains, ".") == -1:
            self.mTableNameDomains = self.mWorkSpace + "." + self.mTableNameDomains

        self.mTableNrdb = Table_nrdb( self.dbhandle )

        if self.mTableNameSource:
            self.mTableNids = TableNids( self.dbhandle )
            self.mTableNids.SetName( self.mTableNameSource )
            self.mTableNrdb.SetName( self.mTableNameSource )
            
        self.mTableFamilies = TableFamilies( dbhandle, "generic" )
        self.mTableDomains = TableDomains( dbhandle, "generic" )

        self.mTableFamilies.SetName( self.mTableNameFamilies )        
        self.mTableDomains.SetName( self.mTableNameDomains  )
Example #38
import argparse
import os
import sys

from Expert import Expert
from Experiment import Experiment

parser = argparse.ArgumentParser()
parser.add_argument("--input","-i",type=str, default="input.txt", help="адрес папки с входными данными. По умолчанию ./input.txt")
parser.add_argument("--output","-o",type=str, default="output.txt", help="адрес файла с выходными данными. По умолчанию ./output.txt")
parser.add_argument("--compitence","-c", type=int, help="расчет коэффициентов компетентности экспертов, c заданой точностью")
parser.add_argument("--mediana","-m", action = "store_true", help="расчет медианы")
parser.add_argument("--generalized","-g", action = "store_true", help="расчет обобщенной выборки")
parser.add_argument("--significant","-s", help="Значимость оценок, коэффициент Спирмена, критический интервал", action = "store_true")
parser.add_argument("--kendal","-k", help="коэффициент конкордации Кендалла", action = "store_true")

args = parser.parse_args()
experiment = Experiment()


if (args.compitence):
    out = open(args.output, 'w')
    directory = args.input
    files = os.listdir(directory)
    a = files[0].endswith('txt')
    documents = list(filter(lambda x:  x.endswith('.txt'), files))

    # Add the experts to the experiment
    ex = Expert(args.input,files[0])
    for document in documents:
        expert = Expert(args.input,document)
        experiment.addExpert(expert)
Example #39
def addOrGetExperience(label, actionType):
    experiment = Experiment(label, actionType)
    EXPERIENCES[label] = experiment
    return EXPERIENCES[label]
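
# Design note: addOrGetExperience above overwrites any existing entry for 'label'.
# A cache-style variant (a sketch; reuses the same module-level EXPERIENCES dict,
# and the helper name is hypothetical):
def addOrGetExperienceCached(label, actionType):
    # Create the Experiment only if this label has not been seen before.
    if label not in EXPERIENCES:
        EXPERIENCES[label] = Experiment(label, actionType)
    return EXPERIENCES[label]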
Example #40
 def __init__(self):
     exp = Experiment()
     exp.go(100)
def create_experiments_from_list(cells, cell_type, verbose=True):
    """
    Creates Experiment objects for cells in cells, adds all existing traces and saves them.
    
    Params:
        - cells: List with cell names or dictionary where the keys are the cell names.
        
    """
    if type(cells) is dict:
        cell_names = list(cells.keys())
    else:
        cell_names = cells
    ncells = len(cell_names)

    for i in range(ncells):
        PATH = os.path.join(ROOT_PATH, cell_names[i])
        animal_files = sorted(os.listdir(PATH))
        ntraces = int(len(animal_files) / 2)

        current_exp = Experiment('Cell_' + cell_names[i] + '_single_traces', cell_type=cell_type)
        exp_merged_traces = Experiment('Cell_' + cell_names[i] + '_merged_idrest_traces', cell_type=cell_type)

        nincluded_idrest_traces = 0
        for j in np.arange(ntraces):
            # files end with 'recordingType_recordingNumber.ibw'
            file_split = str.split(animal_files[j][0:-4], '_')
            file_identifier = file_split[-2] + '_' + file_split[-1] + '.ibw'

            current_recording_type = file_split[-2]

            # find indices of matching files in folder (current file always comes first because it's always Ch0)
            file_idc = [i for i, elem in enumerate(animal_files) if file_identifier in elem]
            current_file = animal_files[file_idc[0]]
            voltage_file = animal_files[file_idc[1]]

            current_exp.add_trainingset_trace(os.path.join(PATH, voltage_file), 10 ** -3,
                                              os.path.join(PATH, current_file), 10 ** -12, FILETYPE='Igor',
                                              verbose=verbose)
            tr = current_exp.trainingset_traces[j]
            tr.recording_type = current_recording_type
            tr.estimate_input_amp()

            if current_recording_type == 'IDRest':
                exp_merged_traces.add_trainingset_trace(os.path.join(PATH, voltage_file), 10 ** -3,
                                                        os.path.join(PATH, current_file), 10 ** -12, FILETYPE='Igor',
                                                        verbose=verbose)
                tr = exp_merged_traces.trainingset_traces[nincluded_idrest_traces]
                tr.recording_type = current_recording_type
                tr.estimate_input_amp()
                nincluded_idrest_traces += 1

        if not len(exp_merged_traces.trainingset_traces) < 3:
            exp_merged_traces.mergeTrainingTraces()
            exp_merged_traces.save(os.path.join(EXPM_PATH), verbose=verbose)

        current_exp.save(os.path.join(EXPM_PATH), verbose=verbose)
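
# Hypothetical driver for create_experiments_from_list; the cell names and
# cell_type are placeholders, and ROOT_PATH/EXPM_PATH must be configured.
if __name__ == '__main__':
    create_experiments_from_list(['A1', 'A2'], cell_type='vip', verbose=True)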
import time, datetime, os, glob

import numpy as np

from Experiment import Experiment
# ModelBuilder and SIDLoader are assumed to be provided by this project's modules.

path_prefix = '.'
checkpoint_dir = path_prefix + '/chk'
dataset_dir = path_prefix + '/dataset'
valid_freq = 100
seed = 1337
tensorboard_dir = path_prefix + '/tensorboard'
#Set initial seed
np.random.seed(seed)
#Set up experiments
expList = []
expList.append(
    Experiment(name='unet_self_amp2',
               model_fn={'fn': ModelBuilder.build_unet_self_scale},
               device="/device:GPU:0",
               tensorboard_dir=tensorboard_dir,
               checkpoint_dir=checkpoint_dir))
expList.append(
    Experiment(name='unet_amp_infer2',
               model_fn={'fn': ModelBuilder.build_unet_amp_infer},
               device="/device:GPU:1",
               tensorboard_dir=tensorboard_dir,
               checkpoint_dir=checkpoint_dir))

#Load flat matrix
dataset = SIDLoader(dataset_dir,
                    patch_fn=SIDLoader.patch_unprocessed_sony,
                    keep_raw=True,
                    keep_gt=True)
validSet = None
Example #43
 def toJson(self):
     conf = Experiment.toJson(self)
     conf['__type__'] = 'ActiveLearningExperiment'
     conf['labeling_method'] = self.labeling_method
     conf['conf'] = self.conf.toJson()
     return conf
Example #44
test_dataset = torchvision.datasets.FashionMNIST(
    '../data/',
    train=False,
    download=True,
    transform=torchvision.transforms.Compose(
        [torchvision.transforms.ToTensor(),
         ReshapeTransform((-1, ))]))

dataset = torch.utils.data.ConcatDataset([train_dataset, test_dataset])

kf = KFold(n_splits=5, shuffle=True, random_state=seed)
for i_fold, (train_index, test_index) in enumerate(kf.split(dataset)):
    print("Fold: {}".format(i_fold + 1))
    # new fold - network from scratch
    experiment = Experiment(device)
    model = Network(model_dict)
    params_dict["fold"] = i_fold + 1
    # set the dataloaders for the fold
    train = torch.utils.data.Subset(dataset, train_index)
    test = torch.utils.data.Subset(dataset, test_index)
    train_loader = torch.utils.data.DataLoader(
        train, batch_size=params_dict["batch_size_train"], shuffle=True)
    test_loader = torch.utils.data.DataLoader(
        test, batch_size=params_dict["batch_size_test"], shuffle=True)
    # set up the experiment
    experiment.set_metadata(params_dict)
    experiment.set_network(model_dict)
    experiment.set_loaders(train_loader, test_loader)
    experiment.set_loss(torch.nn.CrossEntropyLoss())
Example #45
import time, datetime, os, glob

import numpy as np

from Experiment import Experiment
# ModelBuilder is assumed to be provided by this project's modules.

path_prefix = '.'
checkpoint_dir = path_prefix + '/chk'
dataset_dir = path_prefix + '/dataset'
valid_freq = 20
seed = 1337
tensorboard_dir = path_prefix + '/tensorboard'
#Set initial seed
np.random.seed(seed)
#Set up experiments
expList = []
expList.append(
    Experiment(name='cchen_sony_noflip',
               model_fn={'fn': ModelBuilder.build_cchen_sony_exp},
               device="/device:GPU:0",
               tensorboard_dir=tensorboard_dir,
               checkpoint_dir=checkpoint_dir))
expList.append(
    Experiment(name='unet_s_sony_noflip',
               model_fn={'fn': ModelBuilder.build_unet_s_sony_exp},
               device="/device:GPU:1",
               tensorboard_dir=tensorboard_dir,
               checkpoint_dir=checkpoint_dir))
expList.append(
    Experiment(name='deep_isp_noflip',
               model_fn={'fn': ModelBuilder.build_deep_isp_exp},
               device="/device:GPU:2",
               tensorboard_dir=tensorboard_dir,
               checkpoint_dir=checkpoint_dir))
expList.append(
Example #46
def test():

    # Building the wrapper
    wrapper = Experiment(test=True)

    wrapper.test()
Example #47
    var = argv[2]
    output_file = argv[3]

    f = open(filename)
    experiment_values = []
    times = []
    experiments = []
    for line in f:
        l = line.split()
        # remove comments
        if '#' in l:
            l = l[0:l.index('#')]
        if len(l) == 0:
            # create experiment objects
            for exp_val in experiment_values:
                exp = Experiment(times, exp_val, var)
                experiments.append(exp)
            experiment_values = []
            times = []
            continue

        times.append(l[0])
        if (experiment_values == []):
            nof_exp_per_line = len(l) - 1
            experiment_values = [[] for i in range(len(l) - 1)]
        for i in range(1, len(l)):
            experiment_values[i - 1].append(float(l[i]))

    if experiment_values != []:
        for exp_val in experiment_values:
            exp = Experiment(times, exp_val, var)
Example #48
    plt.xlabel('Training epochs (1e3)', fontsize=28)
    plt.ylabel('Mean reward over ' + str(bracket) + ' epochs', fontsize=28)
    #plt.title('Mean Reward over ' + str(bracket) + ' epochs with training')
    plt.tick_params(labelsize=24)
    plt.savefig(
        'D:\\Documents_Data\\UoM\\Projects\\First Year\\RL - Bioprocesses\\WorkBooks\\Reinforcement-Learning-for-Bioprocesses\\Accumulated_reward_'
        + str(pNo_mean) + 'MC.png')


# running experiment
for i in range(0, xi_.shape[0]):
    for j in range(0, disc1.shape[0]):
        for k in range(0, disc2.shape[0]):
            #run training
            env = Model_env(p, steps_, tf, x0, modulus)  # calling environment
            agent = greedye_MCL(num_actions, modulus, state_UB, disc1[j],
                                disc2[k], steps_)  # calling agent
            experiment = ExpTrain(env, agent, controls, episodes_train,
                                  xi_[i])  # calling training experiment
            reward_training[:, i, j, k], d = experiment.simulation(
            )  # running training experiment
            agent = Greedye_MCLearned(num_actions, d,
                                      steps_)  # calling learned agent
            exp_done = ExpValid(env, agent, controls, episodes_valid,
                                0)  # calling validation experiment
            reward_validation[:, i, j, k] = exp_done.simulation(
            )  # running validation experiment
            reward_train_mean = EpochNoMean(reward_training[:, i, j, k],
                                            bracket)
            x_o = Plotting(reward_train_mean, bracket, "rule_allocation")
Example #49
 kw = ["cancer"]
 x.insert_keywords(kw)
 dic = {"samplecount" : "[200 TO 500]"}
 #dic = {}
 x.insert_criteria(dic)
 x.url_generator("experiments")                            # Creates the URL to be requested
 request = File_Requester(x.get_url())
 #print(request.get_url())
 request.do_request()
 request.get_request()
 request.do_content()                               # Returns the content of the requested URL
 #print(request.get_content())
 decoder = MetaData(request.get_content(), request.get_url())
 decoder.decode_json()                   # Decodes the content JSON
 #print(decoder.get_decoded())
 exp = Experiment(decoder.get_decoded()["experiments"]["experiment"][13])       # Creates an Experiment instance
 #print(exp.__dict__)
 #print(exp.accession)
 #exp.do_files_url()
 #print(exp.get_files_url())
 #exp.json_files_requester()
 #print(exp.get_json_files_request())
 #exp.json_files_decoder()
 #print(exp.get_decoded_json_files())
 #exp.create_files()
 #print(exp.files)
 #exp.download_all_files("C:/Users/utilizador/Google Drive/drive/Bioinformática/1_ano/2_Semestre/Projeto/Scripts/Downloads")
 exp.do_idf_file("C:/Users/utilizador/Google Drive/drive/Bioinformática/1_ano/2_Semestre/Projeto/Scripts/Downloads")          # Downloads and creates a data structure with the IDF data
 #print(exp.get_idf_file())
 exp.do_sdrf_file("C:/Users/utilizador/Google Drive/drive/Bioinformática/1_ano/2_Semestre/Projeto/Scripts/Downloads") # Downloads and creates a data structure with the SDRF data
 #print(exp.get_sdrf_file())
Example #50
from Experiment import Experiment

exper = Experiment(
    '../Benchmarks',
    boogiePath='../Tools/Ours/Exp2/Boogie/Binaries/Boogie.exe',
    proverPath='../Tools/z3.exe',
)

exper.cleanIntermediateResult()
#exper.setRunMode(Experiment.RunMode.IDT)
exper.setRunMode(Experiment.RunMode.DT_Penalty)
exper.IDTParser = True
exper.setExperiment(limitedTime=60, itemSleep=0, roundSleep=0)

result = exper.experimentAllBplSerially()

exper.GenXlsxFromDict('../Results', result, infoExtra='Exp2', titleAdd='Exp2')
    Plotter.save_confusion_matrix_as_png(confMatrix, [l for l in labels_all],
                                         'tmp.png',
                                         normalize=True,
                                         cmap=plt.cm.YlGn)


temp()
exit()
server = "/run/user/477015036/gvfs/sftp:host=sonic.sb.dfki.de,user=mumu01"
exps_root_dir = server + "/home/mumu01/exps"
# exps = ["53", "54"]
exps = ["55"]

for exp_num in exps:
    exp_dir = exps_root_dir + '/exp' + exp_num
    exp_obj = Experiment(exp_dir)
    m_g = MetricsGenerator(exp_obj, eval_dataset="cs")
    # m_g.complete_pipeline()
    # m_g.pipeline_3()
    # m_g.pipeline_4()
    m_g.pipeline_5()
# evaluator = CityscapesEvaluationWrapper(exp_obj, "cityscapes")
# if re_eval:

# print("Evaluating for cityscapes")
# evaluator.evaluate_using_CS_script()

# evaluator = CityscapesEvaluationWrapper(exp_obj, "gta")
# if re_eval:
#     print("Evaluating for GTA")
#     evaluator.evaluate_using_CS_script(is_gta=True)
 def fromJson(obj, session):
     experiment = ValidationExperiment(obj['project'], obj['dataset'],
                                       session)
     Experiment.expParamFromJson(experiment, obj)
     return experiment
	def __init__(self, measurementSizeX, measurementSizeY, measurementSizeZ, psfSizeX, psfSizeY, psfSizeZ, homeDirectory):	
		Experiment.__init__(self,measurementSizeX, measurementSizeY, measurementSizeZ, psfSizeX, psfSizeY, psfSizeZ, homeDirectory)
		self.numpoints = 7
		self.background = 100
		self.directory = homeDirectory + "/RandomSpheresZRatio/"
Example #54
                          gamma=params[3],
                          n_jobs=-1,
                          colsample_bytree=params[4],
                          eval_metric='mlogloss')  # subsample=0.9
    cv = RepeatedStratifiedKFold(n_splits=3, n_repeats=1, random_state=1)
    n_scores = cross_val_score(model,
                               X_train,
                               y_train,
                               scoring='accuracy',
                               cv=cv,
                               n_jobs=-1,
                               verbose=1)
    return 1 - mean(n_scores)


if __name__ == '__main__':
    search_space = init_space()
    experiment = Experiment(evaluate_model,
                            search_space,
                            numberOfEpochs=129,
                            numberOfRepetitions=1,
                            numberOfRandom=10)
    experiment.run(['EI'])
    experiment.plot_convergence()
    plt.legend()
    plt.show()

#===============================================================================
# # Sources and references
# https://www.kaggle.com/prashant111/a-guide-on-xgboost-hyperparameters-tuning
Example #55
def main(jsonPath, testRun, setTarg, seed):
	"""
	example:
		python Experimenter.py -j "../../Experiments/experiments.json"
	"""
	# jsonPath = "./experiments.json"
	expIDs = getExpIDs(jsonPath)
	for expID in expIDs:
		experiment = Experiment(expID, loadPath=jsonPath)
		print("--- Experiment with ID:", experiment.ID, "---")
		if experiment.stage == "ready":
			trainModel(experiment, testRun, setTarg, seed)
			experiment.stage = "trained"
			experiment.saveToJson(jsonPath)
		if experiment.stage == "trained":
			writeModelOutput(experiment, testRun, seed)
			experiment.stage = "readyForTest"
			experiment.saveToJson(jsonPath)
		if experiment.stage == "readyForTest":
			testModel(experiment, testRun, setTarg, seed)
			experiment.stage = "tested"
			experiment.saveToJson(jsonPath)
		if experiment.stage == "tested":
			print("Model is trained and tested! results:", str(experiment.evaluation))
            print(
                "taking care of ",
                type(normalizer).__name__,
                "and feature selector ",
                type(feature_eliminator).__name__,
                "model",
                type(the_model).__name__,
            )
            file_path = "../Datasets/HIV_37_Samples/MergedDataset.csv"
            loaded_data = FileLoader.load_file(file_path)
            # feature_eliminator = SelectKBest(f_regression,k=k_value)
            the_data_manager = DataManager(feature_eliminator, normalizer=normalizer)
            the_data_manager.set_data(loaded_data)
            the_data_manager.split_data(test_split=0.15, train_split=0.70)

            exp = Experiment(the_data_manager, the_model)

            exp.run_experiment()
            arr_selected = feature_eliminator.get_support(indices=True)

            if (
                exp.get_r2(SplitTypes.Train) > 0
                and exp.get_r2(SplitTypes.Valid) > 0
                and exp.get_r2(SplitTypes.Test) > 0
            ):
                print(
                    feature_eliminator.get_support(indices=True),
                    type(normalizer).__name__,
                    type(feature_eliminator).__name__,
                    type(the_model).__name__,
                    "Fitness",