Esempio n. 1
0
def main(seed=0):
    """Run the five benchmark experiment configurations.

    Each configuration is executed three times and the three resulting
    logs are plotted together under the name 'ExperimentN'.

    seed -- kept for interface compatibility; currently unused.
        NOTE(review): presumably intended to seed the experiments — confirm.
    """
    # Positional (flag, p1, p2) arguments for Experiment; their meaning is
    # not visible from this file.  Order defines Experiment1..Experiment5.
    configurations = [
        (False, 0.7, 0.001),
        (True, 0.4, 0.001),
        (True, 0.1, 0.001),
        (True, 0.7, 0.01),
        (True, 0.7, 0.1),
    ]
    for number, config in enumerate(configurations, start=1):
        exp = Experiment(*config)
        # Three independent runs per configuration, one log each.
        logs = [exp.setExperiment() for _ in range(3)]
        getPlot(logs[0], logs[1], logs[2], 'Experiment%d' % number)
    return
Esempio n. 2
0
def open_file(filename, trigger, *args):
    """Load *filename* into an Experiment and register it globally.

    filename -- data file name; its 4-character extension is replaced by
        "_data.bin" to locate the binary companion file, and the stem is
        used as the registry key.
    trigger -- passed straight through to Experiment.Experiment.
    *args -- optionally a single stim_length value.

    Raises TypeError when more than one extra argument is supplied
    (the original code silently left ``exp`` unbound in that case and
    crashed with NameError two lines later).
    """
    binfile = filename[0:-4] + "_data.bin"
    if len(args) > 1:
        raise TypeError(
            "open_file() accepts at most one extra argument (stim_length)")
    if args:
        exp = Experiment.Experiment(binfile,
                                    filename,
                                    trigger,
                                    stim_length=args[0])
    else:
        exp = Experiment.Experiment(binfile, filename, trigger)
    # Register under the file stem and collect its stimulus names.
    experiments[filename[0:-4]] = exp
    names.update(exp.stim_names)
Esempio n. 3
0
    def importdata(self):

        # Data[0] = Voltage, Data[1] = Current, Data[2] = Time

        self.trainData, self.testData = loading.Loader().dataload()

        self.myExp = Experiment('Experiment 1', .1)

        for n in self.trainData:
            print "Trials"
            print n
            self.myExp.addTrainingSetTrace(n[0],
                                           self.V_units,
                                           n[1],
                                           self.I_units,
                                           np.size(n[2]) / 10,
                                           FILETYPE='Array')
            self.myExp.trainingset_traces[n].setROI([[1000, 120000.0]])

        for n in self.testData:
            self.myExp.addTestSetTrace([n][0],
                                       self.V_units, [n][1],
                                       self.I_units,
                                       np.size([n][2]) / 10,
                                       FILETYPE='Array')
            self.myExp.testset_traces[n].setROI([[1000, 20000]])

        self.fitaec(self, self.myExp)
def adience_baseline_experiment_sgd(dataset):
    """Run a full experiment (training + test) with the baseline
    configuration and the Nesterov SGD optimizer on the Adience dataset.

    :param dataset: dataset used for the experiment.
    """
    #    RUN ADIENCE BASELINE

    # Baseline ResNet 2x4 architecture wrapped in a Model.
    net = Resnet_2x4()
    model = Model(inputs=net.inputs, outputs=net.get_net())

    # Pair the dataset with the model.
    experiment = Experiment.Experiment(dataset, model)

    # Training hyper-parameters.
    arguments = {
        'epochs': 100,
        'optimizer': SGD,
        'learning_rate': 1e-2,
        'momentum': 0.9,
        'loss_fn': 'categorical_crossentropy',
        'metrics': ['accuracy'],
    }

    # Train with checkpointing, then evaluate on the test split.
    experiment.train('adience_baseline',
                     '/TFG/ordinal_unimodal_mio/src/logs/',
                     arguments=arguments,
                     callbacks=['ModelCheckpoint'])
    experiment.test()
def adience_poisson_experiment(dataset, tau_mode, tau=1.):
    """Run a full experiment (training + test) with the Poisson
    configuration on the Adience dataset.

    :param dataset: dataset used for the experiment.
    :param tau_mode: how tau is handled: "non_learnable" or "sigm_learnable".
    :param tau: tau value (initial value if learnable, constant otherwise).
        NOTE(review): currently unused by the body — confirm intent.
    :raises ValueError: if tau_mode is not a recognised mode.
    """
    #    RUN ADIENCE POISSON
    # Validate with a real exception: ``assert`` is stripped under -O,
    # which would let an invalid mode through to the network constructor.
    if tau_mode not in ("non_learnable", "sigm_learnable"):
        raise ValueError(
            "tau_mode must be 'non_learnable' or 'sigm_learnable', got %r"
            % (tau_mode,))

    #Create net architecture
    poisson_resnet = Resnet_2x4_poisson(tau_mode)

    model = Model(inputs=poisson_resnet.inputs,
                  outputs=poisson_resnet.get_net())

    #Create experiment
    experiment = Experiment.Experiment(dataset, model)

    #Train
    experiment.train('adience_poisson_t=' + tau_mode,
                     '/TFG/ordinal_unimodal_mio/src/logs/')

    #Test
    experiment.test()
Esempio n. 6
0
def suggest_analysis_layout(solid_runs):
    """Generate a bash script to build the analysis directory scheme

    Given a set of SolidRuns, print a set of script commands for running the
    build_analysis_dir.py program to create and populate the analysis directories.

    The script can be edited before being executed by the user.

    Arguments:
      solid_runs: a list of SolidRun objects.
    """
    # Shebang plus a banner comment at the top of the generated script.
    print "#!/bin/sh\n#\n# Script commands to build analysis directory structure"
    for run in solid_runs:
        build_analysis_dir_cmd = 'build_analysis_dir.py'
        # Analysis dir lives under the cwd, named after the run directory.
        top_dir = os.path.abspath(
            os.path.join(os.getcwd(), os.path.basename(run.run_dir)))
        for sample in run.samples:
            for project in sample.projects:
                # Create one experiment per project
                cmd_line = []
                expt = Experiment.Experiment()
                expt.name = project.getProjectName()
                expt.type = "expt"
                expt.sample = project.getSample().name
                expt.library = project.getLibraryNamePattern()
                # Print the arguments for the layout
                cmd_line.extend(
                    (build_analysis_dir_cmd, "--top-dir=%s_analysis" % top_dir,
                     "--link=absolute", "--naming-scheme=partial"))
                cmd_line.append(expt.describe())
                cmd_line.append(run.run_dir)
                # One shell command per project, arguments continued with
                # backslash-newlines, preceded by a '#' separator line.
                print "#\n%s" % (' \\\n').join(cmd_line)
Esempio n. 7
0
def experiment(args, logger, dataProcessor):
    """Evaluate a loaded network against min-max search agents and log the result."""
    runner = Experiment.Experiment()
    net = dataProcessor.loadNetwork(args, 0)
    evaluation = runner.evaluationWithDifferentMinMaxSearchAgent(net)
    logger.info(evaluation)
Esempio n. 8
0
def make_experiment():
    """Build an Experiment from module-level configuration.

    Reads the module globals ``agent``, ``env``, ``numPolicyChecks``,
    ``numEpisodesPerCheck`` and ``numTrainEpisodes``, logs each setting
    (sorted by name), and returns the constructed Experiment.
    """
    kwargs = {
        'agent': agent,
        'environment': env,
        'numPolicyChecks': numPolicyChecks,
        'numEpisodesPerCheck': numEpisodesPerCheck,
        'numTrainEpisodes': numTrainEpisodes,
    }
    # Dict items already sort by key first, so no explicit sort key is
    # needed; lazy %-style logging args avoid formatting when the log
    # level is disabled.
    for key, value in sorted(kwargs.items()):
        logging.info('\t%s = %s', key, value)
    return Experiment.Experiment(**kwargs)
Esempio n. 9
0
def train(args, logger, dataProcessor):
    """Plot and save the training-loss and winning-rate curves, then log
    both series as comma-separated lines.

    args -- run configuration; only ``args.trainround`` (number of
        training rounds, the x-axis length) is used here.
    logger -- destination for the textual loss/win-rate dump.
    dataProcessor -- unused here; kept for interface compatibility with
        the sibling entry points in this module.
    """
    Loss, WinRate = Training.Training()
    exp = Experiment.Experiment()

    # One figure per metric; each figure is saved under its title.
    for series, title, ylabel in (
            (Loss, "Training Loss Curve", 'Training Loss'),
            (WinRate, "Winning Rate Curve", 'Winning Rate')):
        fig = exp.simplePlot(range(args.trainround), series, title,
                             xlabel='Training Rounds', ylabel=ylabel,
                             color='blue', linestyle='-.')
        exp.saveFig(fig, name=title)

    logger.info("Loss and Winning Rate")
    # str.join replaces the quadratic '+='-in-a-loop string building while
    # reproducing the original trailing-comma format exactly.
    logger.info(''.join(str(v) + ',' for v in Loss))
    logger.info(''.join(str(v) + ',' for v in WinRate))
def adience_baseline_experiment(dataset):
    """Run a full experiment (training + test) with the baseline
    configuration on the Adience dataset.

    :param dataset: dataset used for the experiment.
    """
    #    RUN ADIENCE BASELINE

    # Baseline ResNet 2x4 architecture wrapped in a Model.
    net = Resnet_2x4()
    model = Model(inputs=net.inputs, outputs=net.get_net())

    # Pair the dataset with the model.
    experiment = Experiment.Experiment(dataset, model)

    # Train with default arguments, then evaluate on the test split.
    experiment.train('adience_baseline', '/TFG/ordinal_unimodal_mio/src/logs/')
    experiment.test()
Esempio n. 11
0
    def simulate(self):
        """Run every configured experiment in turn and collect the results."""
        for exp_index in range(self.num_experiments):

            # Fresh experiment instance for this run.
            current = Experiment(self.run_time, self.sizeBlue, self.sizeRed)

            print("Experiment: ", exp_index, "\t")

            agents = self.create_agents()

            # Normalizer for accumulated utility.
            # why 0.6? (constant kept from the original — reason unknown)
            current.normalizer = 0.6 * (len(agents) - 1)

            # Play every round of the run.
            for current_round in range(self.run_time):
                self.simulate_round(current_round, agents, current,
                                    self.print_round_results)

            self.results.append(current)
Esempio n. 12
0
    def run(self,
            csv_f=None,
            csv_b=None,
            csv_e=None,
            experiments=None,
            verbose=False):
        """Run the configured experiment(s) ``self.repeats`` times each.

        csv_f/csv_b/csv_e -- CSV output file handle, prefix base and
            suffix passed through to Experiment.run; when csv_f is given a
            per-run '<base>exp,num,' prefix is built for each repeat.
        experiments -- a single experiment index to run, or None for all
            indices of the module-level EXPERIMENTS table.
        verbose -- print a header per experiment, a dot per repeat and the
            accumulated reward (Python 2 print statements).

        Returns a list with the summed rewards of each experiment, or
        None when the walker/grid are not set up.
        """

        # 1. Can't run if no one or no where
        if not self.walker or not self.grid or not self.walker.grid():
            return None

        # 2. Start with no results
        results = []

        # 3. Loop for all possible experiments
        # NOTE(review): '== None' should idiomatically be 'is None'.
        if experiments == None:
            exps = range(len(EXPERIMENTS))
        else:
            exps = [experiments]
        for exp in exps:

            # 4. Determine the rewards for the experiment
            initial_rewards = REWARDS[EXPERIMENTS[exp][INITIAL]]
            changed_rewards = REWARDS[EXPERIMENTS[exp][CHANGED]]
            rewards = (initial_rewards, changed_rewards)

            # 5. Create the experiment
            experiment = Experiment.Experiment(walker=self.walker,
                                               grid=self.grid,
                                               rewards=rewards,
                                               switch=self.switch,
                                               stop=self.stop)
            if verbose:
                print "%s %2d" % (self.walker, exp),
                sys.stdout.flush()

            # 6. Loop for the number of repeats
            prefix = None
            # NOTE(review): 'rewards' is reused as a numeric accumulator
            # here, shadowing the tuple built in step 4 (already consumed
            # by the Experiment constructor above).
            rewards = 0
            for num in range(self.repeats):

                # 7. Run the experiment
                experiment.reset()
                if csv_f:
                    if csv_b:
                        prefix = '%s%d,%d,' % (csv_b, exp, num)
                    else:
                        prefix = '%d,%d,' % (exp, num)
                if verbose:
                    sys.stdout.write('.')
                    sys.stdout.flush()
                rewards += experiment.run(csv_f=csv_f,
                                          csv_b=prefix,
                                          csv_e=csv_e)

            # 8. Save the results
            if verbose:
                print rewards
            results.append(rewards)

        # 9. Return the total reward
        return results
Esempio n. 13
0
 def xxx_testNoPerturbLong(self):
     """Disabled regression test: an unperturbed full run lasts STOP_TURN turns."""
     grid = Grid.Grid()
     learner = QLearner.QLearner()
     outcome = Experiment(grid, learner).run()
     self.assertEqual(len(outcome), STOP_TURN)
Esempio n. 14
0
 def csv_header(self):
     """Return the full CSV header: the fixed prefix plus the experiment's own columns."""
     probe = Experiment.Experiment(walker=self.walker, grid=self.grid)
     return CSV_HEADER + probe.csv_header()
Esempio n. 15
0
            '  o-o-o-o-o       o-o-o-o-o  ',
            '  |       |       |       |  ',
            'o-o-o   o-o-o   o-o-o   o-o-o',
        ],
        'start position': (7, 0),
        # 'reward locations': {(8, 14): 1},  # one reward, no switch
        'reward locations': {},  # no rewards
        'change reward location':
        False,
        'allow reversals':
        True
    },
    # {
    #     'maze type': 'Binary',
    #     'maze name': '6level_binary_maze',
    #     'number of levels': 6,
    #     # 'reward locations': {(5, 17): 10}, # no reward switching
    #     'reward locations': {}, # no reward switching
    #     'change reward location': False,
    #     # 'reward locations': {0: {(3,2): 100}, 50: {(3,3): 100}}, # change-episode, location (index from 0), reward
    #     'allow reversals': True
    # },
    # To test additional environments, insert here (must match with number of agents)
]

# Run experiment
# NOTE(review): this rebinds the module-level name ``Experiment`` (the
# class constructed here) to the resulting instance, shadowing the class
# for the rest of the module — any later ``Experiment(...)`` call would
# fail.  Consider a lowercase variable name instead.
Experiment = Experiment(name=experiment_name,
                        environments=env,
                        agents=agents,
                        nb_episodes=3000,
                        nb_trials=50)
Esempio n. 16
0
from Experiment import *
from ExpList_1_1 import *

# Run Exp7, Exp8 and Exp9 in sequence: one Experiment instance each,
# started immediately.  (Replaces three copy-pasted stanzas.)
for exp_config in (Exp7, Exp8, Exp9):
    experiment = Experiment(exp_config)
    experiment.start()
Esempio n. 17
0
        file_names = os.listdir(path_data)
        for file_name in file_names:
            if '.abf' in file_name:
                ext = '.abf'
                break
            elif '.mat' in file_name:
                ext = '.mat'
                break

        # Load AEC data
        filename_AEC = path_data + cell_name + '_aec' + ext
        (sampling_timeAEC, voltage_traceAEC,
         current_traceAEC) = load_AEC_data(filename_AEC)

        # Create experiment
        experiment = Experiment('Experiment 1', sampling_timeAEC)
        experiment.setAECTrace(voltage_traceAEC,
                               10.**-3,
                               current_traceAEC,
                               10.**-12,
                               len(voltage_traceAEC) * sampling_timeAEC,
                               FILETYPE='Array')

        # Load training set data and add to experiment object
        filename_training = path_data + cell_name + '_training' + ext
        (sampling_time, voltage_trace, current_trace,
         time) = load_training_data(filename_training)
        experiment.addTrainingSetTrace(voltage_trace,
                                       10**-3,
                                       current_trace,
                                       10**-12,
Esempio n. 18
0
def process_all_files_for_iGIF_Ca_NP(is_E_Ca_fixed=True):
    """Fit an iGIF-Ca-NP model to every 5HT cell in the data folders.

    For each cell: load AEC/training/test recordings, perform active
    electrode compensation, fit the model (re-searching the theta_tau
    range until the optimum lies inside it), evaluate on the test set
    (epsilon_V, Md*, PVar), plot training/test comparisons, and write a
    summary table under './Results/'.

    is_E_Ca_fixed -- when True the calcium reversal potential is held
        fixed during the fit; it also selects the output file name prefix.
    """
    if is_E_Ca_fixed:
        spec_GIF_Ca = 'ECa_fixed_'
    else:
        spec_GIF_Ca = 'ECa_free_'
    # Per-cell result tables, keyed by cell name.
    Md_star = {}
    epsilon_V_test = {}
    PVar = {}

    # List separate experiments in separate folder
    data_folders_for_separate_experiments = [
        'seventh_set', 'eighth_set', 'ninth_set'
    ]

    # For all experiments, extract the cell names
    CellNames = {}
    for experiment_folder in data_folders_for_separate_experiments:
        folder_path = './' + experiment_folder + '/'
        CellNames[experiment_folder] = [
            name for name in os.listdir(folder_path)
            if os.path.isdir(folder_path + name) and '_5HT' in name
        ]
    CellNames['eighth_set'].remove('DRN157_5HT')  # problematic cell
    CellNames['eighth_set'].remove('DRN164_5HT')  # problematic cell

    for experiment_folder in data_folders_for_separate_experiments:
        for cell_name in CellNames[experiment_folder]:
            print '\n\n#############################################'
            print '##########     process cell %s    ###' % cell_name
            print '#############################################'

            #################################################################################################
            # Load data
            #################################################################################################

            path_data = './' + experiment_folder + '/' + cell_name + '/'
            path_results = './Results/' + cell_name + '/'

            # Find extension of data files
            # (first .abf or .mat file found decides the extension)
            file_names = os.listdir(path_data)
            for file_name in file_names:
                if '.abf' in file_name:
                    ext = '.abf'
                    break
                elif '.mat' in file_name:
                    ext = '.mat'
                    break

            # Load AEC data
            filename_AEC = path_data + cell_name + '_aec' + ext
            (sampling_timeAEC, voltage_traceAEC,
             current_traceAEC) = load_AEC_data(filename_AEC)

            # Create experiment
            experiment = Experiment('Experiment 1', sampling_timeAEC)
            experiment.setAECTrace(voltage_traceAEC,
                                   10.**-3,
                                   current_traceAEC,
                                   10.**-12,
                                   len(voltage_traceAEC) * sampling_timeAEC,
                                   FILETYPE='Array')

            # Load training set data and add to experiment object
            filename_training = path_data + cell_name + '_training' + ext
            (sampling_time, voltage_trace, current_trace,
             time) = load_training_data(filename_training)
            experiment.addTrainingSetTrace(voltage_trace,
                                           10**-3,
                                           current_trace,
                                           10**-12,
                                           len(voltage_trace) * sampling_time,
                                           FILETYPE='Array')
            #Note: once added to experiment, current is converted to nA.

            # Load test set data
            # (.mat files go through scipy.io; .abf files through neo AxonIO)
            filename_test = path_data + cell_name + '_test' + ext
            if filename_test.find('.mat') > 0:
                mat_contents = sio.loadmat(filename_test)
                analogSignals = mat_contents['analogSignals']
                times_test = mat_contents['times']
                times_test = times_test.reshape(times_test.size)
                times_test = times_test * 10.**3
                sampling_time_test = times_test[1] - times_test[0]
                for testnum in range(analogSignals.shape[1]):
                    voltage_test = analogSignals[0, testnum, :]
                    current_test = analogSignals[1, testnum, :] - 5.
                    experiment.addTestSetTrace(voltage_test,
                                               10.**-3,
                                               current_test,
                                               10.**-12,
                                               len(voltage_test) *
                                               sampling_time_test,
                                               FILETYPE='Array')
            elif filename_test.find('.abf') > 0:
                r = neo.io.AxonIO(filename=filename_test)
                bl = r.read_block()
                times_test = bl.segments[0].analogsignals[0].times.rescale(
                    'ms').magnitude
                sampling_time_test = times_test[1] - times_test[0]
                for i in xrange(len(bl.segments)):
                    voltage_test = bl.segments[i].analogsignals[0].magnitude
                    current_test = bl.segments[i].analogsignals[
                        1].magnitude - 5.
                    experiment.addTestSetTrace(voltage_test,
                                               10.**-3,
                                               current_test,
                                               10.**-12,
                                               len(voltage_test) *
                                               sampling_time_test,
                                               FILETYPE='Array')

            #################################################################################################
            # PERFORM ACTIVE ELECTRODE COMPENSATION
            #################################################################################################

            # Create new object to perform AEC
            myAEC = AEC_Badel(experiment.dt)

            # Define metaparametres
            myAEC.K_opt.setMetaParameters(length=150.0,
                                          binsize_lb=experiment.dt,
                                          binsize_ub=2.0,
                                          slope=30.0,
                                          clamp_period=1.0)
            myAEC.p_expFitRange = [3.0, 150.0]
            myAEC.p_nbRep = 15

            # Assign myAEC to experiment and compensate the voltage recordings
            experiment.setAEC(myAEC)
            experiment.performAEC()

            #################################################################################################
            # FIT GIF-Ca
            #################################################################################################

            # Create a new object GIF
            iGIF_Ca_NP_fit = iGIF_Ca_NP(experiment.dt)

            # Define parameters
            iGIF_Ca_NP_fit.Tref = 6.0
            iGIF_Ca_NP_fit.eta = Filter_Rect_LogSpaced()
            iGIF_Ca_NP_fit.eta.setMetaParameters(length=2000.0,
                                                 binsize_lb=0.5,
                                                 binsize_ub=500.0,
                                                 slope=10.0)
            iGIF_Ca_NP_fit.gamma = Filter_Rect_LogSpaced()
            iGIF_Ca_NP_fit.gamma.setMetaParameters(length=2000.0,
                                                   binsize_lb=2.0,
                                                   binsize_ub=500.0,
                                                   slope=5.0)

            # Restrict the fit to the interior of each training trace
            # (2 s trimmed from both ends).
            for tr in experiment.trainingset_traces:
                tr.setROI(
                    [[2000.,
                      sampling_time * (len(voltage_trace) - 1) - 2000.]])

            # Define metaparameters used during the fit
            theta_inf_nbbins = 10  # Number of rect functions used to define the nonlinear coupling between
            theta_range_min = 10.
            theta_range_max = 20.
            theta_tau_all = np.linspace(
                theta_range_min, theta_range_max, theta_inf_nbbins
            )  # tau_theta is the timescale of the threshold-voltage coupling
            likelihoods = iGIF_Ca_NP_fit.fit(experiment,
                                             theta_inf_nbbins=theta_inf_nbbins,
                                             theta_tau_all=theta_tau_all,
                                             DT_beforeSpike=5.0,
                                             is_E_Ca_fixed=is_E_Ca_fixed)

            # If the best theta_tau sits at the lower edge of the searched
            # range, shift the range down once and refit.
            if iGIF_Ca_NP_fit.theta_tau < theta_tau_all[0] + 0.1 * (20. -
                                                                    10.) / 9.:
                theta_tau_all = np.linspace(theta_range_min - 9.,
                                            theta_range_max - 10.,
                                            theta_inf_nbbins)
                likelihoods = iGIF_Ca_NP_fit.fit(
                    experiment,
                    theta_inf_nbbins=theta_inf_nbbins,
                    theta_tau_all=theta_tau_all,
                    DT_beforeSpike=5.0,
                    is_E_Ca_fixed=is_E_Ca_fixed)

            # While the best theta_tau sits at the upper edge, keep shifting
            # the range up by 10 ms and refitting.
            while iGIF_Ca_NP_fit.theta_tau > theta_tau_all[-1] - 0.1 * (
                    20. - 10.) / 9.:
                theta_range_min = theta_range_min + 10.
                theta_range_max = theta_range_max + 10.
                theta_tau_all = np.linspace(theta_range_min, theta_range_max,
                                            theta_inf_nbbins)
                print 'testing range for theta_tau = [%f, %f]...' % (
                    theta_range_min, theta_range_max)
                likelihoods = iGIF_Ca_NP_fit.fit(
                    experiment,
                    theta_inf_nbbins=theta_inf_nbbins,
                    theta_tau_all=theta_tau_all,
                    DT_beforeSpike=5.0,
                    is_E_Ca_fixed=is_E_Ca_fixed)
                print 'max likelihood = %f' % np.max(np.array(likelihoods))
            iGIF_Ca_NP_fit.save(path_results + cell_name + '_iGIF_Ca_NP_' +
                                spec_GIF_Ca + 'ModelParams.pck')

            ###################################################################################################
            # EVALUATE MODEL PERFORMANCES ON THE TEST SET DATA
            ###################################################################################################

            # predict spike times in test set
            prediction = experiment.predictSpikes(iGIF_Ca_NP_fit, nb_rep=500)

            # Compute epsilon_V (averaged over the test traces)
            epsilon_V = 0.
            local_counter = 0.
            for tr in experiment.testset_traces:
                SSE = 0.
                VAR = 0.
                # tr.detectSpikesWithDerivative(threshold=10)
                (time, V_est, eta_sum_est
                 ) = iGIF_Ca_NP_fit.simulateDeterministic_forceSpikes(
                     tr.I, tr.V[0], tr.getSpikeTimes())
                indices_tmp = tr.getROI_FarFromSpikes(5., iGIF_Ca_NP_fit.Tref)

                SSE += sum((V_est[indices_tmp] - tr.V[indices_tmp])**2)
                VAR += len(indices_tmp) * np.var(tr.V[indices_tmp])
                epsilon_V += 1.0 - SSE / VAR
                local_counter += 1
            epsilon_V = epsilon_V / local_counter
            epsilon_V_test[cell_name] = epsilon_V

            # Compute Md*
            Md_star[cell_name] = prediction.computeMD_Kistler(
                8.0, iGIF_Ca_NP_fit.dt * 2.)
            fname = path_results + cell_name + '_iGIF_Ca_NP_' + spec_GIF_Ca + 'Raster.png'
            kernelForPSTH = 50.0
            PVar[cell_name] = prediction.plotRaster(fname, delta=kernelForPSTH)

            #################################################################################################
            #  PLOT TRAINING AND TEST TRACES, MODEL VS EXPERIMENT
            #################################################################################################

            #Comparison for training and test sets w/o inactivation
            V_training = experiment.trainingset_traces[0].V
            I_training = experiment.trainingset_traces[0].I
            (time, V, eta_sum, V_t,
             S) = iGIF_Ca_NP_fit.simulate(I_training, V_training[0])
            fig = plt.figure(figsize=(10, 6), facecolor='white')
            plt.subplot(2, 1, 1)
            plt.plot(time / 1000, V, '--r', lw=0.5, label='iGIF-Ca-NP')
            plt.plot(time / 1000, V_training, 'black', lw=0.5, label='Data')
            plt.xlim(18, 20)
            plt.ylim(-80, 20)
            plt.ylabel('Voltage [mV]')
            plt.title('Training')

            V_test = experiment.testset_traces[0].V
            I_test = experiment.testset_traces[0].I
            (time, V, eta_sum, V_t,
             S) = iGIF_Ca_NP_fit.simulate(I_test, V_test[0])
            plt.subplot(2, 1, 2)
            plt.plot(time / 1000, V, '--r', lw=0.5, label='iGIF-Ca-NP')
            plt.plot(time / 1000, V_test, 'black', lw=0.5, label='Data')
            plt.xlim(5, 7)
            plt.ylim(-80, 20)
            plt.xlabel('Times [s]')
            plt.ylabel('Voltage [mV]')
            plt.title('Test')
            plt.legend()
            plt.tight_layout()
            plt.savefig(path_results + cell_name + '_iGIF_Ca_NP_' +
                        spec_GIF_Ca + 'simulate.png',
                        format='png')
            plt.close()

    # Write the per-cell summary table (tab-separated).
    output_file = open(
        './Results/' + 'iGIF_Ca_NP_' + spec_GIF_Ca + 'FitPerformance.dat', 'w')
    output_file.write('#Cell name\tMd*\tEpsilonV\tPVar\n')

    for experiment_folder in data_folders_for_separate_experiments:
        for cell_name in CellNames[experiment_folder]:
            output_file.write(cell_name + '\t' + str(Md_star[cell_name]) +
                              '\t' + str(epsilon_V_test[cell_name]) + '\t' +
                              str(PVar[cell_name]) + '\n')
    output_file.close()
Esempio n. 19
0
def open_file(filename, trigger):
    """Create an Experiment for *filename* and register it and its stimulus names globally."""
    stem = filename[0:-4]
    data_file = stem + "_data.bin"
    exp = Experiment.Experiment(data_file, filename, trigger)
    experiments[stem] = exp
    names.update(exp.stim_names)
Esempio n. 20
0
print(y_features['Class'].value_counts())
"""
======================================
Fitting model_00: Logistic Regression (baseline)
Include all reducing features without ranking the important of features 
======================================
"""
"""Sampling a training set and a testing set = 80:20"""
X_train_baseline, X_test_baseline, y_train_baseline, y_test_baseline = train_test_split(
    X_features,
    y_features,
    test_size=0.20,
    random_state=0,
    stratify=y_features)

model_logr_baseline = exp.Experiment(X=X_train_baseline,
                                     y=y_train_baseline).logistic_regression()
"""Intercept and Coefficients"""

print("\n Intercept \n" +
      str(model_logr_baseline["tuned_model"].intercept_[0]))
print("\n Coefficients \n" + str(model_logr_baseline["tuned_model"].coef_[0]))
"""Evaluate and predict"""

model_logr_baseline["pop_DR"] = model_logr_baseline[
    "tuned_model"].predict_proba(X_test_baseline)[:, 1]
model_logr_baseline["yhat"] = model_logr_baseline["tuned_model"].predict(
    X_test_baseline)

model_logr_baseline["con_matrix"] = confusion_matrix(
    y_test_baseline, model_logr_baseline["yhat"], labels=[0, 1])
model_logr_baseline["class_report"] = classification_report(
Esempio n. 21
0
####################################################

# Load the experiment data once; the original called l.expLoad() twice
# for the same result.
loaded = l.expLoad()
dataArray = loaded[0]
dataPath = loaded[1]
V = [dataArray[n][0] for n, k in enumerate(dataPath)]
I = [dataArray[n][1] for n, k in enumerate(dataPath)]
V_units = 10**-3
I_units = 10**-9

# Validate with a real exception: ``assert`` is stripped under python -O.
if np.size(I) != np.size(V):
    raise ValueError("voltage and current data must have the same size")

####################################################
# Load Training Set
####################################################

experiment = Experiment('Experiment 1', 0.1)
# BUG FIX: the trace length must be derived from the trace being added
# (V[0]); the original measured np.size(V[1]).
experiment.addTrainingSetTrace(V[0],
                               V_units,
                               I[0],
                               I_units,
                               np.size(V[0]) / 10,
                               FILETYPE='Array')
# BUG FIX: enumerate(..., start=1) so the test traces start after the
# training trace; the original started n at 0 and re-used V[0]/I[0] as
# the first test trace while never using the last trace.
for n, k in enumerate(dataPath[1:], start=1):
    experiment.addTestSetTrace(V[n],
                               V_units,
                               I[n],
                               I_units,
                               np.size(V[n]) / 10,
                               FILETYPE='Array')

####################################################
Esempio n. 22
0
    def run(self,
            csv_f=None,
            csv_b=None,
            csv_e=None,
            experiments=None,
            verbose=False):
        """Run the configured experiment(s) ``self.repeats`` times each.

        csv_f/csv_b/csv_e -- CSV output file handle, prefix base and
            suffix passed through to Experiment.run; when csv_f is given a
            per-run '<base>exp,num,' prefix is built for each repeat.
        experiments -- a single experiment index to run, or None for all
            indices of the module-level EXPERIMENTS table.
        verbose -- print a header per experiment, a dot per repeat and the
            accumulated reward (Python 2 print statements).

        Calls showRewardsOnGraph(results) before returning.  Returns a
        list with the summed rewards of each experiment, or None when the
        walker/grid are not set up.
        """

        # 1. Can't run if no one or no where
        if not self.walker or not self.grid or not self.walker.grid():
            return None

        # 2. Start with no results
        results = []
        # NOTE(review): the triple-quoted block below is disabled live-plot
        # code kept as a string expression (it executes as a no-op).
        '''#
        resultsForGraph=[]
        stepsForGraph=[]
        totalSteps=0
        rewardsForGraph=0
        plt.show()
        axes = plt.gca()
        axes.set_xlim(0, 10000000)
        axes.set_ylim(0, 100000000)
        line, = axes.plot(stepsForGraph, resultsForGraph, 'r-')'''

        # 3. Loop for all possible experiments
        # NOTE(review): '== None' should idiomatically be 'is None'.
        if experiments == None:
            exps = range(len(EXPERIMENTS))
        else:
            exps = [experiments]
        for exp in exps:

            # 4. Determine the rewards for the experiment
            initial_rewards = REWARDS[EXPERIMENTS[exp][INITIAL]]
            changed_rewards = REWARDS[EXPERIMENTS[exp][CHANGED]]
            rewards = (initial_rewards, changed_rewards)

            # 5. Create the experiment
            experiment = Experiment.Experiment(walker=self.walker,
                                               grid=self.grid,
                                               rewards=rewards,
                                               switch=self.switch,
                                               stop=self.stop)
            if verbose:
                print "%s %2d" % (self.walker, exp),
                sys.stdout.flush()

            # 6. Loop for the number of repeats
            prefix = None
            # NOTE(review): 'rewards' is reused as a numeric accumulator
            # here, shadowing the tuple built in step 4 (already consumed
            # by the Experiment constructor above).
            rewards = 0
            for num in range(self.repeats):

                # 7. Run the experiment
                experiment.reset()
                if csv_f:
                    if csv_b:
                        prefix = '%s%d,%d,' % (csv_b, exp, num)
                    else:
                        prefix = '%d,%d,' % (exp, num)
                if verbose:
                    sys.stdout.write('.')
                    sys.stdout.flush()
                rewards += experiment.run(csv_f=csv_f,
                                          csv_b=prefix,
                                          csv_e=csv_e)

            # 8. Save the results
            if verbose:
                print rewards
            results.append(rewards)

            #
            '''#update graph flow here
            rewardsForGraph+=rewards
            if totalSteps%20000==0:
                stepsForGraph.append(totalSteps)
                resultsForGraph.append(rewardsForGraph)
                line.set_xdata(stepsForGraph)
                line.set_ydata(resultsForGraph)
                plt.draw()
                plt.pause(1e-17)
                time.sleep(0.1)
                rewardsForGraph=0         
            totalSteps+=1'''

        showRewardsOnGraph(results)
        # 9. Return the total reward
        return results
"""
This script fit 2 GIF models on the same experimental data:

- model 1 myGIF_rect : a standard GIF in which eta and gamma are expanded in a set of rectangular basis functions (as in Pozzorini et al. PLOS Comp. Biol. 2015)
- model 2 myGIF_exp  : a GIF model in which eta and gamma are expanded in a set of exponential functions with given timescales,
                       (this alternative approach can be used to fit a model in which the adaptation processes can be efficiently simulated by
                       solving linear differential equations). Note that the timescales are not free parameters but must be specified by the user.

A plot is produced in which the optimal model parameters are plotted on top of each other.
"""

############################################################################################################
# STEP 1: LOAD EXPERIMENTAL DATA
############################################################################################################

# Experiment container for the recordings (name, 0.1 ms timestep presumably,
# matching the docstring's use of ms elsewhere -- confirm).
myExp = Experiment('Experiment 1', 0.1)

PATH = '../../data/gif_test/'

# Load AEC data (voltage channel, current channel, length, Igor binary format;
# the 1.0 arguments are presumably unit-scaling factors -- verify).
myExp.setAECTrace(PATH + 'Cell3_Ger1Elec_ch2_1007.ibw', 1.0, PATH + 'Cell3_Ger1Elec_ch3_1007.ibw', 1.0, 10000.0, FILETYPE='Igor')

# Load training set data
myExp.addTrainingSetTrace(PATH + 'Cell3_Ger1Training_ch2_1008.ibw', 1.0, PATH + 'Cell3_Ger1Training_ch3_1008.ibw', 1.0, 120000.0, FILETYPE='Igor')

# Specify the region of the training set that will be used to fit the models (here first 60 seconds)
myExp.trainingset_traces[0].setROI([[0,60000.0]])


# Load test set data
myExp.addTestSetTrace(PATH + 'Cell3_Ger1Test_ch2_1009.ibw', 1.0, PATH + 'Cell3_Ger1Test_ch3_1009.ibw', 1.0, 20000.0, FILETYPE='Igor')
Esempio n. 24
0
# params and inFiles are built earlier in the script (outside this excerpt).
params['temp_prob'] = [1,1,1,1,0]


# determine number of trials for (practice) block
# 50 targets by abs/pres by short/long block by house/face/letter = 600 trials; divided by 6 blocks = 100 trials per block
# this gives 168 trials per category, collapsed over conditions

#### GO ####

# first a practice block with task instructions and example trials
params['block_type'] = 'practice'
params['ready_text'] = 'stimuli/ready_practice.txt'
params['ntrials']    = 24 # needs to be divided by 3 categories
params['rep_check']  = (12,4) # in chunks of 12 trials, it is checked whether repetitions of 4 or more occur for absent/present
params['nblocks']    = 1 
exp = Experiment(params) 
# Instructions interleaved with worked example trials
# ([category, stimulus file, absent/present, interval length]).
exp.run_instruction('stimuli/instruct1.txt')
exp.run_example_trial(['face',inFiles[0][0],'present','short'])
exp.run_instruction('stimuli/instruct2.txt')
exp.run_example_trial(['house',inFiles[1][10],'absent','long'])
exp.run_instruction('stimuli/instruct3.txt')
exp.run()
exp.run_instruction('stimuli/finish_practice.txt')
##
# experimental block with only long intervals
params['block_type'] = 'long'
params['ready_text'] = 'stimuli/ready_long.txt'
params['ntrials']    = 300 # needs to be divided by 3*2 (category by absent/present) -> for real experiment 300
params['rep_check']  = (30,5) # 300 trials are divided up in chunks of 30 where absent/present is shuffled such that no 5 repetitions are allowed
params['nblocks']    = 4 #                                                           -> for real experiment 4
exp = Experiment(params) 
Esempio n. 25
0
def loadExperimentFile(pickle):
    """Reconstruct an Experiment from the path of its pickle file.

    The directory part of the path becomes the experiment folder and
    the file name up to the first '.' becomes the experiment name.
    """
    folder, file_name = os.path.split(pickle)
    experiment_name = file_name.split(".")[0]
    return Experiment.Experiment(folder, experiment_name)
Esempio n. 26
0
def main():
    """Build and launch an optimizer run for a hyperparameter experiment.

    Expects the optimizer module name as the first positional argument,
    reads config.cfg from the current working directory, creates (or
    restores) the Experiment pickle, then executes the optimizer command
    in a bash subprocess and reports timing statistics.

    Returns the subprocess exit code (0 when only printing the command);
    calls sys.exit(1) on setup errors.
    """
    # Parse options and arguments
    parser = OptionParser()
    parser.set_usage("wrapping.py <optimizer> [-s seed] [-t title]" +
                     "[--restore=/directory/]")
    parser.add_option("-p",
                      "--print",
                      action="store_true",
                      dest="printcmd",
                      default=False,
                      help="If set print the command instead of executing it")
    parser.add_option("-s",
                      "--seed",
                      dest="seed",
                      default=1,
                      type=int,
                      help="Set the seed of the optimizer")
    parser.add_option("-t",
                      "--title",
                      dest="title",
                      default=None,
                      help="A title for the experiment")
    restore_help_string = "Restore the state from a given directory"
    parser.add_option("--restore",
                      default=None,
                      dest="restore",
                      help=restore_help_string)

    (options, args) = parser.parse_args()

    # find out which optimizer we are supposed to use
    if len(args) < 1:
        parser.print_help(file=sys.stderr)
        sys.exit(1)

    optimizer = args[0]

    experiment_dir = os.getcwd()

    # _check_runsolver, _check_modules(), _check_config(experiment_dir)
    check_before_start._check_first(experiment_dir)

    # build call
    config_file = os.path.join(experiment_dir, "config.cfg")
    config = parse_config(config_file,
                          allow_no_value=True,
                          optimizer_module=optimizer)
    wrapping_dir = os.path.dirname(os.path.realpath(__file__))
    # Make this wrapper's directory importable for the spawned optimizer.
    cmd = "export PYTHONPATH=$PYTHONPATH:" + wrapping_dir + "\n"

    # Load optimizer from a sibling <optimizer>.py next to this script
    try:
        optimizer_module = imp.load_source(
            optimizer, wrapping_dir + "/" + optimizer + ".py")
    except (ImportError, IOError):
        print "Optimizer module", optimizer, "not found"
        import traceback
        print traceback.format_exc()
        sys.exit(1)
    optimizer_call, optimizer_dir = optimizer_module.main(
        config=config, options=options, experiment_dir=experiment_dir)
    cmd += optimizer_call

    # _check_function
    check_before_start._check_second(experiment_dir, optimizer_dir)

    # initialize/reload pickle file
    if options.restore:
        # Drop a stale lock file so the pickle can be reopened; a missing
        # lock is fine.
        try:
            os.remove(os.path.join(optimizer_dir, optimizer + ".pkl.lock"))
        except OSError:
            pass
    folds = config.getint('DEFAULT', 'numberCV')
    # NOTE(review): the 'cpu_limit' config value is passed as
    # max_wallclock_time -- confirm the two are meant to be the same limit.
    trials = Experiment.Experiment(optimizer_dir,
                                   optimizer,
                                   folds=folds,
                                   max_wallclock_time=config.get(
                                       'DEFAULT', 'cpu_limit'),
                                   title=options.title)
    trials.optimizer = optimizer

    if options.restore:
        # Old versions did store NaNs instead of the worst possible result for
        # crashed runs in the instance_results. In order to be able to load
        # these files, these NaNs are replaced
        for i, trial in enumerate(trials.instance_order):
            _id, fold = trial
            instance_result = trials.get_trial_from_id(
                _id)['instance_results'][fold]
            if not np.isfinite(instance_result):
                # Make sure that we do delete the last job which was running but
                # did not finish
                if i == len(trials.instance_order) - 1 and \
                        len(trials.cv_starttime) != len(trials.cv_endtime):
                    # The last job obviously did not finish correctly, do not
                    # replace it
                    pass
                else:
                    trials.get_trial_from_id(_id)['instance_results'][fold] = \
                        config.getfloat('DEFAULT', 'result_on_terminate')
                    # Pretty sure we need this here:
                    trials.get_trial_from_id(_id)['instance_status'][fold] = \
                        Experiment.BROKEN_STATE

        #noinspection PyBroadException
        try:
            restored_runs = optimizer_module.restore(
                config=config, optimizer_dir=optimizer_dir, cmd=cmd)
        except:
            print "Could not restore runs for %s" % options.restore
            import traceback
            print traceback.format_exc()
            sys.exit(1)

        print "Restored %d runs" % restored_runs
        trials.remove_all_but_first_runs(restored_runs)
        fh = open(os.path.join(optimizer_dir, optimizer + ".out"), "a")
        fh.write("#" * 80 + "\n" +
                 "Restart! Restored %d runs.\n" % restored_runs)
        fh.close()

        # Close the timing bookkeeping of the interrupted run before
        # starting a new interval.
        if len(trials.endtime) < len(trials.starttime):
            trials.endtime.append(trials.cv_endtime[-1])
        trials.starttime.append(time.time())
    else:
        trials.starttime.append(time.time())
    #noinspection PyProtectedMember
    trials._save_jobs()
    # Release the pickle before the optimizer subprocess uses it.
    del trials
    sys.stdout.flush()

    # Run call
    if options.printcmd:
        print cmd
        return 0
    else:
        print cmd
        # Both stdout and stderr of the optimizer go to <optimizer>.out
        output_file = os.path.join(optimizer_dir, optimizer + ".out")
        fh = open(output_file, "a")
        process = subprocess.Popen(cmd,
                                   stdout=fh,
                                   stderr=fh,
                                   shell=True,
                                   executable="/bin/bash")
        print "-----------------------RUNNING----------------------------------"
        ret = process.wait()
        # Reload the pickle the optimizer subprocess has been updating.
        trials = Experiment.Experiment(optimizer_dir, optimizer)
        trials.endtime.append(time.time())
        #noinspection PyProtectedMember
        trials._save_jobs()
        # trials.finish_experiment()
        print "Finished with return code: " + str(ret)
        total_time = 0
        print trials.get_best()
        # Timing summary is best-effort only; a broad except keeps a
        # bookkeeping error from masking the run's result.
        #noinspection PyBroadException
        try:
            for starttime, endtime in zip(trials.starttime, trials.endtime):
                total_time += endtime - starttime
            print "Needed a total of %f seconds" % total_time
            print "The optimizer %s took %f seconds" % \
                  (optimizer, calculate_optimizer_time(trials))
            print "The overhead of the wrapping software is %f seconds" % \
                  (calculate_wrapping_overhead(trials))
            print "The algorithm itself took %f seconds" % \
                  trials.total_wallclock_time
        except:
            pass
        del trials
        return ret
Esempio n. 27
0
    plotTestFitness("CountOnesFitnessTest.png", CountOnesFitnessResults, "N", "Fitness Value", "Count Ones Fitness")
    plotTestFitness("FourPeaksFitnessTest.png", FourPeaksFitnessResults, "N", "Fitness Value", "Four Peaks Fitness")
    plotTestFitness("KnapsackFitnessTest.png", KnapSackFitnessResults, "N", "Fitness Value", "Knapsack Fitness")

    plotTestFitness("CountOneComputeTime.png", CountOnesComputeTime, "N", "Time", "Count Ones Compute Time")
    plotTestFitness("FourPeaksComputeTime.png", FourPeaksComputeTime, "N", "Time", "Four Peaks Compute Time")
    plotTestFitness("KnapSackComputTime.png", KnapSackComputeTime, "N", "Time", "Knapsack Compute Time")

def plotTestFitness(fileName, data, labelX, labelY, title):
    """Plot one curve per search algorithm and save the figure to fileName.

    data is a 2-D array: column 0 holds the x values and columns 1-4 hold
    the series for the four algorithms, in the order listed below.
    """
    series_labels = ("Random Hill Climbing",
                     "Simulated Annealing",
                     "Genetic Algorithm",
                     "Mimic")
    plt.figure()
    plt.title(title)
    x_values = data[:, 0]
    for column, series_label in enumerate(series_labels, 1):
        plt.plot(x_values, data[:, column], "-o", label=series_label)
    plt.xlabel(labelX)
    plt.ylabel(labelY)
    plt.legend(loc="best")
    plt.grid()
    plt.xlim(20, 100)
    plt.savefig(fileName)


if __name__ == "__main__":
    # Run the benchmark suite, then build a default Experiment.
    # NOTE(review): the Experiment instance is never used and the trailing
    # print looks like leftover debug output -- confirm before removing.
    testAlgs()
    exp = ex.Experiment()
    print 'hello'


Esempio n. 28
0
from Experiment import *
from ExpList_1_1 import *

# Run experiment configurations 4-6 one after another.
for configuration in (Exp4, Exp5, Exp6):
    experiment = Experiment(configuration)
    experiment.start()
Esempio n. 29
0
from Experiment import *
from ExpList_1_1 import *


# Run experiment configurations 1-3 one after another.
for configuration in (Exp1, Exp2, Exp3):
    experiment = Experiment(configuration)
    experiment.start()
Esempio n. 30
0
    folderName = c['folderName']

    groupBy_fn_ = c['groupBy_fn']
    select_fn_ = c['select_fn']
    lineLabel_fn_ = c['lineLabel_fn']
    xLabel = c['xLabel']
    xlog = c['xlog']
    ylog = c['ylog']

    # Run Experiments
    if rangeOverNumPartitions:
        assert len(numPartitions) == 2
        start = numPartitions[0]
        end = numPartitions[1]
        numPartitions = [i for i in range(start, end + 1)]
    E = Experiment(buildSizes, probeSizes, memSizes, F, numPartitions)
    E.run()
    for r in E.runs:
        print(r.join, r.join.stats())
        print("recursion: ", r.join.getRecursionDepth())
        print("build: ", r.join.build.stats())
        print("probe: ", r.join.probe.stats())

    # Do plots
    buildDone = False
    for ph in ['build', 'probe', 'total']:
        for m in Stats.getAttrNames():
            P = Plot(
                runs=E.runs,
                phase=ph,
                outFolder=folderName + '/' + ph,