    def make_target_volts(self, orig_params, opt_stim_list):
        """Run the original parameter set through neuroGPU for every stimulus
        and save the resulting voltage traces to use as optimization targets."""
        self.dts = []
        self.convert_allen_data()
        params = orig_params.reshape(-1, 1).T
        #params = np.repeat(params, 5 ,axis=0)
        data_volts_list = np.array([])
        allparams = allparams_from_mapping(list(params))
        # process stimuli in chunks of nGpus, swapping each new stimulus onto
        # its GPU slot before launching the run
        for stimset in range(0, len(opt_stim_list), nGpus):
            p_objects = []
            for gpuId in range(nGpus):
                if (gpuId + stimset) >= len(opt_stim_list):
                    break
                if stimset != 0:
                    print("Swapping ", gpuId, gpuId + stimset)
                    stim_swap(gpuId, gpuId + stimset)
                p_objects.append(self.run_model(gpuId, []))
            for gpuId in range(nGpus):
                if (gpuId + stimset) >= len(opt_stim_list):
                    break
                p_objects[gpuId].wait()
                if len(data_volts_list) < 1:
                    data_volts_list = self.getVolts(gpuId)
                else:
                    data_volts_list = np.append(data_volts_list,
                                                self.getVolts(gpuId),
                                                axis=0)
                print(data_volts_list.shape)
        np.savetxt("targetVolts.csv", data_volts_list, delimiter=",")
        return data_volts_list
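For reference, a minimal self-contained sketch (not part of the original snippet) of how the chunked loop above maps stimulus indices onto GPU slots; schedule_in_chunks is a hypothetical name used only for illustration.

def schedule_in_chunks(n_stims, n_gpus):
    # stimuli are handled n_gpus at a time; slot gpu_id in chunk `stimset`
    # serves stimulus gpu_id + stimset, exactly as in make_target_volts above
    assignments = []
    for stimset in range(0, n_stims, n_gpus):
        for gpu_id in range(n_gpus):
            stim_idx = gpu_id + stimset
            if stim_idx >= n_stims:
                break
            assignments.append((gpu_id, stim_idx))
    return assignments

print(schedule_in_chunks(5, 2))  # [(0, 0), (1, 1), (0, 2), (1, 3), (0, 4)]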
Example #2
    def test_run_model_dat(self):
        """runs unit test for neuroGPU src code"""
        allparams = allparams_from_mapping(
            np.reshape(self.NG.orig_params, (1, -1)))
        if size > 1:
            #start running neuroGPU
            stim_range = np.arange(nGpus) + (nGpus * global_rank)
            p_objects = []
            for stim_num in stim_range:
                adjusted_ind = stim_num % nGpus
                p_objects.append(self.NG.run_model(adjusted_ind, []))
            for p_object in p_objects:
                p_object.wait()

        else:
            p_object = self.NG.run_model(0, [])
            p_object.wait()
            fn = '/tmp/Data/VHotP' + str(0) + '.h5'
            curr_volts = nrnMreadH5(fn)
            assert not np.isnan(curr_volts).any()
            Nt = int(len(curr_volts) / self.NG.nindv)
            self.NG.data_volts_list = np.reshape(curr_volts,
                                                 [self.NG.nindv, Nt])
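A hedged sketch of what a flat HDF5 voltage read could look like; the real nrnMreadH5 helper is not shown in this snippet, and the dataset key "Data" below is only a placeholder assumption.

import h5py
import numpy as np


def read_flat_volts(fn, dataset_key="Data"):
    """Read a flat 1-D voltage array from an HDF5 file (dataset name assumed)."""
    with h5py.File(fn, "r") as f:
        return np.asarray(f[dataset_key][:], dtype=np.float64)

# usage mirroring the test above (shapes depend on the simulation output):
# curr_volts = read_flat_volts('/tmp/Data/VHotP0.h5')
# Nt = len(curr_volts) // nindv
# data_volts_list = curr_volts.reshape(nindv, Nt)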
    def evaluate_with_lists(self, param_values):
        '''This function overrides the BPOP built-in function. It is currently set up to run GPU tasks for each
        stim in chunks based on the number of GPU resources, then stacks these results and sends them off to be
        evaluated. It runs concurrently so that while nGpus are busy, results ready for evaluation are evaluated.

        Parameters
        --------------------
        param_values: population-sized list of parameter sets to be run through neuroGPU, then scored and evaluated

        Return
        --------------------
        2d list of scalar scores for each parameter set w/ shape (nindv, 1)
        '''
        self.dts = []
        self.convert_allen_data()  # reinitialize Allen stimulus data for a clean run
        self.nindv = len(param_values)

        # reinsert fixed parameter values back into each parameter set
        #full_params = np.insert(np.array(param_values), 1, orig_params[1], axis = 1)
        #np.savetxt("generatedBBPfull_params_50indv.csv", full_params)
        for reinsert_idx in self.fixed.keys():
            param_values = np.insert(np.array(param_values),
                                     reinsert_idx,
                                     self.fixed[reinsert_idx],
                                     axis=1)
        allparams = allparams_from_mapping(list(param_values))
        self.data_volts_list = np.array([])
        nstims = len(self.opt_stim_list)
        # count visible GPUs; split on commas so multi-digit device ids are counted correctly
        nGpus = len(os.environ['CUDA_VISIBLE_DEVICES'].split(','))
        start_time_sim = time.time()
        p_objects = []
        score = []
        start_times = []  # a bunch of timers
        end_times = []
        eval_times = []
        run_num = 0

        #start running neuroGPU
        for i in range(0, nGpus):
            start_times.append(time.time())
            p_objects.append(self.run_model(i, []))
        # evaluate sets of volts as they become ready
        for i in range(0, nstims):
            idx = i % (nGpus)
            # wait to get volts output from the previous run, then read and stack
            p_objects[idx].wait()
            end_times.append(time.time())
            shaped_volts = self.getVolts(idx)

            if idx == 0:
                self.data_volts_list = shaped_volts  #start stacking volts
            else:
                self.data_volts_list = np.append(self.data_volts_list,
                                                 shaped_volts,
                                                 axis=0)  # check this
            first_batch = i < nGpus  # the first nGpus jobs were already launched above, so skip relaunching for them
            last_batch = i == (nstims - 1)  # True if we are on last iter
            if not first_batch:
                start_times.append(time.time())
                if i != idx:
                    print("replaced dts and stims for ", idx,
                          " with the ones for ", i)
                    stim_swap(idx, i)
                # ship off the job to neuroGPU for the next iteration
                p_objects[idx] = self.run_model(idx, [])
            if idx == nGpus - 1:
                self.data_volts_list = np.reshape(
                    self.data_volts_list, (nGpus, self.nindv, ntimestep))
                eval_start = time.time()
                if i == nGpus - 1:
                    # shift targV and the current dts so the parallel evaluator
                    # sees just the relevant parts
                    self.targV = self.target_volts_list[:nGpus]
                    self.curr_dts = self.dts[:nGpus]
                    score = self.map_par(run_num)  # call to parallel eval
                else:
                    # e.g. with nGpus = 8 and i = 15: i - nGpus + 1 = 8 and i + 1 = 16,
                    # so we take range(8, 16) of the dts and target volts
                    self.targV = self.target_volts_list[i - nGpus + 1:i + 1]
                    self.curr_dts = self.dts[i - nGpus + 1:i + 1]
                    score = np.append(score, self.map_par(run_num),
                                      axis=1)  # stacks scores by stim

                eval_end = time.time()
                eval_times.append(eval_end - eval_start)
                self.data_volts_list = np.array([])
                run_num += 1
            #### this part only runs on the last iteration, to finish off the remaining volts and stims
            #### same code as above, but only for the leftover stims instead of the full set
            if last_batch:  # end of batch
                for ii in range(3):
                    idx = ii % (nGpus)
                    # wait to get volts output from the previous run, then read and stack
                    p_objects[idx].wait()
                    end_times.append(time.time())
                    shaped_volts = self.getVolts(idx)
                    if idx == 0:
                        self.data_volts_list = shaped_volts  # start stacking volts
                    else:
                        self.data_volts_list = np.append(self.data_volts_list,
                                                         shaped_volts,
                                                         axis=0)
                    if ii == 2:
                        self.data_volts_list = np.reshape(
                            self.data_volts_list, (3, self.nindv, ntimestep))
                        self.targV = self.target_volts_list[16:18]
                        self.curr_dts = self.dts[16:18]
                        score = np.append(score, self.map_par(run_num), axis=1)

        # TODO: fix timers later
        #print("average neuroGPU runtime: ", np.mean(np.array(end_times) - np.array(start_times)))
        #print("neuroGPU runtimes: ", np.array(end_times) - np.array(start_times))
        #print("evaluation took: ", eval_times)
        print("everything took: ", eval_end - start_time_sim)
        score = np.reshape(np.sum(score, axis=1), (-1, 1))
        # Minimum element indices in list
        # Using list comprehension + min() + enumerate()
        temp = min(score)
        res = [i for i, j in enumerate(score) if j == temp]
        print("The Positions of minimum element : " + str(res))
        #testing
        #         for i in range(len(score)):
        #             print(score[i], ": " + str(i))

        print(score.shape, "SCORE SHAPE")
        return score
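A minimal sketch of the pipelining pattern this method describes, using hypothetical launch_job / read_volts / score_volts stand-ins for run_model / getVolts / map_par: keep nGpus jobs in flight, and as each slot finishes, consume its volts and immediately relaunch the next stim on that slot.

def pipelined_eval(n_stims, n_gpus, launch_job, read_volts, score_volts):
    jobs = [launch_job(slot) for slot in range(n_gpus)]   # prime the pipeline
    scores = []
    for stim in range(n_stims):
        slot = stim % n_gpus
        jobs[slot].wait()                                 # block only on the slot we need next
        volts = read_volts(slot)
        if stim + n_gpus < n_stims:
            jobs[slot] = launch_job(slot)                 # overlap the next run with scoring
        scores.append(score_volts(stim, volts))
    return scores

# e.g. with trivial stand-ins:
# class _Done:
#     def wait(self): pass
# pipelined_eval(5, 2, lambda s: _Done(), lambda s: None, lambda i, v: 0.0)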
Example #4
    def evaluate_with_lists(self, param_values):
        '''This function overrides the BPOP built-in function. It is currently set up to run GPU tasks for each
        stim in chunks based on the number of GPU resources, then stacks these results and sends them off to be
        evaluated. It runs concurrently so that while nGpus are busy, results ready for evaluation are evaluated.

        Parameters
        --------------------
        param_values: population-sized list of parameter sets to be run through neuroGPU, then scored and evaluated

        Return
        --------------------
        2d list of scalar scores for each parameter set w/ shape (nindv, 1)
        '''
        total_stims = len(opt_stim_name_list)
        stim_range = np.arange(nGpus) + (nGpus * global_rank)

        if global_rank == 0:
            ##### TODO: write a function to check for missing data?
            self.dts = []
            utils.convert_allen_data(opt_stim_name_list, stim_file,
                                     self.dts)  # reinitialize Allen stimulus data for a clean run
            # reinsert fixed parameter values back into each parameter set
            #full_params = np.insert(np.array(param_values), 1, orig_params[1], axis = 1)
            for reinsert_idx in self.fixed.keys():
                param_values = np.insert(np.array(param_values),
                                         reinsert_idx,
                                         self.fixed[reinsert_idx],
                                         axis=1)
            full_params = param_values  # set outside the loop so it exists even when self.fixed is empty
        else:
            full_params = None
            self.dts = None
        ## with MPI we can have different populations so here we sync them up ##
        full_params = comm.bcast(full_params, root=0)
        self.dts = comm.bcast(self.dts, root=0)

        allparams = allparams_from_mapping(list(full_params))

        self.nindv = len(full_params)
        start_time_sim = time.time()
        p_objects = []
        score = []
        # a bunch of timers
        start_times = []
        end_times = []
        eval_times = []

        #start running neuroGPU
        for stim_num in stim_range:
            start_times.append(time.time())
            adjusted_ind = stim_num % nGpus
            p_objects.append(self.run_model(adjusted_ind, []))

        # evaluate sets of volts for this rank's stim range
        for stim_num in stim_range:
            mod_stim_num = stim_num % nGpus
            # wait to get volts output from the previous run, then read and stack
            p_objects[mod_stim_num].wait()
            end_times.append(time.time())

            if mod_stim_num == nGpus - 1:
                eval_start = time.time()
                score = self.map_par()  # call to parallel eval
                eval_end = time.time()
                eval_times.append(eval_end - eval_start)

        print("average neuroGPU runtime: ",
              np.mean(np.array(end_times) - np.array(start_times)))
        #print("neuroGPU runtimes: ", np.array(end_times) - np.array(start_times))
        print("evaluation took: ", eval_times)
        print("everything took: ", eval_end - start_time_sim)
        sum_score = np.reshape(np.sum(score, axis=1), (-1, 1))

        sendbuf = sum_score
        recvbuf = None
        if global_rank == 0:
            recvbuf = np.empty([size, len(sum_score)], dtype=np.float64)
        comm.Gather(sendbuf, recvbuf, root=0)
        if global_rank == 0:
            #print(np.array(recvbuf).shape, "Rec buff")
            final_score = np.sum(recvbuf, axis=0)
        else:
            final_score = None
        final_score = comm.bcast(final_score, root=0)
        #print(np.array(final_score).shape, " : final score shape")
        # Minimum element indices in list
        # Using list comprehension + min() + enumerate()
        temp = min(final_score)
        res = [i for i, j in enumerate(final_score) if j == temp]
        print("The Positions of minimum element : " + str(res))
        #print(final_score, "FINAL SCORE")

        return final_score.reshape(-1, 1)
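A minimal mpi4py sketch of the score reduction used above, with made-up shapes: each rank sums its own per-stim scores, root gathers and adds the per-rank partial sums, and the total is broadcast back so every rank returns the same (nindv, 1) scores.

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank, size = comm.Get_rank(), comm.Get_size()
nindv = 8

local_sum = np.full(nindv, float(rank))             # stand-in for this rank's summed scores
recvbuf = np.empty([size, nindv], dtype=np.float64) if rank == 0 else None
comm.Gather(local_sum, recvbuf, root=0)             # each row of recvbuf is one rank's sums

final_score = np.sum(recvbuf, axis=0) if rank == 0 else None
final_score = comm.bcast(final_score, root=0)       # every rank ends with the same totals
print(rank, final_score.reshape(-1, 1).shape)       # (nindv, 1) on every rank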
Example #5
# reconstructed reinsertion loop (this snippet begins mid-statement; variable names are assumed from the surrounding context)
for reinsert_idx in fixed.keys():
    paramset = np.insert(np.array(paramset),
                         reinsert_idx,
                         fixed[reinsert_idx],
                         axis=1)

print(paramset[0])
print(orig_params)

nstims = len(opt_stim_list)
convert_allen_data()
#allparams_from_mapping(paramset)

###### TEN COPIES OF ORIG PARAMS FOR DEBUG #################
param_values = np.array(orig_params).reshape(1, -1)
param_values = np.repeat(param_values, 10, axis=0)
print(param_values.shape, "pvals shape!!!!!!!!")
allparams_from_mapping(param_values)
###### TEN COPIES OF ORIG PARAMS FOR DEBUG #################

# debug: exercise run_model and stim_swap on GPU 0 for a few stims
for i in range(0, 3):
    if i != 0:
        p_object = run_model(0, i)
        p_object.wait()
        #getVolts(0)
        stim_swap(0, i)

import shutil, errno


def copyanything(src, dst):
    try:
        shutil.copytree(src, dst)