Example 1
import logging
import pathlib

import pypet


def loadPypetTrajectory(filename, trajectoryName):
    """Read HDF file with simulation results and return the chosen trajectory.

    :param filename: HDF file path
    :type filename: str
    :param trajectoryName: name of the trajectory to load; if None, the most
        recently stored trajectory in the file is used
    :type trajectoryName: str

    :return: pypet trajectory
    """
    assert pathlib.Path(filename).exists(), f"{filename} does not exist!"
    logging.info(f"Loading results from {filename}")

    # if trajectoryName is not specified, load the most recent trajectory
    if trajectoryName is None:
        # getTrajectorynamesInFile is a helper defined elsewhere in the same module
        trajectoryName = getTrajectorynamesInFile(filename)[-1]
    logging.info(f"Analyzing trajectory {trajectoryName}")

    trajLoaded = pypet.Trajectory(trajectoryName, add_time=False)
    trajLoaded.f_load(trajectoryName, filename=filename, force=True)
    trajLoaded.v_auto_load = True
    return trajLoaded
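
A minimal usage sketch, assuming the HDF file was written by a pypet simulation run (the path results.hdf is a placeholder):

traj = loadPypetTrajectory("results.hdf", trajectoryName=None)
print(traj.v_name)  # name of the loaded trajectory
print(len(traj))    # number of runs it contains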
Example 2
            hdf5File.close()
            simulationTimes.append(simulationTime)

        simulationTimes = np.array(simulationTimes)
        meanTime = np.mean(simulationTimes)
        stdTime = np.std(simulationTimes)

        # print mean +/- standard deviation as a LaTeX-formatted string
        print('$%.2f' % meanTime + r' \pm ' + '%.2f$' % stdTime)
        sys.exit()

    # Read the HDF5 file and collect the results
    hdf5File = h5py.File(sys.argv[1], 'r')

    if analyzeLTLFile:
        simulationTime = 0
        traj = pypet.Trajectory(filename=sys.argv[1])
        traj.v_auto_load = True
        traj.f_load(index=-1, force=True)
        runs = hdf5File[traj.name]['overview']['runs']
        it = tqdm.tqdm(range(len(traj)))
        for i in it:
            # per-run duration: column 4 minus column 3 of pypet's run overview
            # table (taken here as each run's finish and start timestamps)
            simulationTime += runs[i][4] - runs[i][3]
        # wall-clock time over all runs: last run's finish minus first run's start
        simulationTimeWithMulticors = runs[-1][4] - runs[0][3]

    else:
        simulationTime = hdf5File['simulationTime'][0]

    hdf5File.close()
    print('Simtime: ' + str(simulationTime))
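
For the non-LTL branch above, the same read can be written with h5py's context-manager interface so the file is closed even if an error occurs; the dataset name 'simulationTime' follows the else branch:

import h5py

def read_simulation_time(path):
    # open read-only and return the scalar stored in the 'simulationTime' dataset
    with h5py.File(path, 'r') as hdf5File:
        return hdf5File['simulationTime'][0]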
Example 3
        # Read in the full trajectories
        # Determine the lengths of the trajectories
        trajectories = []
        amountFiles = (len(sys.argv) - 1) // 2
        labels = [sys.argv[x] for x in range(2, len(sys.argv), 2)]
        #individuals = np.zeros((amountFiles, minLen, 2))
        popsizes = np.zeros(amountFiles, dtype=int)
        fitnesses = []

        minLen = np.inf

        fileIndex = 0
        for f in range(1, len(sys.argv), 2):
            fi = sys.argv[f]

            traj = pypet.Trajectory(filename=fi)
            traj.v_auto_load = True
            traj.f_load(index=-1, force=True)

            trajFitnesses = []
            #trajIndividuals = np.zeros((minLen, 2))

            popsizes[fileIndex] = GetPopSize(traj)
            iterations = GetIterations(traj)
            trajLen = iterations * popsizes[fileIndex]

            if trajLen < minLen:
                minLen = trajLen

            # Iterate over all runs and compute the fitness
            li = tqdm.tqdm(range(trajLen))
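
The excerpt breaks off before the loop body. A minimal sketch of how such a loop can collect per-run fitness values with pypet, mirroring the traj.v_idx / traj.results.crun pattern used in the function below (the exact layout of the stored fitness results is an assumption):

            for i in li:
                traj.v_idx = i  # point the trajectory at run i
                trajFitnesses.append(traj.results.crun.fitness)
            fitnesses.append(np.array(trajFitnesses))
            fileIndex += 1
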
def inner_loop():
    parser = argparse.ArgumentParser()
    parser.add_argument('--cal',
                        type=str,
                        default='../adv/calibration_20.json')
    parser.add_argument('--dac',
                        type=str,
                        default='../adv/dac_07_chip_20.json')
    parser.add_argument('--load_from', type=str, default='')
    parser.add_argument('--out', type=str, default='')
    parser.add_argument('--pl', type=int, choices=range(32), default=4)
    parser.add_argument('--lr',
                        type=str,
                        choices=['q', 'greedy', 'ann'],
                        default='q')
    parser.add_argument('--generation', type=int, default=-1)
    parser.add_argument('--n_batch', type=int, default=1)
    parser.add_argument('--n_iter', type=int, default=1)
    parser.add_argument('--dependent', default=False, action='store_true')
    parser.add_argument('--verbose', default=False, action='store_true')
    args = parser.parse_args()

    with open(args.cal) as f:
        calibrated_config = json.load(f)
    with open(args.dac) as f:
        dac_config = json.load(f)

    pylogging.reset()
    pylogging.default_config(level=pylogging.LogLevel.INFO,
                             fname="",
                             print_location=False,
                             color=True,
                             date_format='RELATIVE')
    logger = pylogging.get('main')

    agent = SpikingBanditAgent(logger)

    n_batch = args.n_batch

    agent_hp = agent.default_hyperparameters
    if args.lr == 'q':
        learning_rule = IncrementalLearningRule()
    elif args.lr == 'ann':
        learning_rule = ANNLearningRule()
    # '--lr greedy' is accepted by the parser but has no learning rule here;
    # without --load_from it would leave learning_rule undefined

    # if a previous optimization result is given, load it and extract the
    # best individual's hyperparameters from the stored trajectory
    if args.load_from != '':
        traj = pp.Trajectory(filename=args.load_from)
        traj.v_auto_load = True
        traj.f_load(index=-1, force=True)
        pop_size = traj.parameters.pop_size
        n_iter = traj.parameters.n_iteration
        max_fitness = -100
        best_individual = None
        if args.generation == -1:
            gen_index = n_iter - 1
        else:
            gen_index = args.generation
        # runs are stored generation-major: run index = generation * pop_size + individual
        for j in range(pop_size):
            traj.v_idx = gen_index * pop_size + j
            # print(traj.v_idx)
            fitness = traj.results.crun.fitness
            if fitness > max_fitness:
                max_fitness = fitness
                best_individual = dict(
                    traj.parameters.individual.f_get_children())
                best_individual.pop('seed', None)
                for k, v in best_individual.items():
                    # pick this run's value from the explored parameter
                    best_individual[k] = v[traj.v_idx]
        print(best_individual)

        if args.lr == 'q':
            agent_hp = dict(
                action_inhibition=best_individual['action_inhibition'],
                stim_inhibition=best_individual['stim_inhibition'])
            lr_hp = dict(
                learning_rate=best_individual['learning_rate'],
                learning_rate_decay=best_individual['learning_rate_decay'],
                weight_prior=best_individual['weight_prior'])
            learning_rule = IncrementalLearningRule(lr_hp)
        elif args.lr == 'ann':
            lr_hp = dict(learning_rate=best_individual['learning_rate'],
                         ann_parameters=best_individual['ann_parameters'])
            agent_hp = agent.default_hyperparameters
            learning_rule = ANNLearningRule(lr_hp)
        else:
            logger.error('Learning rule {:s} not supported yet'.format(
                args.lr))
            quit()
    bps = []
    ar = []
    regrets = []
    with Connector(calibrated_config, dac_config, args.pl) as connector:
        for i in range(args.n_iter):
            bandit_probabilities = np.random.rand(n_batch, 2)
            if args.dependent:
                bandit_probabilities[:, 1] = 1. - bandit_probabilities[:, 0]
            bandit_probabilities = bandit_probabilities.reshape((-1, ))
            try:
                r = agent.play_bandit_batch(bandit_probabilities, 100, n_batch,
                                            agent_hp, learning_rule, connector)
                regrets.append(r[0])
            except Exception:
                # skip failed batches instead of aborting the whole experiment
                logger.error('play_bandit_batch failed, skipping this iteration')
                continue
            ar.append(r[1]['a_r'])
            bps.append(bandit_probabilities)
            logger.info('iteration made')
    print(np.mean(regrets))
    if args.verbose:
        spikes = r[1]['spikes']
        logger.info(spikes[:20, :])
        logger.info('')
        logger.info(spikes[-20:, :])
        logger.info('A total of {} spikes was received'.format(
            spikes.shape[0]))
    if args.out != '':
        with open(args.out, 'wb') as f:
            pickle.dump(dict(bandit_probabilities=bps, a_r=ar), f)
    logger.info('Finished')
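
Assuming inner_loop() is called from a __main__ guard in the same script (not shown in this excerpt), the argparse flags above support invocations along these lines; the script name run_bandits.py and the file paths are placeholders:

python run_bandits.py --cal ../adv/calibration_20.json --dac ../adv/dac_07_chip_20.json --lr q --n_batch 1 --n_iter 10 --out regrets.pkl
python run_bandits.py --load_from ltl_results.hdf5 --lr ann --generation 5 --verbose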