Example #1
0
    def runTest(self):
        """Verify that load_data_from_csv parses the sample CSV files into
        the expected per-subject, per-trial data structures.

        Loads trial and fixation data for two subjects ('abc' and 'xyz')
        with angular-distance conversion enabled (useAngularDists=True),
        then compares every field of the returned data object against
        hand-computed expected values.
        """
        data = util.load_data_from_csv(
            "test_files/sample_trial_data.csv",
            "test_files/sample_fixations.csv", useAngularDists=True)

        # Expected values, keyed by subject id and then by trial number.
        expectedRT = {'abc': {1: 100}, 'xyz': {1: 200}}
        expectedChoice = {'abc': {1: 1}, 'xyz': {1: -1}}
        expectedValueLeft = {'abc': {1: 1}, 'xyz': {1: 2}}
        expectedValueRight = {'abc': {1: 0}, 'xyz': {1: 1}}
        # Fixation items/durations are numpy arrays, hence the
        # np.testing.assert_equal calls below instead of assertEqual.
        expectedFixItem = {'abc': {1: np.array([1, 2])},
                           'xyz': {1: np.array([1, 2, 1])}}
        expectedFixTime = {'abc': {1: np.array([50, 50])},
                           'xyz': {1: np.array([100, 50, 50])}}
        expectedIsCisTrial = {'abc': {1: False}, 'xyz': {1: True}}
        expectedIsTransTrial = {'abc': {1: True}, 'xyz': {1: False}}
        
        self.assertEqual(expectedRT, data.RT)
        self.assertEqual(expectedChoice, data.choice)
        self.assertEqual(expectedValueLeft, data.valueLeft)
        self.assertEqual(expectedValueRight, data.valueRight)
        np.testing.assert_equal(expectedFixItem, data.fixItem)
        np.testing.assert_equal(expectedFixTime, data.fixTime)
        self.assertEqual(expectedIsCisTrial, data.isCisTrial)
        self.assertEqual(expectedIsTransTrial, data.isTransTrial)
Example #2
0
def main():
    """Generate artificial aDDM data and recover its parameters.

    Loads experimental data to build empirical fixation distributions,
    simulates trials with known generating parameters (d, theta, sigma),
    and then runs a Bayesian grid search over candidate parameter triples,
    updating the posterior over models after every simulated trial.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--num-threads", type=int, default=9,
                        help="Size of the thread pool.")
    parser.add_argument("--num-trials", type=int, default=800,
                        help="Number of artificial data trials to be generated "
                        "per trial condition.")
    parser.add_argument("--d", type=float, default=0.006,
                        help="aDDM parameter for generating artificial data.")
    parser.add_argument("--sigma", type=float, default=0.08,
                        help="aDDM parameter for generating artificial data.")
    parser.add_argument("--theta", type=float, default=0.5,
                        help="aDDM parameter for generating artificial data.")
    parser.add_argument("--range-d", nargs="+", type=float,
                        default=[0.005, 0.006, 0.007],
                        help="Search range for parameter d.")
    parser.add_argument("--range-sigma", nargs="+", type=float,
                        default=[0.065, 0.08, 0.095],
                        help="Search range for parameter sigma.")
    parser.add_argument("--range-theta", nargs="+", type=float,
                        default=[0.4, 0.5, 0.6],
                        help="Search range for parameter theta.")
    parser.add_argument("--expdata-file-name", type=str, default="expdata.csv",
                        help="Name of experimental data file.")
    parser.add_argument("--fixations-file-name", type=str,
                        default="fixations.csv", help="Name of fixations file.")
    parser.add_argument("--verbose", default=False, action="store_true",
                        help="Increase output verbosity.")
    args = parser.parse_args()

    pool = Pool(args.num_threads)

    # Load experimental data from CSV file.
    try:
        data = load_data_from_csv(
            args.expdata_file_name, args.fixations_file_name,
            useAngularDists=True)
    except Exception as e:
        print("An exception occurred while loading the data: " + str(e))
        return
    valueLeft = data.valueLeft
    valueRight = data.valueRight
    fixItem = data.fixItem
    fixTime = data.fixTime

    # Get empirical distributions (fixation latencies, transitions and
    # durations) that drive the artificial data generation.
    try:
        dists = get_empirical_distributions(valueLeft, valueRight, fixItem,
                                            fixTime)
    except Exception as e:
        print("An exception occurred while getting empirical distributions: " +
              str(e))
        return
    probLeftFixFirst = dists.probLeftFixFirst
    distLatencies = dists.distLatencies
    distTransitions = dists.distTransitions
    distFixations = dists.distFixations

    # Trial conditions for artificial data generation: all ordered pairs of
    # distinct orientations, mapped to item values.
    orientations = range(-15,20,5)
    trialConditions = list()
    for oLeft in orientations:
        for oRight in orientations:
            if oLeft != oRight:
                vLeft = np.absolute((np.absolute(oLeft) - 15) / 5)
                vRight = np.absolute((np.absolute(oRight) - 15) / 5)
                trialConditions.append((vLeft, vRight))

    # Generate artificial data with the known parameters.
    if args.verbose:
        print("Running simulations...")
    try:
        simul = run_simulations(
            probLeftFixFirst, distLatencies, distTransitions, distFixations,
            args.num_trials, trialConditions, args.d, args.theta,
            sigma=args.sigma)
    except Exception as e:
        print("An exception occurred while generating artificial data: " +
              str(e))
        return
    simulChoice = simul.choice
    simulValueLeft = simul.valueLeft
    simulValueRight = simul.valueRight
    simulFixItem = simul.fixItem
    simulFixTime = simul.fixTime

    # Grid search to recover the parameters: start from a uniform prior
    # over all candidate models and update after each trial.
    if args.verbose:
        print("Starting grid search...")
    numModels = (len(args.range_d) * len(args.range_sigma) *
                 len(args.range_theta))
    models = list()
    posteriors = dict()
    for d in args.range_d:
        for theta in args.range_theta:
            for sigma in args.range_sigma:
                model = (d, theta, sigma)
                models.append(model)
                posteriors[model] = 1. / numModels

    trials = simulChoice.keys()
    for trial in trials:
        listParams = list()
        for model in models:
            listParams.append(
                (simulChoice[trial], simulValueLeft[trial],
                simulValueRight[trial], simulFixItem[trial],
                simulFixTime[trial], model[0], model[1], model[2]))
        try:
            likelihoods = pool.map(get_trial_likelihood_wrapper, listParams)
        except Exception as e:
            print("An exception occurred during the likelihood computation for "
                  "trial " + str(trial) + ": " + str(e))
            return

        # Get the denominator for normalizing the posteriors.
        denominator = sum(
            posteriors[model] * likelihood
            for model, likelihood in zip(models, likelihoods))
        # A zero denominator means every model assigned zero likelihood to
        # this trial; skip it and leave the posteriors untouched.
        if denominator == 0:
            continue

        # Calculate the posteriors after this trial (Bayes rule).
        for model, likelihood in zip(models, likelihoods):
            posteriors[model] = likelihood * posteriors[model] / denominator

        # Periodically report the current posteriors.
        if args.verbose and trial % 200 == 0:
            for model in posteriors:
                print("P" + str(model) + " = " + str(posteriors[model]))
            print("Sum: " + str(sum(posteriors.values())))

    if args.verbose:
        for model in posteriors:
            print("P" + str(model) + " = " + str(posteriors[model]))
        print("Sum: " + str(sum(posteriors.values())))
def main():
    """Estimate aDDM parameters for a single subject and simulate data.

    Runs a Bayesian grid search over (d, theta, sigma) using a random
    sample of the subject's odd-numbered trials, builds empirical fixation
    distributions from the even-numbered trials, and finally generates
    probabilistic simulations by sampling models from the posterior.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("subject", type=str, help="Subject name.")
    parser.add_argument("--num-threads", type=int, default=9,
                        help="Size of the thread pool.")
    parser.add_argument("--num-trials", type=int, default=100,
                        help="Number of trials to be used in the analysis; if "
                        "smaller than 1, all trials are used.")
    parser.add_argument("--num-samples", type=int, default=32,
                        help="Number of samples to be drawn from the posterior "
                        "distribution when generating simulations.")
    parser.add_argument("--num-simulations-per-sample", type=int, default=1,
                        help="Number of simulations to be genearated for each "
                        "sample drawn from the posterior distribution.")
    parser.add_argument("--range-d", nargs="+", type=float,
                        default=[0.003, 0.006, 0.009],
                        help="Search range for parameter d.")
    parser.add_argument("--range-sigma", nargs="+", type=float,
                        default=[0.03, 0.06, 0.09],
                        help="Search range for parameter sigma.")
    parser.add_argument("--range-theta", nargs="+", type=float,
                        default=[0.3, 0.5, 0.7],
                        help="Search range for parameter theta.")
    parser.add_argument("--expdata-file-name", type=str, default="expdata.csv",
                        help="Name of experimental data file.")
    parser.add_argument("--fixations-file-name", type=str,
                        default="fixations.csv", help="Name of fixations file.")
    parser.add_argument("--save-simulations", default=False,
                        action="store_true", help="Save simulations to CSV.")
    parser.add_argument("--verbose", default=False, action="store_true",
                        help="Increase output verbosity.")
    args = parser.parse_args()

    pool = Pool(args.num_threads)

    choice = dict()
    valueLeft = dict()
    valueRight = dict()
    fixItem = dict()
    fixTime = dict()

    # Load experimental data from CSV file, keeping only this subject.
    try:
        data = load_data_from_csv(
            args.expdata_file_name, args.fixations_file_name,
            useAngularDists=True)
    except Exception as e:
        print("An exception occurred while loading the data: " + str(e))
        return
    choice[args.subject] = data.choice[args.subject]
    valueLeft[args.subject] = data.valueLeft[args.subject]
    valueRight[args.subject] = data.valueRight[args.subject]
    fixItem[args.subject] = data.fixItem[args.subject]
    fixTime[args.subject] = data.fixTime[args.subject]

    # Posteriors estimation for the parameters of the model, using odd
    # trials. Start from a uniform prior over all candidate models.
    if args.verbose:
        print("Starting grid search for subject " + args.subject + "...")
    numModels = (len(args.range_d) * len(args.range_theta) *
                 len(args.range_sigma))
    models = list()
    posteriors = dict()
    for d in args.range_d:
        for theta in args.range_theta:
            for sigma in args.range_sigma:
                model = (d, theta, sigma)
                models.append(model)
                posteriors[model] = 1. / numModels

    subjects = choice.keys()
    for subject in subjects:
        trials = choice[subject].keys()
        # Only odd-numbered trials are used for fitting; even trials are
        # reserved for the empirical distributions below.
        oddTrials = [trial for trial in trials if trial % 2]
        # Bug fix: the original set num_trials to the TOTAL trial count
        # when < 1, which exceeds the odd-trial population and makes
        # np.random.choice(..., replace=False) raise a ValueError.
        numTrials = args.num_trials
        if numTrials < 1:
            numTrials = len(oddTrials)
        trialSet = np.random.choice(oddTrials, numTrials, replace=False)
        for trial in trialSet:
            listParams = list()
            for model in models:
                listParams.append(
                    (choice[subject][trial], valueLeft[subject][trial],
                    valueRight[subject][trial], fixItem[subject][trial],
                    fixTime[subject][trial], model[0], model[1], model[2]))
            try:
                likelihoods = pool.map(get_trial_likelihood_wrapper, listParams)
            except Exception as e:
                print("An exception occurred during the likelihood computation "
                      "for trial " + str(trial) + ": " + str(e))
                return

            # Get the denominator for normalizing the posteriors.
            denominator = sum(
                posteriors[model] * likelihood
                for model, likelihood in zip(models, likelihoods))
            # Skip trials where every model has zero likelihood.
            if denominator == 0:
                continue

            # Calculate the posteriors after this trial (Bayes rule).
            for model, likelihood in zip(models, likelihoods):
                posteriors[model] = likelihood * posteriors[model] / denominator
        if args.verbose:
            for model in posteriors:
                print("P" + str(model) + " = " + str(posteriors[model]))
            print("Sum: " + str(sum(posteriors.values())))

    if args.verbose:
        print("Finished grid search!")

    # Get empirical distributions from even trials.
    try:
        dists = get_empirical_distributions(
            valueLeft, valueRight, fixItem, fixTime, useOddTrials=False,
            useEvenTrials=True)
    except Exception as e:
        print("An exception occurred while getting empirical distributions: " +
              str(e))
        return
    probLeftFixFirst = dists.probLeftFixFirst
    distLatencies = dists.distLatencies
    distTransitions = dists.distTransitions
    distFixations = dists.distFixations

    # Trial conditions for generating simulations: all ordered pairs of
    # distinct orientations, mapped to item values.
    orientations = range(-15,20,5)
    trialConditions = list()
    for oLeft in orientations:
        for oRight in orientations:
            if oLeft != oRight:
                vLeft = np.absolute((np.absolute(oLeft) - 15) / 5)
                vRight = np.absolute((np.absolute(oRight) - 15) / 5)
                trialConditions.append((vLeft, vRight))

    # Generate probabilistic simulations using the posteriors distribution.
    try:
        simul = generate_probabilistic_simulations(
            probLeftFixFirst, distLatencies, distTransitions, distFixations,
            trialConditions, posteriors, args.num_samples,
            args.num_simulations_per_sample)
    except Exception as e:
        print("An exception occurred while running simulations: " + str(e))
        return
    simulRT = simul.RT
    simulChoice = simul.choice
    simulValueLeft = simul.valueLeft
    simulValueRight = simul.valueRight
    simulFixItem = simul.fixItem
    simulFixTime = simul.fixTime
    simulFixRDV = simul.fixRDV

    if args.save_simulations:
        totalTrials = len(simulRT.keys())
        save_simulations_to_csv(
            simulChoice, simulRT, simulValueLeft, simulValueRight, simulFixItem,
            simulFixTime, simulFixRDV, totalTrials)
def main():
    """Fit aDDM parameters (d, theta, sigma) with a genetic algorithm.

    Loads experimental data into module-level globals read by evaluate()
    (the DEAP fitness function, which cannot take extra arguments), then
    runs a genetic algorithm with tournament selection, uniform crossover
    and Gaussian mutation. Out-of-bounds individuals are penalized with
    the worst possible fitness instead of being re-evaluated.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--num-threads", type=int, default=9,
                        help="Size of the thread pool.")
    parser.add_argument("--trials-per-subject", type=int, default=100,
                        help="Number of trials from each subject to be used in "
                        "the analysis; if smaller than 1, all trials are used.")
    parser.add_argument("--pop-size", type=int, default=18,
                        help="Number of individuals in each population.")
    parser.add_argument("--num-generations", type=int, default=20,
                        help="Number of generations.")
    parser.add_argument("--crossover-rate", type=float, default=0.5,
                        help="Crossover rate.")
    parser.add_argument("--mutation-rate", type=float, default=0.3,
                        help="Mutation rate.")
    parser.add_argument("--lower-bound-d", type=float, default=0.0001,
                        help="Lower search bound for parameter d.")
    parser.add_argument("--upper-bound-d", type=float, default=0.01,
                        help="Upper search bound for parameter d.")
    parser.add_argument("--lower-bound-theta", type=float, default=0,
                        help="Lower search bound for parameter theta.")
    parser.add_argument("--upper-bound-theta", type=float, default=1,
                        help="Upper search bound for parameter theta.")
    parser.add_argument("--lower-bound-sigma", type=float, default=0.001,
                        help="Lower search bound for parameter sigma.")
    parser.add_argument("--upper-bound-sigma", type=float, default=0.1,
                        help="Upper search bound for parameter sigma.")
    parser.add_argument("--expdata-file-name", type=str, default="expdata.csv",
                        help="Name of experimental data file.")
    parser.add_argument("--fixations-file-name", type=str,
                        default="fixations.csv", help="Name of fixations file.")
    args = parser.parse_args()

    # These globals are read by the evaluate() fitness function.
    global choice
    global valueLeft
    global valueRight
    global fixItem
    global fixTime
    global trialsPerSubject

    # Load experimental data from CSV file and update global variables.
    try:
        data = load_data_from_csv(
            args.expdata_file_name, args.fixations_file_name,
            useAngularDists=True)
    except Exception as e:
        print("An exception occurred while loading the data: " + str(e))
        return
    choice = data.choice
    valueLeft = data.valueLeft
    valueRight = data.valueRight
    fixItem = data.fixItem
    fixTime = data.fixTime

    trialsPerSubject = args.trials_per_subject

    # Minimize the objective (weights=(-1.0,) means smaller is better).
    creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
    creator.create("Individual", list, fitness=creator.FitnessMin)

    toolbox = base.Toolbox()

    # Create thread pool and register it so evaluations run in parallel.
    pool = Pool(args.num_threads)
    toolbox.register("map", pool.map)

    # Create individual: a (d, theta, sigma) triple drawn uniformly from
    # the search bounds.
    toolbox.register("attr_d", random.uniform, args.lower_bound_d,
                     args.upper_bound_d)
    toolbox.register("attr_theta", random.uniform, args.lower_bound_theta,
                     args.upper_bound_theta)
    toolbox.register("attr_sigma", random.uniform, args.lower_bound_sigma,
                     args.upper_bound_sigma)
    toolbox.register("individual", tools.initCycle, creator.Individual,
                     (toolbox.attr_d, toolbox.attr_theta, toolbox.attr_sigma),
                     n=1)

    # Create population.
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    pop = toolbox.population(n=args.pop_size)

    # Create operators. The per-gene mutation sigmas match the scale of
    # (d, theta, sigma) respectively.
    toolbox.register("mate", tools.cxUniform, indpb=0.4)
    toolbox.register("mutate", tools.mutGaussian, mu=0,
                     sigma=[0.0005, 0.05, 0.005], indpb=0.4)
    toolbox.register("select", tools.selTournament, tournsize=3)
    toolbox.register("evaluate", evaluate)

    # Evaluate the entire initial population in parallel.
    try:
        fitnesses = toolbox.map(toolbox.evaluate, pop)
    except Exception as e:
        # Bug fix: the original used a bare "except:" and then referenced
        # the undefined name e, raising a NameError instead of reporting.
        print("An exception occurred during the first population evaluation: " +
              str(e))
        return
    bestFit = sys.float_info.max
    bestInd = None
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit
        # Track the best individual. Bug fix: the original compared the
        # fitness tuple against a float and never updated bestFit, so
        # bestInd degenerated to the last individual in the population.
        if fit[0] < bestFit:
            bestFit = fit[0]
            bestInd = ind

    for g in range(args.num_generations):
        print("Generation " + str(g) + "...")

        # Select the next generation individuals.
        offspring = toolbox.select(pop, len(pop))
        # Clone the selected individuals; list() is required so the
        # offspring can be sliced and iterated multiple times below.
        offspring = list(map(toolbox.clone, offspring))

        # Apply crossover and mutation on the offspring; deleting the
        # fitness values marks the individuals for re-evaluation.
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            if random.random() < args.crossover_rate:
                toolbox.mate(child1, child2)
                del child1.fitness.values
                del child2.fitness.values

        for mutant in offspring:
            if random.random() < args.mutation_rate:
                toolbox.mutate(mutant)
                del mutant.fitness.values

        # Evaluate the individuals which are valid but have an invalid
        # fitness; out-of-bounds individuals get the worst fitness.
        invalidInd = list()
        for ind in offspring:
            if (ind[0] < args.lower_bound_d or
                ind[0] > args.upper_bound_d or
                ind[1] < args.lower_bound_theta or
                ind[1] > args.upper_bound_theta or
                ind[2] < args.lower_bound_sigma or
                ind[2] > args.upper_bound_sigma):
                ind.fitness.values = sys.float_info.max,
            elif not ind.fitness.valid:
                invalidInd.append(ind)
        try:
            # Use the registered parallel map, consistent with the first
            # population evaluation above.
            fitnesses = toolbox.map(toolbox.evaluate, invalidInd)
        except Exception as e:
            print("An exception occurred during the population evaluation " +
                  "for generation " + str(g) + ": " + str(e))
            return
        for ind, fit in zip(invalidInd, fitnesses):
            ind.fitness.values = fit

        # The population is entirely replaced by the offspring.
        pop[:] = offspring

        # Update best individual.
        for ind in pop:
            if ind.fitness.values[0] < bestFit:
                bestFit = ind.fitness.values[0]
                bestInd = ind

    print("Best individual: " + str(bestInd))
    print("Fitness of best individual: " + str(bestFit))
Example #5
0
def main():
    """Per-subject aDDM grid search followed by probabilistic simulation.

    For every subject, estimates posteriors over (d, theta, sigma) from a
    random sample of odd-numbered trials. Even-numbered trials are used to
    build the empirical fixation distributions that drive the
    probabilistic simulations generated at the end.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--num-threads",
                        type=int,
                        default=9,
                        help="Size of the thread pool.")
    parser.add_argument(
        "--trials-per-subject",
        type=int,
        default=100,
        help="Number of trials from each subject to be used in "
        "the analysis; if smaller than 1, all trials are used.")
    parser.add_argument(
        "--num-samples",
        type=int,
        default=100,
        help="Number of samples to be drawn from the posterior "
        "distribution when generating simulations.")
    parser.add_argument("--num-simulations-per-sample",
                        type=int,
                        default=10,
                        help="Number of simulations to be genearated for each "
                        "sample drawn from the posterior distribution.")
    parser.add_argument("--range-d",
                        nargs="+",
                        type=float,
                        default=[0.003, 0.006, 0.009],
                        help="Search range for parameter d.")
    parser.add_argument("--range-sigma",
                        nargs="+",
                        type=float,
                        default=[0.03, 0.06, 0.09],
                        help="Search range for parameter sigma.")
    parser.add_argument("--range-theta",
                        nargs="+",
                        type=float,
                        default=[0.3, 0.5, 0.7],
                        help="Search range for parameter theta.")
    parser.add_argument("--expdata-file-name",
                        type=str,
                        default="expdata.csv",
                        help="Name of experimental data file.")
    parser.add_argument("--fixations-file-name",
                        type=str,
                        default="fixations.csv",
                        help="Name of fixations file.")
    parser.add_argument("--save-simulations",
                        default=False,
                        action="store_true",
                        help="Save simulations to CSV.")
    parser.add_argument("--verbose",
                        default=False,
                        action="store_true",
                        help="Increase output verbosity.")
    args = parser.parse_args()

    pool = Pool(args.num_threads)

    # Load experimental data from CSV file.
    try:
        data = load_data_from_csv(args.expdata_file_name,
                                  args.fixations_file_name,
                                  useAngularDists=True)
    except Exception as e:
        print("An exception occurred while loading the data: " + str(e))
        return
    choice = data.choice
    valueLeft = data.valueLeft
    valueRight = data.valueRight
    fixItem = data.fixItem
    fixTime = data.fixTime

    # Posteriors estimation for the parameters of the model, using odd
    # trials. Start from a uniform prior over all candidate models.
    if args.verbose:
        print("Starting grid search...")
    numModels = (len(args.range_d) * len(args.range_theta) *
                 len(args.range_sigma))
    models = list()
    posteriors = dict()
    for d in args.range_d:
        for theta in args.range_theta:
            for sigma in args.range_sigma:
                model = (d, theta, sigma)
                models.append(model)
                posteriors[model] = 1. / numModels

    subjects = choice.keys()
    for subject in subjects:
        if args.verbose:
            print("Running subject " + subject + "...")
        trials = choice[subject].keys()
        # Only odd-numbered trials are used for fitting; even trials are
        # reserved for the empirical distributions below.
        oddTrials = [trial for trial in trials if trial % 2]
        # Bug fix: the original set args.trials_per_subject to the TOTAL
        # trial count when < 1, which exceeds the odd-trial population and
        # makes np.random.choice(..., replace=False) raise; mutating args
        # also made the value stick for every later subject.
        numTrials = args.trials_per_subject
        if numTrials < 1:
            numTrials = len(oddTrials)
        trialSet = np.random.choice(oddTrials, numTrials, replace=False)
        for trial in trialSet:
            listParams = list()
            for model in models:
                listParams.append(
                    (choice[subject][trial], valueLeft[subject][trial],
                     valueRight[subject][trial], fixItem[subject][trial],
                     fixTime[subject][trial], model[0], model[1], model[2]))
            try:
                likelihoods = pool.map(get_trial_likelihood_wrapper,
                                       listParams)
            except Exception as e:
                print(
                    "An exception occurred during the likelihood computation "
                    "for subject " + subject + ", trial " + str(trial) + ": " +
                    str(e))
                return

            # Get the denominator for normalizing the posteriors.
            denominator = sum(
                posteriors[model] * likelihood
                for model, likelihood in zip(models, likelihoods))
            # Skip trials where every model has zero likelihood.
            if denominator == 0:
                continue

            # Calculate the posteriors after this trial (Bayes rule).
            for model, likelihood in zip(models, likelihoods):
                posteriors[model] = likelihood * posteriors[model] / denominator

        if args.verbose:
            for model in posteriors:
                print("P" + str(model) + " = " + str(posteriors[model]))
            print("Sum: " + str(sum(posteriors.values())))

    if args.verbose:
        print("Finished grid search!")

    # Get empirical distributions from even trials.
    try:
        dists = get_empirical_distributions(valueLeft,
                                            valueRight,
                                            fixItem,
                                            fixTime,
                                            useOddTrials=False,
                                            useEvenTrials=True)
    except Exception as e:
        print("An exception occurred while getting empirical distributions: " +
              str(e))
        return
    probLeftFixFirst = dists.probLeftFixFirst
    distLatencies = dists.distLatencies
    distTransitions = dists.distTransitions
    distFixations = dists.distFixations

    # Trial conditions for generating simulations: all ordered pairs of
    # distinct orientations, mapped to item values.
    orientations = range(-15, 20, 5)
    trialConditions = list()
    for oLeft in orientations:
        for oRight in orientations:
            if oLeft != oRight:
                vLeft = np.absolute((np.absolute(oLeft) - 15) / 5)
                vRight = np.absolute((np.absolute(oRight) - 15) / 5)
                trialConditions.append((vLeft, vRight))

    # Generate probabilistic simulations using the posteriors distribution.
    try:
        simul = generate_probabilistic_simulations(
            probLeftFixFirst, distLatencies, distTransitions, distFixations,
            trialConditions, posteriors, args.num_samples,
            args.num_simulations_per_sample)
    except Exception as e:
        print("An exception occurred while running simulations: " + str(e))
        return
    simulRT = simul.RT
    simulChoice = simul.choice
    simulValueLeft = simul.valueLeft
    simulValueRight = simul.valueRight
    simulFixItem = simul.fixItem
    simulFixTime = simul.fixTime
    simulFixRDV = simul.fixRDV

    if args.save_simulations:
        totalTrials = len(simulRT.keys())
        save_simulations_to_csv(simulChoice, simulRT, simulValueLeft,
                                simulValueRight, simulFixItem, simulFixTime,
                                simulFixRDV, totalTrials)
Example #6
0
def main():
    """Compare candidate aDDM models by the likelihood of RT histograms.

    Generates artificial data with known parameters, bins the resulting
    reaction times into per-condition left-choice and right-choice
    histograms, then evaluates a grid of candidate models by the
    likelihood of those histograms under model-simulated data.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--num-threads", type=int, default=9,
                        help="Size of the thread pool.")
    parser.add_argument("--num-trials", type=int, default=10,
                        help="Number of artificial data trials to be generated "
                        "per trial condition.")
    parser.add_argument("--num-simulations", type=int, default=10,
                        help="Number of simulations to be generated per trial "
                        "condition, to be used in the RT histograms.")
    parser.add_argument("--bin-step", type=int, default=100,
                        help="Size of the bin step to be used in the RT "
                        "histograms.")
    parser.add_argument("--max-rt", type=int, default=8000,
                        help="Maximum RT to be used in the RT histograms.")
    parser.add_argument("--d", type=float, default=0.006,
                        help="aDDM parameter for generating artificial data.")
    parser.add_argument("--sigma", type=float, default=0.08,
                        help="aDDM parameter for generating artificial data.")
    parser.add_argument("--theta", type=float, default=0.5,
                        help="aDDM parameter for generating artificial data.")
    parser.add_argument("--range-d", nargs="+", type=float,
                        default=[0.005, 0.006, 0.007],
                        help="Search range for parameter d.")
    parser.add_argument("--range-sigma", nargs="+", type=float,
                        default=[0.065, 0.08, 0.095],
                        help="Search range for parameter sigma.")
    parser.add_argument("--range-theta", nargs="+", type=float,
                        default=[0.4, 0.5, 0.6],
                        help="Search range for parameter theta.")
    parser.add_argument("--expdata-file-name", type=str, default="expdata.csv",
                        help="Name of experimental data file.")
    parser.add_argument("--fixations-file-name", type=str,
                        default="fixations.csv", help="Name of fixations file.")
    parser.add_argument("--verbose", default=False, action="store_true",
                        help="Increase output verbosity.")
    args = parser.parse_args()

    pool = Pool(args.num_threads)

    # Load experimental data from CSV file.
    try:
        data = load_data_from_csv(
            args.expdata_file_name, args.fixations_file_name,
            useAngularDists=True)
    except Exception as e:
        print("An exception occurred while loading the data: " + str(e))
        return

    # Get empirical distributions.
    try:
        dists = get_empirical_distributions(
            data.valueLeft, data.valueRight, data.fixItem, data.fixTime)
    except Exception as e:
        print("An exception occurred while getting empirical distributions: " +
              str(e))
        return
    probLeftFixFirst = dists.probLeftFixFirst
    distLatencies = dists.distLatencies
    distTransitions = dists.distTransitions
    distFixations = dists.distFixations

    if args.verbose:
        print("Done getting empirical distributions!")

    # Bin edges for the RT histograms.
    histBins = range(0, args.max_rt + args.bin_step, args.bin_step)

    # Trial conditions: all ordered pairs of distinct orientations, mapped
    # to item values.
    orientations = range(-15,20,5)
    trialConditions = list()
    for oLeft in orientations:
        for oRight in orientations:
            if oLeft != oRight:
                vLeft = np.absolute((np.absolute(oLeft) - 15) / 5)
                vRight = np.absolute((np.absolute(oRight) - 15) / 5)
                trialConditions.append((vLeft, vRight))

    # Generate RT histograms for artificial data, split by choice.
    dataHistLeft = dict()
    dataHistRight = dict()
    for trialCondition in trialConditions:
        RTsLeft = list()
        RTsRight = list()
        for trial in range(args.num_trials):
            try:
                results = addm(probLeftFixFirst, distLatencies, distTransitions,
                               distFixations, args.d, args.sigma, args.theta,
                               trialCondition[0], trialCondition[1])
            except Exception as e:
                print("An exception occurred while running the model for "
                      "artificial data generation, at trial " + str(trial) +
                      ": " + str(e))
                return
            # choice == -1 is a left choice; choice == 1 is a right choice.
            if results.choice == -1:
                RTsLeft.append(results.RT)
            elif results.choice == 1:
                RTsRight.append(results.RT)
        dataHistLeft[trialCondition] = np.histogram(RTsLeft, bins=histBins)[0]
        dataHistRight[trialCondition] = np.histogram(RTsRight, bins=histBins)[0]

    if args.verbose:
        print("Done generating histograms of artificial data!")

    # Grid search on the parameters of the model.
    models = list()
    for d in args.range_d:
        for sigma in args.range_sigma:
            for theta in args.range_theta:
                model = (d, sigma, theta)
                models.append(model)

    listParams = list()
    for model in models:
        listParams.append(
            (model[0], model[1], model[2], trialConditions,
            args.num_simulations, histBins, dataHistLeft, dataHistRight,
            probLeftFixFirst, distLatencies, distTransitions, distFixations))
    likelihoods = pool.map(get_model_likelihood_wrapper, listParams)

    if args.verbose:
        # Report every model's likelihood, then the best-fitting model.
        for model, likelihood in zip(models, likelihoods):
            print("L" + str(model) + " = " + str(likelihood))
        bestIndex = likelihoods.index(max(likelihoods))
        print("Best fit: " + str(models[bestIndex]))
Example #7
0
                        nargs='?',
                        default=1.0)
    parser.add_argument('-ep', '--epoch', type=int, nargs='?', default=10)
    parser.add_argument('-wn', '--word_ngrams', type=int, nargs='?', default=1)
    parser.add_argument('-mc', '--min_count', type=int, nargs='?', default=1)

    args = parser.parse_args()
    model_name = args.model_name
    learning_rate = args.learning_rate
    epoch = args.epoch
    word_ngrams = args.word_ngrams
    min_count = args.min_count

    # load train data
    logger.info("start load load")
    train_data_df = load_data_from_csv(config.train_data_path)
    validate_data_df = load_data_from_csv(config.validate_data_path)

    content_train = train_data_df.iloc[:, 1]

    logger.info("start seg train data")
    content_train = seg_words(content_train)
    logger.info("complete seg train data")

    logger.info("prepare train format")
    train_data_format = np.asarray([content_train]).T
    logger.info("complete formate train data")

    columns = train_data_df.columns.values.tolist()

    # model train
def main():
    """Parameter recovery test for the aDDM via trial-by-trial posteriors.

    Loads one subject's experimental data, builds empirical fixation
    distributions from it, generates artificial trials with known parameters
    (args.d, args.theta, args.sigma), and then performs a grid search over
    (range_d, range_theta, range_sigma): the posterior probability of each
    candidate model is updated after every simulated trial using Bayes'
    rule, and the final posteriors are printed when --verbose is set.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("subject", type=str, help="Subject name.")
    parser.add_argument("--num-threads",
                        type=int,
                        default=9,
                        help="Size of the thread pool.")
    parser.add_argument(
        "--num-trials",
        type=int,
        default=200,
        help="Number of artificial data trials to be generated "
        "per trial condition.")
    parser.add_argument("--d",
                        type=float,
                        default=0.006,
                        help="aDDM parameter for generating artificial data.")
    parser.add_argument("--sigma",
                        type=float,
                        default=0.08,
                        help="aDDM parameter for generating artificial data.")
    parser.add_argument("--theta",
                        type=float,
                        default=0.5,
                        help="aDDM parameter for generating artificial data.")
    parser.add_argument("--range-d",
                        nargs="+",
                        type=float,
                        default=[0.005, 0.006, 0.007],
                        help="Search range for parameter d.")
    parser.add_argument("--range-sigma",
                        nargs="+",
                        type=float,
                        default=[0.065, 0.08, 0.095],
                        help="Search range for parameter sigma.")
    parser.add_argument("--range-theta",
                        nargs="+",
                        type=float,
                        default=[0.4, 0.5, 0.6],
                        help="Search range for parameter theta.")
    parser.add_argument("--expdata-file-name",
                        type=str,
                        default="expdata.csv",
                        help="Name of experimental data file.")
    parser.add_argument("--fixations-file-name",
                        type=str,
                        default="fixations.csv",
                        help="Name of fixations file.")
    parser.add_argument("--verbose",
                        default=False,
                        action="store_true",
                        help="Increase output verbosity.")
    args = parser.parse_args()

    # Worker pool used below to compute per-model trial likelihoods.
    pool = Pool(args.num_threads)

    valueLeft = dict()
    valueRight = dict()
    fixItem = dict()
    fixTime = dict()

    # Load experimental data from CSV file.
    try:
        data = load_data_from_csv(args.expdata_file_name,
                                  args.fixations_file_name,
                                  useAngularDists=True)
    except Exception as e:
        print("An exception occurred while loading the data: " + str(e))
        return
    # Keep only the requested subject's data, keyed by subject name, as the
    # helpers below expect subject-keyed dicts.
    valueLeft[args.subject] = data.valueLeft[args.subject]
    valueRight[args.subject] = data.valueRight[args.subject]
    fixItem[args.subject] = data.fixItem[args.subject]
    fixTime[args.subject] = data.fixTime[args.subject]

    # Get empirical distributions.
    try:
        dists = get_empirical_distributions(valueLeft, valueRight, fixItem,
                                            fixTime)
    except Exception as e:
        print("An exception occurred while getting empirical distributions: " +
              str(e))
        return
    probLeftFixFirst = dists.probLeftFixFirst
    distLatencies = dists.distLatencies
    distTransitions = dists.distTransitions
    distFixations = dists.distFixations

    # Trial conditions for artificial data generation.
    # NOTE(review): "/" here is integer division under Python 2 (this file
    # uses xrange elsewhere); under Python 3 these values would be floats —
    # confirm the intended interpreter.
    orientations = range(-15, 20, 5)
    trialConditions = list()
    for oLeft in orientations:
        for oRight in orientations:
            if oLeft != oRight:
                vLeft = np.absolute((np.absolute(oLeft) - 15) / 5)
                vRight = np.absolute((np.absolute(oRight) - 15) / 5)
                trialConditions.append((vLeft, vRight))

    # Generate artificial data.
    if args.verbose:
        print("Running simulations...")
    try:
        simul = run_simulations(probLeftFixFirst,
                                distLatencies,
                                distTransitions,
                                distFixations,
                                args.num_trials,
                                trialConditions,
                                args.d,
                                args.theta,
                                sigma=args.sigma)
    except Exception as e:
        print("An exception occurred while generating artificial data: " +
              str(e))
        return
    simulChoice = simul.choice
    simulValueLeft = simul.valueLeft
    simulValueRight = simul.valueRight
    simulFixItem = simul.fixItem
    simulFixTime = simul.fixTime

    # Grid search to recover the parameters.
    if args.verbose:
        print("Starting grid search...")
    numModels = (len(args.range_d) * len(args.range_theta) *
                 len(args.range_sigma))
    models = list()
    posteriors = dict()
    # Start from a uniform prior over all candidate (d, theta, sigma) models.
    for d in args.range_d:
        for theta in args.range_theta:
            for sigma in args.range_sigma:
                model = (d, theta, sigma)
                models.append(model)
                posteriors[model] = 1. / numModels

    trials = simulChoice.keys()
    for trial in trials:
        # Compute this trial's likelihood under every candidate model, in
        # parallel across the thread pool.
        listParams = list()
        for model in models:
            listParams.append(
                (simulChoice[trial], simulValueLeft[trial],
                 simulValueRight[trial], simulFixItem[trial],
                 simulFixTime[trial], model[0], model[1], model[2]))
        try:
            likelihoods = pool.map(get_trial_likelihood_wrapper, listParams)
        except Exception as e:
            print(
                "An exception occurred during the likelihood computation for "
                "trial " + str(trial) + ": " + str(e))
            return

        # Get the denominator for normalizing the posteriors.
        i = 0
        denominator = 0
        for model in models:
            denominator += posteriors[model] * likelihoods[i]
            i += 1
        # Skip the Bayesian update on zero total likelihood to avoid
        # dividing by zero.
        if denominator == 0:
            continue

        # Calculate the posteriors after this trial.
        i = 0
        for model in models:
            prior = posteriors[model]
            posteriors[model] = likelihoods[i] * prior / denominator
            i += 1

        # Periodic progress report; posteriors should sum to ~1.
        if args.verbose and trial % 200 == 0:
            for model in posteriors:
                print("P" + str(model) + " = " + str(posteriors[model]))
            print("Sum: " + str(sum(posteriors.values())))

    # Final posteriors over the model grid.
    if args.verbose:
        for model in posteriors:
            print("P" + str(model) + " = " + str(posteriors[model]))
        print("Sum: " + str(sum(posteriors.values())))
def main():
    """Fit aDDM parameters (d, theta, sigma) with a genetic algorithm.

    Loads experimental data into the module-level globals consumed by
    evaluate(), then uses DEAP to evolve a population of (d, theta, sigma)
    individuals -- tournament selection, uniform crossover, Gaussian
    mutation -- minimizing the single-objective fitness returned by
    evaluate(). Out-of-bounds individuals are assigned the worst possible
    fitness. Prints the best individual found and its fitness.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--num-threads",
                        type=int,
                        default=9,
                        help="Size of the thread pool.")
    parser.add_argument(
        "--trials-per-subject",
        type=int,
        default=100,
        help="Number of trials from each subject to be used in "
        "the analysis; if smaller than 1, all trials are used.")
    parser.add_argument("--pop-size",
                        type=int,
                        default=18,
                        help="Number of individuals in each population.")
    parser.add_argument("--num-generations",
                        type=int,
                        default=20,
                        help="Number of generations.")
    parser.add_argument("--crossover-rate",
                        type=float,
                        default=0.5,
                        help="Crossover rate.")
    parser.add_argument("--mutation-rate",
                        type=float,
                        default=0.3,
                        help="Mutation rate.")
    parser.add_argument("--lower-bound-d",
                        type=float,
                        default=0.0001,
                        help="Lower search bound for parameter d.")
    parser.add_argument("--upper-bound-d",
                        type=float,
                        default=0.01,
                        help="Upper search bound for parameter d.")
    parser.add_argument("--lower-bound-theta",
                        type=float,
                        default=0,
                        help="Lower search bound for parameter theta.")
    parser.add_argument("--upper-bound-theta",
                        type=float,
                        default=1,
                        help="Upper search bound for parameter theta.")
    parser.add_argument("--lower-bound-sigma",
                        type=float,
                        default=0.001,
                        help="Lower search bound for parameter sigma.")
    parser.add_argument("--upper-bound-sigma",
                        type=float,
                        default=0.1,
                        help="Upper search bound for parameter sigma.")
    parser.add_argument("--expdata-file-name",
                        type=str,
                        default="expdata.csv",
                        help="Name of experimental data file.")
    parser.add_argument("--fixations-file-name",
                        type=str,
                        default="fixations.csv",
                        help="Name of fixations file.")
    args = parser.parse_args()

    global choice
    global valueLeft
    global valueRight
    global fixItem
    global fixTime
    global trialsPerSubject

    # Load experimental data from CSV file and update global variables.
    try:
        data = load_data_from_csv(args.expdata_file_name,
                                  args.fixations_file_name,
                                  useAngularDists=True)
    except Exception as e:
        print("An exception occurred while loading the data: " + str(e))
        return
    choice = data.choice
    valueLeft = data.valueLeft
    valueRight = data.valueRight
    fixItem = data.fixItem
    fixTime = data.fixTime

    trialsPerSubject = args.trials_per_subject

    # Single-objective minimization: weights=(-1.0,) tells DEAP that lower
    # fitness is better.
    creator.create("FitnessMin", base.Fitness, weights=(-1.0, ))
    creator.create("Individual", list, fitness=creator.FitnessMin)

    toolbox = base.Toolbox()

    # Create thread pool.
    pool = Pool(args.num_threads)
    toolbox.register("map", pool.map)

    # Create individual: a (d, theta, sigma) triplet sampled uniformly
    # within the search bounds.
    toolbox.register("attr_d", random.uniform, args.lower_bound_d,
                     args.upper_bound_d)
    toolbox.register("attr_theta", random.uniform, args.lower_bound_theta,
                     args.upper_bound_theta)
    toolbox.register("attr_sigma", random.uniform, args.lower_bound_sigma,
                     args.upper_bound_sigma)
    toolbox.register("individual",
                     tools.initCycle,
                     creator.Individual,
                     (toolbox.attr_d, toolbox.attr_theta, toolbox.attr_sigma),
                     n=1)

    # Create population.
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)
    pop = toolbox.population(n=args.pop_size)

    # Create operators.
    toolbox.register("mate", tools.cxUniform, indpb=0.4)
    toolbox.register("mutate",
                     tools.mutGaussian,
                     mu=0,
                     sigma=[0.0005, 0.05, 0.005],
                     indpb=0.4)
    toolbox.register("select", tools.selTournament, tournsize=3)
    toolbox.register("evaluate", evaluate)

    # Evaluate the entire population.
    try:
        fitnesses = toolbox.map(toolbox.evaluate, pop)
    # Bug fix: the bare "except:" never bound "e", so the print below raised
    # a NameError instead of reporting the original error.
    except Exception as e:
        print(
            "An exception occurred during the first population evaluation: " +
            str(e))
        return
    bestFit = sys.float_info.max
    bestInd = None
    for ind, fit in zip(pop, fitnesses):
        ind.fitness.values = fit
        # Get best individual. Bug fix: "fit" is a 1-tuple, so compare its
        # value, and keep bestFit in sync (it was never updated here before,
        # so nearly every individual overwrote bestInd).
        if fit[0] < bestFit:
            bestFit = fit[0]
            bestInd = ind

    for g in xrange(args.num_generations):
        print("Generation " + str(g) + "...")

        # Select the next generation individuals.
        offspring = toolbox.select(pop, len(pop))
        # Clone the selected individuals. list() keeps the slicing below
        # working even where map() is lazy (Python 3).
        offspring = list(map(toolbox.clone, offspring))

        # Apply crossover and mutation on the offspring.
        for child1, child2 in zip(offspring[::2], offspring[1::2]):
            if random.random() < args.crossover_rate:
                toolbox.mate(child1, child2)
                del child1.fitness.values
                del child2.fitness.values

        for mutant in offspring:
            if random.random() < args.mutation_rate:
                toolbox.mutate(mutant)
                del mutant.fitness.values

        # Evaluate the individuals which are valid but have an invalid fitness.
        invalidInd = list()
        for ind in offspring:
            if (ind[0] < args.lower_bound_d or ind[0] > args.upper_bound_d
                    or ind[1] < args.lower_bound_theta
                    or ind[1] > args.upper_bound_theta
                    or ind[2] < args.lower_bound_sigma
                    or ind[2] > args.upper_bound_sigma):
                # Out-of-bounds individuals get the worst possible fitness.
                ind.fitness.values = sys.float_info.max,
            elif not ind.fitness.valid:
                invalidInd.append(ind)
        try:
            # list() forces evaluation inside the try block so exceptions
            # raised by evaluate() are actually caught here.
            fitnesses = list(map(toolbox.evaluate, invalidInd))
        # Bug fix: same bare "except:" / undefined "e" problem as above.
        except Exception as e:
            print("An exception occurred during the population evaluation " +
                  "for generation " + str(g) + ": " + str(e))
            return
        for ind, fit in zip(invalidInd, fitnesses):
            ind.fitness.values = fit

        # The population is entirely replaced by the offspring.
        pop[:] = offspring

        # Update best individual.
        for ind in pop:
            if ind.fitness.values[0] < bestFit:
                bestFit = ind.fitness.values[0]
                bestInd = ind

    print("Best individual: " + str(bestInd))
    print("Fitness of best individual: " + str(bestFit))
# ==== Example #10 ====
def main():
    """Recover aDDM parameters (d, theta, sigma) via basin hopping.

    Loads experimental data into the module-level globals consumed by
    get_model_nll, then minimizes the model's negative log-likelihood with
    scipy's basinhopping, running L-BFGS-B as the local minimizer within
    the configured box constraints. Prints the optimization result.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--trials-per-subject",
                        type=int,
                        default=100,
                        help="Number of trials from each subject to be used in "
                        "the analysis; if smaller than 1, all trials are used.")
    parser.add_argument("--num-iterations",
                        type=int,
                        default=100,
                        help="Number of basin hopping iterations.")
    parser.add_argument("--step-size",
                        type=float,
                        default=0.001,
                        help="Step size for use in the random displacement of "
                        "the basin hopping algorithm.")
    parser.add_argument("--initial-d",
                        type=float,
                        default=0.005,
                        help="Initial value for parameter d.")
    parser.add_argument("--initial-theta",
                        type=float,
                        default=0.5,
                        help="Initial value for parameter theta.")
    parser.add_argument("--initial-sigma",
                        type=float,
                        default=0.05,
                        help="Initial value for parameter sigma.")
    parser.add_argument("--lower-bound-d",
                        type=float,
                        default=0.0001,
                        help="Lower search bound for parameter d.")
    parser.add_argument("--upper-bound-d",
                        type=float,
                        default=0.01,
                        help="Upper search bound for parameter d.")
    parser.add_argument("--lower-bound-theta",
                        type=float,
                        default=0,
                        help="Lower search bound for parameter theta.")
    parser.add_argument("--upper-bound-theta",
                        type=float,
                        default=1,
                        help="Upper search bound for parameter theta.")
    parser.add_argument("--lower-bound-sigma",
                        type=float,
                        default=0.001,
                        help="Lower search bound for parameter sigma.")
    parser.add_argument("--upper-bound-sigma",
                        type=float,
                        default=0.1,
                        help="Upper search bound for parameter sigma.")
    parser.add_argument("--expdata-file-name",
                        type=str,
                        default="expdata.csv",
                        help="Name of experimental data file.")
    parser.add_argument("--fixations-file-name",
                        type=str,
                        default="fixations.csv",
                        help="Name of fixations file.")
    args = parser.parse_args()

    global choice
    global valueLeft
    global valueRight
    global fixItem
    global fixTime
    global trialsPerSubject

    # Populate the globals consumed by get_model_nll from the CSV files;
    # bail out early if loading fails.
    try:
        dataset = load_data_from_csv(args.expdata_file_name,
                                     args.fixations_file_name,
                                     useAngularDists=True)
    except Exception as e:
        print("An exception occurred while loading the data: " + str(e))
        return
    choice = dataset.choice
    valueLeft = dataset.valueLeft
    valueRight = dataset.valueRight
    fixItem = dataset.fixItem
    fixTime = dataset.fixTime

    trialsPerSubject = args.trials_per_subject

    # Starting point of the search: d, theta, sigma.
    startParams = [args.initial_d, args.initial_theta, args.initial_sigma]

    # Box constraints, one (lower, upper) pair per parameter, same order.
    searchBounds = [(args.lower_bound_d, args.upper_bound_d),
                    (args.lower_bound_theta, args.upper_bound_theta),
                    (args.lower_bound_sigma, args.upper_bound_sigma)]

    # Optimize with basin hopping, using bounded L-BFGS-B locally.
    localMinimizer = dict(method="L-BFGS-B", bounds=searchBounds)
    try:
        optimResult = basinhopping(get_model_nll,
                                   startParams,
                                   minimizer_kwargs=localMinimizer,
                                   niter=args.num_iterations,
                                   stepsize=args.step_size)
    except Exception as e:
        print("An exception occurred during the basinhopping optimization: " +
              str(e))
        return
    print("Optimization result: " + str(optimResult))
# ==== Example #11 ====
def main():
    """Fit the aDDM to one subject and compare simulations against data.

    Runs a maximum-likelihood grid search over (d, theta, sigma) using the
    subject's odd trials, builds empirical fixation distributions from the
    even trials, generates simulations with the optimal parameters, and
    optionally saves the simulations to CSV and choice/RT comparison
    figures to a PDF.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("subject", type=str, help="Subject name")
    parser.add_argument("--num-threads",
                        type=int,
                        default=9,
                        help="Size of the thread pool.")
    parser.add_argument("--num-trials",
                        type=int,
                        default=100,
                        help="Number of trials to be used in the analysis; if "
                        "smaller than 1, all trials are used.")
    parser.add_argument("--num-simulations",
                        type=int,
                        default=32,
                        help="Number of simulations to be generated per trial "
                        "condition.")
    parser.add_argument("--range-d",
                        nargs="+",
                        type=float,
                        default=[0.003, 0.006, 0.009],
                        help="Search range for parameter d.")
    parser.add_argument("--range-sigma",
                        nargs="+",
                        type=float,
                        default=[0.03, 0.06, 0.09],
                        help="Search range for parameter sigma.")
    parser.add_argument("--range-theta",
                        nargs="+",
                        type=float,
                        default=[0.3, 0.5, 0.7],
                        help="Search range for parameter theta.")
    parser.add_argument("--expdata-file-name",
                        type=str,
                        default="expdata.csv",
                        help="Name of experimental data file.")
    parser.add_argument("--fixations-file-name",
                        type=str,
                        default="fixations.csv",
                        help="Name of fixations file.")
    parser.add_argument("--save-simulations",
                        default=False,
                        action="store_true",
                        help="Save simulations to CSV.")
    parser.add_argument("--save-figures",
                        default=False,
                        action="store_true",
                        help="Save figures comparing "
                        "choice and RT curves for data and simulations.")
    parser.add_argument("--verbose",
                        default=False,
                        action="store_true",
                        help="Increase output verbosity.")
    args = parser.parse_args()

    pool = Pool(args.num_threads)

    choice = dict()
    # Bug fix: RT was never collected, so generate_rt_curves() at the bottom
    # raised a NameError whenever --save-figures was used.
    RT = dict()
    valueLeft = dict()
    valueRight = dict()
    fixItem = dict()
    fixTime = dict()

    # Load experimental data from CSV file, keeping only the requested
    # subject's trials, keyed by subject name.
    try:
        data = load_data_from_csv(args.expdata_file_name,
                                  args.fixations_file_name,
                                  useAngularDists=True)
    except Exception as e:
        print("An exception occurred while loading the data: " + str(e))
        return
    choice[args.subject] = data.choice[args.subject]
    RT[args.subject] = data.RT[args.subject]
    valueLeft[args.subject] = data.valueLeft[args.subject]
    valueRight[args.subject] = data.valueRight[args.subject]
    fixItem[args.subject] = data.fixItem[args.subject]
    fixTime[args.subject] = data.fixTime[args.subject]

    # Maximum likelihood estimation using odd trials only.
    # Grid search on the parameters of the model.
    if args.verbose:
        print("Starting grid search for subject " + args.subject + "...")
    models = list()
    listParams = list()
    for d in args.range_d:
        for theta in args.range_theta:
            for sigma in args.range_sigma:
                models.append((d, theta, sigma))
                params = (choice, valueLeft, valueRight, fixItem, fixTime, d,
                          theta, sigma, args.num_trials, True, False,
                          args.verbose)
                listParams.append(params)
    results = pool.map(get_model_nll_wrapper, listParams)

    # Get optimal parameters: the model with the lowest negative
    # log-likelihood.
    minNegLogLikeIdx = results.index(min(results))
    optimD = models[minNegLogLikeIdx][0]
    optimTheta = models[minNegLogLikeIdx][1]
    optimSigma = models[minNegLogLikeIdx][2]
    if args.verbose:
        print("Finished grid search!")
        print("Optimal d: " + str(optimD))
        print("Optimal theta: " + str(optimTheta))
        print("Optimal sigma: " + str(optimSigma))
        print("Min NLL: " + str(min(results)))

    # Get empirical distributions from even trials.
    try:
        dists = get_empirical_distributions(valueLeft,
                                            valueRight,
                                            fixItem,
                                            fixTime,
                                            useOddTrials=False,
                                            useEvenTrials=True)
    except Exception as e:
        print("An exception occurred while getting empirical distributions: " +
              str(e))
        return
    probLeftFixFirst = dists.probLeftFixFirst
    distLatencies = dists.distLatencies
    distTransitions = dists.distTransitions
    distFixations = dists.distFixations

    # Trial conditions for generating simulations.
    # NOTE(review): "/" here is integer division under Python 2 (this file
    # uses xrange elsewhere); confirm the intended interpreter.
    orientations = range(-15, 20, 5)
    trialConditions = list()
    for oLeft in orientations:
        for oRight in orientations:
            if oLeft != oRight:
                vLeft = np.absolute((np.absolute(oLeft) - 15) / 5)
                vRight = np.absolute((np.absolute(oRight) - 15) / 5)
                trialConditions.append((vLeft, vRight))

    # Generate simulations using the even trials distributions and the
    # estimated parameters.
    try:
        simul = run_simulations(probLeftFixFirst,
                                distLatencies,
                                distTransitions,
                                distFixations,
                                args.num_simulations,
                                trialConditions,
                                optimD,
                                optimTheta,
                                sigma=optimSigma)
    except Exception as e:
        print("An exception occurred while running simulations: " + str(e))
        return
    simulRT = simul.RT
    simulChoice = simul.choice
    simulValueLeft = simul.valueLeft
    simulValueRight = simul.valueRight
    simulFixItem = simul.fixItem
    simulFixTime = simul.fixTime
    simulFixRDV = simul.fixRDV

    totalTrials = args.num_simulations * len(trialConditions)

    if args.save_simulations:
        save_simulations_to_csv(simulChoice, simulRT, simulValueLeft,
                                simulValueRight, simulFixItem, simulFixTime,
                                simulFixRDV, totalTrials)

    if args.save_figures:
        # Create pdf file to save figures.
        pp = PdfPages("figures_" + str(optimD) + "_" + str(optimTheta) + "_" +
                      str(optimSigma) + "_" + str(args.num_simulations) +
                      ".pdf")

        # Generate choice and RT curves for real data (odd trials) and
        # simulations (generated from even trials).
        fig1 = generate_choice_curves(choice, valueLeft, valueRight,
                                      simulChoice, simulValueLeft,
                                      simulValueRight, totalTrials)
        pp.savefig(fig1)
        fig2 = generate_rt_curves(RT, valueLeft, valueRight, simulRT,
                                  simulValueLeft, simulValueRight, totalTrials)
        pp.savefig(fig2)
        pp.close()
# ==== Example #12 ====
def main():
    """Estimate aDDM parameters (d, theta, sigma) using basin hopping.

    Reads the experiment CSVs into the module-level globals used by
    get_model_nll, then minimizes the negative log-likelihood with scipy's
    basinhopping, running L-BFGS-B as the bounded local minimizer. Prints
    the optimization result.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--trials-per-subject", type=int, default=100,
                        help="Number of trials from each subject to be used in "
                        "the analysis; if smaller than 1, all trials are used.")
    parser.add_argument("--num-iterations", type=int, default=100,
                        help="Number of basin hopping iterations.")
    parser.add_argument("--step-size", type=float, default=0.001,
                        help="Step size for use in the random displacement of "
                        "the basin hopping algorithm.")
    parser.add_argument("--initial-d", type=float, default=0.005,
                        help="Initial value for parameter d.")
    parser.add_argument("--initial-theta", type=float, default=0.5,
                        help="Initial value for parameter theta.")
    parser.add_argument("--initial-sigma", type=float, default=0.05,
                        help="Initial value for parameter sigma.")
    parser.add_argument("--lower-bound-d", type=float, default=0.0001,
                        help="Lower search bound for parameter d.")
    parser.add_argument("--upper-bound-d", type=float, default=0.01,
                        help="Upper search bound for parameter d.")
    parser.add_argument("--lower-bound-theta", type=float, default=0,
                        help="Lower search bound for parameter theta.")
    parser.add_argument("--upper-bound-theta", type=float, default=1,
                        help="Upper search bound for parameter theta.")
    parser.add_argument("--lower-bound-sigma", type=float, default=0.001,
                        help="Lower search bound for parameter sigma.")
    parser.add_argument("--upper-bound-sigma", type=float, default=0.1,
                        help="Upper search bound for parameter sigma.")
    parser.add_argument("--expdata-file-name", type=str, default="expdata.csv",
                        help="Name of experimental data file.")
    parser.add_argument("--fixations-file-name", type=str,
                        default="fixations.csv", help="Name of fixations file.")
    args = parser.parse_args()

    global choice
    global valueLeft
    global valueRight
    global fixItem
    global fixTime
    global trialsPerSubject

    # Fill the globals that get_model_nll reads; abort if loading fails.
    try:
        expData = load_data_from_csv(args.expdata_file_name,
                                     args.fixations_file_name,
                                     useAngularDists=True)
    except Exception as e:
        print("An exception occurred while loading the data: " + str(e))
        return
    choice = expData.choice
    valueLeft = expData.valueLeft
    valueRight = expData.valueRight
    fixItem = expData.fixItem
    fixTime = expData.fixTime

    trialsPerSubject = args.trials_per_subject

    # Where the search starts: d, theta, sigma.
    guess = [args.initial_d, args.initial_theta, args.initial_sigma]

    # One (lower, upper) constraint pair per parameter, in the same order.
    paramBounds = [(args.lower_bound_d, args.upper_bound_d),
                   (args.lower_bound_theta, args.upper_bound_theta),
                   (args.lower_bound_sigma, args.upper_bound_sigma)]

    # Basin hopping with bounded L-BFGS-B local minimization.
    lbfgsbOptions = dict(method="L-BFGS-B", bounds=paramBounds)
    try:
        hoppingResult = basinhopping(get_model_nll, guess,
                                     minimizer_kwargs=lbfgsbOptions,
                                     niter=args.num_iterations,
                                     stepsize=args.step_size)
    except Exception as e:
        print("An exception occurred during the basinhopping optimization: " +
              str(e))
        return
    print("Optimization result: " + str(hoppingResult))
def main():
    """Iteratively estimate the "true" (uninterrupted) fixation distributions
    from the empirical ones and generate aDDM simulations with them.

    Observed fixation durations are biased: the last fixation of each trial
    is cut short by the decision. Each iteration (1) simulates the model
    using the current fixation distributions, (2) estimates, per duration
    bin, the probability that a fixation of that duration was a truncated
    last fixation, and (3) reweighs the empirical distributions by that
    probability to approximate the underlying uninterrupted distributions.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--bin-step", type=int, default=10,
                        help="Size of the bin step to be used in the fixation "
                        "distributions.")
    parser.add_argument("--max-fix-bin", type=int, default=3000,
                        help="Maximum fixation length to be used in the "
                        "fixation distributions.")
    parser.add_argument("--num-fix-dists", type=int, default=3,
                        help="Number of fixation distributions.")
    parser.add_argument("--num-iterations", type=int, default=3,
                        help="Number of iterations used to approximate the"
                        "true distributions.")
    parser.add_argument("--num-simulations", type=int, default=400,
                        help="Number of simulations to be generated per trial "
                        "condition.")
    parser.add_argument("--d", type=float, default=0.004,
                        help="aDDM parameter for generating simulations.")
    parser.add_argument("--sigma", type=float, default=0.07,
                        help="aDDM parameter for generating simulations.")
    parser.add_argument("--theta", type=float, default=0.25,
                        help="aDDM parameter for generating simulations.")
    parser.add_argument("--expdata-file-name", type=str, default="expdata.csv",
                        help="Name of experimental data file.")
    parser.add_argument("--fixations-file-name", type=str,
                        default="fixations.csv", help="Name of fixations file.")
    parser.add_argument("--save-simulations", default=False,
                        action="store_true", help="Save simulations to CSV.")
    parser.add_argument("--verbose", default=False, action="store_true",
                        help="Increase output verbosity.")
    args = parser.parse_args()

    # Time bins to be used in the fixation distributions.
    bins = range(args.bin_step, args.max_fix_bin + args.bin_step, args.bin_step)

    # Load experimental data from CSV file.
    try:
        data = load_data_from_csv(
            args.expdata_file_name, args.fixations_file_name,
            useAngularDists=True)
    except Exception as e:
        print("An exception occurred while loading the data: " + str(e))
        return
    valueLeft = data.valueLeft
    valueRight = data.valueRight
    fixItem = data.fixItem
    fixTime = data.fixTime

    # Get empirical distributions from even trials.
    try:
        dists = get_empirical_distributions(
            valueLeft, valueRight, fixItem, fixTime, useOddTrials=False,
            useEvenTrials=True)
    except Exception as e:
        print("An exception occurred while getting empirical distributions: " +
              str(e))
        return
    probLeftFixFirst = dists.probLeftFixFirst
    distLatencies = dists.distLatencies
    distTransitions = dists.distTransitions
    distFixations = dists.distFixations

    # Trial conditions for generating simulations. Orientation pairs are
    # mapped to item values via |(|o| - 15) / 5|.
    orientations = range(-15,20,5)
    trialConditions = list()
    for oLeft in orientations:
        for oRight in orientations:
            if oLeft != oRight:
                vLeft = np.absolute((np.absolute(oLeft) - 15) / 5)
                vRight = np.absolute((np.absolute(oRight) - 15) / 5)
                trialConditions.append((vLeft, vRight))

    # Create original empirical distributions of fixations, binned by
    # duration: empiricalFixDist[fixation number][value difference][time bin].
    empiricalFixDist = dict()
    for numFix in xrange(1, args.num_fix_dists + 1):
        empiricalFixDist[numFix] = dict()
        for valueDiff in xrange(-3,4):
            empiricalFixDist[numFix][valueDiff] = dict()
            for timeBin in bins:
                empiricalFixDist[numFix][valueDiff][timeBin] = 0
            # The loop variable is fixDuration (not fixTime) so it does not
            # clobber the fixTime dict loaded above.
            for fixDuration in distFixations[numFix][valueDiff]:
                # Durations beyond the last bin are clamped into it.
                timeBin = args.bin_step * min(
                    (fixDuration // args.bin_step) + 1, len(bins))
                empiricalFixDist[numFix][valueDiff][timeBin] += 1

    # Normalize the distributions.
    for numFix in xrange(1, args.num_fix_dists + 1):
        for valueDiff in xrange(-3,4):
            sumBins = sum(empiricalFixDist[numFix][valueDiff].values())
            for timeBin in bins:
                empiricalFixDist[numFix][valueDiff][timeBin] = (
                    float(empiricalFixDist[numFix][valueDiff][timeBin]) /
                    float(sumBins))

    for it in xrange(args.num_iterations):
        if args.verbose:
            print("Iteration " + str(it + 1) + "/" + str(args.num_iterations))
        # Generate simulations using the current empirical distributions and the
        # model parameters.
        try:
            simul = run_simulations(
                probLeftFixFirst, distLatencies, distTransitions,
                empiricalFixDist, args.num_simulations, trialConditions, args.d,
                args.theta, args.sigma, bins, args.num_fix_dists)
        except Exception as e:
            print("An exception occurred while running simulations in " +
                  "iteration " + str(it) + ": " + str(e))
            return
        simulRT = simul.RT
        simulChoice = simul.choice
        simulValueLeft = simul.valueLeft
        simulValueRight = simul.valueRight
        simulFixItem = simul.fixItem
        simulFixTime = simul.fixTime
        simulFixRDV = simul.fixRDV
        simulUninterruptedLastFixTime = simul.uninterruptedLastFixTime

        # Count, per duration bin, how many simulated fixations were last
        # fixations (countLastFix) vs. all item fixations (countTotal).
        countLastFix = dict()
        countTotal = dict()
        for numFix in xrange(1, args.num_fix_dists + 1):
            countLastFix[numFix] = dict()
            countTotal[numFix] = dict()
            for valueDiff in xrange(-3,4):
                countLastFix[numFix][valueDiff] = dict()
                countTotal[numFix][valueDiff] = dict()
                for timeBin in bins:
                    countLastFix[numFix][valueDiff][timeBin] = 0
                    countTotal[numFix][valueDiff][timeBin] = 0

        for trial in simulRT.keys():
            # Count all item fixations, except last.
            fixUnfixValueDiffs = {
                1: simulValueLeft[trial] - simulValueRight[trial],
                2: simulValueRight[trial] - simulValueLeft[trial]}
            lastItemFixSkipped = False
            lastFixItem = -1
            numFix = 1
            # Walk fixations in reverse so the last item fixation can be
            # skipped here and counted separately below.
            for item, time in zip(
                    simulFixItem[trial][::-1], simulFixTime[trial][::-1]):
                if not lastItemFixSkipped and (item == 1 or item == 2):
                    lastFixItem = item
                    lastItemFixSkipped = True
                    continue
                if item == 1 or item == 2:
                    timeBin = args.bin_step * min((time // args.bin_step) + 1,
                                                  len(bins))
                    vDiff = fixUnfixValueDiffs[item]
                    countTotal[numFix][vDiff][timeBin] += 1
                    if numFix < args.num_fix_dists:
                        numFix += 1
            # Count last fixation, using its uninterrupted duration.
            vDiff = fixUnfixValueDiffs[lastFixItem]
            timeBin = args.bin_step * min(
                (simulUninterruptedLastFixTime[trial] // args.bin_step) + 1,
                len(bins))
            countLastFix[numFix][vDiff][timeBin] += 1
            countTotal[numFix][vDiff][timeBin] += 1

        # Obtain true distributions of fixations by reweighing the empirical
        # ones with the probability that a fixation was not a last fixation.
        trueFixDist = dict()
        for numFix in xrange(1, args.num_fix_dists + 1):
            trueFixDist[numFix] = dict()
            for valueDiff in xrange(-3,4):
                trueFixDist[numFix][valueDiff] = dict()
                for timeBin in bins:
                    probNotLastFix = 1
                    if countTotal[numFix][valueDiff][timeBin] > 0:
                        probNotLastFix = 1 - (
                            float(countLastFix[numFix][valueDiff][timeBin]) /
                            float(countTotal[numFix][valueDiff][timeBin]))
                    if probNotLastFix == 0:
                        # Every fixation in this bin was a last fixation; fall
                        # back to the empirical value to avoid division by 0.
                        trueFixDist[numFix][valueDiff][timeBin] = (
                            empiricalFixDist[numFix][valueDiff][timeBin])
                    else:
                        trueFixDist[numFix][valueDiff][timeBin] = (
                            float(empiricalFixDist[numFix][valueDiff][timeBin]) /
                            float(probNotLastFix))
        # Normalize the distributions.
        for numFix in xrange(1, args.num_fix_dists + 1):
            for valueDiff in xrange(-3,4):
                sumBins = sum(trueFixDist[numFix][valueDiff].values())
                if sumBins > 0:
                    for timeBin in bins:
                        trueFixDist[numFix][valueDiff][timeBin] = (
                            float(trueFixDist[numFix][valueDiff][timeBin]) /
                            float(sumBins))

        # Update empirical distributions using the current true distributions.
        empiricalFixDist = trueFixDist

    # Generate final simulations.
    try:
        simul = run_simulations(
            probLeftFixFirst, distLatencies, distTransitions, empiricalFixDist,
            args.num_simulations, trialConditions, args.d, args.theta,
            args.sigma, bins, args.num_fix_dists)
    except Exception as e:
        print("An exception occurred while running the final " +
              "simulations: " + str(e))
        return
    simulRT = simul.RT
    simulChoice = simul.choice
    simulValueLeft = simul.valueLeft
    simulValueRight = simul.valueRight
    simulFixItem = simul.fixItem
    simulFixTime = simul.fixTime
    simulFixRDV = simul.fixRDV
    simulUninterruptedLastFixTime = simul.uninterruptedLastFixTime

    if args.save_simulations:
        totalTrials = args.num_simulations * len(trialConditions)
        save_simulations_to_csv(
            simulChoice, simulRT, simulValueLeft, simulValueRight, simulFixItem,
            simulFixTime, simulFixRDV, totalTrials)
Example #14
0
def main():
    """Run a grid search over aDDM parameters (d, sigma, theta).

    Artificial data are first generated with the fixed parameters given by
    --d, --sigma and --theta, and summarized as per-condition RT histograms
    split by left/right choice. Each candidate parameter combination from
    the search ranges is then scored by get_model_likelihood_wrapper against
    those histograms, with candidates evaluated in parallel.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--num-threads",
                        type=int,
                        default=9,
                        help="Size of the thread pool.")
    parser.add_argument(
        "--num-trials",
        type=int,
        default=10,
        help="Number of artificial data trials to be generated "
        "per trial condition.")
    parser.add_argument("--num-simulations",
                        type=int,
                        default=10,
                        help="Number of simulations to be generated per trial "
                        "condition, to be used in the RT histograms.")
    parser.add_argument("--bin-step",
                        type=int,
                        default=100,
                        help="Size of the bin step to be used in the RT "
                        "histograms.")
    parser.add_argument("--max-rt",
                        type=int,
                        default=8000,
                        help="Maximum RT to be used in the RT histograms.")
    parser.add_argument("--d",
                        type=float,
                        default=0.006,
                        help="aDDM parameter for generating artificial data.")
    parser.add_argument("--sigma",
                        type=float,
                        default=0.08,
                        help="aDDM parameter for generating artificial data.")
    parser.add_argument("--theta",
                        type=float,
                        default=0.5,
                        help="aDDM parameter for generating artificial data.")
    parser.add_argument("--range-d",
                        nargs="+",
                        type=float,
                        default=[0.005, 0.006, 0.007],
                        help="Search range for parameter d.")
    parser.add_argument("--range-sigma",
                        nargs="+",
                        type=float,
                        default=[0.065, 0.08, 0.095],
                        help="Search range for parameter sigma.")
    parser.add_argument("--range-theta",
                        nargs="+",
                        type=float,
                        default=[0.4, 0.5, 0.6],
                        help="Search range for parameter theta.")
    parser.add_argument("--expdata-file-name",
                        type=str,
                        default="expdata.csv",
                        help="Name of experimental data file.")
    parser.add_argument("--fixations-file-name",
                        type=str,
                        default="fixations.csv",
                        help="Name of fixations file.")
    parser.add_argument("--verbose",
                        default=False,
                        action="store_true",
                        help="Increase output verbosity.")
    args = parser.parse_args()

    # Worker pool used to score candidate models in parallel.
    pool = Pool(args.num_threads)

    # Load experimental data from CSV file.
    try:
        data = load_data_from_csv(args.expdata_file_name,
                                  args.fixations_file_name,
                                  useAngularDists=True)
    except Exception as e:
        print("An exception occurred while loading the data: " + str(e))
        return

    # Get empirical distributions.
    try:
        dists = get_empirical_distributions(data.valueLeft, data.valueRight,
                                            data.fixItem, data.fixTime)
    except Exception as e:
        print("An exception occurred while getting empirical distributions: " +
              str(e))
        return
    probLeftFixFirst = dists.probLeftFixFirst
    distLatencies = dists.distLatencies
    distTransitions = dists.distTransitions
    distFixations = dists.distFixations

    if args.verbose:
        print("Done getting empirical distributions!")

    # RT histogram bin edges, covering [0, max_rt] in bin_step increments.
    histBins = range(0, args.max_rt + args.bin_step, args.bin_step)

    # Build (valueLeft, valueRight) trial conditions from all pairs of
    # distinct stimulus orientations; orientation is mapped to an item value
    # via |(|o| - 15) / 5|.
    orientations = range(-15, 20, 5)
    trialConditions = list()
    for oLeft in orientations:
        for oRight in orientations:
            if oLeft != oRight:
                vLeft = np.absolute((np.absolute(oLeft) - 15) / 5)
                vRight = np.absolute((np.absolute(oRight) - 15) / 5)
                trialConditions.append((vLeft, vRight))

    # Generate histograms for artificial data.
    dataHistLeft = dict()
    dataHistRight = dict()
    for trialCondition in trialConditions:
        RTsLeft = list()
        RTsRight = list()
        trial = 0
        while trial < args.num_trials:
            try:
                results = addm(probLeftFixFirst, distLatencies,
                               distTransitions, distFixations, args.d,
                               args.sigma, args.theta, trialCondition[0],
                               trialCondition[1])
            except Exception as e:
                print("An exception occurred while running the model for "
                      "artificial data generation, at trial " + str(trial) +
                      ": " + str(e))
                return
            # Split RTs by simulated choice: -1 is left, 1 is right.
            if results.choice == -1:
                RTsLeft.append(results.RT)
            elif results.choice == 1:
                RTsRight.append(results.RT)
            trial += 1
        dataHistLeft[trialCondition] = np.histogram(RTsLeft, bins=histBins)[0]
        dataHistRight[trialCondition] = np.histogram(RTsRight,
                                                     bins=histBins)[0]

    if args.verbose:
        print("Done generating histograms of artificial data!")

    # Grid search on the parameters of the model.
    models = list()
    for d in args.range_d:
        for sigma in args.range_sigma:
            for theta in args.range_theta:
                model = (d, sigma, theta)
                models.append(model)

    # One argument tuple per candidate model; scored in parallel.
    listParams = list()
    for model in models:
        listParams.append(
            (model[0], model[1], model[2], trialConditions,
             args.num_simulations, histBins, dataHistLeft, dataHistRight,
             probLeftFixFirst, distLatencies, distTransitions, distFixations))
    likelihoods = pool.map(get_model_likelihood_wrapper, listParams)

    # NOTE(review): the best-fit model is only computed and reported when
    # --verbose is set — confirm this is intentional.
    if args.verbose:
        for i in xrange(len(models)):
            print("L" + str(models[i]) + " = " + str(likelihoods[i]))
        bestIndex = likelihoods.index(max(likelihoods))
        print("Best fit: " + str(models[bestIndex]))
Example #15
0
def main():
    """Fit aDDM parameters (d, theta, sigma) by maximum-likelihood grid
    search over odd trials, then generate simulations using the best-fitting
    parameters and empirical fixation distributions taken from even trials.

    CIS/TRANS trial filtering is controlled by --use-cis-trials and
    --use-trans-trials; simulations can optionally be saved to CSV.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--num-threads", type=int, default=9,
                        help="Size of the thread pool.")
    parser.add_argument("--trials-per-subject", type=int, default=100,
                        help="Number of trials from each subject to be used in "
                        "the analysis; if smaller than 1, all trials are used.")
    parser.add_argument("--num-simulations", type=int, default=400,
                        help="Number of simulations to be generated per trial "
                        "condition.")
    parser.add_argument("--range-d", nargs="+", type=float,
                        default=[0.003, 0.006, 0.009],
                        help="Search range for parameter d.")
    parser.add_argument("--range-sigma", nargs="+", type=float,
                        default=[0.03, 0.06, 0.09],
                        help="Search range for parameter sigma.")
    parser.add_argument("--range-theta", nargs="+", type=float,
                        default=[0.3, 0.5, 0.7],
                        help="Search range for parameter theta.")
    parser.add_argument("--expdata-file-name", type=str, default="expdata.csv",
                        help="Name of experimental data file.")
    parser.add_argument("--fixations-file-name", type=str,
                        default="fixations.csv", help="Name of fixations file.")
    parser.add_argument("--use-cis-trials", default=False, action="store_true",
                        help="Use CIS trials in the analysis.")
    parser.add_argument("--use-trans-trials", default=False,
                        action="store_true", help="Use TRANS trials in the "
                        "analysis.")
    parser.add_argument("--save-simulations", default=False,
                        action="store_true", help="Save simulations to CSV.")
    parser.add_argument("--verbose", default=False, action="store_true",
                        help="Increase output verbosity.")
    args = parser.parse_args()

    # Worker pool used to evaluate candidate models in parallel.
    pool = Pool(args.num_threads)

    # Load experimental data from CSV file.
    try:
        data = load_data_from_csv(
            args.expdata_file_name, args.fixations_file_name,
            useAngularDists=True)
    except Exception as e:
        print("An exception occurred while loading the data: " + str(e))
        return
    choice = data.choice
    valueLeft = data.valueLeft
    valueRight = data.valueRight
    fixItem = data.fixItem
    fixTime = data.fixTime
    isCisTrial = data.isCisTrial
    isTransTrial = data.isTransTrial

    # Maximum likelihood estimation.
    # Grid search on the parameters of the model using odd trials only.
    if args.verbose:
        print("Starting grid search...")
    models = list()
    listParams = list()
    for d in args.range_d:
        for theta in args.range_theta:
            for sigma in args.range_sigma:
                models.append((d, theta, sigma))
                # The (True, False) pair presumably selects odd trials and
                # excludes even trials — verify against get_model_nll_wrapper.
                params = (choice, valueLeft, valueRight, fixItem, fixTime, d,
                          theta, sigma, args.trials_per_subject, True, False,
                          isCisTrial, isTransTrial, args.use_cis_trials,
                          args.use_trans_trials, args.verbose)
                listParams.append(params)
    results = pool.map(get_model_nll_wrapper, listParams)

    # Get optimal parameters: the model minimizing negative log-likelihood.
    minNegLogLikeIdx = results.index(min(results))
    optimD = models[minNegLogLikeIdx][0]
    optimTheta = models[minNegLogLikeIdx][1]
    optimSigma = models[minNegLogLikeIdx][2]
    if args.verbose:
        print("Finished grid search!")
        print("Optimal d: " + str(optimD))
        print("Optimal theta: " + str(optimTheta))
        print("Optimal sigma: " + str(optimSigma))
        print("Min NLL: " + str(min(results)))

    # Get empirical distributions from even trials only.
    try:
        dists = get_empirical_distributions(
            valueLeft, valueRight, fixItem, fixTime, useOddTrials=False,
            useEvenTrials=True, isCisTrial=isCisTrial,
            isTransTrial=isTransTrial, useCisTrials=args.use_cis_trials,
            useTransTrials=args.use_trans_trials)
    except Exception as e:
        print("An exception occurred while getting empirical distributions: " +
              str(e))
        return
    probLeftFixFirst = dists.probLeftFixFirst
    distLatencies = dists.distLatencies
    distTransitions = dists.distTransitions
    distFixations = dists.distFixations

    # Parameters for generating simulations.
    # Orientations are mapped to item values via |(|o| - 15) / 5|. A pair is
    # treated as CIS when oLeft * oRight >= 0 and TRANS when <= 0; the elif
    # keeps a condition from being added twice when both flags are set and
    # oLeft * oRight == 0.
    orientations = range(-15,20,5)
    trialConditions = list()
    for oLeft in orientations:
        for oRight in orientations:
            vLeft = np.absolute((np.absolute(oLeft) - 15) / 5)
            vRight = np.absolute((np.absolute(oRight) - 15) / 5)
            if oLeft != oRight and args.use_cis_trials and oLeft * oRight >= 0:
                trialConditions.append((vLeft, vRight))
            elif (oLeft != oRight and args.use_trans_trials and
                  oLeft * oRight <= 0):
                trialConditions.append((vLeft, vRight))

    # Generate simulations using the empirical distributions and the
    # estimated parameters.
    try:
        simul = run_simulations(
            probLeftFixFirst, distLatencies, distTransitions, distFixations,
            args.num_simulations, trialConditions, optimD, optimTheta,
            sigma=optimSigma)
    except Exception as e:
        print("An exception occurred while running simulations: " + str(e))
        return
    simulRT = simul.RT
    simulChoice = simul.choice
    simulValueLeft = simul.valueLeft
    simulValueRight = simul.valueRight
    simulFixItem = simul.fixItem
    simulFixTime = simul.fixTime
    simulFixRDV = simul.fixRDV

    if args.save_simulations:
        totalTrials = args.num_simulations * len(trialConditions)
        save_simulations_to_csv(
            simulChoice, simulRT, simulValueLeft, simulValueRight, simulFixItem,
            simulFixTime, simulFixRDV, totalTrials)
Example #16
0
def main():
    """Fit the aDDM for a single subject by grid search on odd trials, then
    generate simulations from even-trial empirical distributions using the
    best-fitting parameters.

    Optionally saves the simulations to CSV and saves figures comparing
    choice and RT curves between data and simulations.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("subject", type=str, help="Subject name")
    parser.add_argument("--num-threads", type=int, default=9,
                        help="Size of the thread pool.")
    parser.add_argument("--num-trials", type=int, default=100,
                        help="Number of trials to be used in the analysis; if "
                        "smaller than 1, all trials are used.")
    parser.add_argument("--num-simulations", type=int, default=32,
                        help="Number of simulations to be generated per trial "
                        "condition.")
    parser.add_argument("--range-d", nargs="+", type=float,
                        default=[0.003, 0.006, 0.009],
                        help="Search range for parameter d.")
    parser.add_argument("--range-sigma", nargs="+", type=float,
                        default=[0.03, 0.06, 0.09],
                        help="Search range for parameter sigma.")
    parser.add_argument("--range-theta", nargs="+", type=float,
                        default=[0.3, 0.5, 0.7],
                        help="Search range for parameter theta.")
    parser.add_argument("--expdata-file-name", type=str, default="expdata.csv",
                        help="Name of experimental data file.")
    parser.add_argument("--fixations-file-name", type=str,
                        default="fixations.csv", help="Name of fixations file.")
    parser.add_argument("--save-simulations", default=False,
                        action="store_true", help="Save simulations to CSV.")
    parser.add_argument("--save-figures", default=False,
                        action="store_true", help="Save figures comparing "
                        "choice and RT curves for data and simulations.")
    parser.add_argument("--verbose", default=False, action="store_true",
                        help="Increase output verbosity.")
    args = parser.parse_args()

    pool = Pool(args.num_threads)

    # Per-subject data containers, keyed by subject name.
    RT = dict()
    choice = dict()
    valueLeft = dict()
    valueRight = dict()
    fixItem = dict()
    fixTime = dict()

    # Load experimental data from CSV file.
    try:
        data = load_data_from_csv(
            args.expdata_file_name, args.fixations_file_name,
            useAngularDists=True)
    except Exception as e:
        print("An exception occurred while loading the data: " + str(e))
        return
    # Keep only the requested subject's trials.
    # BUG FIX: RT was never populated, so generate_rt_curves() below raised a
    # NameError whenever --save-figures was used.
    RT[args.subject] = data.RT[args.subject]
    choice[args.subject] = data.choice[args.subject]
    valueLeft[args.subject] = data.valueLeft[args.subject]
    valueRight[args.subject] = data.valueRight[args.subject]
    fixItem[args.subject] = data.fixItem[args.subject]
    fixTime[args.subject] = data.fixTime[args.subject]

    # Maximum likelihood estimation using odd trials only.
    # Grid search on the parameters of the model.
    if args.verbose:
        print("Starting grid search for subject " + args.subject + "...")
    models = list()
    listParams = list()
    for d in args.range_d:
        for theta in args.range_theta:
            for sigma in args.range_sigma:
                models.append((d, theta, sigma))
                # The (True, False) pair presumably selects odd trials and
                # excludes even trials — verify against get_model_nll_wrapper.
                params = (choice, valueLeft, valueRight, fixItem, fixTime, d,
                          theta, sigma, args.num_trials, True, False,
                          args.verbose)
                listParams.append(params)
    results = pool.map(get_model_nll_wrapper, listParams)

    # Get optimal parameters: the model minimizing negative log-likelihood.
    minNegLogLikeIdx = results.index(min(results))
    optimD = models[minNegLogLikeIdx][0]
    optimTheta = models[minNegLogLikeIdx][1]
    optimSigma = models[minNegLogLikeIdx][2]
    if args.verbose:
        print("Finished grid search!")
        print("Optimal d: " + str(optimD))
        print("Optimal theta: " + str(optimTheta))
        print("Optimal sigma: " + str(optimSigma))
        print("Min NLL: " + str(min(results)))

    # Get empirical distributions from even trials.
    try:
        dists = get_empirical_distributions(
            valueLeft, valueRight, fixItem, fixTime, useOddTrials=False,
            useEvenTrials=True)
    except Exception as e:
        print("An exception occurred while getting empirical distributions: " +
              str(e))
        return
    probLeftFixFirst = dists.probLeftFixFirst
    distLatencies = dists.distLatencies
    distTransitions = dists.distTransitions
    distFixations = dists.distFixations

    # Trial conditions for generating simulations; orientation pairs are
    # mapped to item values via |(|o| - 15) / 5|.
    orientations = range(-15,20,5)
    trialConditions = list()
    for oLeft in orientations:
        for oRight in orientations:
            if oLeft != oRight:
                vLeft = np.absolute((np.absolute(oLeft) - 15) / 5)
                vRight = np.absolute((np.absolute(oRight) - 15) / 5)
                trialConditions.append((vLeft, vRight))

    # Generate simulations using the even trials distributions and the
    # estimated parameters.
    try:
        simul = run_simulations(
            probLeftFixFirst, distLatencies, distTransitions, distFixations,
            args.num_simulations, trialConditions, optimD, optimTheta,
            sigma=optimSigma)
    except Exception as e:
        print("An exception occurred while running simulations: " + str(e))
        return
    simulRT = simul.RT
    simulChoice = simul.choice
    simulValueLeft = simul.valueLeft
    simulValueRight = simul.valueRight
    simulFixItem = simul.fixItem
    simulFixTime = simul.fixTime
    simulFixRDV = simul.fixRDV

    totalTrials = args.num_simulations * len(trialConditions)

    if args.save_simulations:
        save_simulations_to_csv(
            simulChoice, simulRT, simulValueLeft, simulValueRight, simulFixItem,
            simulFixTime, simulFixRDV, totalTrials)

    if args.save_figures:
        # Create pdf file to save figures.
        pp = PdfPages(
            "figures_" + str(optimD) + "_" + str(optimTheta) + "_" +
            str(optimSigma) + "_" + str(args.num_simulations) + ".pdf")

        # Generate choice and RT curves for real data (odd trials) and
        # simulations (generated from even trials).
        fig1 = generate_choice_curves(
            choice, valueLeft, valueRight, simulChoice, simulValueLeft,
            simulValueRight, totalTrials)
        pp.savefig(fig1)
        fig2 = generate_rt_curves(
            RT, valueLeft, valueRight, simulRT, simulValueLeft, simulValueRight,
            totalTrials)
        pp.savefig(fig2)
        pp.close()
Example #17
0
# Module-level logger, named after this module.
logger = logging.getLogger(__name__)


if __name__ == '__main__':

    parser = argparse.ArgumentParser()
    parser.add_argument('-mn', '--model_name', type=str, nargs='?',
                        default='fasttext_model.pkl',
                        help='the name of model')

    args = parser.parse_args()
    model_name = args.model_name

    # Load the test data set.
    # FIX: log messages corrected ("start load load" -> "start load data";
    # the segmentation step operates on test data, not train data).
    logger.info("start load data")
    test_data_df = load_data_from_csv(config.test_data_path)

    # Load the trained classifier(s) from disk.
    logger.info("start load model")
    classifier_dict = joblib.load(config.model_path + model_name)

    content_test = test_data_df['content']

    # Segment the raw test text into words.
    logger.info("start seg test data")
    content_test = seg_words(content_test)

    logger.info("complete seg test data")

    # Reshape into a single-column array for prediction.
    logger.info("prepare predict data format")
    test_data_format = np.asarray([content_test]).T
    logger.info("complete prepare predict format data")
Example #18
0
def main():
    """Iteratively estimate fixation-duration distributions for the aDDM.

    Loads experimental data, builds empirical fixation-duration histograms
    from even trials, then alternates between (1) generating simulations
    with the current distributions and (2) correcting the distributions for
    the fact that last fixations are interrupted by the decision (so their
    observed durations under-represent the underlying distribution).
    Finally generates simulations from the corrected distributions and
    optionally saves them to CSV.

    All parameters come from the command line (see the argparse setup
    below); the function returns None and communicates failures by printing
    the exception and returning early.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--bin-step",
                        type=int,
                        default=10,
                        help="Size of the bin step to be used in the fixation "
                        "distributions.")
    parser.add_argument("--max-fix-bin",
                        type=int,
                        default=3000,
                        help="Maximum fixation length to be used in the "
                        "fixation distributions.")
    parser.add_argument("--num-fix-dists",
                        type=int,
                        default=3,
                        help="Number of fixation distributions.")
    parser.add_argument("--num-iterations",
                        type=int,
                        default=3,
                        help="Number of iterations used to approximate the"
                        "true distributions.")
    parser.add_argument("--num-simulations",
                        type=int,
                        default=400,
                        help="Number of simulations to be generated per trial "
                        "condition.")
    parser.add_argument("--d",
                        type=float,
                        default=0.004,
                        help="aDDM parameter for generating simulations.")
    parser.add_argument("--sigma",
                        type=float,
                        default=0.07,
                        help="aDDM parameter for generating simulations.")
    parser.add_argument("--theta",
                        type=float,
                        default=0.25,
                        help="aDDM parameter for generating simulations.")
    parser.add_argument("--expdata-file-name",
                        type=str,
                        default="expdata.csv",
                        help="Name of experimental data file.")
    parser.add_argument("--fixations-file-name",
                        type=str,
                        default="fixations.csv",
                        help="Name of fixations file.")
    parser.add_argument("--save-simulations",
                        default=False,
                        action="store_true",
                        help="Save simulations to CSV.")
    parser.add_argument("--verbose",
                        default=False,
                        action="store_true",
                        help="Increase output verbosity.")
    args = parser.parse_args()

    # Time bins to be used in the fixation distributions. The last bin also
    # absorbs any durations beyond max_fix_bin (see the clamping with
    # len(bins) below).
    bins = range(args.bin_step, args.max_fix_bin + args.bin_step,
                 args.bin_step)

    # Load experimental data from CSV file.
    try:
        data = load_data_from_csv(args.expdata_file_name,
                                  args.fixations_file_name,
                                  useAngularDists=True)
    except Exception as e:
        print("An exception occurred while loading the data: " + str(e))
        return
    RT = data.RT
    choice = data.choice
    valueLeft = data.valueLeft
    valueRight = data.valueRight
    fixItem = data.fixItem
    fixTime = data.fixTime

    # Get empirical distributions from even trials.
    try:
        dists = get_empirical_distributions(valueLeft,
                                            valueRight,
                                            fixItem,
                                            fixTime,
                                            useOddTrials=False,
                                            useEvenTrials=True)
    except Exception as e:
        print("An exception occurred while getting empirical distributions: " +
              str(e))
        return
    probLeftFixFirst = dists.probLeftFixFirst
    distLatencies = dists.distLatencies
    distTransitions = dists.distTransitions
    distFixations = dists.distFixations

    # Trial conditions for generating simulations: every ordered pair of
    # distinct left/right orientations, with each orientation mapped onto
    # an item value in {0, 1, 2, 3} via |(|o| - 15) / 5|.
    orientations = range(-15, 20, 5)
    trialConditions = list()
    for oLeft in orientations:
        for oRight in orientations:
            if oLeft != oRight:
                vLeft = np.absolute((np.absolute(oLeft) - 15) / 5)
                vRight = np.absolute((np.absolute(oRight) - 15) / 5)
                trialConditions.append((vLeft, vRight))

    # Create original empirical distributions of fixations: a histogram over
    # the time bins, per fixation number and per value difference.
    empiricalFixDist = dict()
    for numFix in xrange(1, args.num_fix_dists + 1):
        empiricalFixDist[numFix] = dict()
        for valueDiff in xrange(-3, 4):
            empiricalFixDist[numFix][valueDiff] = dict()
            for bin in bins:
                empiricalFixDist[numFix][valueDiff][bin] = 0
            # Loop variable renamed from fixTime (original code) so the
            # data variable fixTime loaded above is not clobbered.
            for fixDuration in distFixations[numFix][valueDiff]:
                # Map a duration to its bin, clamping overflow durations
                # into the last bin.
                bin = args.bin_step * min(
                    (fixDuration // args.bin_step) + 1, len(bins))
                empiricalFixDist[numFix][valueDiff][bin] += 1

    # Normalize the distributions. Guard against empty histograms
    # (sumBins == 0) to avoid a ZeroDivisionError when distFixations has
    # no samples for some (numFix, valueDiff) pair; the normalization
    # inside the iteration loop below already has this guard.
    for numFix in xrange(1, args.num_fix_dists + 1):
        for valueDiff in xrange(-3, 4):
            sumBins = sum(empiricalFixDist[numFix][valueDiff].values())
            if sumBins > 0:
                for bin in bins:
                    empiricalFixDist[numFix][valueDiff][bin] = (
                        float(empiricalFixDist[numFix][valueDiff][bin]) /
                        float(sumBins))

    for it in xrange(args.num_iterations):
        if args.verbose:
            print("Iteration " + str(it + 1) + "/" + str(args.num_iterations))
        # Generate simulations using the current empirical distributions and the
        # model parameters.
        try:
            simul = run_simulations(probLeftFixFirst, distLatencies,
                                    distTransitions, empiricalFixDist,
                                    args.num_simulations, trialConditions,
                                    args.d, args.theta, args.sigma, bins,
                                    args.num_fix_dists)
        except Exception as e:
            print("An exception occurred while running simulations in " +
                  "iteration " + str(it) + ": " + str(e))
            return
        simulRT = simul.RT
        simulChoice = simul.choice
        simulValueLeft = simul.valueLeft
        simulValueRight = simul.valueRight
        simulFixItem = simul.fixItem
        simulFixTime = simul.fixTime
        simulFixRDV = simul.fixRDV
        simulUninterruptedLastFixTime = simul.uninterruptedLastFixTime

        # Per (fixation number, value difference, bin) counters for last
        # fixations and for all fixations.
        countLastFix = dict()
        countTotal = dict()
        for numFix in xrange(1, args.num_fix_dists + 1):
            countLastFix[numFix] = dict()
            countTotal[numFix] = dict()
            for valueDiff in xrange(-3, 4):
                countLastFix[numFix][valueDiff] = dict()
                countTotal[numFix][valueDiff] = dict()
                for bin in bins:
                    countLastFix[numFix][valueDiff][bin] = 0
                    countTotal[numFix][valueDiff][bin] = 0

        for trial in simulRT.keys():
            # Count all item fixations, except last. Value difference is
            # taken from the perspective of the fixated item (fixated minus
            # unfixated).
            fixUnfixValueDiffs = {
                1: simulValueLeft[trial] - simulValueRight[trial],
                2: simulValueRight[trial] - simulValueLeft[trial]
            }
            lastItemFixSkipped = False
            lastFixItem = -1
            numFix = 1
            # Walk fixations in reverse so the last item fixation can be
            # identified and skipped here, then counted separately below.
            # NOTE(review): numFix therefore indexes fixations from the END
            # of the trial, not from the start — confirm this matches the
            # intended meaning of the fixation-number dimension.
            for item, time in zip(simulFixItem[trial][::-1],
                                  simulFixTime[trial][::-1]):
                if not lastItemFixSkipped and (item == 1 or item == 2):
                    lastFixItem = item
                    lastItemFixSkipped = True
                    continue
                if item == 1 or item == 2:
                    bin = args.bin_step * min(
                        (time // args.bin_step) + 1, len(bins))
                    vDiff = fixUnfixValueDiffs[item]
                    countTotal[numFix][vDiff][bin] += 1
                    if numFix < args.num_fix_dists:
                        numFix += 1
            # Count last fixation, using its uninterrupted duration (how
            # long it would have lasted had the decision not cut it short).
            vDiff = fixUnfixValueDiffs[lastFixItem]
            bin = args.bin_step * min(
                (simulUninterruptedLastFixTime[trial] // args.bin_step) + 1,
                len(bins))
            countLastFix[numFix][vDiff][bin] += 1
            countTotal[numFix][vDiff][bin] += 1

        # Obtain true distributions of fixations: upweight each bin by the
        # inverse probability that a fixation in that bin was NOT the last
        # (interrupted) fixation.
        trueFixDist = dict()
        for numFix in xrange(1, args.num_fix_dists + 1):
            trueFixDist[numFix] = dict()
            for valueDiff in xrange(-3, 4):
                trueFixDist[numFix][valueDiff] = dict()
                for bin in bins:
                    probNotLastFix = 1
                    if countTotal[numFix][valueDiff][bin] > 0:
                        probNotLastFix = 1 - (
                            float(countLastFix[numFix][valueDiff][bin]) /
                            float(countTotal[numFix][valueDiff][bin]))
                    if probNotLastFix == 0:
                        # Every observed fixation in this bin was a last
                        # fixation; fall back to the empirical value rather
                        # than dividing by zero.
                        trueFixDist[numFix][valueDiff][bin] = (
                            empiricalFixDist[numFix][valueDiff][bin])
                    else:
                        trueFixDist[numFix][valueDiff][bin] = (
                            float(empiricalFixDist[numFix][valueDiff][bin]) /
                            float(probNotLastFix))
        # Normalize the distributions.
        for numFix in xrange(1, args.num_fix_dists + 1):
            for valueDiff in xrange(-3, 4):
                sumBins = sum(trueFixDist[numFix][valueDiff].values())
                if sumBins > 0:
                    for bin in bins:
                        trueFixDist[numFix][valueDiff][bin] = (
                            float(trueFixDist[numFix][valueDiff][bin]) /
                            float(sumBins))

        # Update empirical distributions using the current true distributions.
        empiricalFixDist = trueFixDist

    # Generate final simulations.
    try:
        simul = run_simulations(probLeftFixFirst, distLatencies,
                                distTransitions, empiricalFixDist,
                                args.num_simulations, trialConditions, args.d,
                                args.theta, args.sigma, bins,
                                args.num_fix_dists)
    except Exception as e:
        print("An exception occurred while running the final " +
              "simulations: " + str(e))
        return
    simulRT = simul.RT
    simulChoice = simul.choice
    simulValueLeft = simul.valueLeft
    simulValueRight = simul.valueRight
    simulFixItem = simul.fixItem
    simulFixTime = simul.fixTime
    simulFixRDV = simul.fixRDV
    simulUninterruptedLastFixTime = simul.uninterruptedLastFixTime

    if args.save_simulations:
        totalTrials = args.num_simulations * len(trialConditions)
        save_simulations_to_csv(simulChoice, simulRT, simulValueLeft,
                                simulValueRight, simulFixItem, simulFixTime,
                                simulFixRDV, totalTrials)