def run_self_paced_task():
    """
    Run loop of self-paced reading.
    """

    stimuli_csv = ut.load_file(SENTENCES, sep=",") #sentences with frequencies
    words = ut.load_file(WORDS, sep="\t")
    labels = ut.load_file(LABELS, sep="\t")
    activations = ut.load_file(ACTIVATIONS, sep="\t")
    DM = parser.decmem.copy()

    #prepare dictionaries to calculate spreading activation
    word_freq = {k: sum(g["FREQ"].tolist()) for k,g in words.groupby("WORD")} #this method sums up frequencies of words across all POS
    label_freq = labels.set_index('LABEL')['FREQ'].to_dict()

    if rank == 1 or rank == 2:
        condition = "subj_ext"
    elif rank == 3 or rank == 4:
        condition = "obj_ext"

    if rank == 1 or rank == 3:
        sent_nrs = range(1, 9)
    elif rank == 2 or rank == 4:
        sent_nrs = range(9, 17)

    while True:

        received_list = np.empty(4, dtype=float)
        comm.Recv([received_list, MPI.FLOAT], source=0, tag=rank)
        if received_list[0] == -1:
            break
        parser.model_parameters["latency_factor"] = received_list[0]
        parser.model_parameters["latency_exponent"] = received_list[1]
        parser.model_parameters["rule_firing"] = received_list[2]
        parser.model_parameters["buffer_spreading_activation"] = {"g": received_list[3]}

        final_times_in_s = np.array([0, 0, 0, 0, 0, 0], dtype=float)

        len_sen = 0

        for sent_nr in sent_nrs:
            subset_stimuli = stimuli_csv[stimuli_csv.label.isin([condition]) & stimuli_csv.item.isin([sent_nr])]
            try:
                times_in_s = rp.read(parser, sentence=subset_stimuli.word.tolist(), pos=subset_stimuli.function.tolist(), activations=activations, condition=str(condition),\
                    sent_nr=str(sent_nr), word_freq=word_freq, label_freq=label_freq, weight=received_list[3],\
                    decmem=DM, lexical=True, syntactic=True, visual=False, reanalysis=True, prints=False)
                final_times_in_s += times_in_s
                #print(sent_nr, "FS", times_in_s, flush=True)
            except Exception:  # skip sentences the parser fails on
                pass
            else:
                len_sen += 1
        comm.Send([np.array(final_times_in_s/len_sen), MPI.FLOAT], dest=0, tag=1) #len_sen - number of items
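
The worker loop above implements a simple protocol against rank 0: receive four parameters, reply with the mean reading times, stop on a -1 sentinel. For orientation, a minimal sketch of the matching master side (hypothetical; the rank-0 code is not part of this example):

import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
workers = range(1, comm.Get_size())
params = np.array([0.1, 0.5, 0.067, 50.0], dtype=float)  # lf, le, rule_firing, weight
for w in workers:
    comm.Send([params, MPI.FLOAT], dest=w, tag=w)   # one parameter draw per worker
means = np.empty(6, dtype=float)
for w in workers:
    comm.Recv([means, MPI.FLOAT], source=w, tag=1)  # mean reading times per region
for w in workers:
    comm.Send([np.full(4, -1.0), MPI.FLOAT], dest=w, tag=w)  # sentinel ends the loop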
Example #2
def get_avail_wires() -> list:
    bomb_dict = util.load_file(file_string)

    # the second argument of range() is exclusive, so range(1, 10) produces 1-9
    # bomb_dict holds all players plus 'bad_wire', so building the range before
    # pop('bad_wire') saves subtracting 1 from the length afterwards
    avail_wires = list(range(1, len(bomb_dict)))
    bomb_dict.pop('bad_wire')
    chosen_wires = list(bomb_dict.values())

    # return every wire in avail_wires that is not in chosen_wires
    return [wire for wire in avail_wires if wire not in chosen_wires]
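
For illustration, the wire bookkeeping on a hypothetical bomb_dict (the stored shape, players plus a 'bad_wire' entry, follows the comments above):

bomb_dict = {'alice': 2, 'bob': 0, 'carol': 3, 'bad_wire': 1}
avail_wires = list(range(1, len(bomb_dict)))   # [1, 2, 3]: one wire per player
bomb_dict.pop('bad_wire')
chosen_wires = list(bomb_dict.values())        # [2, 0, 3]
print([w for w in avail_wires if w not in chosen_wires])  # [1]: nobody has cut wire 1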
Example #3
def new_round() -> str:
    # returns a status message: a winner announcement if only one player
    # remains, otherwise a prompt for the next player
    bomb_dict = util.load_file(file_string)
    if len(bomb_dict) == 2:  # one player plus 'bad_wire'
        winner = get_players()[0]
        points.change_points(winner, 300, '+')
        return f'{winner} is the last man standing and has won 300 points!'
    else:
        bomb_dict = {x: 0 for x in bomb_dict}  # resets all values to 0
        bomb_dict['bad_wire'] = util.rng(1, len(bomb_dict) - 1)
        util.write_file(file_string, bomb_dict)
        return f'{get_players()[active_player_pos]}, you\'re up next. Cut one of these wires with !bomb cut: {bomb_squad.get_avail_wires()}'
Example #4
def evaluation_metrics(path, labels):
    pred_score = load_file(path_file=path)
    pred_score = np.array([float(score) for score in pred_score])
    labels = labels[:pred_score.shape[0]]

    y_pred = convert_to_binary(pred_score)
    acc = accuracy_score(y_true=labels, y_pred=y_pred)
    prc = precision_score(y_true=labels, y_pred=y_pred)
    rc = recall_score(y_true=labels, y_pred=y_pred)
    f1 = f1_score(y_true=labels, y_pred=y_pred)
    auc = roc_auc_score(y_true=labels, y_score=pred_score)

    print('Accuracy: %f -- Precision: %f -- Recall: %f -- F1: %f -- AUC: %f' %
          (acc, prc, rc, f1, auc))
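
convert_to_binary is not defined in this example; a usage sketch under the assumption that it simply thresholds scores at 0.5:

import numpy as np
from sklearn.metrics import accuracy_score

def convert_to_binary(scores, threshold=0.5):  # assumed helper, not from the source
    return (np.asarray(scores) >= threshold).astype(int)

labels = np.array([1, 0, 1, 1, 0])
scores = np.array([0.9, 0.2, 0.7, 0.4, 0.1])
print(accuracy_score(y_true=labels, y_pred=convert_to_binary(scores)))  # 0.8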
Example #5
    def open_node_from_file(self, filename, force=False, **kwargs):

        if not os.path.exists(filename):
            raise FileNotFoundError('Source not found: "{0}"\n'.format(os.path.basename(filename)))

        try:
            df, load_time = load_file(filename, **kwargs)
        except UnrecognizedFileTypeException:
            self.app.view.display_message('File extension not in (csv/p): {0}\n'.format(filename), type='error')
            raise NotImplementedError

        node_frame = NodeFrame(df=df, load_time=load_time, metadata={'filename':filename})
        node = self.create_node((node_frame,), parent=self.app.model.root, force=force)
        return node
Example #6
def choose_wire(player: str, num: int) -> str:
    global active_player_pos  # assigned below, so the global declaration is required
    bomb_dict = util.load_file(file_string)
    if num == bomb_dict['bad_wire']:
        bomb_dict.pop(player)
        util.write_file(file_string, bomb_dict)
        if active_player_pos > len(bomb_dict) - 1:
            active_player_pos -= len(bomb_dict) - 1
        return f'{player} blew up the bomb! cmonBruh'
    else:
        bomb_dict[player] = num
        util.write_file(file_string, bomb_dict)
        active_player_pos += 1
        if active_player_pos > len(bomb_dict) - 1:
            active_player_pos -= len(bomb_dict) - 1
        return f'{player} lives! Clap'
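
The repeated two-line wraparound rotates active_player_pos through the remaining players (len(bomb_dict) - 1 excludes the 'bad_wire' entry). Note that the strict '>' leaves the boundary case active_player_pos == len(bomb_dict) - 1 unwrapped even though valid indices stop one short of it; a modulo covers that edge as well:

active_player_pos %= len(bomb_dict) - 1  # same rotation, boundary case included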
Example #7
def get_GSM_data(param):
    nang = param["nori"]
    scale = param["scale"]
    fdist = param["fdist"]

    fname = "GSM_{}_{}_{}".format(scale, nang, fdist)

    imlist = glob.glob("/home/gbarello/data/BSDS300/images/*.jpg")

    Clist = []

    try:
        # reuse cached per-image filter samples if they exist
        Clist = utils.load_file("/home/gbarello/data/datasets/GSM_filters/" +
                                fname)
        LOG.log("Using pre-saved filters")

    except Exception:
        LOG.log("Measuring Filters")
        for i in imlist:
            Clist.append(proc.get_phased_filter_samples(i, nang, scale, fdist))
            LOG.log(i + "\t{}".format(len(Clist[-1])))

        utils.dump_file(Clist,
                        "/home/gbarello/data/datasets/GSM_filters/" + fname)

    # we want to sample from each image equally, so we find the list with the fewest entries
    mlen = min([len(c) for c in Clist])
    # randomise the list and concatenate them all into one list
    Clist = np.array([c[np.random.choice(range(len(c)), mlen)] for c in Clist])
    Clist = np.array([k for c in Clist for k in c])

    fac = np.array([IQR(Clist[:, :, :, 0]), IQR(Clist[:, :, :, 1])])

    Clist = Clist / np.array([[fac]])

    np.random.shuffle(Clist)

    return Clist, fac
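
The equal-sampling step seen in isolation: every per-image filter list is cut to the length of the shortest one by drawing random indices (with replacement, np.random.choice's default):

import numpy as np

lists = [np.arange(10), np.arange(7), np.arange(12)]
mlen = min(len(c) for c in lists)  # 7
subsampled = [c[np.random.choice(range(len(c)), mlen)] for c in lists]
print([len(c) for c in subsampled])  # [7, 7, 7]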
Example #8
verbose = 0
cycles = 0
mode = 0

for a in sys.argv[2:]:
    if a == '-v':
        verbose = 1
    elif a == '-p1':
        mode = 1
    elif a == '-q':
        mode = 0xE
    else:
        cycles = int(a)

if cycles < 0:
    sys_error("Number of cycles must be nonnegative ({}).".format(cycles))

core = core_sc.Core_SC()
utilities.load_file(core.I_Mem, sys.argv[1])
core.I_Mem.dump()
core.set_PC(core.I_Mem.get_starting_address())

core.I_Mem.set_verbose(verbose)
core.RF.set_verbose(verbose)
core.set_mode(mode)

actual_cycles = core.run(cycles)

core.RF.dump()
core.D_Mem.dump()
print("Number of cycles=%d" % actual_cycles)
Example #9
    def __init__(self):
        self.lottery_dict = util.load_file(file_string)
        self.init_val = 1000
        self.max_ticket_number = 5000
Example #10
def get_players() -> list:
    bomb_dict = util.load_file(file_string)
    bomb_dict.pop('bad_wire')
    return list(bomb_dict.keys())  # listed to match the -> list annotation
Example #11
def elim_player(player: str):
    bomb_dict = util.load_file(file_string)
    choose_wire(player, bomb_dict['bad_wire'])
Example #12
def run_reading_task():
    """
    Run loop of self paced reading.
    """
    def calculate_times(times_in_s, reanalyses, recall_failures,
                        prob_regression):
        """
        Helper to calculate first-pass times on the last region. The region
        spans several words, so a regression can be triggered; once it is,
        the first-pass RT stops accumulating.
        """
        final_time = times_in_s[0]

        number_of_regressions = 0

        recall = 1 - recall_failures[0]

        for i in range(1, len(times_in_s)):
            if reanalyses[i - 1] == 1:
                number_of_regressions += 1
            recall *= 1 - recall_failures[i]
            final_time += times_in_s[i] * (
                recall * (1 - prob_regression)**number_of_regressions
            )  #only non-regressed RTs are added
        return final_time

    stimuli_csv = ut.load_file(SENTENCES, sep=",")  #sentences with frequencies
    activations = ut.load_file(ACTIVATIONS, sep="\t")
    DM = parser.decmem.copy()

    if rank == 1 or rank == 2:
        condition = "gram_high"
    elif rank == 3 or rank == 4:
        condition = "gram_low"
    elif rank == 5 or rank == 6:
        condition = "ungram_high"
    elif rank == 7 or rank == 8:
        condition = "ungram_low"

    if rank % 2 == 1:
        sent_nrs = range(1, 12)
    else:
        sent_nrs = range(12, 23)

    used_activations = activations[activations.critical.isin(
        ["0", "1", "2", "3"])]

    del activations

    COUNTING = 0

    while True:
        received_list = np.empty(6, dtype=float)
        comm.Recv([received_list, MPI.FLOAT], source=0, tag=rank)
        if received_list[0] == -1:
            break
        parser.model_parameters["latency_factor"] = received_list[0]
        parser.model_parameters["latency_exponent"] = received_list[1]
        parser.model_parameters["rule_firing"] = received_list[2]
        parser.model_parameters["emma_preparation_time"] = received_list[3]
        prob_regression = received_list[4]
        threshold = received_list[5]

        del received_list
        #print(rank, condition, sent_nrs)

        final_times_in_s = np.array([0, 0, 0], dtype=float)
        regressions = np.array([0, 0, 0], dtype=float)

        len_sen = 0
        extra_prints = False

        for sent_nr in sent_nrs:
            subset_activations = used_activations[
                used_activations.condition.isin([condition])
                & used_activations.item.isin([sent_nr])]
            subset_stimuli = stimuli_csv[
                stimuli_csv.label.isin([condition])
                & stimuli_csv.item.isin([sent_nr]) & stimuli_csv.position.isin(
                    subset_activations.position.to_numpy())]
            try:
                if COUNTING % 200 == 0:
                    start = time.process_time()
                times_in_s, reanalyses, recall_failures = rp.read(parser, sentence=subset_stimuli.word.tolist(), pos=subset_stimuli.function.tolist(), activations=subset_activations, weight=10, threshold=threshold,\
                    decmem=DM.copy(), lexical=True, syntactic=True, visual=True, reanalysis=True, prints=False, extra_prints=extra_prints, condition=condition, sent_nr=sent_nr)
                if COUNTING % 200 == 0:
                    end = time.process_time()  # timing probe; 'end' is left unused here
                    start = time.process_time()

                regressions += np.array(
                    [
                        recall_failures[0] + prob_regression * reanalyses[0],
                        recall_failures[1] + prob_regression * reanalyses[1],
                        min(
                            1, 1 - binom.pmf(0,
                                             n=len(times_in_s[2:]),
                                             p=np.mean(recall_failures[2:])) +
                            1 - binom.pmf(
                                0, n=sum(reanalyses[2:]), p=prob_regression))
                    ],
                    dtype=float
                )  #calculate prob. of regressions for 3 regions in Staub: 0 (pre-critical), 1 (critical), 2 (all later words; post-critical)
                final_times_in_s += np.array([
                    times_in_s[0], times_in_s[1],
                    calculate_times(times_in_s[2:], reanalyses[2:],
                                    recall_failures[2:], prob_regression)
                ], dtype=float)
            except Exception:  # skip items the parser fails on
                pass
            else:
                len_sen += 1

        COUNTING += 1

        to_be_sent = np.append(final_times_in_s / len_sen,
                               regressions / len_sen)
        comm.Send([to_be_sent, MPI.FLOAT], dest=0,
                  tag=1)  #len_sen - number of items
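
A worked check of the calculate_times helper above, assuming prob_regression = 0.25 and one reanalysis flagged on the first word of the region:

times_in_s = [0.3, 0.25, 0.2]    # per-word first-pass times
reanalyses = [1, 0, 0]           # regression triggered after word 0
recall_failures = [0.0, 0.1, 0.1]
# word 0 always counts:                0.3
# word 1: 0.25 * (1 - 0.1) * 0.75    = 0.16875  (one prior regression)
# word 2: 0.2 * (1 - 0.1)**2 * 0.75  = 0.1215
# expected first-pass total:           0.59025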
Example #13
def join(player: str):
    bomb_dict = util.load_file(file_string)
    bomb_dict[player] = 0
    util.write_file(file_string, bomb_dict)
Example #14
def in_progress() -> bool:
    bomb_dict = util.load_file(file_string)
    return bomb_dict['value'] != 0
Example #15
def get_idle(player: str) -> bool:
    # returns True if the player has already made a choice this round (their
    # stored value is nonzero) and False otherwise
    bomb_dict = util.load_file(file_string)
    return bomb_dict[player] != 0
comm = MPI.COMM_WORLD
rank = int(comm.Get_rank())

N_GROUPS = comm.Get_size() - 1  # groups used for simulation: one fewer than the cores in use

if rank == 0: #master
    NDRAWS = int(sys.argv[1])
    CHAIN = int(sys.argv[2])
    NCHAINS = int(sys.argv[1])

    testval_std, testval_lf, testval_le, testval_rf, testval_weight = 25, 0.1, 0.5, 0.067, 50
    
    # this part collects last estimations if draws were run before
    try:
        past_simulations = ut.load_file("gg_6words_chain"+str(CHAIN)+"/chain-0.csv", sep=",")
    except Exception:  # no previous chain file; keep the default start values
        pass
    else:
        testval_lf = past_simulations['lf'].iloc[-1]
        testval_rf = past_simulations['rf'].iloc[-1]
        testval_le = past_simulations['le'].iloc[-1]
        testval_weight = past_simulations['weight'].iloc[-1]
        testval_std = past_simulations['std'].iloc[-1]

    # the model starts here
    parser_with_bayes = pm.Model()

    with parser_with_bayes:
        # prior for activation
        #decay = Uniform('decay', lower=0, upper=1) #currently, ignored because it leads to problems in sampling
            if prints:
                print("DRAWING TREE")
                print("********************************")
                # print(final_tree)
                # final_tree.pretty_print()
                final_tree.draw()

    return words_list, activations_list[:-1], wh_gaps_list, reanalysis_list, agreeing_actions_list[:-1], matching_fs_list[:-1], total_fan_list[:-1]


if __name__ == "__main__":
    stimuli_csv = ut.load_file(SENTENCES, sep=",")  #sentences with frequencies
    words = ut.load_file(WORDS, sep="\t")
    labels = ut.load_file(LABELS, sep="\t")
    actions = ut.load_file(ACTIONS, sep="\t")
    blind_actions = ut.load_file(BLIND_ACTIONS, sep="\t")
    DM = parser.decmem.copy()

    #prepare dictionaries to calculate spreading activation
    word_freq = {k: sum(g["FREQ"].tolist()) for k, g in words.groupby("WORD")}  #this method sums up frequencies of words across all POS
    label_freq = labels.set_index('LABEL')['FREQ'].to_dict()

    # Save the respective activations
    total = {
        "activation": [],
N_GROUPS = comm.Get_size() - 1  # groups used for simulation: one fewer than the cores in use

if rank == 0:  #master
    NDRAWS = int(sys.argv[1])
    CHAIN = int(sys.argv[2])

    testval_std, testval_lf, testval_le = 25, 0.1, 0.5

    testval_threshold, testval_emma_prep_time, testval_prob_regression = 0, 0.133, 0.25

    # collect previous values from the existing chains

    try:
        past_simulations = ut.load_file("staub_exp3_chain" + str(CHAIN) +
                                        "/chain-0.csv",
                                        sep=",")
    except Exception:  # no previous chain file; keep the default start values
        pass
    else:
        testval_lf = past_simulations['lf'].iloc[-1]
        testval_le = past_simulations['le'].iloc[-1]
        testval_threshold = past_simulations['threshold'].iloc[-1]
        testval_emma_prep_time = past_simulations['emma_prep_time'].iloc[-1]
        testval_prob_regression = past_simulations['prob_regression'].iloc[-1]
        testval_std = past_simulations['std'].iloc[-1]

    # the model starts here

    parser_with_bayes = pm.Model()
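
The model block is truncated here. For orientation, a minimal sketch of how the warm-start values collected above could seed the sampler, assuming Uniform priors like the commented-out decay prior in the first model (the bounds below are assumptions, not the author's model):

with parser_with_bayes:
    lf = pm.Uniform('lf', lower=0, upper=1, testval=testval_lf)            # assumed bounds
    le = pm.Uniform('le', lower=0, upper=2, testval=testval_le)            # assumed bounds
    threshold = pm.Uniform('threshold', lower=-5, upper=5, testval=testval_threshold)  # assumed bounds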