def random_pairs(
    how_many_pairs,
    *,
    isomorph_probability=0.5,
    non_isomorph_trivial_probability=0.25,
    non_isomorph_non_trivial_probability=0.25,
    **kwargs,
) -> Iterator[Tuple[CNF, CNF, int]]:
    """Iterate through pairs of CNFs generated based on the given probabilities."""
    # Normalize the three probabilities so they sum to 1.
    tot_probs = (isomorph_probability
                 + non_isomorph_trivial_probability
                 + non_isomorph_non_trivial_probability)
    isomorph_probability /= tot_probs
    non_isomorph_trivial_probability /= tot_probs
    non_isomorph_non_trivial_probability /= tot_probs
    funcs = [
        cnf_isomorphic_generator,
        cnf_generator_trivial,
        non_trivial_non_isomorphic_cnf_generator,
    ]
    funcs_probs = [
        isomorph_probability,
        non_isomorph_trivial_probability,
        non_isomorph_non_trivial_probability,
    ]
    for _ in range(how_many_pairs):
        original_cnf = random_cnf(**kwargs)
        func_idx = np_choice(3, p=funcs_probs)
        func_generator = funcs[func_idx]
        new_cnf = func_generator(original_cnf, **kwargs)
        yield original_cnf, new_cnf, func_idx
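# A minimal, self-contained sketch of the weighted-dispatch idiom used in
# random_pairs above, assuming np_choice aliases numpy.random.choice (the
# alias is not defined in this snippet, so the import is an assumption; the
# lambdas are hypothetical stand-ins for the generator functions):
from numpy.random import choice as np_choice

probs = [0.5, 0.25, 0.25]                                  # already normalized
funcs = [lambda x: x + 1, lambda x: 2 * x, lambda x: -x]   # stand-in generators
idx = np_choice(3, p=probs)          # omitting size yields a scalar index
print(idx, funcs[idx](10))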
def pick_move(self, pheromone, dist, visited):
    pheromone = np.copy(pheromone)
    pheromone[list(visited)] = 0
    row = pheromone**self.alpha * ((1.0 / dist)**self.beta)
    norm_row = row / row.sum()
    move = np_choice(self.all_inds, 1, p=norm_row)[0]
    return move
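# Hedged sketch of the roulette-wheel step that pick_move implements: zero out
# visited nodes, weight the rest by pheromone^alpha * (1/dist)^beta, normalize,
# and draw one index. Assumes np_choice = numpy.random.choice; the values are
# hypothetical.
import numpy as np
from numpy.random import choice as np_choice

pheromone = np.array([1.0, 2.0, 0.5, 4.0])
dist = np.array([1.0, 2.0, 4.0, 8.0])
alpha, beta = 1.0, 2.0
pheromone[[0]] = 0                    # pretend node 0 was already visited
row = pheromone**alpha * (1.0 / dist)**beta
p = row / row.sum()
move = np_choice(np.arange(4), 1, p=p)[0]
print(move)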
def add_node(self, trace, visibility, alpha, beta, q0, sensors):
    current_node = self.path[-1]
    next_nodes = self.next_nodes(current_node)
    if len(next_nodes) == 0:
        return False

    def score(node):
        edge = (sensors[current_node], sensors[node])
        return (trace[edge] ** alpha) * (visibility[edge] ** beta)

    if random() < q0:
        # Exploitation: take the best-scoring node. Fall back to the first
        # candidate if every score is zero, so we never append None.
        best_node = next_nodes[0]
        best_x = 0
        for next_node in next_nodes:
            x = score(next_node)
            if x > best_x:
                best_node = next_node
                best_x = x
        self.path.append(best_node)
    else:
        # Biased exploration: sample in proportion to the scores. The
        # probabilities must be passed via the keyword argument p; the third
        # positional argument of numpy.random.choice is `replace`.
        sum_x = sum(score(next_node) for next_node in next_nodes)
        probability_distribution = [score(next_node) / sum_x
                                    for next_node in next_nodes]
        draw = np_choice(next_nodes, 1, p=probability_distribution)
        self.path.append(draw[0])
    return True
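# Hedged sketch of the pseudo-random-proportional rule used above: with
# probability q0 exploit the best-scoring node, otherwise sample in proportion
# to the scores. Assumes np_choice = numpy.random.choice; the scores dict is a
# hypothetical node -> trace*visibility mapping.
from random import random
from numpy.random import choice as np_choice

scores = {5: 0.9, 7: 2.1, 9: 0.4}
q0 = 0.9
if random() < q0:
    nxt = max(scores, key=scores.get)                 # exploitation
else:
    nodes = list(scores)
    total = sum(scores.values())
    nxt = np_choice(nodes, p=[scores[n] / total for n in nodes])  # exploration
print(nxt)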
def gen_paths(self):
    length = len(self.distances_table)
    start = 0
    # Nodes to visit: every node except the start.
    nodes = list(range(1, length))
    paths = []
    paths1 = []
    for _ in range(self.ants_number):
        remaining = list(nodes)
        path = [start]
        # Draw nodes uniformly at random until none remain.
        while remaining:
            node = int(np_choice(remaining))
            remaining.remove(node)
            path.append(node)
        path.append(start)
        paths.append(path)
    # Turn each node sequence into a list of (from, to) edges.
    for path in paths:
        paths1.append([(path[j], path[j + 1]) for j in range(len(path) - 1)])
    print('the paths of the ' + str(self.ants_number) + ' ants that exist:')
    print(paths1)
    return paths1
def pick_next_path(self, prev, visited_edges):
    """Pick the next path leaving `prev`.

    Gathers all neighbouring unvisited paths and draws one with np_choice,
    weighted by pheromone and inverse length.

    :param prev: node the ant is currently on
    :param visited_edges: edges already travelled
    :return: the chosen path, or None if there is nowhere left to go
    """
    paths = get_neighbouring_unvisited_paths(prev, visited_edges)
    if len(paths) == 0:
        return None
    if len(paths) == 1:
        return paths[0]
    proba_incoming = np.array([
        path.pheromone**self.alpha * ((1.0 / path.length)**self.beta)
        for path in paths
    ])
    proba = proba_incoming / proba_incoming.sum()
    choice = np_choice(len(paths), 1, p=proba)[0]
    res_path = paths[choice]
    print(f"Result path: ({choice}) {res_path}")
    return res_path
def sample(self):
    """Samples a random batch of experiences, weighted by each experience's weight."""
    # Note: the weights are taken over non-None entries but the indices span
    # the whole memory, so this assumes self.memory holds no None entries.
    all_weights = [e.weight for e in self.memory if e is not None]
    s = sum(all_weights)
    idxs = np_choice(len(self.memory), self.batch_size,
                     p=[w / s for w in all_weights], replace=False)
    experiences = [self.memory[i] for i in idxs]
    # Convert the batch into tensors on the appropriate device.
    states = torch.from_numpy(
        np.vstack([e.state for e in experiences if e is not None])).float().to(self.device)
    actions = torch.from_numpy(
        np.vstack([e.action for e in experiences if e is not None])).float().to(self.device)
    rewards = torch.from_numpy(
        np.vstack([e.reward for e in experiences if e is not None])).float().to(self.device)
    next_states = torch.from_numpy(
        np.vstack([e.next_state for e in experiences if e is not None])).float().to(self.device)
    dones = torch.from_numpy(
        np.vstack([e.done for e in experiences if e is not None])).float().to(self.device)
    return (states, actions, rewards, next_states, dones)
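# Hedged sketch of the prioritized-sampling line in sample(): draw batch
# indices without replacement, with probability proportional to each stored
# weight (np_choice assumed to be numpy.random.choice; weights hypothetical):
from numpy.random import choice as np_choice

weights = [1.0, 3.0, 2.0, 4.0]
s = sum(weights)
idxs = np_choice(len(weights), 2, p=[w / s for w in weights], replace=False)
print(idxs)  # two distinct indices, higher weights favored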
def travel(self):
    """Makes the ant travel to the next vertex.

    Generates allowed moves and their probabilities, then makes a choice.
    """
    # Generate valid vertices.
    self.generate_allowed_moves()
    probabilities = np.array(list(
        map(self.get_probability, self.allowed_moves)), dtype='float64')
    self.validate_probabilities(probabilities)
    next_vertex = np_choice(self.allowed_moves, p=probabilities)
    # Add the next edge value to the total cost. If the previous edge was
    # heavier than the next one, subtract its value and re-add it 10x larger.
    if self.previous_vertex is not None:
        if self.aco.graph.matrix[self.previous_vertex, self.current_vertex] > \
                self.aco.graph.matrix[self.current_vertex, next_vertex]:
            self.total_cost -= self.aco.graph.matrix[self.previous_vertex,
                                                     self.current_vertex]
            self.total_cost += self.aco.graph.matrix[
                self.previous_vertex, self.current_vertex] * 10
    self.total_cost += self.aco.graph.matrix[self.current_vertex, next_vertex]
    # On the next move the ant can't go back to the previous or current vertex.
    if self.previous_vertex is not None:
        self.tabu_moves.append(self.previous_vertex)
    self.tabu_moves.append(self.current_vertex)
    # Set the new current vertex and update the previous one.
    self.visited_vertices.append(next_vertex)
    self.previous_vertex = self.current_vertex
    self.current_vertex = next_vertex
def pick_move(self, pheromone, dist, visited):
    pheromone = np.copy(pheromone)
    pheromone[list(visited)] = 0  # visited cities must not be visited again
    row = pheromone**self.alpha * ((1.0 / dist)**self.beta)
    norm_row = row / row.sum()
    move = np_choice(self.all_inds, 1, p=norm_row)[0]
    return move
def _chose_path(self):
    dirs = self._labirynth.dir_list(self._pos)
    neigbour_fields = self._labirynth.neigbour_fields(self._pos)
    max_pheromone = self._labirynth.find_max_pheromon()
    # Weight each neighbouring field by its pheromone level (+1 so that
    # fields with no pheromone still have a nonzero probability).
    weights = [
        (self._labirynth._labirynth[pos.y()][pos.x()] + 1) / (max_pheromone + 1)
        for pos in neigbour_fields
    ]
    # Normalize so the weights sum to 1.
    probabilities = [w / sum(weights) for w in weights]
    return np_choice(dirs, p=probabilities)
def next(self, left):
    if left is None:
        return r_choice(tuple(self._fragments.keys()))
    probabilities = self.probabilities(left)
    rights = tuple(probabilities.keys())
    rights_probabilities = tuple(probabilities[r] for r in rights)
    return np_choice(rights, p=rights_probabilities)
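# Hedged sketch of the Markov-style transition draw in next(): given a
# conditional distribution over successors, sample one key with its
# probability (np_choice assumed to be numpy.random.choice; the dict is a
# hypothetical P(right | left)):
from numpy.random import choice as np_choice

probabilities = {"cat": 0.7, "dog": 0.2, "fox": 0.1}
rights = tuple(probabilities)
print(np_choice(rights, p=tuple(probabilities[r] for r in rights)))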
def pickWeightedRand(self, sampleArray, weightsArray, pick_n, np=False):
    if np:
        # numpy.random.choice takes the weights via the keyword argument p
        # (its third positional argument is `replace`); p must sum to 1.
        picks = np_choice(sampleArray, pick_n, p=weightsArray)
    else:
        picks = r_choices(sampleArray, weightsArray, k=pick_n)
    return picks
def pickRand(self, pick_n, np=False):
    sampleArray = self.getArrayNums()
    if np:
        picks = np_choice(sampleArray, pick_n)
    else:
        picks = r.choices(sampleArray, k=pick_n)
    picks.sort()
    return picks
def pick_move(self, pheromone, dist, visited):
    # Copy so we do not mutate the caller's pheromone array.
    pheromone = np.copy(pheromone)
    # Zero the probability of nodes that have already been visited.
    pheromone[list(visited)] = 0
    row = pheromone**self.alpha * ((1.0 / dist)**self.beta)
    norm_row = row / row.sum()
    # Pick one node at random according to its probability; np_choice returns
    # an array, so index it to get the element.
    move = np_choice(self.all_inds, 1, p=norm_row)[0]
    return move
def get_weather(self):
    occurrence = np_choice(['rare', 'uncommon', 'common'], 1,
                           p=[0.1, 0.3, 0.6])[0]
    terrain = choice(self.terrain)
    weather_options = [
        weather for weather in terrain_tags[terrain]['weather']
        if weather['occurrence'] == occurrence
    ]
    return choice(weather_options)
def genRobSol(self):
    while True:
        cmpltBool, updateRobLst = self._mpdaUpdater.updateState()
        if cmpltBool:
            break
        nextUpdateRobTaskPairLst = []
        random.shuffle(updateRobLst)
        self.updateRobDic = dict()
        for robID in updateRobLst:
            self.updateRobDic[robID] = np.inf
        for robID in updateRobLst:
            rob = self.robotLst[robID]
            pheromoneLst = np.copy(self.taskPheromoneLst[robID][rob.taskID])
            pheromoneLst[self.encode[robID]] = 0
            row = np.zeros([self._taskNum])
            for taskID, pheromone in enumerate(pheromoneLst):
                if pheromone == 0:
                    continue
                eta = self.calHeuristic(robID, taskID, dummy=False)
                row[taskID] = pheromone**self.alpha * (eta**self.beta)
            row_sum = row.sum()
            if row_sum == 0:
                rob.stopBool = True
                break
            norm_row = row / row_sum
            next_taskID = np_choice(range(self._taskNum), 1, p=norm_row)[0]
            nextUpdateRobTaskPairLst.append(
                RobTaskPair(robID=robID, taskID=next_taskID))
            self.updateRobDic[robID] = next_taskID
        if len(nextUpdateRobTaskPairLst) == 0:
            continue
        if False not in self._mpdaUpdater.cmpltLst:
            break
        encode, fitness, nextUpdateRobTaskPairLst = self.fixSol(
            nextUpdateRobTaskPairLst, dummy=False)
        if fitness != 0:
            return encode, fitness, []
        self._mpdaUpdater.updateEncode(nextUpdateRobTaskPairLst)
    fitness = self.calFitness()
    encode = self.encode
    arrCmpltTaskLst = self._mpdaUpdater._arrCmpltTaskLst
    return encode, fitness, arrCmpltTaskLst
def pickWeightedRand(self, pick_n, score_type="overall", np=False):
    sampleArray = self.getArrayNums()
    weightsArray = self.getArrayScores(score_type)
    if np:
        # numpy.random.choice takes normalized weights via p and supports
        # sampling without replacement.
        picks = np_choice(sampleArray, pick_n, p=weightsArray, replace=False)
    else:
        # random.choices has no `replace` parameter; it always samples with
        # replacement.
        picks = r.choices(sampleArray, weightsArray, k=pick_n)
    picks.sort()
    return picks
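# The replacement semantics differ between the two libraries: random.choices
# always samples WITH replacement, while numpy.random.choice supports
# replace=False. A hedged sketch of the without-replacement path, with
# hypothetical items and normalized weights:
from numpy.random import choice as np_choice

items = [10, 20, 30, 40]
weights = [1, 1, 4, 2]
p = [w / sum(weights) for w in weights]
print(np_choice(items, 2, p=p, replace=False))  # two distinct items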
def pick_move(self, pheromone, dist, visited):
    pheromone = np.copy(pheromone)
    pheromone[list(visited)] = 0
    # Weight each node by pheromone and inverse distance.
    row = pheromone ** self.alpha * ((1.0 / dist) ** self.beta)
    norm_row = row / row.sum()
    # Pick one random index for the move.
    move = np_choice(self.all_inds, 1, p=norm_row)[0]
    return move
def pick_move(self, pheromone_exclude: np.ndarray,
              pheromone_include: np.ndarray, i: int) -> int:
    excluded = pheromone_exclude * (self.times_taken[0][i] /
                                    (self.n_ants * self.current_iteration))
    included = pheromone_include * (self.times_taken[1][i] /
                                    (self.n_ants * self.current_iteration))
    # Normalize the two weights into a probability pair.
    rows = np.array([excluded, included]) / (excluded + included)
    move = np_choice([0, 1], 1, p=rows)[0]
    return move
def _get_optimized_learning_step(self, partial_semantics):
    """Calculates the optimized learning step (OLS).

    If bootstrapping is enabled: draw bootstrap samples, compute the OLS for
    each, then use the configured criterion (median or mean) to select the
    final learning step.
    """
    if self.bootstrap_ols:
        weights = []
        size = self.target_vector.shape[0]
        for sample in range(self.bootstrap_ols_samples):
            # Resample indices with replacement.
            idx = np_choice(arange(size), size, replace=True)
            bootstrap_delta_target = copy(self.target_vector[idx]).astype(float)
            if self.champion:
                full_predictions = self.champion.neural_network.get_predictions()
                bootstrap_delta_target -= full_predictions[idx]
            bootstrap_partial_semantics = partial_semantics[idx]
            inverse = array(pinv(resize(bootstrap_partial_semantics,
                                        (1, bootstrap_partial_semantics.size))))
            ols = dot(inverse.transpose(), bootstrap_delta_target)[0]
            weights += [ols]
        ols_median = median(weights)
        ols_mean = mean(weights)
        ols = self._compute_ols(partial_semantics)
        abs_dif = abs(ols_median - ols_mean)
        if abs_dif >= self.high_absolute_ls_difference:
            self.high_absolute_differences_history.append(
                [abs_dif, ols_median, ols_mean, ols])
        if self.bootstrap_ols_criterion == 'median':
            return median(weights)
        else:
            return mean(weights)
    else:
        return self._compute_ols(partial_semantics)
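# Hedged, self-contained sketch of the bootstrap loop above: resample indices
# with replacement, compute a statistic per resample, and aggregate with the
# median or mean (np_choice assumed to be numpy.random.choice; the data and
# the mean statistic are hypothetical stand-ins for the OLS computation):
import numpy as np
from numpy.random import choice as np_choice

data = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
stats = [data[np_choice(np.arange(data.size), data.size, replace=True)].mean()
         for _ in range(200)]
print(np.median(stats), np.mean(stats))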
def pick_move(self, pheromone, dist, visited):
    pheromone = np.copy(pheromone)
    # Set the pheromone of visited nodes to zero so the ants do not go backwards.
    pheromone[list(visited)] = 0
    # Careful with alpha and beta values: large exponents may cause overflow
    # or underflow (i.e. e+16 == inf and e-16 == 0).
    row = pheromone**self.alpha * ((1.0 / dist)**self.beta)
    # Return to start_node after reaching the end of a path, i.e. when all
    # remaining probabilities are zero and there are no more places to go.
    if row.sum() == 0:
        return self.start_node
    norm_row = row / row.sum()
    move = np_choice(self.all_inds, 1, p=norm_row)[0]
    return move
def sample_multicomb(seq, l):
    """
    Sample uniformly from the set of all multisets of elements of seq with
    size l. Multisets are represented as sorted tuples.
    """
    # Sample a pattern with probability proportional to its number of
    # possible instantiations.
    pats = compute_patterns(len(seq), l)
    ps = [p for p, n in pats]
    ns = [n for p, n in pats]
    divisor = sum(ns)
    ns = [n / divisor for n in ns]
    draw = np_choice(range(len(ps)), 1, p=ns)[0]
    res_pat = ps[draw]
    res = instantiate_pattern(seq, res_pat)
    return tuple(sorted(res))
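# Hedged sketch of the pattern draw in sample_multicomb: pick an index with
# probability proportional to each pattern's instantiation count (np_choice
# assumed to be numpy.random.choice; the counts are hypothetical):
from numpy.random import choice as np_choice

counts = [3, 1, 6]
p = [c / sum(counts) for c in counts]
print(np_choice(range(len(counts)), 1, p=p)[0])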
def pick_next(self, prev_pheromone, prev_row, visited):
    """
    :param prev_pheromone (1D array): Previous row of the pheromone matrix.
    :param prev_row (1D array): Previous row of the distance matrix.
    :param visited (set): Set of visited nodes.
    :return (int): Next node, drawn according to the probabilities.
    """
    row_pheromone = np.copy(prev_pheromone)
    row_pheromone[list(visited)] = 0
    row = row_pheromone**self.alpha * ((1.0 / prev_row)**self.beta)
    norm_row = row / row.sum()
    # Named next_node to avoid shadowing the built-in next().
    next_node = np_choice(range(self.n), 1, p=norm_row)[0]
    return next_node
def pick_move(pheromone, dist, visited):
    pheromone = np.copy(pheromone)
    # Zero the pheromone of paths that have already been visited.
    pheromone[list(visited)] = 0
    # The ant decides which city to go to using this probability formula.
    row = pheromone**alpha * ((1.0 / dist)**beta)
    norm_row = row / row.sum()
    # Select the path at random according to the probabilities (p=probability);
    # np_choice returns an array, so take its first element.
    move = np_choice(all_inds, 1, p=norm_row)[0]
    return move
def genRobFirstActTrad(self):
    """Generate the first event (each robot's first task visit).

    :return: the list of first tasks, one per robot
    """
    robSeq = [x for x in range(self._robNum)]
    random.shuffle(robSeq)
    robInitVisitLst = [np.inf for x in range(self._robNum)]
    self.robInitVisitLst = [np.inf for x in range(self._robNum)]
    for robID in robSeq:
        row = np.zeros([self._taskNum])
        for taskID, pheromone in enumerate(self.robTaskPheromoneLst[robID]):
            eta = self.calHeuristic(robID, taskID, dummy=True)
            row[taskID] = pheromone**self.alpha * (eta**self.beta)
        row_sum = row.sum()
        norm_row = row / row_sum
        firstAct = np_choice(range(self._taskNum), 1, p=norm_row)[0]
        robInitVisitLst[robID] = firstAct
        self.robInitVisitLst[robID] = firstAct
    # This step must stay consistent with the subsequent generation method.
    nextUpdateRobTaskPairLst = []
    for robID, taskID in enumerate(robInitVisitLst):
        nextUpdateRobTaskPairLst.append(RobTaskPair(robID, taskID))
    encode, fitness, nextUpdateRobTaskPairLst = self.fixSol(
        nextUpdateRobTaskPairLst, dummy=True)
    robInitVisitLst = [x.taskID for x in nextUpdateRobTaskPairLst]
    return robInitVisitLst
def updatingLimited(self, nextUpdateRobTaskPairLst):
    updateRobLst = []
    for robID, taskID in nextUpdateRobTaskPairLst:
        updateRobLst.append(robID)
    curAccRobAbiLst = [0 for x in self._taskRateLst]
    for robID in range(self._robNum):
        if robID in updateRobLst:
            nextVisitTaskID = nextUpdateRobTaskPairLst[
                updateRobLst.index(robID)].taskID
        else:
            nextVisitTaskID = self._mpdaUpdater.encode[robID][-1]
        curAccRobAbiLst[nextVisitTaskID] += self._robAbiLst[robID]
    curTaskRateLst = []
    for taskID in range(self._taskNum):
        if curAccRobAbiLst[taskID] == 0:
            continue
        if self._mpdaUpdater.cmpltLst[taskID]:
            continue
        curTaskRateLst.append(
            (taskID, self._taskRateLst[taskID] - curAccRobAbiLst[taskID]))
    if len(curTaskRateLst) <= self.limitedNum:
        return nextUpdateRobTaskPairLst
    minTaskID, minCurTaskRate = min(curTaskRateLst, key=lambda x: x[1])
    limitedSet = set()
    for robID in range(self._robNum):
        if robID not in updateRobLst:
            taskID = self.encode[robID][-1]
            if self._mpdaUpdater.cmpltLst[taskID] == False:
                limitedSet.add(taskID)
    newVisitedTaskLst = []
    for robID, taskID in nextUpdateRobTaskPairLst:
        if taskID not in limitedSet:
            newVisitedTaskLst.append(taskID)
    # This can still be revised: how best to merge the elements of
    # newVisitedTaskLst into limitedSet. The extra emptiness check guards
    # against running out of candidates.
    random.shuffle(newVisitedTaskLst)
    while len(limitedSet) < self.limitedNum and newVisitedTaskLst:
        limitedSet.add(newVisitedTaskLst.pop(0))
    nextUpdateRobTaskPairLst = []
    for robID in updateRobLst:
        rob = self.robotLst[robID]
        pheromoneLst = np.copy(self.taskPheromoneLst[robID][rob.taskID])
        pheromoneLst[self.encode[robID]] = 0
        row = np.zeros([self._taskNum])
        for taskID, pheromone in enumerate(pheromoneLst):
            if taskID not in limitedSet:
                continue
            if pheromone == 0:
                continue
            eta = self.calHeuristic(robID, taskID, dummy=False)
            roadDur = (self._taskDisMat[rob.taskID][taskID] /
                       self._robVelLst[robID])
            predictArrTime = self.robotLst[robID].leaveTime + roadDur
            if predictArrTime > self.taskLst[taskID].cmpltTime:
                continue
            row[taskID] = pheromone**self.alpha * (eta**self.beta)
        row_sum = row.sum()
        if row_sum == 0:
            rob.stopBool = True
            break
        norm_row = row / row_sum
        next_taskID = np_choice(range(self._taskNum), 1, p=norm_row)[0]
        nextUpdateRobTaskPairLst.append(
            RobTaskPair(robID=robID, taskID=next_taskID))
    return nextUpdateRobTaskPairLst
def play(self, board, v=0):
    chosen = None
    if v:
        print("playing...")
    if board.pos['turn'] == "player_1":
        playable = [1, 2, 3, 4, 5, 6]
    else:
        playable = [8, 9, 10, 11, 12, 13]
    playable = [bin for bin in playable if board.pos[bin]]
    if len(playable) == 1:
        return playable[0]
    elif playable:
        odds = []
        for bin in playable:
            b_board = Board()
            b_board.pos = dict(board.pos)
            b_board.play(bin)
            pos = [b_board.pos[i] for i in range(14)]
            if board.pos['turn'] == "player_1":
                p = [0, 1]
            else:  # board.pos['turn'] == "player_2"
                p = [1, 0]
            # Odds of winning versus odds of losing.
            pos = array(pos).reshape(1, -1)
            my_odds = self.model.predict_proba(pos)[0][p[0]]
            opponents_odds = self.model.predict_proba(pos)[0][p[1]]
            if opponents_odds > 0.0:
                win_odds = my_odds / opponents_odds
            elif (my_odds - opponents_odds) <= 0.0:
                win_odds = 0.0
            else:
                win_odds = my_odds - opponents_odds
            odds.append(win_odds)
        # Normalize the odds to lie in [0, 1] ...
        min_, max_ = min(odds), max(odds)
        odds = [scale(x, (min_, max_), (0.0, 1.0)) for x in odds]
        # ... and rescale so their sum == 1.
        raw_sum = sum(odds)
        weights = [float(i) / float(raw_sum) for i in odds]
        if not self.monte_carlo:
            # Weighted choice.
            chosen = np_choice(playable, p=weights)
        if self.monte_carlo:
            # Monte Carlo playouts.
            end = time() + self.time_per_move
            monte_counts = defaultdict(list)
            mc_count = 0
            while time() < end:
                mc_count += 1
                # Throwaway dojo: two non-Monte-Carlo learners and a copy of
                # the current board position.
                p1 = LearnerBot(model=self.model, monte_carlo=False)
                p2 = LearnerBot(model=self.model, monte_carlo=False)
                monte_dojo = Dojo(p1=p1, p2=p2, csv_name="raw_games.csv",
                                  save=False)
                monte_dojo.board.pos = dict(board.pos)
                # Choose a move, play it, and let the dojo finish the game.
                chosen = np_choice(playable, p=weights)
                monte_dojo.board.play(chosen)
                monte_dojo.play_game()
                # Determine the winner from the two stores (pos[0] and pos[7]).
                if monte_dojo.board.pos[0] < monte_dojo.board.pos[7]:
                    winner = 'player_1'
                elif monte_dojo.board.pos[0] > monte_dojo.board.pos[7]:
                    winner = 'player_2'
                else:
                    winner = "tie"
                monte_counts[chosen].append(winner)
            monte_weights = dict(monte_counts)
            player = board.pos['turn']  # 'player_1' or 'player_2'
            opponent = list({'player_1', 'player_2'} - {player})[0]
            for bin, winner_list in monte_weights.items():
                opponent_wins = float(winner_list.count(opponent))
                my_wins = float(winner_list.count(player))
                if opponent_wins:
                    monte_weights[bin] = my_wins / opponent_wins
                else:
                    monte_weights[bin] = None
            # Bins with no recorded losses get the best numeric odds; if no
            # bin has numeric odds at all, fall back to 1.0. (None cannot be
            # compared to floats, so max() is taken over numeric values only.)
            numeric = [o for o in monte_weights.values() if o is not None]
            max_odds = max(numeric) if numeric else 1.0
            for bin in monte_weights:
                if monte_weights[bin] is None:
                    monte_weights[bin] = max_odds
            # Convert monte_weights into parallel lists of bins and ratios.
            possible_bins = list(monte_weights.keys())
            weight_list = list(monte_weights.values())
            # Normalize the odds to lie in [0, 1] ...
            min_, max_ = min(weight_list), max(weight_list)
            if min_ == max_:
                min_ = max_ - 1
            odds = [scale(x, (min_, max_), (0.0, 1.0)) for x in weight_list]
            # ... and rescale so their sum == 1.
            raw_sum = sum(odds)
            weights = [float(i) / float(raw_sum) for i in odds]
            chosen = np_choice(possible_bins, p=weights)
            if self.v:
                print(mc_count, 'game playouts')
            return chosen
    else:
        return None
    if v:
        print(chosen)
    return chosen
def get_word(word_list):
    """Choose a random word from a given list."""
    choice = np_choice(word_list)
    return choice, len(choice)
def do_run(run, trials):
    # Present instructions; if fMRI, wait for the scanner trigger.
    instruct1.draw()
    win.flip()
    logging.data('** START RUN %s **' % specific_run)
    event.waitKeys(keyList=('space'))
    instruct2.draw()
    win.flip()
    event.waitKeys(keyList=('space'))
    keyPress.draw()
    win.flip()
    event.waitKeys(keyList=('space'))
    instruct3.draw()
    win.flip()
    event.waitKeys(keyList=('space'))
    if run == 1:
        ITI = ITI_1
        sampling_durList = sampling_dur1
    if run == 2:
        ITI = ITI_2
        sampling_durList = sampling_dur2
    for trial in trials:
        # Trial logic: show stimuli, get the response, add data to 'trial'.
        idx = trials.thisIndex
        sampling_dur = sampling_durList[idx]
        ITItime = ITI[idx]
        core.checkPygletDuringWait = False
        ### RANDOMIZATION
        # Note: in the fmriProblemSet_20180802 file the OUTCOME and
        # PROBABILITIES columns had different naming conventions. As of
        # 20190114 the PROBABILITIES names were renamed to match the OUTCOME
        # names; they are streamlined here.
        prob1_1 = float(trial['P1_1'])  # probability 1 for option 1
        prob2_1 = float(trial['P2_1'])  # probability 2 for option 1
        prob1_2 = float(trial['P1_2'])  # probability 1 for option 2
        prob2_2 = float(trial['P2_2'])  # probability 2 for option 2
        if '-' not in trial['O1_1'] and trial['O1_1'] != '0':  # O1_1
            O1_1 = '+%s' % (trial['O1_1'])
        else:
            O1_1 = trial['O1_1']
        if '-' not in trial['O1_2'] and trial['O1_2'] != '0':  # O1_2
            O1_2 = '+%s' % (trial['O1_2'])
        else:
            O1_2 = trial['O1_2']
        if '-' not in trial['O2_1'] and trial['O2_1'] != '0':  # O2_1
            O2_1 = '+%s' % (trial['O2_1'])
        else:
            O2_1 = trial['O2_1']
        if '-' not in trial['O2_2'] and trial['O2_2'] != '0':  # O2_2
            O2_2 = '+%s' % (trial['O2_2'])
        else:
            O2_2 = trial['O2_2']
        Prob1 = {'p': [prob1_1, prob2_1], 'out': [O1_1, O2_1]}
        Prob2 = {'p': [prob1_2, prob2_2], 'out': [O1_2, O2_2]}
        sampling_RT_list = []
        sampling_resp_list = []
        sampling_lr_list = []
        sampling_outcome_list = []
        sampling_respOnset_list = []
        samplingCount = 0
        safeCount = 0
        riskyCount = 0
        switchCount = 0
        leftRight = random.randint(0, 1)  # if 0, P1 is on the left, P2 on the right
        trialStartTime = globalClock.getTime()
        trials.addData('trialStartTime', trialStartTime)
        logging.data('Trial Onset - ProblemType: %s, ProblemNumber: %s, '
                     'leftRight: %s, samplingDur: %s, assignedITI: %s'
                     % (trial['ProbType'], trial['ProbNumber'], leftRight,
                        sampling_dur, ITItime))
        timer.reset()
        # https://github.com/alishir/IGT_net/blob/master/igt_psychtoolbox/igt_mri.m
        logging.data('Sampling Onset')
        while timer.getTime() < sampling_dur:
            samplingOnset = timer.getTime()
            # Set the probabilities and draw an outcome for each subtrial.
            draw1 = np_choice(a=Prob1['out'], size=1, p=Prob1['p'])
            draw2 = np_choice(a=Prob2['out'], size=1, p=Prob2['p'])
            box1.draw()
            box2.draw()
            press1.draw()
            press2.draw()
            win.flip()
            if DEBUG:
                resp_val = random.randint(1, 2)
                rt_onset = globalClock.getTime()
                logging.data('DEBUG MODE Sampled Response: %s' % resp_val)
                core.wait(.5)
            else:
                resp = event.getKeys(keyList=responseKeys,
                                     timeStamped=globalClock)
                resp_val = None
                prev_resp = None
                if len(resp) > 0:
                    resp_val = int(resp[0][0])
                    rt_onset = resp[0][1]
                    sampling_respOnset_list.append(rt_onset)
                    if samplingCount == 0:
                        sampling_RT_list.append(rt_onset - trialStartTime)
                    else:
                        sampling_RT_list.append(rt_onset - prev_rt)
                    if samplingCount != 0:
                        # There is no previous response for the first sample.
                        prev_resp = sampling_resp_list[-1]
                        prev_rt = sampling_respOnset_list[-1]
            if resp_val == 1:
                samplingCount += 1
                # Decide which outcome to display.
                if leftRight == 0:  # P1 on the left, P2 on the right
                    out1.setText(draw1[0])
                    response = 1
                else:  # P1 on the right, P2 on the left
                    out1.setText(draw2[0])
                    response = 2
                if response == int(trial['SafeOption']):
                    safeCount += 1
                else:
                    riskyCount += 1
                if prev_resp and response != prev_resp:
                    switchCount += 1
                box1.draw()
                box2.draw()
                press1.draw()
                press2.draw()
                out1.draw()
                win.flip()
                core.wait(feedback_dur)
                # NOTE: draw1[0] is logged even when leftRight == 1 displayed draw2[0].
                logging.data('Sampled Response: %s, Outcome Shown: %s'
                             % (response, draw1[0]))
                sampling_resp_list.append(response)
                sampling_lr_list.append(leftRight)
                sampling_outcome_list.append(draw1[0])
            if resp_val == 2:
                samplingCount += 1
                if leftRight == 0:
                    out2.setText(draw2[0])
                    response = 2
                else:
                    out2.setText(draw1[0])
                    response = 1
                if response == int(trial['SafeOption']):
                    safeCount += 1
                else:
                    riskyCount += 1
                if prev_resp and response != prev_resp:
                    switchCount += 1
                box1.draw()
                box2.draw()
                press1.draw()
                press2.draw()
                out2.draw()
                win.flip()
                core.wait(feedback_dur)
                logging.data('Sampled Response: %s, Outcome Shown: %s'
                             % (response, draw2[0]))
                sampling_resp_list.append(response)
                sampling_lr_list.append(leftRight)
                sampling_outcome_list.append(draw2[0])
            event.clearEvents()
            if timer.getTime() >= sampling_dur - .5:
                logging.data('** broke sampling loop (<.5s left in trial) **')
                break
        for idx in range(samplingCount):
            sampling_file.writerow([
                trials.thisIndex + 1,
                sampling_resp_list[idx],
                sampling_lr_list[idx],
                sampling_outcome_list[idx],
                sampling_RT_list[idx],
                sampling_respOnset_list[idx]])
        samplingEndTime = globalClock.getTime()
        sampling_pad = sampling_dur - timer.getTime()
        trials.addData('samplingCount_total', samplingCount)
        trials.addData('samplingCount_risky', riskyCount)
        trials.addData('samplingCount_safe', safeCount)
        trials.addData('switchCount', switchCount)
        trials.addData('samplingDur_assigned', sampling_dur)
        trials.addData('samplingDur_total', samplingEndTime - trialStartTime)
        trials.addData('samplingEndTime', samplingEndTime)
        timer.reset()
        box1.setLineColor('white')
        box2.setLineColor('white')
        thinkOnset = globalClock.getTime()
        logging.data('samplingCount: %s' % samplingCount)
        logging.data('Think Onset')
        trials.addData('thinkOnset', thinkOnset)
        while timer.getTime() < think_dur:
            box1.draw()
            box2.draw()
            think.draw()
            win.flip()
        box1.draw()
        box2.draw()
        press1.draw()
        press2.draw()
        decide.draw()
        win.flip()
        timer.reset()
        event.clearEvents()
        answer = 0
        response = []
        decide_onset = globalClock.getTime()
        logging.data('Decide Onset')
        trials.addData('decideOnset', decide_onset)
        timer.reset()
        while timer.getTime() < decision_dur:
            if DEBUG:
                resp = [1]
                resp_val = random.randint(1, 2)
                logging.data('DEBUG MODE Decision Response: %s' % resp_val)
                core.wait(.5)
            else:
                resp = event.getKeys(keyList=responseKeys)
            if len(resp) > 0:
                resp_onset = globalClock.getTime()
                rt = resp_onset - decide_onset
                answer = 1
                if not DEBUG:
                    resp_val = int(resp[0])
                logging.data('Decision Button Press: %s' % resp)
                if resp_val == 1:
                    box1.setLineColor('red')
                    if leftRight == 0:
                        # response is 1 for P1/option1 and 2 for P2/option2, always.
                        response = 1
                    else:
                        response = 2
                if resp_val == 2:
                    box2.setLineColor('red')
                    if leftRight == 0:
                        response = 2
                    else:
                        response = 1
                if response == int(trial['SafeOption']):
                    # safeChoice is 0 if the safe option was chosen, 1 if risky.
                    safeChoice = 0
                else:
                    safeChoice = 1
                logging.data('Decision Response: %s' % response)
                box1.draw()
                box2.draw()
                press1.draw()
                press2.draw()
                win.flip()
                core.wait(feedback_dur)
                decide_pad = decision_dur - rt - .3
                decideDur_total = resp_onset - trialStartTime + .3
                break
        if answer == 0:
            response = 'NA'
            resp_val = 'NA'
            resp_onset = 'NA'
            rt = 'NA'
            decide_pad = 'NA'
            safeChoice = 'NA'
            decideDur_total = decision_dur
        trials.addData('leftRight', leftRight)
        trials.addData('resp', response)
        trials.addData('safeChoice', safeChoice)  # 0 if safe, 1 if risky
        trials.addData('resp_onset', resp_onset)
        trials.addData('rt', rt)
        trials.addData('decideDur_total', decideDur_total)
        # Reset the box colors and clear the outcome text.
        box1.setLineColor('white')
        box2.setLineColor('white')
        out1.setText()
        out2.setText()
        timer.reset()
        # ITI
        ITI_onset = globalClock.getTime()
        logging.data('ITI Onset')
        if answer == 0:
            totalITI = ITItime + sampling_pad
        else:
            totalITI = ITItime + decide_pad + sampling_pad
        while timer.getTime() < totalITI:
            fixation.draw()
            win.flip()
        trials.addData('assignedITI', ITItime)
        trials.addData('sampling_pad', sampling_pad)
        trials.addData('decide_pad', decide_pad)
        trials.addData('totalITI', totalITI)
        trials.addData('ITI_onset', ITI_onset)
        trialEndTime = globalClock.getTime()
        trials.addData('TrialEndTime', trialEndTime)
        logging.data('Trial End Time - totalITI: %s' % totalITI)
        timer.reset()
        event.clearEvents()
    trials.saveAsWideText(fileName=log_file.format(subj_id, subj_id, run),
                          delim=',', appendFile=True)
    logging.data('*****END RUN %s*****' % specific_run)