def __depth_limited__maxValue(self, current_node, alpha, beta, maxDepth):
    parent_node = copy.deepcopy(current_node)
    v = -infinity
    track_of_child_nodes = []
    # try each of the 7 columns, collecting the resulting child boards
    for j in range(7):
        current_state = self.playPiece(j)
        if current_state is not None:
            track_of_child_nodes.append(self.gameBoard)
            self.gameBoard = copy.deepcopy(parent_node)
    if track_of_child_nodes == [] or maxDepth == 0:
        self.countScore1(self.gameBoard)
        return self.__eval_function(self.gameBoard)
    for child in track_of_child_nodes:
        self.gameBoard = copy.deepcopy(child)
        v = max(v, self.__depth_limited_minValue(child, alpha, beta, maxDepth - 1))
        if v >= beta:
            return v
        alpha = max(alpha, v)
    return v
def test_tracking_step_energy_offset(lattice, parametr, update_ref_values=False):
    """Tracking step function test

    :parametr=0 - tracking with default MethodTM()
    :parametr=1 - tracking without MethodTM() - global_method = SecondTM
    """
    p = Particle(x=0.0, p=0.02)
    navi = Navigator(lattice[parametr])
    dz = 0.01
    P = [copy.deepcopy(p)]
    n_end = int(lattice[parametr].totalLen / dz)
    for iii in range(n_end):
        tracking_step(lattice[parametr], [p], dz=dz, navi=navi)
        P.append(copy.deepcopy(p))
    P = obj2dict(P)
    if update_ref_values:
        return P
    p_ref = json_read(REF_RES_DIR + sys._getframe().f_code.co_name + str(parametr) + '.json')
    result = check_dict(P, p_ref, TOL, 'absotute', assert_info=' P - ')
    assert check_result(result)
def alpha_beta_decision(self, current_node):
    current_state = copy.deepcopy(current_node)
    # NOTE: only columns 0-5 are searched here, as in the original source;
    # a standard Connect Four board has 7 columns.
    for i in range(6):
        if self.playPiece(i) is not None:
            if self.pieceCount == 42:
                self.gameBoard = copy.deepcopy(current_state)
                return i
            # utility_list is assumed to be a module-level dict shared with the caller
            v = self.minValue(self.gameBoard, -infinity, infinity)
            utility_list[i] = v
            self.gameBoard = copy.deepcopy(current_state)
    max_util_value = max(utility_list.values())
    for i in range(6):
        if i in utility_list and utility_list[i] == max_util_value:
            return i
def minValue(self, current_node, alpha, beta):
    parent_node = copy.deepcopy(current_node)
    if self.currentTurn == 1:
        opponent = 2
    elif self.currentTurn == 2:
        opponent = 1
    v = infinity
    track_of_child_nodes = []
    for j in range(6):
        current_state = self.checkPiece(j, opponent)
        if current_state is not None:
            track_of_child_nodes.append(self.gameBoard)
            self.gameBoard = copy.deepcopy(parent_node)
    if track_of_child_nodes == []:
        self.countScore1(self.gameBoard)
        return self.player1Score - self.player2Score
    for child in track_of_child_nodes:
        self.gameBoard = copy.deepcopy(child)
        v = min(v, self.maxValue(child, alpha, beta))
        if v <= alpha:
            return v
        beta = min(beta, v)
    return v
def depth_limited_minValue(self, current_node, alpha, beta, maxDepth):
    # make a deep copy so child boards can be checked against the parent
    parent_node = copy.deepcopy(current_node)
    if self.currentTurn == 1:
        opponent = 2
    elif self.currentTurn == 2:
        opponent = 1
    v = infinity
    track_of_child_nodes = []
    for j in range(7):
        current_state = self.checkPiece(j, opponent)
        if current_state is not None:
            track_of_child_nodes.append(self.gameBoard)
            self.gameBoard = copy.deepcopy(parent_node)
    if track_of_child_nodes == [] or maxDepth == 0:
        self.countScore1(self.gameBoard)
        return self.eval_function(self.gameBoard)
    for child in track_of_child_nodes:
        self.gameBoard = copy.deepcopy(child)
        v = min(v, self.depth_limited_maxValue(child, alpha, beta, maxDepth - 1))
        if v <= alpha:
            return v
        beta = min(beta, v)
    return v
def maxValue(self, current_node, alpha, beta):
    parent_node = copy.deepcopy(current_node)
    v = -infinity
    track_of_child_nodes = []
    for j in range(6):
        current_state = self.playPiece(j)
        if current_state is not None:
            track_of_child_nodes.append(self.gameBoard)
            self.gameBoard = copy.deepcopy(parent_node)
    if track_of_child_nodes == []:
        # terminal position: score it directly
        self.countScore1(self.gameBoard)
        return self.player1Score - self.player2Score
    for child in track_of_child_nodes:
        self.gameBoard = copy.deepcopy(child)
        v = max(v, self.minValue(child, alpha, beta))
        if v >= beta:
            return v
        alpha = max(alpha, v)
    return v
def minValue(self, current_node, alpha, beta):
    parent_node = copy.deepcopy(current_node)
    if self.currentTurn == 1:
        opponent = 2
    elif self.currentTurn == 2:
        opponent = 1
    v = infinity
    track_of_child_nodes = []
    # collect one child board per playable column for the opponent
    for j in range(6):
        current_state = self.checkPiece(j, opponent)
        if current_state is not None:
            track_of_child_nodes.append(self.gameBoard)
            self.gameBoard = copy.deepcopy(parent_node)
    if track_of_child_nodes == []:
        # no moves left: return the final score for the minimizing player
        self.countScore1(self.gameBoard)
        return self.player1Score - self.player2Score
    for child in track_of_child_nodes:
        self.gameBoard = copy.deepcopy(child)
        v = min(v, self.maxValue(child, alpha, beta))
        if v <= alpha:
            return v
        beta = min(beta, v)
    return v
def __depth_limited_minValue(self, current_node, alpha, beta, maxDepth):
    parent_node = copy.deepcopy(current_node)
    if self.currentTurn == 1:
        opponent = 2
    elif self.currentTurn == 2:
        opponent = 1
    v = infinity
    track_of_child_nodes = []
    for j in range(7):
        current_state = self.__checkPiece(j, opponent)
        if current_state is not None:
            track_of_child_nodes.append(self.gameBoard)
            self.gameBoard = copy.deepcopy(parent_node)
    if track_of_child_nodes == [] or maxDepth == 0:
        self.countScore1(self.gameBoard)
        return self.__eval_function(self.gameBoard)
    for child in track_of_child_nodes:
        self.gameBoard = copy.deepcopy(child)
        v = min(v, self.__depth_limited__maxValue(child, alpha, beta, maxDepth - 1))
        if v <= alpha:
            return v
        beta = min(beta, v)
    return v
def depth_limited_alpha_beta_pruning(self, current_node, maxDepth):
    current_state = copy.deepcopy(current_node)
    for i in range(7):
        if self.playPiece(i) is not None:
            if self.pieceCount == 42 or maxDepth == 0:
                self.gameBoard = copy.deepcopy(current_state)
                return i
            v = self.depth_limited_minValue(self.gameBoard, -infinity, infinity, maxDepth - 1)
            utility_list[i] = v
            self.gameBoard = copy.deepcopy(current_state)
    # pick the column whose subtree achieved the best minimax value
    max_util_value = max(utility_list.values())
    for i in range(7):
        if i in utility_list and utility_list[i] == max_util_value:
            utility_list.clear()
            return i
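# A self-contained sketch (not from the original sources) of the depth-limited
# alpha-beta pattern used in the game-search snippets above, on a toy game
# tree: a node is a list of children, a leaf is its score, and hitting depth 0
# on an internal node falls back to a placeholder evaluation, the way
# __eval_function is used above.
import math

def ab_max(node, alpha, beta, depth):
    if not isinstance(node, list):
        return node                # terminal leaf: exact score
    if depth == 0:
        return 0                   # stand-in for an evaluation function
    v = -math.inf
    for child in node:
        v = max(v, ab_min(child, alpha, beta, depth - 1))
        if v >= beta:
            return v               # beta cutoff: MIN would never allow this line
        alpha = max(alpha, v)
    return v

def ab_min(node, alpha, beta, depth):
    if not isinstance(node, list):
        return node
    if depth == 0:
        return 0
    v = math.inf
    for child in node:
        v = min(v, ab_max(child, alpha, beta, depth - 1))
        if v <= alpha:
            return v               # alpha cutoff
        beta = min(beta, v)
    return v

tree = [[3, 5], [2, [9, 1]], [6]]
print(ab_max(tree, -math.inf, math.inf, 3))  # prints 6; the [9, 1] subtree is pruned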
def __deepcopy__(self, memo=None):
    if memo is None:
        memo = {}
    result = self.__class__()
    memo[id(self)] = result
    for key, value in dict.items(self):
        dict.__setitem__(result, copy.deepcopy(key, memo),
                         copy.deepcopy(value, memo))
    return result
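# Hedged usage sketch: a minimal dict subclass wired to the __deepcopy__ hook
# above, showing that both keys and values are copied recursively. The class
# name CopyableDict is invented for illustration.
import copy

class CopyableDict(dict):
    def __deepcopy__(self, memo=None):
        if memo is None:
            memo = {}
        result = self.__class__()
        memo[id(self)] = result  # register early to survive reference cycles
        for key, value in dict.items(self):
            dict.__setitem__(result, copy.deepcopy(key, memo),
                             copy.deepcopy(value, memo))
        return result

d = CopyableDict()
d['xs'] = [1, 2, 3]
d2 = copy.deepcopy(d)
d2['xs'].append(4)
print(d['xs'], d2['xs'])  # [1, 2, 3] [1, 2, 3, 4] -- the values are independent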
def lonSplit(self, lon=0):
    """Returns 2 bounding boxes from the current one, split at a longitude."""
    if lon < self.w or self.e < lon:
        raise RuntimeError("Splitting longitude not within the bounding box")
    left = copy.deepcopy(self)
    left.e = lon
    right = copy.deepcopy(self)
    right.w = lon
    return left, right
def timeSplit(self, t=None):
    """Returns 2 bounding boxes, split at a certain time point."""
    if t is None:  # `if not t` would also reject a legitimate t == 0
        t = self.ts + (self.te - self.ts) / 2
    if t < self.ts or self.te < t:
        raise RuntimeError("Splitting Time dimension not within timerange")
    before = copy.deepcopy(self)
    before.te = t
    after = copy.deepcopy(self)
    after.ts = t
    return before, after
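# Hedged usage sketch for the two split methods above. BBox here is an invented
# stand-in carrying only the fields the methods touch (w/e for longitude,
# ts/te for time); the real class presumably has more state, which deepcopy
# carries over unchanged into both halves.
import copy

class BBox(object):
    def __init__(self, w, e, ts, te):
        self.w, self.e, self.ts, self.te = w, e, ts, te

    def lonSplit(self, lon=0):
        if lon < self.w or self.e < lon:
            raise RuntimeError("Splitting longitude not within the bounding box")
        left = copy.deepcopy(self)   # both halves start as full copies...
        left.e = lon
        right = copy.deepcopy(self)
        right.w = lon                # ...then only the split edge is moved
        return left, right

box = BBox(w=-10, e=30, ts=0, te=100)
west, east = box.lonSplit(lon=5)
print(west.w, west.e, east.w, east.e)  # -10 5 5 30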
def bfs_search(state, dfs_route):
    stateQueue = PriorityQueue()
    if not fit_regular(state):
        return None
    new_state = copy.deepcopy(state)
    new_state.sol_matrix = np.where(new_state.sol_matrix > 0, True, False)
    if checkSolution(new_state):
        return new_state
    stateQueue.put((len((np.where(state.isfilled_matrix == False))[0]), state))
    while not stateQueue.empty():
        notfilled_num, state = stateQueue.get()
        # pick the first row on the route that still has unfilled cells
        row_branch = None
        for row_positions in dfs_route:
            if False in state.isfilled_matrix[row_positions[1]]:
                state.isfilled_matrix[row_positions[1]] = True
                row_branch = row_positions
                break
        if row_branch is None:
            continue
        for row in row_branch[2]:
            # Cut useless branch: skip candidate rows that contradict cells
            # already fixed in the solution matrix
            continue_flag = False
            for idx, val in enumerate(state.sol_matrix[row_branch[1]]):
                if val != 0 and val != row[idx]:
                    continue_flag = True
                    break
            if continue_flag:
                continue
            new_state = copy.deepcopy(state)
            new_state.sol_matrix[row_branch[1]] = row
            if fit_regular(new_state):
                notfilled_num = len((np.where(new_state.isfilled_matrix == False))[0])
                if settings.DEBUGG:
                    print notfilled_num, " size: ", stateQueue.qsize()
                if notfilled_num == 0:
                    new_state.sol_matrix = np.where(new_state.sol_matrix > 0, True, False)
                    if checkSolution(new_state):
                        del stateQueue
                        return new_state
                    else:
                        if settings.DEBUGG:
                            print "error"
                        del new_state
                        continue
                stateQueue.put((notfilled_num, new_state))
    return None
def dfs_search(state, d, dfs_route):
    # apply constraint propagation until the matrices stop changing
    while True:
        old_sol_matrix = copy.deepcopy(state.sol_matrix)
        old_row_run_matrix = copy.deepcopy(state.row_run_matrix)
        old_col_run_matrix = copy.deepcopy(state.col_run_matrix)
        try:
            ret = regular(state)
            if not ret[0]:
                return None
        except:
            return None
        if (old_sol_matrix == state.sol_matrix).all() and \
                (old_row_run_matrix == state.row_run_matrix).all() and \
                (old_col_run_matrix == state.col_run_matrix).all():
            break
    if len(np.where(state.isfilled_matrix == False)[0]) == 0:
        state.sol_matrix = np.where(state.sol_matrix > 0, True, False)
        if checkSolution(state):
            return state
        return None
    # branch on the first row that still has unfilled cells
    row_branch = None
    for row_positions in dfs_route:
        if False in state.isfilled_matrix[row_positions[1]]:
            state.isfilled_matrix[row_positions[1]] = True
            row_branch = row_positions
            break
    if row_branch is None:
        return None
    for row in row_branch[2]:
        # Cut useless branch
        continue_flag = False
        for idx, val in enumerate(state.sol_matrix[row_branch[1]]):
            if val != 0 and val != row[idx]:
                continue_flag = True
                break
        if continue_flag:
            continue
        next_state = copy.deepcopy(state)
        next_state.sol_matrix[row_branch[1]] = row
        ret_state = dfs_search(next_state, d + 1, dfs_route)
        if ret_state is not None:
            return ret_state
    return None
def cross_over(self, instance_a, instance_b, point=0.5, rate=0.8):
    # the point argument is ignored: a fresh crossover point is drawn each call
    point = random.random()
    if random.random() > rate:
        return instance_a, instance_b
    instance_a_cpy = copy.deepcopy(instance_a)
    instance_b_cpy = copy.deepcopy(instance_b)
    loc = int(self.size_of_slices * point)
    instance_a_cpy.genes[:loc], instance_b_cpy.genes[:loc] = \
        instance_b_cpy.genes[:loc], instance_a_cpy.genes[:loc]
    return instance_a_cpy, instance_b_cpy
def update_population(self):
    self.next_generation.sort(reverse=True)
    keep_pop = int(self.pop_size * 0.3)
    rand_pop = int(self.pop_size * 0.3)
    # best_of_the_bests and the all-ones individual are also included
    next_pop = self.pop_size - keep_pop - rand_pop - 2
    random.shuffle(self.population.individuals)
    keep_gen = self.population.individuals[:keep_pop]
    rand_ind = Population(self.size_of_slices, rand_pop)
    rand_ind.generate_popluation()
    rand_gen = rand_ind.individuals[:]
    for i in rand_ind.individuals:
        i.calc_fitness(self.slices, self.max_slices)
    all_1 = Population(self.size_of_slices, 1)
    all_1.generate_popluation(options='1')
    all_1_gen = all_1.individuals[:]
    all_1_gen[0].calc_fitness(self.slices, self.max_slices)
    next_gen = self.next_generation[:next_pop]
    self.population.individuals = [copy.deepcopy(self.best_of_the_bests)] + \
        keep_gen[:] + next_gen[:] + all_1_gen[:]
    for j in range(len(self.population.individuals) - 1, -1, -1):
        if self.population.individuals[j].fitness[1] < 0:
            t_i = self.reduce_individual(
                copy.deepcopy(self.population.individuals[j]), random.random(), option=1)
            self.population.individuals.append(t_i)
            t_i = self.reduce_individual(
                copy.deepcopy(self.population.individuals[j]), random.random(), option=0)
            self.population.individuals.append(t_i)
        t_i_2 = self.mutate(self.population.individuals[j], chance=0.05)
        t_i_2.calc_fitness(self.slices, self.max_slices)
        self.population.individuals.append(t_i_2)
    self.population.individuals += rand_gen[:]
    self.population.individuals = self.population.individuals[:self.pop_size]
def next_pes_packet(filepath, pes_id):
    '''Given the path to a .TS file, generate a series of PES packet
    structures for a given PES id.
    '''
    pes_packet = None
    pes = []
    initial_timestamp = None
    for packet in next_ts_packet(filepath):
        # always process timestamp info, regardless of PID
        if packet.adapatation_field() and packet.adapatation_field().PCR():
            current_timestamp = packet.adapatation_field().PCR()
            initial_timestamp = initial_timestamp or current_timestamp
            delta = current_timestamp - initial_timestamp
            elapsed_time_s = float(delta) / 90000.0
        # if this is the stream PID we're interested in, reconstruct the ES
        if packet.pid() == pes_id:
            if packet.payload_start():
                # a payload-start flag begins a fresh PES packet
                pes = copy.deepcopy(packet.payload())
            else:
                # otherwise keep accumulating the continued payload
                pes.extend(packet.payload())
            pes_packet = PESPacket(pes)
            if pes_packet.length() == (pes_packet.header_size() + pes_packet.payload_size()):
                yield pes_packet
def result(oldGame, column):
    newGame = maxConnect4Game()
    try:
        newGame.nodeDepth = oldGame.nodeDepth + 1
    except AttributeError:
        newGame.nodeDepth = 1
    newGame.pieceCount = oldGame.pieceCount
    newGame.gameBoard = copy.deepcopy(oldGame.gameBoard)
    if not newGame.gameBoard[0][column]:
        for i in range(5, -1, -1):
            if not newGame.gameBoard[i][column]:
                newGame.gameBoard[i][column] = oldGame.currentTurn
                newGame.pieceCount += 1
                break
    if oldGame.currentTurn == 1:
        newGame.currentTurn = 2
    elif oldGame.currentTurn == 2:
        newGame.currentTurn = 1
    newGame.checkPieceCount()
    newGame.countScore()
    return newGame
def mutate(self, individual, chance=0.015):
    individual_tmp = copy.deepcopy(individual)
    for i in range(individual_tmp.length):
        ch = random.random()
        if ch <= chance:
            individual_tmp.genes[i] ^= 1
    return individual_tmp
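# Hedged usage sketch tying the crossover and mutation operators above
# together on toy individuals. The Individual class is an invented stand-in
# for whatever the surrounding GA framework defines; the swap mirrors the
# single-point crossover in cross_over above.
import copy
import random

class Individual(object):
    def __init__(self, genes):
        self.genes = list(genes)
        self.length = len(genes)

random.seed(0)
a, b = Individual([0] * 8), Individual([1] * 8)
loc = int(len(a.genes) * random.random())
# crossover works on deep copies so the parents survive unchanged
child_a, child_b = copy.deepcopy(a), copy.deepcopy(b)
child_a.genes[:loc], child_b.genes[:loc] = child_b.genes[:loc], child_a.genes[:loc]
# a low per-gene mutation chance, as in mutate above
for i in range(child_a.length):
    if random.random() <= 0.015:
        child_a.genes[i] ^= 1
print(child_a.genes, child_b.genes)
print(a.genes, b.genes)  # parents unchanged thanks to the deep copies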
def createGameforSearch(currGame, column):
    newGame = maxConnect4Game()
    try:
        newGame.nodeDepth = currGame.nodeDepth + 1
    except AttributeError:
        newGame.nodeDepth = 1
    newGame.pieceCount = currGame.pieceCount
    newGame.gameBoard = copy.deepcopy(currGame.gameBoard)
    if not newGame.gameBoard[0][column]:
        for iRow in range(5, -1, -1):
            if not newGame.gameBoard[iRow][column]:
                newGame.gameBoard[iRow][column] = currGame.currentTurn
                newGame.pieceCount += 1
                break
    if currGame.currentTurn == 1:
        newGame.currentTurn = 2
    elif currGame.currentTurn == 2:
        newGame.currentTurn = 1
    newGame.checkPieceCount()
    newGame.countScore()
    return newGame
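# Hedged usage sketch: expanding all successors of a position with
# createGameforSearch above. This maxConnect4Game class is a minimal stand-in
# for illustration only (a 6x7 board of zeros plus the attributes the function
# touches); the real class defines real scoring and bookkeeping.
import copy

class maxConnect4Game(object):
    def __init__(self):
        self.gameBoard = [[0] * 7 for _ in range(6)]
        self.pieceCount = 0
        self.currentTurn = 1

    def checkPieceCount(self):
        pass  # real implementation recounts pieces

    def countScore(self):
        pass  # real implementation scores 4-in-a-row runs

root = maxConnect4Game()
# one successor per column whose top cell is still empty
children = [createGameforSearch(root, col) for col in range(7)
            if not root.gameBoard[0][col]]
print(len(children))  # 7 legal moves from the empty board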
def test_copy_2():
    import copy
    ensure_tainted(
        copy.copy(TAINTED_LIST),
        copy.deepcopy(TAINTED_LIST),
    )
def preprocess(self, data):
    """Splits the routes of mould parts (design + mould)."""
    orders = data["input"]["BOM"]["orders"]
    stations = data["input"]["BOM"]["stations"]
    for order in orders:
        orderComponents = order.get("componentsList", [])
        componentsToAdd = []
        for index, component in enumerate(orderComponents):
            route = component.get("route", [])
            design_step_list = []
            # For each step of the component's route find out if it belongs to
            # a design route (ENG - CAD) or a mould route (ASSM - INJM). If the
            # route contains none of these technology-type steps then the
            # component is a normal one.
            routeList = copy.deepcopy(route)
            i = 0
            for step in routeList:
                stepTechnology = step.get('technology', [])
                assert stepTechnology in ROUTE_STEPS_SET, 'the technology provided does not exist'
                if stepTechnology in DESIGN_ROUTE_STEPS_SET:
                    design_step_list.append(step)
                    route.pop(i)
                else:
                    i += 1
            if design_step_list:
                design = {
                    "componentName": component.get("componentName", "") + "_Design",
                    "componentID": component.get("componentID", "") + "_D",
                    "quantity": component.get("quantity", 1),
                    "route": design_step_list
                }
                componentsToAdd.append(design)
        for design in componentsToAdd:
            orderComponents.append(design)
    return data
def remove_epsilon(self):
    new_edges = []
    for node in self.nodes:
        visited = {node_: False for node_ in self.nodes}
        queue = [node]
        while queue:
            current = queue.pop(0)
            if current in self.final:
                self.final.add(node)
            if current in self.edges:
                if current != node:
                    for (letter, edge) in self.edges[current].items():
                        if letter != EPSILON:
                            new_edges.append((node, (letter, copy.deepcopy(edge))))
                if EPSILON in self.edges[current]:
                    for new_node in self.edges[current][EPSILON]:
                        if not visited[new_node]:
                            queue.append(new_node)
                            visited[new_node] = True
    for (out, (letter, to)) in new_edges:
        self._add_edge(out, letter, to)
    for all_edges in self.edges.values():
        all_edges.pop(EPSILON, None)
    if EPSILON in self.alphabet:
        self.alphabet.remove(EPSILON)
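# The BFS epsilon-closure idea inside remove_epsilon above, as a standalone
# sketch over a plain dict-of-dicts automaton. EPSILON and the edge layout are
# invented stand-ins for the class attributes used above.
EPSILON = ''

def epsilon_closure(edges, start):
    """All states reachable from `start` via epsilon edges alone."""
    seen, queue = {start}, [start]
    while queue:
        state = queue.pop(0)
        for nxt in edges.get(state, {}).get(EPSILON, set()):
            if nxt not in seen:
                seen.add(nxt)
                queue.append(nxt)
    return seen

edges = {'a': {EPSILON: {'b'}}, 'b': {EPSILON: {'c'}, 'x': {'d'}}}
print(sorted(epsilon_closure(edges, 'a')))  # ['a', 'b', 'c']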
def make_tex(self, filename):
    subprocess.run('if ! [ -d out ]; then mkdir ./out; fi', shell=True, check=True)
    self.prepared_files.append(filename)
    tex_text = ''
    with open('tex/start', 'r') as begin_file:
        tex_text += begin_file.read() + '\n'
    sort_nodes = list(copy.deepcopy(self.nodes))
    sort_nodes.sort()
    nodes_num = dict()
    for num, node in enumerate(sort_nodes):
        nodes_num.update({node: num})
        tex_text += self._make_tex_node(node, num)
    tex_text += '\n\\path\n'
    new_E = other_edges(self.edges)
    for (from_, to_), letters in new_E.items():
        tex_text += '({})'.format(from_) + '\n'
        for index, letter in enumerate(letters):
            tex_text += self._make_tex_edge(letter, nodes_num[from_], index, from_, to_)
    tex_text += ';\n'
    with open('tex/end', 'r') as end_file:
        tex_text += end_file.read()
    with open('out/{}.tex'.format(filename), 'w') as out_file:
        out_file.write(tex_text)
def __getitem__(self, key):
    self._key = copy.deepcopy(key)
    if self._encoder is not None:
        return self._encode_key(key)
    elif not isinstance(key, Hashable):
        key = tuple(key)
    return super(Dict, self).__getitem__(key)
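# A standalone sketch of the key-normalisation idea in __getitem__ above:
# unhashable keys such as lists are converted to tuples before the ordinary
# dict lookup. The import path is the Python 3 one; the original snippet is
# old enough that it may have used `from collections import Hashable`.
from collections.abc import Hashable

store = {(1, 2, 3): "value"}
key = [1, 2, 3]          # lists are not hashable...
if not isinstance(key, Hashable):
    key = tuple(key)     # ...so normalise to the tuple form
print(store[key])        # "value"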
def _clear_edges(self):
    for node, edges in copy.deepcopy(self.edges).items():
        for letter, set_ in copy.copy(edges).items():
            if not set_:
                del self.edges[node][letter]
        # check the live dict, not the snapshot, now that empty letters are gone
        if not self.edges[node]:
            del self.edges[node]
def check_true_false(knowledgebase, statement, model):
    symbols = []
    negation = '(not ' + statement + ')'
    # Parse the statement and verify it is a valid logical expression
    statement = read_expression(statement)
    if not valid_expression(statement):
        sys.exit('invalid statement')
    # Show us what the statement is
    print '\nChecking statement: ', print_expression(statement, '')
    print
    # gather the symbols and eliminate the duplicates
    symbols = get_symbols(symbols, knowledgebase, statement)
    symbols = list(set(symbols))
    symbols = copy.deepcopy(symbols)
    for i in model.keys():
        symbols.remove(i)
    negative_statement = read_expression(negation, [0])
    # check truth-table entailment for both the statement and its negation
    statement1 = check_tt_entail(knowledgebase, statement, symbols, model)
    statement2 = check_tt_entail(knowledgebase, negative_statement, symbols, model)
    # get the equivalent output string and write it into the result file
    result = get_result_statement(statement1, statement2)
    print(result)
    write_result(result)
def trim_intro(mid):
    """
    Many MIDI files begin with a count-in on a drum. Some might even begin
    with some silence.

    Given a L{midi.EventStream}, returns a version with any drum intro or
    silence trimmed from the beginning, so that the first thing heard is a
    non-drum note.

    It is assumed that this is General MIDI data, so channel 10 is
    necessarily a drum channel.
    """
    from midi import EndOfTrackEvent
    import copy
    mid = copy.deepcopy(mid)
    first_note = None
    events = iter(sorted(mid.trackpool))
    # Find the time of the first played note that's not a drum note
    while first_note is None:
        ev = events.next()
        if isinstance(ev, NoteOnEvent) and ev.channel != 9:
            first_note = ev.tick
    # Shift everything backwards
    to_remove = []
    intro_events = []
    intro_length = 0
    for track in mid.tracklist.values():
        # Keep all the events in the right order, but remove as much
        # empty time as possible
        intro_times = {}
        for ev in track:
            if ev.tick < first_note:
                # Remove all note-ons and -offs
                if isinstance(ev, (NoteOnEvent, NoteOffEvent)):
                    to_remove.append(ev)
                else:
                    # Put the rest as close to the beginning as possible
                    intro_times.setdefault(ev.tick, []).append(ev)
                    intro_events.append(id(ev))
        # Give each distinct time only a single midi tick
        for tick, (old_tick, pre_events) in enumerate(sorted(intro_times.items())):
            for ev in pre_events:
                ev.tick = tick
        intro_length = max(intro_length, len(intro_times))
    # Now get rid of the pre-start notes
    for ev in to_remove:
        mid.remove_event_instance(ev)
    # Shift everything back as far as we can
    shift = first_note - intro_length
    for ev in mid.trackpool:
        if id(ev) not in intro_events:
            ev.tick -= shift
    return mid
def init_mcmc(name, state_0, r_start, r_end, *params, **kwargs):
    import copy
    extra = copy.deepcopy(kwargs)
    mcmc = {'name': name}  # name - also the name of the file
    # Constants
    mcmc['accept_hat'] = 0.23
    mcmc['sigma'] = 100
    mcmc['state_0'] = state_0
    mcmc['start'] = r_start
    mcmc['end'] = r_end
    mcmc['active_params'] = np.array(extra.get('active_params', np.arange(6)))
    apsl = mcmc['active_params']
    mcmc['names'] = extra.get('names', ['omega', 'phi', 'rho', 'f1', 'f2', 'f3', 'e'])
    # Initial values
    mcmc['values'] = np.array(params)[apsl]  # CURRENT values
    print(mcmc['values'], params)
    M_now, M2_now, state_z = run_model(state_0, r_start, r_end, *params)
    mcmc['y_now_M'] = M_now.copy()    # CURRENT values
    mcmc['y2_now_M'] = M2_now.copy()  # CURRENT values
    mcmc['active'] = True
    # Model specific
    mcmc['d'] = len(apsl)
    mcmc['scaling_factor'] = np.array([2.4 / np.sqrt(mcmc['d'])])
    # Chains and metrics
    mcmc['y_hat_M'] = mcmc['y_now_M'].copy()[None, :, :]  # CHAIN: y values for proposed set (shape like data points)
    mcmc['y2_hat_M'] = mcmc['y2_now_M'].copy()            # CHAIN: y values for proposed set (shape like data points)
    mcmc['chain'] = mcmc['values'].copy()    # CHAIN: proposed set
    mcmc['guesses'] = mcmc['values'].copy()  # CHAIN: proposed set
    mcmc['accepted'] = np.array([1])
    mcmc['rates'] = np.array([-1])
    mcmc['max_likelihood'] = -np.inf
    mcmc['gelman_rubin'] = np.zeros(mcmc['d'])
    mcmc['change'] = np.array([-1])
    mcmc['initial_guess'] = np.array(params)
    mcmc['tally'] = 0
    mcmc = {**mcmc, **extra}
    # After uniting: state z
    mcmc['state_z'] = state_z
    ll_now = log_liklihood(M_now, mcmc['datay1'], mcmc['sigma'], noise=150)
    ll_now += log_liklihood(M2_now, mcmc['datay2'], mcmc['sigma2'], noise=300)
    mcmc['ll'] = np.array([-np.inf, ll_now])
    # SD
    mcmc['cov'] = mcmc['cov'][apsl, :][:, apsl]
    mcmc['sd'] = mcmc['scaling_factor'] ** 2 * mcmc['cov']
    return mcmc
def all_possible_turns(self):
    turn_list = []
    moves = self.all_possible_moves()
    for move in moves:
        next_turn = Turn(move)
        if not move.eat:
            turn_list.append(next_turn)
        else:
            # a capture may be extended: play it on a copied board and
            # collect every continuation sequence
            next_move = copy.deepcopy(move)
            board = copy.deepcopy(self)
            board.move_piece(next_move)
            turn_list.extend(board.all_continuation_seq(next_turn))
    return turn_list
def create_changed_data(data, idx_atribut, idx_changed_label):
    # produces new data with the attribute values already replaced (splitting);
    # `dataframe` is assumed to be a module-level global
    data_temp = copy.deepcopy(data)
    change_continuous_values(dataframe, data_temp, idx_atribut, idx_changed_label)
    return data_temp
def depth_limited_with_all_characteristics(self, current_node, child_node, parent_node):
    # In the original source this helper was left unfinished: v is initialised
    # to infinity, the child boards are collected, and v is returned without
    # ever being updated by a depth-limited max-value step.
    parent_node = copy.deepcopy(current_node)
    if self.currentTurn == 1:
        opponent = 2
    elif self.currentTurn == 2:
        opponent = 1
    v = infinity
    track_of_child_nodes = []
    for j in range(7):
        current_state = self.checkPiece(j, opponent)
        if current_state is not None:
            track_of_child_nodes.append(self.gameBoard)
            self.gameBoard = copy.deepcopy(parent_node)
    # return the value of the depth-limited max-value step
    return v

# Depth limited search
def depth_limited_alpha_beta_pruning(self, current_node, maxDepth):
    current_state = copy.deepcopy(current_node)
    for i in range(7):
        if self.playPiece(i) is not None:
            if self.pieceCount == 42 or maxDepth == 0:
                self.gameBoard = copy.deepcopy(current_state)
                return i
            v = self.depth_limited_minValue(self.gameBoard, -infinity, infinity, maxDepth - 1)
            utility_list[i] = v
            self.gameBoard = copy.deepcopy(current_state)
    max_util_value = max(utility_list.values())
    for i in range(7):
        if i in utility_list and utility_list[i] == max_util_value:
            utility_list.clear()
            return i
def copy(self):
    """
    Copies the BeliefState by recursively deep-copying all of its parts.
    Domains are not copied, as they do not change during the interpretation
    or generation.
    """
    copied = BeliefState(self.__dict__['referential_domain'])
    for key in ['environment_variables', 'deferred_effects', 'pos', 'p']:
        copied.__dict__[key] = copy.deepcopy(self.__dict__[key])
    return copied
def plot_hddm_fit(self, range_=(-1., 1.), bins=150, plot_fit=True):
    import hddm
    import copy
    x = np.linspace(range_[0], range_[1], 200)
    # Plot parameters
    for i, test_param in enumerate(self.depends):
        plt.figure()
        for j, dep_val in enumerate(self.x):
            param_vals = {}
            tag = "%s('%s_%.4f',)" % (test_param, self.condition, dep_val)
            dep_tag = '%s_%.4f' % (self.condition, dep_val)
            plt.subplot(3, 3, j + 1)
            data = self.hddm_data[(self.hddm_data['dependent'] == dep_tag) &
                                  (self.hddm_data.inhibited == False) &
                                  (self.hddm_data.ss_presented == False)]
            data = hddm.utils.flip_errors(data)
            data_ss = self.hddm_data[(self.hddm_data['dependent'] == dep_tag) &
                                     (self.hddm_data.inhibited == False) &
                                     (self.hddm_data.ss_presented == True)]
            data_ss = hddm.utils.flip_errors(data_ss)
            # Plot histogram
            hist = hddm.utils.histogram(data['rt'], bins=bins, range=range_, density=True)[0]
            plt.plot(np.linspace(range_[0], range_[1], bins), hist)
            # Plot histogram of stop-errors
            hist_ss = hddm.utils.histogram(data_ss['rt'], bins=bins, range=range_, density=True)[0]
            plt.plot(np.linspace(range_[0], range_[1], bins), hist_ss)
            # Plot fit
            if plot_fit:
                fitted_params = copy.deepcopy(self.stats_dict[test_param])
                for param in self.depends:
                    if param == test_param:
                        param_vals[param] = fitted_params[tag]['mean']
                    else:
                        param_name = param  # +'_group'
                        param_vals[param] = fitted_params[param_name]['mean']
                # fit = hddm.likelihoods.wfpt_switch.pdf(x, param_vals['vpp'], param_vals['vcc'], param_vals['Vcc'], param_vals['a'], .5, param_vals['t'], param_vals['tcc'], param_vals['T'])
                # plt.plot(x, fit)
            plt.title(tag)
        if not plot_fit:
            # Bail, no need to replot the same thing
            self.save_plot('hddm_fit')
            return
        self.save_plot('hddm_fit_%s' % test_param)
def main():
    import copy
    options, args = get_options()
    # load the building blocks
    enumerator = BuildingBlockEnumerator(options.bblocks_dir, options.subsets_file)
    ## would be cool:
    # enumerator.enum_template("D* (R0 R0) A*", options)
    ## but we can do better:
    enumerator.enum_smarter(options)
    good2 = copy.deepcopy(enumerator.good_strs)
    print "Found %d valid structures" % (len(good2))
    for g in good2:
        print g
def DFADecompile(self, dc):
    '''
    Decompiles code in all basic blocks, but doesn't do any structuring.

    @param dc: decompiler.
    '''
    import copy
    visited = set()
    DFAStack = [(self.root, [])]
    while len(DFAStack):
        (n, DecompileStack) = DFAStack.pop()
        if n not in visited:
            visited.add(n)
            res = dc.codeDecompile(self.nodes[n].offset, self.nodes[n].length,
                                   stack=DecompileStack, mode='conditional')
            (self.nodes[n].code, self.nodes[n].condition) = res
            for e in self.nodes[n].outgoing:
                DFAStack.append((e.toNode, copy.deepcopy(DecompileStack)))
def preprocess(self, data):
    """Splits the routes of mould parts (design + mould) and updates the
    _class attribute of the entities."""
    orders = data["input"]["BOM"]["productionOrders"]
    for order in orders:
        orderComponents = order.get("componentsList", [])
        componentsToAdd = []
        for index, component in enumerate(orderComponents):
            route = component.get("route", [])
            design_step_list = []
            # For each step of the component's route find out if it belongs to
            # a design route (ENG - CAD) or a mould route (ASSM - INJM). If the
            # route contains none of these technology-type steps then the
            # component is a normal one.
            routeList = copy.deepcopy(route)
            i = 0
            # figure out which steps are design steps
            for step in routeList:
                stepTechnology = step.get('technology', [])
                assert stepTechnology in self.ROUTE_STEPS_SET, 'the technology provided does not exist'
                if stepTechnology in self.DESIGN_ROUTE_STEPS_SET:
                    design_step_list.append(step)
                    route.pop(i)
                else:
                    i += 1
            # if the current entity is a mould design then create the design
            # and update the _class attribute of the mould
            if design_step_list:
                design = {"name": component.get("name", "") + "_Design",
                          "id": component.get("id", "") + "_D",
                          "quantity": component.get("quantity", 1),
                          "route": design_step_list,
                          "_class": "Dream.OrderDesign"}
                componentsToAdd.append(design)
                """the current component is a mould"""
                component["_class"] = "Dream.Mould"  # XXX hard-coded value
            # otherwise we have a normal component; update the _class attribute accordingly
            else:
                # the original assigned to "class" here, apparently a typo for "_class"
                component["_class"] = "Dream.OrderComponent"  # XXX hard-coded value
        for design in componentsToAdd:
            orderComponents.append(design)
    return data
def evaluation_pdf(request, app_id):
    application = get_object_or_404(Application, id=app_id)
    evaluation, created = Evaluation.objects.get_or_create(application=application)
    try:
        from reportlab.pdfgen import canvas
        from reportlab.lib.pagesizes import letter
        from reportlab.lib.styles import getSampleStyleSheet
        from reportlab.platypus import Paragraph, Image, Spacer, SimpleDocTemplate
        from reportlab.lib import enums
        import settings, copy
        from datetime import date
    except:
        return HttpResponse("Missing library")
    width, pageHeight = letter
    response = HttpResponse(mimetype='application/pdf')
    response['Content-Disposition'] = "attachment; filename=application_%s.pdf" % app_id
    # build the style palette: every style is a deep copy of Normal so that
    # tweaking one does not leak into the others
    stylesheet = getSampleStyleSheet()
    normalStyle = copy.deepcopy(stylesheet['Normal'])
    normalStyle.fontName = 'Times-Roman'
    normalStyle.fontSize = 12
    normalStyle.leading = 15
    centreAlign = copy.deepcopy(normalStyle)
    centreAlign.alignment = enums.TA_CENTER
    rightAlign = copy.deepcopy(normalStyle)
    rightAlign.alignment = enums.TA_RIGHT
    lindent = copy.deepcopy(normalStyle)
    lindent.leftIndent = 12
    h1 = copy.deepcopy(normalStyle)
    h1.fontName = 'Times-Bold'
    h1.fontSize = 18
    h1.leading = 22
    h1.backColor = '#d0d0d0'
    h1.borderPadding = 3
    h1.spaceBefore = 3
    h1.spaceAfter = 3
    h2 = copy.deepcopy(normalStyle)
    h2.fontName = 'Times-Bold'
    h2.fontSize = 14
    h2.leading = 18
    h2.backColor = '#e8e8e8'
    h2.borderPadding = 3
    h2.spaceBefore = 3
    h2.spaceAfter = 3
    page = SimpleDocTemplate(response, pagesize=letter, title="EWB application")
    p = []
    p.append(Paragraph("<strong><em>PRIVATE AND CONFIDENTIAL</em></strong>", centreAlign))
    p.append(Spacer(0, 10))
    p.append(Paragraph("Engineers Without Borders Canada", normalStyle))
    p.append(Spacer(0, 6))
    p.append(Paragraph("<strong>%s</strong>" % application.session.name, normalStyle))
    p.append(Spacer(0, -40))
    img = Image(settings.MEDIA_ROOT + '/images/emaillogo.jpg', 100, 51)
    img.hAlign = 'RIGHT'
    p.append(img)
    p.append(Spacer(0, 10))
    p.append(Paragraph("Application for", normalStyle))
    p.append(Spacer(0, 5))
    parsed_name = ''
    if application.profile.first_name:
        parsed_name = application.profile.first_name
    if application.profile.first_name and application.profile.last_name:
        parsed_name = parsed_name + ' '
    if application.profile.last_name:
        parsed_name = parsed_name + application.profile.last_name
    p.append(Paragraph(parsed_name, h1))
    p.append(Paragraph("<strong>Submitted " + str(application.updated.date()) + "</strong>", normalStyle))
    p.append(Spacer(0, -13))
    p.append(Paragraph("Printed: " + str(date.today()), rightAlign))
    p.append(Spacer(0, 14))
    p.append(Paragraph("<strong>English language</strong> Reading: %d Writing: %d Speaking %d" %
                       (application.en_reading, application.en_writing, application.en_speaking), normalStyle))
    p.append(Paragraph("<strong>French language</strong> Reading: %d Writing: %d Speaking %d" %
                       (application.fr_reading, application.fr_writing, application.fr_speaking), normalStyle))
    p.append(Spacer(0, 20))
    p.append(Paragraph("Resume", h2))
    try:
        p.append(Paragraph(application.resume_text.replace("<br>", "<br/>").replace("</p>", "<br/><br/>"), lindent))
    except:
        p.append(Paragraph(strip_tags(application.resume_text).replace("\n", "<br/>"), lindent))
    p.append(Spacer(0, 20))
    p.append(Paragraph("References", h2))
    try:
        p.append(Paragraph(application.references.replace("<br>", "<br/>").replace("</p>", "<br/><br/>"), lindent))
    except:
        # the original fell back to resume_text here, apparently a copy-paste slip
        p.append(Paragraph(strip_tags(application.references).replace("\n", "<br/>"), lindent))
    p.append(Spacer(0, 20))
    p.append(Paragraph("Application Questions", h2))
    for question in application.session.application_questions():
        p.append(Paragraph("<strong>%s</strong>" % question.question, normalStyle))
        answer = Answer.objects.filter(application=application, question=question)
        if answer:
            p.append(Paragraph(answer[0].answer.replace("\n", "<br/>"), lindent))
        else:
            p.append(Paragraph("<em>No answer</em>", lindent))
        p.append(Spacer(0, 20))
    page.build(p)
    return response
def verifyWeeksByValuesAll(self, df2):
    try:
        df2.insert(df2.keys().get_loc("lifted_gallons_Weekly"), "WeeksByLiftedGallons", 0)
        df2.insert(df2.keys().get_loc("lifted_gallons_Weekly"), "lifted_gallonsWeekly_modified_aposteriori", df2["lifted_gallons_Daily"].values)
        df2.insert(df2.keys().get_loc("lifted_gallons_Weekly"), "Modified_LiftedGallonsaposteriori", 0)
        df2.insert(df2.keys().get_loc("lifted_gallons_Daily"), "lifted_gallons_modified_WeeksByLiftedGallons", df2["lifted_gallons_Daily"].values)
        # a drop from one day's weekly total to the next marks a week boundary
        weeksByValues = []
        x = df2["lifted_gallons_Weekly"]
        for i in x.keys()[:-1]:
            if x[i] > x[i + 1]:
                weeksByValues.append(i)
        ct = 1
        temp = []
        wkz = []
        for i in x.keys()[:-1]:
            if x[i] > x[i + 1]:
                temp.append(i)
                print i, ct
                wkz.append(copy.deepcopy(temp))
                print temp
                temp = []
                ct = ct + 1
            else:
                temp.append(i)
                print i, ct
        # close off the final (possibly partial) week
        if not (x[i] > x[i + 1]):
            print i + 1, ct
            temp.append(i + 1)
            wkz.append(copy.deepcopy(temp))
            print temp
        else:
            wkz.append([i + 1])
        for i in range(len(wkz)):
            self.df2.loc[wkz[i], "WeeksByLiftedGallons"] = 'w' + str(i + 1)
        print df2.loc[:, "WeeksByLiftedGallons"]
        grpNRD = df2.groupby(["WeeksByLiftedGallons"])["lifted_gallons_Weekly", "lifted_gallons_Daily"]
        df2.insert(df2.keys().get_loc("lifted_gallons_modified_WeeksByLiftedGallons"), "Modified_WeeksByLiftedGallons", 0)
        firstNRD = self.df2["WeeksByLiftedGallons"].iloc[0]
        for key, grp in grpNRD:
            if grp["lifted_gallons_Daily"].cumsum()[-1] == grp["lifted_gallons_Weekly"][-1]:
                print "****Valid Week***\n", grp.loc[:, ["WeeksByLiftedGallons", "lifted_gallons_Daily", "lifted_gallons_Weekly"]]
                validLiftedGallons = df2.loc[df2["WeeksByLiftedGallons"] == key, "lifted_gallons_modified_WeeksByLiftedGallons"].cumsum() == df2.loc[df2["WeeksByLiftedGallons"] == key, "lifted_gallons_Weekly"]
                df2.loc[df2["WeeksByLiftedGallons"] == key, "sanityWeekly_CumulativeDaily_WeeksByLiftedGallons"] = validLiftedGallons
            else:
                print "****Invalid Week****\n", grp.loc[:, ["WeeksByLiftedGallons", "lifted_gallons_Daily", "lifted_gallons_Weekly"]]
                for i in range(len(grp)):
                    if i != 0:
                        if grp.ix[i - 1]["lifted_gallons_Weekly"] != 0 and grp.ix[i]["lifted_gallons_Weekly"] != 0:
                            if df2.loc[str(grp.ix[i]['date']), "lifted_gallons_Daily"] != (grp.ix[i]["lifted_gallons_Weekly"] - grp.ix[i - 1]["lifted_gallons_Weekly"]):
                                df2.loc[str(grp.ix[i]['date']), "lifted_gallons_modified_WeeksByLiftedGallons"] = grp.ix[i]["lifted_gallons_Weekly"] - grp.ix[i - 1]["lifted_gallons_Weekly"]
                                df2.loc[str(grp.ix[i]['date']), "Modified_WeeksByLiftedGallons"] = 1
                        else:
                            if grp.ix[i]["lifted_gallons_Weekly"] == 0 and grp.ix[i]["lifted_gallons_Daily"] == 0 and i != (len(grp) - 1):
                                self.computeMissingLiftedWeeklyAndDailyOneDayWeekValue(df2, grp, i)
                            else:
                                print "Not enough data available to fill lifted gallons"
                    else:
                        # opening balance
                        if key == firstNRD and firstNRD != 0:
                            nrdGroups = self.df2["WeeksByLiftedGallons"].value_counts()
                            if nrdGroups[firstNRD] != 7:
                                openingBalances = self.verifyOpeningBalance()
                                if openingBalances != {}:
                                    if openingBalances['lifted_weekly'] != 0 and grp.ix[i]["lifted_gallons_Weekly"] != 0:
                                        if df2.loc[str(grp.ix[i]['date']), "lifted_gallons_Daily"] != grp.ix[i]["lifted_gallons_Weekly"] - openingBalances['lifted_weekly']:
                                            modifiedLGD = grp.ix[i]["lifted_gallons_Weekly"] - openingBalances['lifted_weekly']
                                            df2.loc[str(grp.ix[i]['date']), "lifted_gallons_modified_WeeksByLiftedGallons"] = modifiedLGD
                                            df2.loc[str(grp.ix[i]['date']), "Modified_WeeksByLiftedGallons"] = 3
                                    elif df2.loc[str(grp.ix[i]['date']), "lifted_gallons_Daily"] != grp.ix[i]["lifted_gallons_Weekly"]:
                                        df2.loc[str(grp.ix[i]['date']), "lifted_gallons_modified_WeeksByLiftedGallons"] = grp.ix[i]["lifted_gallons_Weekly"]
                                        df2.loc[str(grp.ix[i]['date']), "Modified_WeeksByLiftedGallons"] = 1
                        elif df2.loc[str(grp.ix[i]['date']), "lifted_gallons_Daily"] != grp.ix[i]["lifted_gallons_Weekly"]:
                            df2.loc[str(grp.ix[i]['date']), "lifted_gallons_modified_WeeksByLiftedGallons"] = grp.ix[i]["lifted_gallons_Weekly"]
                            df2.loc[str(grp.ix[i]['date']), "Modified_WeeksByLiftedGallons"] = 1
                if key == firstNRD and nrdGroups[firstNRD] != 7:
                    currentMonthCumLGD = df2.loc[df2["WeeksByLiftedGallons"] == key, "lifted_gallons_modified_WeeksByLiftedGallons"].cumsum()[-1]
                    presentMonthLastWeeklyLifted = df2.loc[df2["WeeksByLiftedGallons"] == key, "lifted_gallons_Weekly"][-1]
                    if (openingBalances['lifted_weekly'] + currentMonthCumLGD) == presentMonthLastWeeklyLifted:
                        print "Modification_Successful"
                    else:
                        print "Modification_UnSuccessful"
                    cumSum = df2.loc[df2["WeeksByLiftedGallons"] == key, "lifted_gallons_modified_WeeksByLiftedGallons"].cumsum()
                    cumSum = cumSum.apply(lambda x: x + openingBalances['lifted_weekly'])
                    validLiftedGallons = cumSum == df2.loc[df2["WeeksByLiftedGallons"] == key, "lifted_gallons_Weekly"]
                    df2.loc[df2["WeeksByLiftedGallons"] == key, "sanityWeekly_CumulativeDaily_WeeksByLiftedGallons"] = validLiftedGallons
                else:
                    if df2.loc[df2["WeeksByLiftedGallons"] == key, "lifted_gallons_modified_WeeksByLiftedGallons"].cumsum()[-1] == df2.loc[df2["WeeksByLiftedGallons"] == key, "lifted_gallons_Weekly"][-1]:
                        print "Modification_Successful"
                    else:
                        print "Modification_UnSuccessful"
                    validLiftedGallons = df2.loc[df2["WeeksByLiftedGallons"] == key, "lifted_gallons_modified_WeeksByLiftedGallons"].cumsum() == df2.loc[df2["WeeksByLiftedGallons"] == key, "lifted_gallons_Weekly"]
                    df2.loc[df2["WeeksByLiftedGallons"] == key, "sanityWeekly_CumulativeDaily_WeeksByLiftedGallons"] = validLiftedGallons
    except Exception as e:
        savepath = self.savepath + "\\" + "Exception"
        if not os.path.exists(savepath):
            os.makedirs(savepath)
        file = open(savepath + "\\" + self.fileName + ".txt", "w")
        file.write("Exception in Combination:" + "\ncustomer : " + self.customer + "\nSupplier : " + self.supplier +
                   "\nAccount : " + self.account + "\nTerminal: " + self.terminal + "\nProduct : " + self.product +
                   "\nException : " + str(e))
        file.close()
        print "Exception in Combination:", self.customer, self.supplier, self.account, self.terminal, self.product, e
        return 0
def preprocess(self, data):
    """Merge the steps that constitute one single technology step."""
    orders = data["input"]["BOM"]["productionOrders"]
    # for all the orders
    for order in orders:
        orderComponents = order.get("componentsList", [])
        # for each component of the order
        for component in orderComponents:
            route = component.get("route", [])
            updatedRoute = []
            technologySequence = []
            for index, step in enumerate(route):
                technology = step["technology"]
                technology = technology.split("-")[0]
                # XXX loadType is always manual for this pilot
                step["operationType"] = {"Load": 1}
                '''processingType + operator for processing'''
                # if the operator is defined as automatic or
                # XXX if the technology is EDM
                # then set the processing type as automatic and remove the operator property
                if step["operator"] == "Automatic" \
                        or step["technology"] == "EDM":
                    step["operationType"]["Processing"] = 0
                    step.pop("operator")
                else:
                    step["operationType"]["Processing"] = 1
                    tempOperator = copy.deepcopy(step["operator"])
                    step["operator"] = {}
                    step["operator"]["processing"] = [tempOperator]
                    step["operator"]["load"] = [tempOperator]
                '''find out if there is any previous step to merge with'''
                idxToMerge = None
                if technologySequence:
                    if technology == technologySequence[-1]:
                        if len(route[index - 1]["technology"].split("-")):
                            if route[index - 1]["technology"].split("-")[-1] == "SET":
                                idxToMerge = index - 1
                            else:
                                idxToMerge = None
                '''if we must merge two steps'''
                if idxToMerge != None:
                    # remove the previous step from the updatedRoute and technologySequence
                    updatedRoute.pop(-1)
                    technologySequence.pop(-1)
                    stepToMerge = route[idxToMerge]
                    # parts needed
                    step["partsneeded"] = stepToMerge["partsneeded"]
                    # technology
                    step["technology"] = technology
                    # setupTime
                    if stepToMerge["processingTime"]:
                        step["setupTime"] = stepToMerge["processingTime"]
                    # setupType + operator for setup
                    if stepToMerge["operator"] == "Automatic":
                        step["operationType"]["Setup"] = 0
                    else:
                        step["operationType"]["Setup"] = 1
                    try:
                        tempOperator = copy.deepcopy(step["operator"])
                    except:
                        tempOperator = None
                    step["operator"] = {}
                    step["operator"]["setup"] = route[idxToMerge]["operator"]["processing"]
                    step["operator"]["load"] = route[idxToMerge]["operator"]["load"]
                    if tempOperator:
                        step["operator"]["processing"] = tempOperator["processing"]
                technologySequence.append(technology)
                # append the (updated) step to the temporary route
                updatedRoute.append(step)
            # update the route of the component
            component["route"] = updatedRoute
    return data
def runRules(self, suppliersCombinations=[]):
    try:
        self.get_maxbatch_analysis_pivot()
        if suppliersCombinations == []:
            suppliersCombinations = self.fetchSupplierCombi()
        if len(suppliersCombinations) == 0:
            raise ValueError("No Combinations found to execute!")
        for supplierInfo in suppliersCombinations.values:
            details = [self.customer, self.supplier, supplierInfo[0], supplierInfo[1], supplierInfo[2],
                       self.month, self.analysisDate, self.db, self.savelocation, copy.deepcopy(self.mp)]
            supplierRule = rules(details)
            frame = supplierRule.executeRules()
            if type(frame) != int:
                self.frames.append(frame)
                self.createPivotAll(frame)
        if len(self.pivotFrames) > 0:
            resultNew = pd.concat(self.pivotFrames)
            resultNew.to_excel(self.savepath + self.supplier + "_" + self.month + "_reconciledPivotAll.xls")
        result = pd.concat(self.frames)
        db = MySQLdb.connect(self.dbcon[0], self.dbcon[1], self.dbcon[2], self.dbcon[3])
        result.to_sql(name=self.customer + "_" + self.supplier + "_" + self.month + "_reconciled",
                      con=db, flavor='mysql', if_exists='replace')
        db.close()
        indexCol = [u'date', u'account_type', u'supplier_terminal_name', u'product_name',
                    'lifted_gallons_modified_WeeksByLiftedGallons', 'lifted_gallons_daily_flag',
                    'lifted_gallons_daily_modified', "lifted_gallons_Daily", 'lifted_gallons_weekly_flag',
                    "lifted_gallons_Weekly", "Lifted_actual_weekly", 'lifted_gallons_monthly_flag',
                    "lifted_gallons_Monthly", "Lifted_actual_monthly", 'WeeksByLiftedGallons', 'Week_switch',
                    "base_gallons_Daily", "base_gallons_Monthly", "base_gallons_Weekly",
                    'Modified_WeeksByLiftedGallons', "beginning_gallons_Daily", "beginning_gallons_Monthly",
                    "beginning_gallons_Weekly", "en_allocation_status_Daily", "en_allocation_status_Monthly",
                    "en_allocation_status_Weekly", "percentage_allocation_Daily", "percentage_allocation_Monthly",
                    "percentage_allocation_Weekly", "alerts_ratability_Daily", "alerts_ratability_Monthly",
                    "alerts_ratability_Weekly", "next_refresh_date_Daily", "next_refresh_date_Monthly",
                    'Modified_NRD', "next_refresh_date_Weekly",
                    'sanityWeekly_CumulativeDaily_WeeksByLiftedGallons',
                    'sanityWeekly_CumulativeDaily_NextRefreshDate',
                    'sanityMonthly_CumulativeDaily_WeeksByLiftedGallons',
                    'sanityMonthly_CumulativeDaily_WeeksByNextRefreshDate']
        columnHeader = [u'date', u'account_type', u'supplier_terminal_name', u'product_name',
                        'Lifted_mod_daily_weekly_value', 'lifted_gallons_daily_flag',
                        'Lifted_mod_daily_nextrefresh', 'Lifted_actual_daily', 'lifted_gallons_weekly_flag',
                        'lifted_gallons_weekly_modified', 'Lifted_actual_weekly', 'lifted_gallons_weekly_flag',
                        'lifted_gallons_monthly_modified', 'Lifted_actual_monthly', 'Week_structure_weekly_value',
                        'Week_structure_next_refresh', "base_gallons_Daily", "base_gallons_Monthly",
                        "base_gallons_Weekly", 'daily_weekValue_flag', "beginning_gallons_Daily",
                        "beginning_gallons_Monthly", "beginning_gallons_Weekly", "en_allocation_status_Daily",
                        "en_allocation_status_Monthly", "en_allocation_status_Weekly",
                        "percentage_allocation_Daily", "percentage_allocation_Monthly",
                        "percentage_allocation_Weekly", "alerts_ratability_Daily", "alerts_ratability_Monthly",
                        "alerts_ratability_Weekly", "next_refresh_date_Daily", "next_refresh_date_Monthly",
                        'Modified_NRD', "next_refresh_date_Weekly",
                        'sanityWeekly_CumulativeDaily_WeeksByLiftedGallons',
                        'sanityWeekly_CumulativeDaily_NextRefreshDate',
                        'sanityMonthly_CumulativeDaily_WeeksByLiftedGallons',
                        'sanityMonthly_CumulativeDaily_WeeksByNextRefreshDate']
        result.to_excel(self.savepath + self.supplier + "_" + self.month + "_reconciled.xls",
                        columns=indexCol, header=columnHeader)
    except Exception as e:
        print "Exception:", e
def __deepcopy__(self, memo):
    result = super(forms.ChoiceField, self).__deepcopy__(memo)
    if hasattr(self, "_choices"):
        result._choices = copy.deepcopy(self._choices, memo)
    return result
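# Why the override above deep-copies _choices: with a shallow copy, two field
# copies would share one choices list. A standalone illustration with plain
# objects, no Django required; the Field class here is invented.
import copy

class Field(object):
    def __init__(self, choices):
        self._choices = choices

original = Field([('a', 'A')])
shallow = copy.copy(original)
shallow._choices.append(('b', 'B'))
print(len(original._choices))  # 2 -- the shallow copy shares the list

deep = copy.deepcopy(original)
deep._choices.append(('c', 'C'))
print(len(original._choices))  # still 2 -- the deep copy is independent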
def extendModel(model, symbol, value):
    newModel = copy.deepcopy(model)
    newModel[symbol] = value
    return newModel
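# Usage sketch for extendModel above: the deep copy means the two branches of
# a truth-table enumeration cannot clobber each other's bindings. Assumes
# `copy` is imported at module level, as extendModel itself does.
import copy

model = {'P': True}
with_q = extendModel(model, 'Q', True)
without_q = extendModel(model, 'Q', False)
print(model)       # {'P': True} -- untouched
print(with_q)      # {'P': True, 'Q': True}
print(without_q)   # {'P': True, 'Q': False}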
def simplify(stream, remove_drums=False, remove_pc=False,
             remove_all_text=False, one_track=False, remove_tempo=False,
             remove_control=False, one_channel=False,
             remove_misc_control=False, real_note_offs=False,
             remove_duplicates=False):
    """
    Filters a midi L{midi.EventStream} to simplify it. This is useful as a
    preprocessing step before taking midi input to an algorithm, for
    example, to make it clearer what the algorithm is using.

    Use kwargs to determine what filters will be applied. Without any
    kwargs, the stream will just be left as it was.

    Returns a filtered copy of the stream.

    @type remove_drums: bool
    @param remove_drums: filter out all channel 10 events
    @type remove_pc: bool
    @param remove_pc: filter out all program change events
    @type remove_all_text: bool
    @param remove_all_text: filter out any text events. This includes
        copyright, text, track name, lyrics.
    @type one_track: bool
    @param one_track: reduce everything to just one track
    @type remove_tempo: bool
    @param remove_tempo: filter out all tempo events
    @type remove_control: bool
    @param remove_control: filter out all control change events
    @type one_channel: bool
    @param one_channel: use only one channel: set the channel of every
        event to 0
    @type remove_misc_control: bool
    @param remove_misc_control: filters a miscellany of device control
        events: aftertouch, channel aftertouch, pitch wheel, sysex, port
    @type real_note_offs: bool
    @param real_note_offs: replace 0-velocity note-ons with actual
        note-offs. Some midi files use one, some the other.
    """
    from midi import EventStream, TextEvent, ProgramChangeEvent, \
        CopyrightEvent, TrackNameEvent, SetTempoEvent, ControlChangeEvent, \
        AfterTouchEvent, ChannelAfterTouchEvent, PitchWheelEvent, \
        SysExEvent, LyricsEvent, PortEvent, CuePointEvent, MarkerEvent, \
        EndOfTrackEvent
    import copy
    # Empty stream to which we'll add the events we don't filter
    new_stream = EventStream()
    new_stream.resolution = stream.resolution
    new_stream.format = stream.format
    # Work out when the first note starts in the input stream
    input_start = first_note_tick(stream)
    # Filter track by track
    for track in stream:
        track_events = []
        for ev in sorted(track):
            # Don't add EOTs - they get added automatically
            if type(ev) == EndOfTrackEvent:
                continue
            ev = copy.deepcopy(ev)
            # Each filter may modify the event or filter it out altogether
            if remove_drums:
                # Filter out any channel 10 events, which is typically
                # reserved for drums
                if ev.channel == 9 and type(ev) in (NoteOnEvent, NoteOffEvent):
                    continue
            if remove_pc:
                # Filter out any program change events
                if type(ev) == ProgramChangeEvent:
                    continue
            if remove_all_text:
                # Filter out any types of text event
                if type(ev) in (TextEvent, CopyrightEvent, TrackNameEvent,
                                LyricsEvent, CuePointEvent, MarkerEvent):
                    continue
            if remove_tempo:
                # Filter out any tempo events
                if type(ev) == SetTempoEvent:
                    continue
            if remove_control:
                # Filter out any control change events
                if type(ev) == ControlChangeEvent:
                    continue
            if remove_misc_control:
                # Filter out various types of control events
                # (ChannelAfterTouchEvent was listed twice in the original)
                if type(ev) in (AfterTouchEvent, ChannelAfterTouchEvent,
                                PitchWheelEvent, SysExEvent, PortEvent):
                    continue
            if real_note_offs:
                # Replace 0-velocity note-ons with note-offs
                if type(ev) == NoteOnEvent and ev.velocity == 0:
                    new_ev = NoteOffEvent()
                    new_ev.pitch = ev.pitch
                    new_ev.channel = ev.channel
                    new_ev.tick = ev.tick
                    ev = new_ev
            if one_channel:
                ev.channel = 0
            track_events.append(ev)
        # If there are events left in the track, add them all as a new track
        if len(track_events) > 1:
            if not one_track or len(new_stream.tracklist) == 0:
                new_stream.add_track()
            for ev in track_events:
                new_stream.add_event(ev)
            track_events = []
    for track in stream:
        track.sort()
    # Work out when the first note happens now
    result_start = first_note_tick(new_stream)
    # Move all events after and including this sooner so the music
    # starts at the same point it did before
    shift = result_start - input_start
    before_start = max(input_start - 1, 0)
    if shift > 0:
        for ev in new_stream.trackpool:
            if ev.tick >= result_start:
                ev.tick -= shift
            elif ev.tick < result_start and ev.tick >= input_start:
                # This event happened in a region that no longer contains
                # notes; move it back to before what's now the first note
                ev.tick = before_start
    new_stream.trackpool.sort()
    if remove_duplicates:
        # Get rid of now duplicate events
        remove_duplicate_notes(new_stream, replay=True)
    return new_stream
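# The filter pattern inside simplify above, in miniature: copy each event, let
# each enabled filter veto it, and collect the survivors. A self-contained
# sketch with plain-dict events; nothing here comes from the midi module.
import copy

def filter_events(events, drop_channel=None, drop_types=()):
    """Keep the events that survive every enabled filter."""
    kept = []
    for ev in events:
        ev = copy.deepcopy(ev)  # never mutate the caller's stream
        if drop_channel is not None and ev['channel'] == drop_channel:
            continue            # e.g. drop the drum channel (9)
        if ev['type'] in drop_types:
            continue            # e.g. drop tempo or text events
        kept.append(ev)
    return kept

events = [{'channel': 9, 'type': 'note_on'},
          {'channel': 0, 'type': 'note_on'},
          {'channel': 0, 'type': 'set_tempo'}]
print(filter_events(events, drop_channel=9, drop_types=('set_tempo',)))
# -> [{'channel': 0, 'type': 'note_on'}]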
def executeRules(self):
    try:
        print "***** Execution in Combination:", self.customer, self.supplier, self.account, self.terminal, self.product
        savepath = self.savepath
        if not os.path.exists(savepath):
            os.makedirs(savepath)
        self.fileName = self.get_filename()
        self.df2 = copy.deepcopy(self.supplierdata.loc[
            (self.supplierdata["account_type"] == self.account) &
            (self.supplierdata["supplier_terminal_name"] == self.terminal) &
            (self.supplierdata["product_name"] == self.product), :])
        dateDetails = datetime.datetime.strptime(self.analysisDate, "%d-%m-%Y")
        self.dateDetails = dateDetails
        self.daysInMonth = calendar.monthrange(dateDetails.year, dateDetails.month)[1]
        idx = pd.date_range(dateDetails.strftime('%m-%d-%Y'),
                            str(dateDetails.month) + '-' + str(self.daysInMonth) + '-' + str(dateDetails.year))
        self.df2['date'] = pd.to_datetime(self.df2[u'date'], format='%Y-%m-%d')
        self.df2.index = pd.DatetimeIndex(self.df2['date'])
        self.df2 = self.df2.reindex(idx, fill_value=0)
        self.df2['date'] = self.df2.index.values
        self.df2[u"next_refresh_date_Weekly"] = self.df2[u"next_refresh_date_Weekly"].apply(
            lambda x: int(x) if x == '0' else x)
        findWeeks = dict(self.df2[u"next_refresh_date_Weekly"].value_counts())
        if findWeeks.has_key(0):
            if findWeeks[0] == self.daysInMonth:
                savepath = savepath + "\\No_Weekly_Refresh_Dates"
                if not os.path.exists(savepath):
                    os.makedirs(savepath)
                file = open(savepath + "\\" + self.fileName + ".txt", "w")
                file.write("No Weekly Refresh Dates to Compute for combination " + self.fileName +
                           "....\n Exiting Execution.....")
                file.close()
                print "No Weekly Refresh Dates to Compute....\n Exiting Execution....."
                return 0
        validateLiftedValuesWeekly = dict(self.df2["lifted_gallons_Weekly"].value_counts())
        if validateLiftedValuesWeekly.has_key(0):
            if validateLiftedValuesWeekly[0] == self.daysInMonth:
                savepath = savepath + "\\No_Weekly_Lifted_Gallons"
                if not os.path.exists(savepath):
                    os.makedirs(savepath)
                file = open(savepath + "\\" + self.fileName + ".txt", "w")
                file.write("No Weekly Lifted gallons to Compute week switches for combination " +
                           self.fileName + "....\n Exiting Execution.....")
                file.close()
                print "No Weekly Lifted gallons to Compute week switches....\n Exiting Execution....."
                return 0
        self.df2[u"next_refresh_date_Weekly"] = self.df2[u"next_refresh_date_Weekly"].apply(
            lambda x: datetime.datetime.strptime(' '.join(x.split(' ')[:2]), '%m/%d %H:%M:%S') if x != 0 else x)
        self.df2[u"next_refresh_date_Weekly"] = self.df2[u"next_refresh_date_Weekly"].apply(
            lambda x: x.replace(year=datetime.datetime.now().year) if x != 0 else x)
        uniqueDates = self.df2[u"next_refresh_date_Weekly"].unique()
        uniqueDatesList = uniqueDates.tolist()
        uniqueDatesList.remove(0)
        # generate missing next refresh dates
        generatedList = []
        for i in uniqueDatesList:
            if i not in generatedList:
                generatedList.append(i)
        if len(generatedList) < 6:
            generatedList.sort()
            self.fillNRDbackward(generatedList)
            generatedList.sort()
            self.fillNRDforward(generatedList)
        validDates = self.getValidDates(generatedList)
        # fetch rows with valid next refresh dates
        validDatesRowValues = self.df2.loc[self.df2["next_refresh_date_Weekly"] != 0, "next_refresh_date_Weekly"]
        validDatesRowValuesPairs = validDatesRowValues.drop_duplicates().to_dict()
        # reverse the key/value pairs
        if len(validDatesRowValues) < self.daysInMonth:
            xcc = {}
            for k, v in validDatesRowValuesPairs.iteritems():
                xcc[v] = k
            if xcc.has_key(0):
                xcc.pop(0)
            self.df2.insert(self.df2.keys().get_loc("next_refresh_date_Weekly"), "Modified_NRD", 0)
            for validDate in validDates:
                if self.df2.loc[str(dateDetails.replace(day=validDate[0])), "next_refresh_date_Weekly"] == 0:
                    self.df2.loc[str(dateDetails.replace(day=validDate[0])), "next_refresh_date_Weekly"] = validDate[1]
                    self.df2.loc[str(dateDetails.replace(day=validDate[0])), 'account_type'] = self.account
                    self.df2.loc[str(dateDetails.replace(day=validDate[0])), 'supplier_terminal_name'] = self.terminal
                    self.df2.loc[str(dateDetails.replace(day=validDate[0])), 'product_name'] = self.product
                    self.df2.loc[str(dateDetails.replace(day=validDate[0])), "Modified_NRD"] = 1
        else:
            print "All next refresh Dates Valid"
        # set up columns for daily, weekly and monthly
        self.df2.insert(self.df2.keys().get_loc("lifted_gallons_Daily"), "lifted_gallons_daily_modified", self.df2["lifted_gallons_Daily"].values)
        self.df2.insert(self.df2.keys().get_loc("lifted_gallons_daily_modified"), "lifted_gallons_daily_flag", 0)
        self.df2.insert(self.df2.keys().get_loc("lifted_gallons_Weekly"), "Lifted_actual_weekly", self.df2["lifted_gallons_Weekly"].values)
        self.df2.insert(self.df2.keys().get_loc("lifted_gallons_Weekly"), "lifted_gallons_weekly_flag", 0)
        self.df2.insert(self.df2.keys().get_loc("lifted_gallons_Monthly"), "Lifted_actual_monthly", self.df2["lifted_gallons_Monthly"].values)
        self.df2.insert(self.df2.keys().get_loc("lifted_gallons_Monthly"), "lifted_gallons_monthly_flag", 0)
        ruleFactoryObj = ruleFactory()
        ruleAttributes = ruleFactoryObj.fetch_rules((self.customer).lower(), (self.supplier).lower())
        for ruleAttribute in ruleAttributes:
            stat = getattr(self, ruleAttribute)(self.df2)
            if type(stat) is int:
                return 0
        if "lifted_gallons_Monthly" in self.df2.keys():
            self.df2["sanityMonthly_CumulativeDaily_WeeksByLiftedGallons"] = 0
            self.df2.loc[self.df2.loc[:, "lifted_gallons_modified_WeeksByLiftedGallons"].cumsum() ==
                         self.df2.loc[:, "lifted_gallons_Monthly"],
                         "sanityMonthly_CumulativeDaily_WeeksByLiftedGallons"] = 1
            self.df2["sanityMonthly_CumulativeDaily_WeeksByNextRefreshDate"] = 0
            self.df2.loc[self.df2.loc[:, "lifted_gallons_daily_modified"].cumsum() ==
                         self.df2.loc[:, "lifted_gallons_Monthly"],
                         "sanityMonthly_CumulativeDaily_WeeksByNextRefreshDate"] = 1
        self.df2.to_excel(savepath + "\\" + self.fileName + ".xls")
        return self.df2
    except Exception as e:
        savepath = self.savepath + "\\" + "Exception"
        if not os.path.exists(savepath):
            os.makedirs(savepath)
        file = open(savepath + "\\" + self.fileName + ".txt", "w")
        file.write("Exception in Combination:" + "\ncustomer : " + self.customer + "\nSupplier : " + self.supplier +
                   "\nAccount : " + self.account + "\nTerminal: " + self.terminal + "\nProduct : " + self.product +
                   "\nException : " + str(e))
        file.close()
        print "Exception in Combination:", self.customer, self.supplier, self.account, self.terminal, self.product, e
        return 0