Example #1
0
def calculateFullList(data_set, data_label, features_num, fn, jump=1):
	# Expand sparse (doc_id, feature_idx, value) rows into dense per-document
	# feature vectors and write each finished vector via writeOneList.
	# Only documents whose integer id is divisible by `jump` are emitted
	# (jump=1 keeps every document).  Rows for the same doc_id are assumed
	# to be contiguous in data_set.
	#
	# data_set:     sequence of [doc_id, feature_idx, value] rows
	# data_label:   maps int doc id -> label written with each vector
	# features_num: dense vector length; feature_idx is 1-based
	# fn:           sink passed through to writeOneList
	doc = []       # dense vector currently being accumulated
	prev_id = -1   # doc id the accumulator belongs to
	for i in range(len(data_set)):
		if not data_set[i][0] == prev_id:
			# New document: apply the sampling stride first.
			# NOTE(review): when a doc is skipped prev_id is NOT updated, so
			# each remaining row of the skipped doc re-enters this branch —
			# confirm that is intended.
			if not (int(data_set[i][0]) % jump == 0):
				continue
			if doc:
				# Flush the previous document's vector before starting anew.
				writeOneList(doc, data_label[int(prev_id)],  fn)
			prev_id = data_set[i][0]
			doc = [ '0' for j in range(features_num) ]
			doc[int(data_set[i][1]) - 1] = data_set[i][2]  # 1-based -> 0-based
			# import pdb; pdb.set_trace()
			# doc[features_num] = data_label[int(data_set[i][0])]
			# tmp_lst = [ '0' for i in range(features_num + 1) ]
			# docs[data_set[i][0]] = tmp_lst
			# docs[data_set[i][0]][int(data_set[i][1])] = data_set[i][2]
			# docs[data_set[i][0]][features_num] = data_label[int(data_set[i][0])]
		else:
			# Same document: fill in another feature slot.
			# NOTE(review): bare except + pdb is debug scaffolding, and an
			# out-of-range feature_idx lands here — consider narrowing.
			try:
				doc[int(data_set[i][1]) - 1] = data_set[i][2]
			except:
				print "An error has occured and we have entered debug mode."
				import pdb; pdb.set_trace()
	# Flush the final document.
	# NOTE(review): if no document matched the stride this writes doc=[] with
	# prev_id=-1 (data_label[-1]) — confirm callers never hit that case.
	writeOneList(doc, data_label[int(prev_id)], fn)
Example #2
0
    def _relayout(self):
        """Re-grid all child widgets into a roughly square rows x cols table.

        Detaches every widget, resizes the table to the smallest near-square
        grid holding len(self.widgets) cells, then re-attaches the widgets in
        row-major order.  Trailing cells of the last row stay empty.
        """
        # calculate number of rows and cols, try to maintain a square
        # TODO: take into account the window geometry
        num_widgets = len(self.widgets)
        rows = int(round(math.sqrt(num_widgets)))
        cols = rows
        if rows**2 < num_widgets:
            cols += 1

        # remove all the old widgets
        for w in self.widgets:
            self.remove(w)

        self.resize(rows, cols)

        # add them back in, in a grid
        for i in range(0, rows):
            for j in range(0, cols):
                index = i*cols + j
                if index >= num_widgets:
                    break
                # BUG FIX: attach() used to sit outside the bounds check, so
                # the last widget was re-attached into every leftover cell
                # (and `widget` was unbound when the container was empty).
                widget = self.widgets[index]
                self.attach(widget, j, j+1, i, i+1,
                            xoptions=gtk.FILL|gtk.EXPAND,
                            yoptions=gtk.FILL|gtk.EXPAND,
                            xpadding=0, ypadding=0)
Example #3
0
	def toXML(self, writer, ttFont):
		"""Serialize this glyph to XML: composite glyphs emit their
		components, simple glyphs emit explicit contours; either form is
		followed by the hinting program when present."""
		if self.isComposite():
			for component in self.components:
				component.toXML(writer, ttFont)
			if hasattr(self, "program"):
				writer.begintag("instructions")
				self.program.toXML(writer, ttFont)
				writer.endtag("instructions")
				writer.newline()
		else:
			start = 0
			for contourIndex in range(self.numberOfContours):
				stop = self.endPtsOfContours[contourIndex] + 1
				writer.begintag("contour")
				writer.newline()
				# One <pt> per point of this contour, with its on-curve flag.
				for pointIndex in range(start, stop):
					point = self.coordinates[pointIndex]
					writer.simpletag("pt", [
							("x", point[0]),
							("y", point[1]),
							("on", self.flags[pointIndex] & flagOnCurve)])
					writer.newline()
				start = stop
				writer.endtag("contour")
				writer.newline()
			if self.numberOfContours:
				writer.begintag("instructions")
				self.program.toXML(writer, ttFont)
				writer.endtag("instructions")
				writer.newline()
Example #4
0
def spin(dist_matrix, y, shuffle=False):
    """Sorted-neighborhood ordering of a distance matrix (SPIN-style).

    Repeatedly reorders ``dist_matrix`` (carrying the labels ``y`` along)
    with ``neighborhood_sort`` while halving the neighborhood width sigma,
    stopping an inner round early once the energy value repeats.

    Parameters
    ----------
    dist_matrix: np.array([n,n])
    y: np.array([n])
    shuffle: boolean, if True the initial ordering is randomly permuted

    Returns
    -------
    dist_matrix: np.array([n,n])
    y: np.array([n])
    """
    # BUG FIX: `range(...)` is immutable on Python 3, so np.random.shuffle
    # raised; materialize the permutation as a list.
    si = list(range(len(dist_matrix)))
    if shuffle:
        np.random.shuffle(si)
    sigma = 2 ** 5.
    for _ in range(6):  # anneal sigma over six rounds
        w = make_weight_matrix(sigma, len(dist_matrix))
        seen_energies = []
        for _ in range(20):
            # Apply the current permutation to labels and the matrix.
            y = y[si]
            dist_matrix = dist_matrix[np.meshgrid(si, si)]
            si, se, mm, e = neighborhood_sort(dist_matrix, sigma, w)
            if e in seen_energies:
                break  # energy cycled -> this sigma has converged
            seen_energies.append(e)
        sigma = sigma / 2.0
    return dist_matrix, y
Example #5
0
def test():
    """Exhaustively sweep hushen/chye score pairs through sys_risk_analyse."""
    scores = range(-5, 6)
    for hushen_score in scores:
        for chye_score in scores:
            print('hushen_score=', hushen_score, 'chye_score=', chye_score)
            position, sys_score, is_sys_risk = sys_risk_analyse(
                max_position=0.85,
                ultimate_coefficient=0.25,
                shzh_score=hushen_score,
                chy_score=chye_score)
Example #6
0
    def _instantiate_processes(self, job_count):
        """Prepare ``job_count`` BearRunner instances plus their shared IPC.

        Collects the section's files, builds the queues and manager-backed
        dicts shared by all runners, instantiates the bears, and pre-fills
        the filename and global-bear work queues.

        :param job_count: number of BearRunner processes to create.
        :return: (list of BearRunner instances, the kwargs dict used to
                 construct them).
        """
        filename_list = collect_files(path_list(self.section.get('files', "")))
        file_dict = self._get_file_dict(filename_list)

        # Manager dicts are proxied across processes; plain Queues carry work
        # items, log messages and control signals.
        manager = multiprocessing.Manager()
        global_bear_queue = multiprocessing.Queue()
        filename_queue = multiprocessing.Queue()
        local_result_dict = manager.dict()
        global_result_dict = manager.dict()
        message_queue = multiprocessing.Queue()
        control_queue = multiprocessing.Queue()

        # Synchronization point shared by all runners (consumed by BearRunner).
        barrier = Barrier(parties=job_count)

        bear_runner_args = {"file_name_queue": filename_queue,
                            "local_bear_list": self.local_bear_list,
                            "global_bear_list": self.global_bear_list,
                            "global_bear_queue": global_bear_queue,
                            "file_dict": file_dict,
                            "local_result_dict": local_result_dict,
                            "global_result_dict": global_result_dict,
                            "message_queue": message_queue,
                            "control_queue": control_queue,
                            "barrier": barrier,
                            "TIMEOUT": 0.1}

        self._instantiate_bears(file_dict,
                                message_queue)
        self._fill_queue(filename_queue, filename_list)
        self._fill_queue(global_bear_queue, range(len(self.global_bear_list)))

        return ([BearRunner(**bear_runner_args) for i in range(job_count)],
                bear_runner_args)
Example #7
0
 def possible_moves(self, side):
     """Return all (row, col) squares that are empty and give `side` a
     legal flip, scanning the 8x8 board row-major."""
     return [(row, col)
             for row in range(8)
             for col in range(8)
             if self.board[row, col] == 0 and self.valid_flip(row, col, side)]
Example #8
0
	def makeHist(self, normalize = True, doPMF = True):
		"""Histogram the 1-D data self.z into self.nbins bins and pickle the
		result {'bin_centers', 'bin_vals', 'pmf'} to self.data.

		No-op when the data has already been pickled.
		:param normalize: normalize counts to unit area via np.trapz.
		:param doPMF: also store the potential of mean force, -log(counts).
		"""
		if self.isDataPickled:
			return

		# Only one-dimensional data is supported.
		if not self.Dim == 1:
			raise TypeError('Variable # mismatch')

		z = self.z
		Nframes = len(z)
		# Widen the binning range slightly beyond the data extremes
		# (NOTE(review): the 0.98/1.02 scaling assumes z > 0 — confirm).
		bin_min = 0.98 * z.min(); bin_max = 1.02*z.max()
		delta = (bin_max - bin_min)/float(self.nbins)
		bin_centers = np.zeros(self.nbins)
		bin_vals = np.zeros(self.nbins)
		pmf = np.zeros(self.nbins)
		for i in range(self.nbins):
			bin_centers[i] = bin_min + (i+0.5) * delta

		# Bin each frame, reporting progress via the pb progress bar.
		frameStatus = pb(Text = 'Binning frame by frame', Steps = Nframes)
		for i in range(Nframes):

			assignment = int((z[i] - bin_min)/delta)
			bin_vals[assignment] += 1.0

			frameStatus.Update(i)

		if normalize:
			#bin_vals /= (np.sum(bin_vals) * delta)
			bin_vals /= np.trapz(bin_vals, bin_centers, dx = delta)
		if doPMF:
			pmf = - np.log(bin_vals)


		hist = {'bin_centers': bin_centers, 'bin_vals': bin_vals, 'pmf' : pmf}
		# NOTE(review): text-mode 'w' works for pickle on Python 2 only;
		# Python 3 would need 'wb'.
		pickle.dump(hist, open(self.data, 'w'))
		self.isDataPickled = True
def f(n):
    # Read n lines of whitespace-separated digit groups, join each line into
    # one integer and count duplicates; then print every distinct number
    # zero-padded to 26 digits, re-grouped with spaces at fixed columns,
    # followed by its count.  (Python 2: raw_input / print statement.)
    dict = {}  # NOTE(review): shadows the builtin name `dict`
    for i in range(n):
        s = int("".join(raw_input().split()))
        if s in dict:
            dict[s] += 1
        else:
            dict[s] = 1
    k = raw_input()  # consumed but unused — TODO confirm the input format
    # print dict
    l = dict.keys()
    l.sort()
    # print l
    for i in l:
        temp = str(i)
        # left-pad with zeros to a fixed width of 26 digits
        count = 26 - len(temp)
        for j in range(count):
            temp = "0" + temp
        temp = list(temp)
        # re-insert group separators at fixed columns (2, 11, 16, 21, 26)
        temp.insert(2, " ")
        temp.insert(11, " ")
        temp.insert(16, " ")
        temp.insert(21, " ")
        temp.insert(26, " ")
        temp = "".join(temp)
        temp = temp + " " + str(dict[i])
        print temp
Example #10
0
    def solve(self, board):
        """
        :type board: List[List[str]]
        :rtype: void Do not return anything, modify board in-place instead.

        Capture all 'O' regions not connected to the border: BFS from every
        border cell, temporarily mark border-connected 'O's as '#', then
        rewrite the board ('#' -> 'O', everything else -> 'X').
        """
        if not board or not board[0]:
            return

        n_rows, n_cols = len(board), len(board[0])

        # Seed the frontier with every border coordinate.
        frontier = deque()
        for r in range(n_rows):
            frontier.append((r, 0))
            frontier.append((r, n_cols - 1))
        for c in range(n_cols):
            frontier.append((0, c))
            frontier.append((n_rows - 1, c))

        # BFS: mark every 'O' reachable from the border with '#'.
        while frontier:
            r, c = frontier.popleft()
            if 0 <= r < n_rows and 0 <= c < n_cols and board[r][c] == 'O':
                board[r][c] = '#'
                for neighbor in ((r - 1, c), (r, c - 1), (r + 1, c), (r, c + 1)):
                    frontier.append(neighbor)

        # Survivors ('#') revert to 'O'; all other cells are captured as 'X'.
        board[:] = [['O' if board[r][c] == '#' else 'X'
                     for c in range(n_cols)]
                    for r in range(n_rows)]
Example #11
0
    def step(self):
        """Advance the life grid one generation.

        Returns True when at least one cell changed, False when the grid has
        reached a fixed point.  Honors self.gridWrap for toroidal edges.
        """
        self.gridLastState = copy.deepcopy(self.grid)

        size = self.gridSize
        for row in range(size):
            for col in range(size):
                # Start at -1 for a live cell: the 3x3 scan below counts the
                # cell itself once, so this cancels that self-count.
                alive = -1 if self.gridLastState[row][col] == True else 0
                for dx in (-1, 0, 1):
                    for dy in (-1, 0, 1):
                        x = row + dx
                        y = col + dy
                        inside = (-1 < x < size) and (-1 < y < size)
                        if not inside:
                            if self.gridWrap == False:
                                continue
                            # Toroidal wrap-around.
                            x = x % size
                            y = y % size
                        if self.gridLastState[x][y] == True:
                            alive += 1
                self.grid[row][col] = self.calcNextState(
                    self.gridLastState[row][col], alive)

        return self.grid != self.gridLastState
Example #12
0
def label_nodes_with_class(nodes_xyt, class_maps, pix):
  """
  Assign a class label to each node by dilating the per-class maps by `pix`
  pixels and reading the dominant class at each node's rounded (x, y).

  Args:
    nodes_xyt: node poses; columns 0 and 1 are x and y in pixels.
    class_maps: H x W x n_classes per-class occupancy maps.
    pix: disk radius (pixels) used to dilate each class map.

  Returns:
    class_maps__: one-hot class_map for each class.
    node_class_label: one-hot class_map for each class, nodes_xyt.shape[0] x n_classes
  """
  # Assign each pixel to a node.
  selem = skimage.morphology.disk(pix)
  class_maps_ = class_maps*1.
  for i in range(class_maps.shape[2]):
    class_maps_[:,:,i] = skimage.morphology.dilation(class_maps[:,:,i]*1, selem)
  # Winning class per pixel; pixels covered by no class get label -1.
  class_maps__ = np.argmax(class_maps_, axis=2)
  class_maps__[np.max(class_maps_, axis=2) == 0] = -1

  # For each node pick out the label from this class map.
  x = np.round(nodes_xyt[:,[0]]).astype(np.int32)
  y = np.round(nodes_xyt[:,[1]]).astype(np.int32)
  ind = np.ravel_multi_index((y,x), class_maps__.shape)
  node_class_label = class_maps__.ravel()[ind][:,0]

  # Convert to one hot versions.
  # BUG FIX: the deprecated np.bool alias was removed in NumPy 1.24; use the
  # builtin bool dtype instead.
  class_maps_one_hot = np.zeros(class_maps.shape, dtype=bool)
  node_class_label_one_hot = np.zeros((node_class_label.shape[0], class_maps.shape[2]), dtype=bool)
  for i in range(class_maps.shape[2]):
    class_maps_one_hot[:,:,i] = class_maps__ == i
    node_class_label_one_hot[:,i] = node_class_label == i
  return class_maps_one_hot, node_class_label_one_hot
Example #13
0
def ind_complement(v, ind):
    """Return the sorted indices of `v` that `ind` does NOT select.

    `ind` may be a single index, a slice, or an iterable of indices;
    negative indices are normalized against len(v).
    """
    if isinstance(ind, _INDEXTYPES):
        ind = [ind]
    elif type(ind) is slice:
        ind = range(*ind.indices(len(v)))
    n = len(v)
    excluded = {i if i >= 0 else n + i for i in ind}
    return sorted(set(range(n)) - excluded)
Example #14
0
def calculateSparseDictCOO(data_set, data_label_hash, jump=1, valid_flag=False):
	"""Split sparse (doc_id, feature_idx, value) rows into COO train/valid sets.

	Documents whose id is divisible by `jump` are kept; when valid_flag is
	set, every row index divisible by the module-level `validation_perc`
	selects a kept document for the validation split (TODO confirm global).

	Returns (train, valid); each is (translate(row_ids), cols, values, labels)
	with 1-based feature indices shifted to 0-based columns.
	NOTE(review): written for Python 2 — map/filter results are indexed and
	concatenated as lists below.
	"""
	row = []
	col = []
	data = []
	row_valid = []
	col_valid = []
	data_valid = []

	# Distinct doc ids, then keep every `jump`-th id for training.
	doc_ids = set(sorted(map(lambda row:int(row[0]), data_set)))
	base_ids_list = filter(lambda ids: ids % jump == 0, doc_ids)
	train_ids = base_ids_list
	valid_ids = set()
	if valid_flag:
		# Carve a validation subset out of the kept ids.
		valid_index = filter(lambda ids: ids % validation_perc == 0, range(len(base_ids_list)))
		valid_ids = [base_ids_list[i] for i in valid_index]
		base_ids = set(base_ids_list)
		train_ids = sorted(base_ids - set(valid_ids))

	labels = map(lambda trid: int(data_label_hash[trid]), train_ids)
	labels_valid = map(lambda vlid: int(data_label_hash[vlid]), valid_ids)
	# Route each sparse entry to the split owning its document.
	for i in range(len(data_set)):
		if int(data_set[i][0]) in train_ids:
			row.append(int(data_set[i][0]))
			col.append(int(data_set[i][1])-1)
			data.append(int(data_set[i][2]))
			# labels.append(int(data_label_hash[int(data_set[i][0])]))
		elif int(data_set[i][0]) in valid_ids:
			row_valid.append(int(data_set[i][0]))
			col_valid.append(int(data_set[i][1])-1)
			data_valid.append(int(data_set[i][2]))
			# labels_valid.append(int(data_label_hash[int(data_set[i][0])]))

	train = translate(row), col, data, labels
	valid = translate(row_valid), col_valid, data_valid, labels_valid
	return train, valid
	def __init__(self):
		"""Build the pickbelt UI: load the widget XML, map each section to a
		page widget, and create left/right pickbelt buttons per section wired
		to update_view."""
		self.page_widgets = {}   # section index -> page widget
		self.dict_lt = {}        # section index -> left pickbelt button
		self.dict_rt = {}        # section index -> right pickbelt button
		self.widget = load_uh_widget(self.widget_xml, style=self.style)

		self.pickbelts_container_lt = self.widget.findChild(name="left_pickbelts")
		self.pickbelts_container_rt = self.widget.findChild(name="right_pickbelts")

		# Look up the page widget backing each section by its name.
		for i in range(len(self.sections)):
			self.page_widgets[i] = self.widget.findChild(name=self.sections[i][0])

		# Create the required pickbelts
		for side in ('lt', 'rt'):
			for i in range(len(self.sections)):
				pickbelt = ImageButton(is_focusable=False)
				pickbelt.name = self.sections[i][0] + '_' + side
				pickbelt.text = self.sections[i][1]
				pickbelt.font = "small_tooltip"
				# Offset each belt diagonally so they fan out down the side.
				pickbelt.position = (self.pickbelt_start_pos[0]+5*i, self.pickbelt_start_pos[1]+70*i)
				pickbelt.capture(Callback(self.update_view, i), event_name="mouseClicked")
				if side == 'lt':
					pickbelt.up_image='content/gui/images/background/pickbelt_l.png'
					self.pickbelts_container_lt.addChild(pickbelt)
					self.dict_lt[i] = pickbelt
				else:
					pickbelt.up_image='content/gui/images/background/pickbelt_r.png'
					self.pickbelts_container_rt.addChild(pickbelt)
					self.dict_rt[i] = pickbelt
		self.widget.show() # Hack to initially setup the pickbelts properly
		self.update_view()
		self.widget.hide() # Hack to initially setup the pickbelts properly
Example #16
0
def condition_on_grades(user="c6961489"):
	"""Train one SVC per grade (0-5) on `user`'s discrete_log rows and print
	training scores, then print each model's mean score over five other users
	that have all six grades.  (Python 2 print statements.)

	NOTE(review): SQL is assembled via %-interpolation — acceptable only for
	trusted local ids; use parameterized queries otherwise.
	"""
	c = new_conn.cursor()
	models = [None, None, None, None, None, None]
	# Fit one classifier per grade on this user's feature rows.
	for i in range(6):
		c.execute('SELECT easiness, ret_reps, ret_reps_since_lapse, lapses, pred_grade, acq_reps from discrete_log where user_id="%s" and grade=%d' % (user, i))
		x_train = np.array(c.fetchall())
		c.execute('SELECT interval_bucket from discrete_log where user_id="%s" and grade=%d' % (user, i))
		y_train = np.array(c.fetchall())[:,0]
		clf = SVC()
		clf.fit(x_train, y_train)
		print clf.score(x_train, y_train)
		models[i] = clf
	print "====================="
	# Evaluate the fitted models on five users covering all six grades.
	c.execute('SELECT user_id from (select user_id, count(distinct grade) as cnt from discrete_log group by user_id) where cnt = 6 limit 5')
	users = [row[0] for row in c.fetchall()]
	scores = [0, 0, 0, 0, 0, 0]
	for user in users:
		for i in range(6):
			c.execute('SELECT easiness, ret_reps, ret_reps_since_lapse, lapses, pred_grade, acq_reps from discrete_log where user_id="%s" and grade=%d' % (user, i))
			x_train = np.array(c.fetchall())
			c.execute('SELECT interval_bucket from discrete_log where user_id="%s" and grade=%d' % (user, i))
			y_train = np.array(c.fetchall())[:,0]
			scores[i] += models[i].score(x_train, y_train)
	# Average per grade across the evaluated users.
	for i in range(6):
		scores[i] /= len(users);
		print scores[i]
Example #17
0
def tab(ye,xe):
	# Print the global matrix `t` as a ye x xe table; the Python 2 trailing
	# comma keeps each row's values on one line, the bare print ends the row.
	for y in range(ye):
		for x in range(xe):
			
			print t[y,x],
			
		print 
Example #18
0
def display(matrix, s):
	# Pretty-print `matrix` with a numbered column header (columns 1..n-1,
	# left-aligned in 4-char fields) and a row label per line.  (Python 2.)
	# NOTE(review): parameter `s` is unused here — confirm against callers.
	print '\nXX ' + ' '.join([("%-4d" % i) for i in range(1, len(matrix))])
	for i in range(len(matrix)-1):
		print "%d " % i,
		for j in range(1, len(matrix)):
				print "%-4s" % matrix[i][j],
		print
Example #19
0
 def RandomGraph(self, nodes, edges, maxweight = 100.0):
     """
     Generates a graph of random edges.
     
     @param nodes: list of nodes or number of nodes in the random graph
     @param edges: number of edges to generate in the random graph
     @type edges: integer
     @param maxweight: maximum weight of each edge. default = 100.0
     @type maxweight: float
     """
     import random
     nodes_size = 0
     if type(nodes) == int:
         # NOTE(review): row 0 holds range(nodes) and the zero-filled data
         # rows are appended after it, so `adjacency` has nodes+1 rows;
         # edge[0] below is generated with a +1 offset to match — confirm
         # this header-row layout is what makeGraphFromAdjacency expects.
         adjacency = [range(nodes)]
         nodes_size = nodes
         for node in range(nodes):
             adjacency.append([0 for x in range(nodes)])
     elif type(nodes) == list:
         # NOTE(review): here the node list itself is kept and nodes_size
         # zero rows are appended after it — asymmetric with the int branch.
         adjacency = nodes
         nodes_size = len(nodes)
         for node in range(nodes_size):
             adjacency.append([0 for x in range(nodes_size)])
     else: raise FunctionParameterTypeError('nodes can only be a list \
             or integer')
     count = 0
     # Keep drawing random (row, col, weight) triples until `edges` distinct
     # cells have been filled.
     while count <= edges:
         edge = (int(random.uniform(0, nodes_size)) + 1, 
                 int(random.uniform(0, nodes_size)),
                 int(random.uniform(0, 1) * maxweight))
         if adjacency[edge[0]][edge[1]] == 0:
             adjacency[edge[0]][edge[1]] = edge[2]
             count = count + 1
     self.makeGraphFromAdjacency(adjacency)
Example #20
0
def BowTieAdjacencyDic():
    """
    Return the adjacency dictionary of the bow tie with indices given by
    
        1 ----- 2
         \     /
          \   /
           \ /
            0
           / \
          /   \
         /     \
        3 ----- 4
    
    To make it non-redundant we will require that
        
        i ---> j  iff   i < j
        
    """
    # Start every node with no outgoing edges.
    adjacency = {node: [] for node in range(5)}

    # The center (0) points at every other node.
    adjacency[0] = [1, 2, 3, 4]

    # Close the two triangles.
    adjacency[1].append(2)
    adjacency[3].append(4)

    return adjacency
Example #21
0
 def flip(self, x, y, side):
     """Flip opponent pieces along every valid ray radiating from (x, y)
     for `side`, scanning the eight compass directions."""
     directions = [(dx, dy)
                   for dx in range(-1, 2)
                   for dy in range(-1, 2)
                   if (dx, dy) != (0, 0)]
     for dx, dy in directions:
         if self.valid_ray(x, y, side, dx, dy):
             self.flip_ray(x, y, side, dx, dy)
def load_maze(file_name, wall_char="#", start_char="S", end_char="E"):
    """loads a maze from a file,
    The mazes must be rectangular and bordered with the wall char.
    a maze file must have only a single start_char and a single end_char
    defining the start and finish points of the maze.

    Returns a Maze built from a boolean wall grid plus the start/end
    (row, col) positions.
    """
    # open the file and read the lines
    maze_lines = open(file_name).readlines()
    # find the dimensions of the maze
    n_rows = len(maze_lines)
    n_cols = len(maze_lines[0])
    # BUG FIX: the old `walls = [[]]*n_rows` aliased ONE shared list for
    # every row, and non-wall cells were never appended at all, so the grid
    # was malformed.  Build a fresh row per line with an explicit True/False
    # entry for every column instead.
    walls = []
    start_pos = None
    end_pos = None
    # iterate over the rows and columns
    for row_idx in range(n_rows):
        row = []
        for col_idx in range(n_cols):
            cur_char = maze_lines[row_idx][col_idx]
            # a wall cell is True, anything else (including the trailing
            # newline column) is False
            row.append(cur_char == wall_char)
            # check for start and finish points
            if cur_char == start_char:
                start_pos = row_idx, col_idx
            elif cur_char == end_char:
                end_pos = row_idx, col_idx
        walls.append(row)
    walls = np.array(walls, dtype = bool)

    # creating an instance/object of the Maze class
    maze_object = Maze(walls, start_pos, end_pos)
    return maze_object
Example #23
0
def resample(oldrate,newrate,x,n,dtype,factor):
    """Rational-rate resampling: zero-stuff upsample, FIR antialias filter,
    then decimate, scaling the output by `factor`.  (Python 2.)

    oldrate/newrate: sample rates in Hz; x: input samples; n: FIR length
    passed to firfilter; dtype: output dtype; factor: output amplitude scale.
    """
    print "Resampling from",oldrate,"Hz to",newrate,"Hz, amplification factor",factor
    # Reduce the rate ratio to the smallest up/down integer pair.
    rategcd = gcd(oldrate,newrate)
    uprate = newrate / rategcd
    dnrate = oldrate / rategcd

    oldcount = len(x)
    midcount = oldcount * uprate
    newcount = midcount / dnrate

    print "Upsampling by",uprate
    if uprate == 1:
        yout = np.asarray(x, dtype=dtype)
    else:
        # Zero-stuff: one input sample every `uprate` slots, scaled by uprate
        # to preserve amplitude through the interpolation filter.
        # NOTE(review): range(0, oldcount-1) skips the final input sample —
        # confirm this off-by-one is intentional.
        yout = np.zeros(midcount, dtype=dtype)
        for i in range(0, oldcount-1):
            yout[i * uprate] = x[i] * uprate

    # Cut off at the narrower of the two Nyquist limits.
    wl = min(1.0/uprate,1.0/dnrate)
    print "Antialias filtering at",wl
    
    midrate = oldrate * uprate
    filt = firfilter(0, (midrate * wl) / 2.0, midrate, n)
    y = signal.lfilter(filt, 1, yout)

    print "Downsampling by",dnrate
    if dnrate == 1:
        yout = np.asarray(y, dtype=dtype)
    else:
        # Keep every dnrate-th filtered sample (same off-by-one note above).
        yout = np.zeros(newcount, dtype=dtype)
        for i in range(0, newcount-1):
            yout[i] = y[i * dnrate] * factor

    return yout
Example #24
0
 def test_verify_leaf_inclusion_all_nodes_all_tree_sizes_up_to_4(self):
     """Verify Merkle inclusion proofs for every leaf of every tree size
     from 1 to 4, using hand-computed audit paths."""
     leaves = ["aa", "bb", "cc", "dd"]
     hh = HexTreeHasher()
     leaf_hashes = [hh.hash_leaf(l) for l in leaves]
     hc = hh.hash_children
     # Expected audit path (sibling hashes, leaf level upward) for each leaf
     # index, keyed by tree size.
     proofs_per_tree_size = {
         1: [[] ],
         2: [[leaf_hashes[1]], [leaf_hashes[0]]],
         3: [[leaf_hashes[1], leaf_hashes[2]], # leaf 0
             [leaf_hashes[0], leaf_hashes[2]], # leaf 1
             [hc(leaf_hashes[0], leaf_hashes[1])]], # leaf 2
         4: [[leaf_hashes[1], hc(leaf_hashes[2], leaf_hashes[3])], # leaf 0
             [leaf_hashes[0], hc(leaf_hashes[2], leaf_hashes[3])], # leaf 1
             [leaf_hashes[3], hc(leaf_hashes[0], leaf_hashes[1])], # leaf 2
             [leaf_hashes[2], hc(leaf_hashes[0], leaf_hashes[1])], # leaf 3
             ]
         }
     tree = compact_merkle_tree.CompactMerkleTree(hasher=HexTreeHasher())
     verifier = ledger.merkle_verifier.MerkleVerifier(HexTreeHasher())
     # Increase the tree by one leaf each time
     for i in range(4):
         tree.append(leaves[i])
         tree_size = i + 1
         # ... and check inclusion proof validates for each node
         # of the tree
         for j in range(tree_size):
           proof = proofs_per_tree_size[tree_size][j]
           sth = self.STH(tree.root_hash, tree_size)
           self.assertTrue(
               verifier.verify_leaf_inclusion(
                   leaves[j], j, proof, sth))
Example #25
0
 def __f2tpos(self, fen, frdpos, emypos):
     """Fill the friendly/enemy one-hot position tensors from a FEN string.

     fen: board description — field 0 holds '/'-separated ranks, field 1 the
     side to move ('w' means upper-case pieces are friendly).
     frdpos/emypos: arrays indexed [file][rank][piece-layer], filled in place
     using self.__chesslayer to allocate a layer slot per piece letter.
     Returns the two arrays.
     NOTE(review): `poslist[9 - i]` reads ranks bottom-up and assumes a
     10-rank (xiangqi-style) FEN — confirm.
     """
     self.__init_clayer()
     poslist = fen.split()[0].split('/')
     player = fen.split()[1]
     for i in range(len(poslist)):
         item = poslist[9 - i]
         index = 0
         for j in range(len(item)):
             if item[j].isupper():
                 # Upper-case piece: friendly when white is to move.
                 if player == 'w':
                     frdpos[index][i][self.__chesslayer[item[j]]] = 1
                 else:
                     emypos[index][i][self.__chesslayer[item[j]]] = 1
                 self.__chesslayer[item[j]] += 1
                 index += 1
             elif item[j].islower():
                 # Lower-case piece: enemy when white is to move.
                 if player == 'w':
                     emypos[index][i][self.__chesslayer[item[j]]] = 1
                 else:
                     frdpos[index][i][self.__chesslayer[item[j]]] = 1
                 self.__chesslayer[item[j]] += 1
                 index += 1
             else:
                 # Digit: run of empty files to skip.
                 index += int(item[j])
     return frdpos, emypos
Example #26
0
def createMatrix(j_range = 0 ,entry = "data"):
	"""Aggregate <entry>_collective.txt into two TSV matrices over 42
	fractions: per-cluster membership counts and summed intensities.

	j_range: number of clusters (output rows); must be passed explicitly.
	entry: file-name prefix for input and output files.
	(Python 2: print statement / xrange.)
	"""
	# j_range = 0
	# if entry == "assigned":
	# 	j_range = 1290442
	# else:
	# 	j_range = 5425990
	if j_range == 0:
		print "You need to pass in the number of clusters as an argument to this function."
		sys.exit(1)
	with open(entry+"_collective.txt","r") as fin, open(entry+"_clust_mem_matrix.txt","w") as out_1, open(entry+"_pre_intensity_matrix.txt","w") as out_2:
		clust_matrix = [[0 for i in range(0,42)] for j in range(0,j_range)]
		int_matrix = [[0.0 for i in range(0,42)] for j in range(0,j_range)]
		fin.readline()  # skip the header line
		for line in fin:
			line = line.split()
			# Cluster id is the suffix after '.' in column 12.
			clust = int(line[12].split(".")[1])
			# Integer-divide the raw id into a fraction column
			# (NOTE(review): stride of 11 — confirm against the file format).
			f_index = int(line[0])/11	
			clust_matrix[clust][f_index] = clust_matrix[clust][f_index] + 1
			int_matrix[clust][f_index] = int_matrix[clust][f_index] + float(line[10])
		# Header row: tab-separated fraction indices.
		for i in xrange(0,42):			
			out_1.write("".join(["\t",str(i)]))
			out_2.write("".join(["\t",str(i)]))
		out_1.write("\n")
		out_2.write("\n")
		# One row per cluster, prefixed with a synthetic row label.
		for i in xrange(0,j_range):
			for j in xrange(0,42):
				if j == 0:
					out_1.write("".join([entry,"_0_0.",str(i)]))
					out_2.write("".join([entry,"_0_0.",str(i)]))
				out_1.write("".join(["\t",str(clust_matrix[i][j])]))
				out_2.write("".join(["\t",str(int_matrix[i][j])]))
			out_1.write("\n")
			out_2.write("\n")
	return None
Example #27
0
def search_in_board(words, board):
    """Boggle-style word search: find which of `words` can be traced on
    `board` through adjacent cells, using a trie for prefix pruning.

    Returns (word, path) pairs via filter_words.
    NOTE(review): Python 2 semantics — filter/map results are used as lists.
    NOTE(review): `words[::-1]` reverses the LIST order, not each word;
    a trie of reversed words would be [w[::-1] for w in words] — confirm.
    """
    trie = Trie.create(words+words[::-1])
    acc_hash = {}          # position -> list of partial (path, prefix) states
    handled_paths = []     # paths already extended, to avoid rework
    pos_list = [(i,j) for i in range(len(board)) for j in range(len(board[0]))]
    while len(pos_list) > 0:
        i,j = pos_list.pop(0)
        cur_char = board[i][j]
        # ((0,0),'o',[])
        cur_word_point = ([(i,j)], cur_char)
        # [((1,0),'e'),((0,1),'a')]
        neighbors = find_neighbors((i,j),board)
        cur_words = acc_hash.get((i,j), [])
        # remove all the paths which have been handled
        cur_words = filter(lambda x: x[0] not in handled_paths, cur_words)
        # Extend each pending prefix (plus a fresh single-cell one) into the
        # neighbors, keeping only extensions that are still trie prefixes.
        filtered_prefixs = filter_by_prefix(
                cur_words+[cur_word_point], neighbors, trie)
        # [((0,1),'oa',[(0,0)])]
        update_acc_hash(acc_hash, filtered_prefixs)
        # add all the paths which have been handled
        map(lambda x: handled_paths.append(x[0]), cur_words)
        # add some position for new path
        for cur_word_point in filtered_prefixs:
            cur_pos = cur_word_point[0][-1]
            if cur_pos not in pos_list:
                pos_list.append(cur_pos)


    # return acc_hash
    word_points = filter_words(acc_hash)
    return map(lambda x: (x[1], x[0]), word_points)
Example #28
0
def knapsack_unbounded_dp(items, C):
    """Solve the unbounded knapsack problem by DP over capacities 0..C.

    items: sequence of (name, size, value) tuples, indexed via the module
           constants NAME/SIZE/VALUE.
    C: integer knapsack capacity.

    Returns (best value, total size used, number of items bagged,
             [(name, count), ...] sorted by name).
    """
    # order by max value per item size (cosmetic; the DP below is exact)
    items = sorted(items, key=lambda item: item[VALUE]/float(item[SIZE]), reverse=True)

    # Sack keeps track of max value so far as well as the count of each item
    # in the sack.  (BUG FIX: removed leftover debug prints — the old code
    # printed the entire DP table on every inner iteration.)
    sack = [(0, [0 for i in items]) for i in range(0, C+1)]   # value, [item counts]
    for i, item in enumerate(items):
        name, size, value = item
        for c in range(size, C+1):
            sackwithout = sack[c-size]  # previous max sack to try adding this item to
            trial = sackwithout[0] + value
            if sack[c][0] < trial:
                # old max sack with this added item is better
                sack[c] = (trial, sackwithout[1][:])
                sack[c][1][i] += 1   # use one more

    value, bagged = sack[C]
    numbagged = sum(bagged)
    size = sum(items[i][1]*n for i, n in enumerate(bagged))
    # convert to (item, count) pairs in name order
    bagged = sorted((items[i][NAME], n) for i, n in enumerate(bagged) if n)

    return value, size, numbagged, bagged
Example #29
0
def make_video(events, t0=0.0, t1=None, dt_frame=0.01, tau=0.01):
    """Render event data into a (n_frames, 128, 128) video array.

    events: structured array with a "t" field (other fields are consumed by
    add_to_image).  t0/t1: time window; t1 defaults to the last event time.
    dt_frame: seconds of simulation per output frame.
    tau: exponential decay constant of the per-pixel trace; tau <= 0 clears
    the image at every step instead.
    """
    if t1 is None:
        t1 = events["t"].max()

    ts = events["t"]
    dt = 1e-3           # simulation step (1 ms)
    nt = int((t1 - t0) / dt) + 1
    # nt = min(nt, 1000)  # cap at 1000 for now

    image = np.zeros((128, 128))
    images = np.zeros((nt, 128, 128))

    for i in range(nt):
        # --- decay image
        image *= np.exp(-dt / tau) if tau > 0 else 0
        # image *= 0

        # --- add events falling into this step (selected via close(ts, ti))
        ti = t0 + i * dt
        add_to_image(image, events[close(ts, ti)])

        images[i] = image

    # --- average in frames: sum nt_frame consecutive step images per frame
    nt_frame = int(dt_frame / dt)
    nt_video = int(nt / nt_frame)

    video = np.zeros((nt_video, 128, 128))
    for i in range(nt_video):
        slicei = slice(i * nt_frame, (i + 1) * nt_frame)
        video[i] = np.sum(images[slicei], axis=0)

    return video
 def __init__(self):
     """Initialize an empty 5x5 board plus routing bookkeeping."""
     # In input, empty squares arrive as '' (faster key-typing); internally
     # '.' marks empty for better index handling.
     # (TODO): This hack may be fixed after project is finished
     self.board = [['.'] * 5 for _ in range(5)]
     self.destinations_map = defaultdict(list)
     self.portals = []
     self.changer = []