Example No. 1
 def __init__(self):
     # In the input, empty cells are '' for faster typing; in the board representation an empty cell is '.' for easier index handling.
     # TODO: this hack may be cleaned up once the project is finished
     self.board = [['.' for j in range(5)] for i in range(5)]
     self.destinations_map = defaultdict(list)
     self.portals = []
     self.changer = []
Example No. 2
def make_video(events, t0=0.0, t1=None, dt_frame=0.01, tau=0.01):
    if t1 is None:
        t1 = events["t"].max()

    ts = events["t"]
    dt = 1e-3
    nt = int((t1 - t0) / dt) + 1
    # nt = min(nt, 1000)  # cap at 1000 for now

    image = np.zeros((128, 128))
    images = np.zeros((nt, 128, 128))

    for i in range(nt):
        # --- decay image
        image *= np.exp(-dt / tau) if tau > 0 else 0
        # image *= 0

        # --- add events
        ti = t0 + i * dt
        add_to_image(image, events[close(ts, ti)])

        images[i] = image

    # --- average in frames
    nt_frame = int(dt_frame / dt)
    nt_video = int(nt / nt_frame)

    video = np.zeros((nt_video, 128, 128))
    for i in range(nt_video):
        slicei = slice(i * nt_frame, (i + 1) * nt_frame)
        video[i] = np.sum(images[slicei], axis=0)

    return video
Example No. 3
def search_in_board(words, board):
    trie = Trie.create(words+words[::-1])
    acc_hash = {}
    handled_paths = []
    pos_list = [(i,j) for i in range(len(board)) for j in range(len(board[0]))]
    while len(pos_list) > 0:
        i,j = pos_list.pop(0)
        cur_char = board[i][j]
        # ((0,0),'o',[])
        cur_word_point = ([(i,j)], cur_char)
        # [((1,0),'e'),((0,1),'a')]
        neighbors = find_neighbors((i,j),board)
        cur_words = acc_hash.get((i,j), [])
        # remove all the paths which have been handled
        cur_words = [w for w in cur_words if w[0] not in handled_paths]
        filtered_prefixs = filter_by_prefix(
                cur_words+[cur_word_point], neighbors, trie)
        # [((0,1),'oa',[(0,0)])]
        update_acc_hash(acc_hash, filtered_prefixs)
        # add all the paths which have been handled
        handled_paths.extend(w[0] for w in cur_words)
        # add some position for new path
        for cur_word_point in filtered_prefixs:
            cur_pos = cur_word_point[0][-1]
            if cur_pos not in pos_list:
                pos_list.append(cur_pos)


    # return acc_hash
    word_points = filter_words(acc_hash)
    return [(x[1], x[0]) for x in word_points]
Example No. 4
def calculateFullList(data_set, data_label, features_num, fn, jump=1):
	doc = []
	prev_id = -1
	for i in range(len(data_set)):
		if not data_set[i][0] == prev_id:
			if not (int(data_set[i][0]) % jump == 0):
				continue
			if doc:
				writeOneList(doc, data_label[int(prev_id)],  fn)
			prev_id = data_set[i][0]
			doc = [ '0' for j in range(features_num) ]
			doc[int(data_set[i][1]) - 1] = data_set[i][2]
			# import pdb; pdb.set_trace()
			# doc[features_num] = data_label[int(data_set[i][0])]
			# tmp_lst = [ '0' for i in range(features_num + 1) ]
			# docs[data_set[i][0]] = tmp_lst
			# docs[data_set[i][0]][int(data_set[i][1])] = data_set[i][2]
			# docs[data_set[i][0]][features_num] = data_label[int(data_set[i][0])]
		else:
			try:
				doc[int(data_set[i][1]) - 1] = data_set[i][2]
			except:
				print "An error has occurred and we have entered debug mode."
				import pdb; pdb.set_trace()
	writeOneList(doc, data_label[int(prev_id)], fn)
Example No. 5
	def toXML(self, writer, ttFont):
		if self.isComposite():
			for compo in self.components:
				compo.toXML(writer, ttFont)
			if hasattr(self, "program"):
				writer.begintag("instructions")
				self.program.toXML(writer, ttFont)
				writer.endtag("instructions")
				writer.newline()
		else:
			last = 0
			for i in range(self.numberOfContours):
				writer.begintag("contour")
				writer.newline()
				for j in range(last, self.endPtsOfContours[i] + 1):
					writer.simpletag("pt", [
							("x", self.coordinates[j][0]), 
							("y", self.coordinates[j][1]),
							("on", self.flags[j] & flagOnCurve)])
					writer.newline()
				last = self.endPtsOfContours[i] + 1
				writer.endtag("contour")
				writer.newline()
			if self.numberOfContours:
				writer.begintag("instructions")
				self.program.toXML(writer, ttFont)
				writer.endtag("instructions")
				writer.newline()
Example No. 6
 def __f2tpos(self, fen, frdpos, emypos):
     self.__init_clayer()
     poslist = fen.split()[0].split('/')
     player = fen.split()[1]
     for i in range(len(poslist)):
         item = poslist[9 - i]
         index = 0
         for j in range(len(item)):
             if item[j].isupper():
                 if player == 'w':
                     frdpos[index][i][self.__chesslayer[item[j]]] = 1
                 else:
                     emypos[index][i][self.__chesslayer[item[j]]] = 1
                 self.__chesslayer[item[j]] += 1
                 index += 1
             elif item[j].islower():
                 if player == 'w':
                     emypos[index][i][self.__chesslayer[item[j]]] = 1
                 else:
                     frdpos[index][i][self.__chesslayer[item[j]]] = 1
                 self.__chesslayer[item[j]] += 1
                 index += 1
             else:
                 index += int(item[j])
     return frdpos, emypos
Example No. 7
    def _relayout(self):
        # calculate number of rows and cols, try to maintain a square
        # TODO: take into account the window geometry
        num_widgets = len(self.widgets)
        rows = int(round(math.sqrt(num_widgets)))
        cols = rows
        if rows**2 < num_widgets:
            cols += 1

        # remove all the old widgets
        for w in self.widgets:
            self.remove(w)

        self.resize(rows, cols)

        # add them back in, in a grid
        for i in range(0, rows):
            for j in range(0, cols):
                index = i*cols + j
                if index < num_widgets:
                    widget = self.widgets[index]
                    self.attach(widget, j, j+1, i, i+1,
                                xoptions=gtk.FILL|gtk.EXPAND,
                                yoptions=gtk.FILL|gtk.EXPAND,
                                xpadding=0, ypadding=0)
Example No. 8
def resample(oldrate,newrate,x,n,dtype,factor):
    print "Resampling from",oldrate,"Hz to",newrate,"Hz, amplification factor",factor
    rategcd = gcd(oldrate,newrate)
    uprate = newrate / rategcd
    dnrate = oldrate / rategcd

    oldcount = len(x)
    midcount = oldcount * uprate
    newcount = midcount / dnrate

    print "Upsampling by",uprate
    if uprate == 1:
        yout = np.asarray(x, dtype=dtype)
    else:
        yout = np.zeros(midcount, dtype=dtype)
        for i in range(oldcount):
            yout[i * uprate] = x[i] * uprate

    wl = min(1.0/uprate,1.0/dnrate)
    print "Antialias filtering at",wl
    
    midrate = oldrate * uprate
    filt = firfilter(0, (midrate * wl) / 2.0, midrate, n)
    y = signal.lfilter(filt, 1, yout)

    print "Downsampling by",dnrate
    if dnrate == 1:
        yout = np.asarray(y, dtype=dtype)
    else:
        yout = np.zeros(newcount, dtype=dtype)
        for i in range(newcount):
            yout[i] = y[i * dnrate] * factor

    return yout
Example No. 9
def test():
    #hushen_score=-5
    #chye_score=-5
    for hushen_score in range(-5,6):
        for chye_score in range(-5,6):
            print('hushen_score=',hushen_score,'chye_score=',chye_score)
            position,sys_score,is_sys_risk=sys_risk_analyse(max_position=0.85,ultimate_coefficient=0.25,shzh_score=hushen_score,chy_score=chye_score)  
Example No. 10
 def flip(self, x, y, side):
     for dx in range(-1, 2):
         for dy in range(-1, 2):
             if dy == 0 and dx == 0:
                 continue
             if self.valid_ray(x, y, side, dx, dy):
                 self.flip_ray(x, y, side, dx, dy)
Example No. 11
def spin(dist_matrix, y, shuffle=False):
    """
    Parameters
    ----------
    dist_matrix: np.array([n,n])
    y: np.array([n])
    shuffle: boolean, if True dist_matrix will be shuffled


    Returns
    -------
    dist_matrix: np.array([n,n])
    y: np.array([n])
    """
    si = range(len(dist_matrix))
    if shuffle:
        np.random.shuffle(si)
    sigma = 2 ** 5.
    for j in range(6):
        w = make_weight_matrix(sigma, len(dist_matrix))
        out = []
        for i in range(20):
            y = y[si]
            dist_matrix = dist_matrix[np.meshgrid(si, si)]
            si, se, mm, e = neighborhood_sort(dist_matrix, sigma, w)
            if e in out:
                break
            else:
                out = out + [e]
            #print sigma, i, e
        sigma = sigma / 2.0
        #print e
    return dist_matrix, y
Example No. 12
 def RandomGraph(self, nodes, edges, maxweight = 100.0):
     """
     Generates a graph of random edges.
     
     @param nodes: list of nodes or number of nodes in the random graph
     @param edges: number of edges to generate in the random graph
     @type edges: integer
     @param maxweight: maximum weight of each edge. default = 100.0
     @type maxweight: float
     """
     import random
     nodes_size = 0
     if type(nodes) == int:
         adjacency = [range(nodes)]
         nodes_size = nodes
         for node in range(nodes):
             adjacency.append([0 for x in range(nodes)])
     elif type(nodes) == list:
         adjacency = nodes
         nodes_size = len(nodes)
         for node in range(nodes_size):
             adjacency.append([0 for x in range(nodes_size)])
     else: raise FunctionParameterTypeError('nodes can only be a list \
             or integer')
     count = 0
     while count <= edges:
         edge = (int(random.uniform(0, nodes_size)) + 1, 
                 int(random.uniform(0, nodes_size)),
                 int(random.uniform(0, 1) * maxweight))
         if adjacency[edge[0]][edge[1]] == 0:
             adjacency[edge[0]][edge[1]] = edge[2]
             count = count + 1
     self.makeGraphFromAdjacency(adjacency)
Example No. 13
 def possible_moves(self, side):
     moves = []
     for i in range(8):
         for j in range(8):
             if self.board[i,j] == 0 and self.valid_flip(i,j, side):
                 moves.append((i, j))
     return moves
Example No. 14
def tab(ye,xe):
	for y in range(ye):
		for x in range(xe):
			
			print t[y,x],
			
		print 
Example No. 15
    def _instantiate_processes(self, job_count):
        filename_list = collect_files(path_list(self.section.get('files', "")))
        file_dict = self._get_file_dict(filename_list)

        manager = multiprocessing.Manager()
        global_bear_queue = multiprocessing.Queue()
        filename_queue = multiprocessing.Queue()
        local_result_dict = manager.dict()
        global_result_dict = manager.dict()
        message_queue = multiprocessing.Queue()
        control_queue = multiprocessing.Queue()

        barrier = Barrier(parties=job_count)

        bear_runner_args = {"file_name_queue": filename_queue,
                            "local_bear_list": self.local_bear_list,
                            "global_bear_list": self.global_bear_list,
                            "global_bear_queue": global_bear_queue,
                            "file_dict": file_dict,
                            "local_result_dict": local_result_dict,
                            "global_result_dict": global_result_dict,
                            "message_queue": message_queue,
                            "control_queue": control_queue,
                            "barrier": barrier,
                            "TIMEOUT": 0.1}

        self._instantiate_bears(file_dict,
                                message_queue)
        self._fill_queue(filename_queue, filename_list)
        self._fill_queue(global_bear_queue, range(len(self.global_bear_list)))

        return ([BearRunner(**bear_runner_args) for i in range(job_count)],
                bear_runner_args)
Example No. 16
def knapsack_unbounded_dp(items, C):
    # order by max value per item size
    items = sorted(items, key=lambda item: item[VALUE]/float(item[SIZE]), reverse=True)
 
    # Sack keeps track of max value so far as well as the count of each item in the sack
    print('!')
    sack = [(0, [0 for i in items]) for i in range(0, C+1)]   # value, [item counts]
    print('!')
    for i,item in enumerate(items): 
        name, size, value = item
        for c in range(size, C+1):
            print(sack)
            sackwithout = sack[c-size]  # previous max sack to try adding this item to
            trial = sackwithout[0] + value
            used = sackwithout[1][i]
            if sack[c][0] < trial:
                # old max sack with this added item is better
                sack[c] = (trial, sackwithout[1][:])
                sack[c][1][i] +=1   # use one more
 
    value, bagged = sack[C]
    numbagged = sum(bagged)
    size = sum(items[i][1]*n for i,n in enumerate(bagged))
    # convert to (item, count) pairs in name order
    bagged = sorted((items[i][NAME], n) for i,n in enumerate(bagged) if n)
 
    return value, size, numbagged, bagged
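The snippet above indexes item tuples through module-level constants NAME, SIZE and VALUE that are not shown. A minimal driver sketch, assuming the conventional NAME, SIZE, VALUE = 0, 1, 2 layout and small made-up items (the leftover debug prints will fire as it runs):

NAME, SIZE, VALUE = 0, 1, 2   # assumed tuple layout: (name, size, value)

items = [
    ("apple", 3, 9),
    ("banana", 2, 5),
    ("cherry", 1, 2),
]
best_value, used_size, num_bagged, bagged = knapsack_unbounded_dp(items, 10)
print(best_value, used_size, num_bagged, bagged)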
Example No. 17
	def __init__(self):
		self.page_widgets = {}
		self.dict_lt = {}
		self.dict_rt = {}
		self.widget = load_uh_widget(self.widget_xml, style=self.style)

		self.pickbelts_container_lt = self.widget.findChild(name="left_pickbelts")
		self.pickbelts_container_rt = self.widget.findChild(name="right_pickbelts")

		for i in range(len(self.sections)):
			self.page_widgets[i] = self.widget.findChild(name=self.sections[i][0])

		# Create the required pickbelts
		for side in ('lt', 'rt'):
			for i in range(len(self.sections)):
				pickbelt = ImageButton(is_focusable=False)
				pickbelt.name = self.sections[i][0] + '_' + side
				pickbelt.text = self.sections[i][1]
				pickbelt.font = "small_tooltip"
				pickbelt.position = (self.pickbelt_start_pos[0]+5*i, self.pickbelt_start_pos[1]+70*i)
				pickbelt.capture(Callback(self.update_view, i), event_name="mouseClicked")
				if side == 'lt':
					pickbelt.up_image='content/gui/images/background/pickbelt_l.png'
					self.pickbelts_container_lt.addChild(pickbelt)
					self.dict_lt[i] = pickbelt
				else:
					pickbelt.up_image='content/gui/images/background/pickbelt_r.png'
					self.pickbelts_container_rt.addChild(pickbelt)
					self.dict_rt[i] = pickbelt
		self.widget.show() # Hack to initially setup the pickbelts properly
		self.update_view()
		self.widget.hide() # Hack to initially setup the pickbelts properly
Example No. 18
 def test_verify_leaf_inclusion_all_nodes_all_tree_sizes_up_to_4(self):
     leaves = ["aa", "bb", "cc", "dd"]
     hh = HexTreeHasher()
     leaf_hashes = [hh.hash_leaf(l) for l in leaves]
     hc = hh.hash_children
     proofs_per_tree_size = {
         1: [[] ],
         2: [[leaf_hashes[1]], [leaf_hashes[0]]],
         3: [[leaf_hashes[1], leaf_hashes[2]], # leaf 0
             [leaf_hashes[0], leaf_hashes[2]], # leaf 1
             [hc(leaf_hashes[0], leaf_hashes[1])]], # leaf 2
         4: [[leaf_hashes[1], hc(leaf_hashes[2], leaf_hashes[3])], # leaf 0
             [leaf_hashes[0], hc(leaf_hashes[2], leaf_hashes[3])], # leaf 1
             [leaf_hashes[3], hc(leaf_hashes[0], leaf_hashes[1])], # leaf 2
             [leaf_hashes[2], hc(leaf_hashes[0], leaf_hashes[1])], # leaf 3
             ]
         }
     tree = compact_merkle_tree.CompactMerkleTree(hasher=HexTreeHasher())
     verifier = ledger.merkle_verifier.MerkleVerifier(HexTreeHasher())
     # Increase the tree by one leaf each time
     for i in range(4):
         tree.append(leaves[i])
         tree_size = i + 1
         # ... and check inclusion proof validates for each node
         # of the tree
         for j in range(tree_size):
           proof = proofs_per_tree_size[tree_size][j]
           sth = self.STH(tree.root_hash, tree_size)
           self.assertTrue(
               verifier.verify_leaf_inclusion(
                   leaves[j], j, proof, sth))
Example No. 19
def createMatrix(j_range = 0 ,entry = "data"):
	# j_range = 0
	# if entry == "assigned":
	# 	j_range = 1290442
	# else:
	# 	j_range = 5425990
	if j_range == 0:
		print "You need to pass in the number of clusters as an argument to this function."
		sys.exit(1)
	with open(entry+"_collective.txt","r") as fin, open(entry+"_clust_mem_matrix.txt","w") as out_1, open(entry+"_pre_intensity_matrix.txt","w") as out_2:
		clust_matrix = [[0 for i in range(0,42)] for j in range(0,j_range)]
		int_matrix = [[0.0 for i in range(0,42)] for j in range(0,j_range)]
		fin.readline()
		for line in fin:
			line = line.split()
			clust = int(line[12].split(".")[1])
			f_index = int(line[0])/11	
			clust_matrix[clust][f_index] = clust_matrix[clust][f_index] + 1
			int_matrix[clust][f_index] = int_matrix[clust][f_index] + float(line[10])
		for i in xrange(0,42):			
			out_1.write("".join(["\t",str(i)]))
			out_2.write("".join(["\t",str(i)]))
		out_1.write("\n")
		out_2.write("\n")
		for i in xrange(0,j_range):
			for j in xrange(0,42):
				if j == 0:
					out_1.write("".join([entry,"_0_0.",str(i)]))
					out_2.write("".join([entry,"_0_0.",str(i)]))
				out_1.write("".join(["\t",str(clust_matrix[i][j])]))
				out_2.write("".join(["\t",str(int_matrix[i][j])]))
			out_1.write("\n")
			out_2.write("\n")
	return None
Example No. 20
def BowTieAdjacencyDic():
    """
    Return the adjacency dictionary of the bow tie with indices given by
    
        1 ----- 2
         \     /
          \   /
           \ /
            0
           / \
          /   \
         /     \
        3 ----- 4
    
    To make it non-redundant we will require that
        
        i ---> j  iff   i < j
        
    """
    A = {}
    for i in range(5):
        A[i] = []
    
    for i in range(1,5):
        A[0].append(i)
    
    A[1].append(2)
    A[3].append(4)
    
    return A
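As a quick check of the i ---> j with i < j convention described in the docstring, calling the function and printing the dictionary should give one adjacency list per node (a small usage sketch, nothing beyond the code above):

A = BowTieAdjacencyDic()
print(A)  # {0: [1, 2, 3, 4], 1: [2], 2: [], 3: [4], 4: []}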
Example No. 21
def load_maze(file_name, wall_char="#", start_char="S", end_char="E"):
    """loads a maze from a file,
    The mazes must be rectangular and bordered with the wall char.
    a maze file must have only a single start_char and a single end_char
    defining the start and finish points of the maze.
    """
    # open the file and read the lines
    maze_lines = [line.rstrip("\n") for line in open(file_name)]
    # find the dimensions of the maze
    n_rows = len(maze_lines)
    n_cols = len(maze_lines[0])
    walls = [[] for _ in range(n_rows)]  # one independent list per row
    # iterate over the rows and columns
    for row_idx in range(n_rows):
        for col_idx in range(n_cols):
            cur_char = maze_lines[row_idx][col_idx]
            # if we have a wall character mark wall as true
            if cur_char == wall_char:
                walls[row_idx].append(True)
            else:
                walls[row_idx].append(False)
                # check for start and finish points
                if cur_char == start_char:
                    start_pos = row_idx, col_idx
                elif cur_char == end_char:
                    end_pos = row_idx, col_idx
    walls = np.array(walls, dtype = bool)

    # creating an instance/object of the Maze class
    maze_object = Maze(walls, start_pos, end_pos)
    return maze_object
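A usage sketch for load_maze under the file format the docstring describes: a rectangular maze bordered with the wall character and containing exactly one S and one E. The maze.txt contents and the Maze attribute names below are assumptions for illustration only:

# maze.txt (hypothetical contents):
# #####
# #S..#
# #.#.#
# #..E#
# #####
maze = load_maze("maze.txt")
print(maze.walls.shape)              # (5, 5) boolean wall mask, if Maze keeps walls as-is
print(maze.start_pos, maze.end_pos)  # (1, 1) (3, 3)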
Example No. 22
def condition_on_grades(user="******"):
	c = new_conn.cursor()
	models = [None, None, None, None, None, None]
	for i in range(6):
		c.execute('SELECT easiness, ret_reps, ret_reps_since_lapse, lapses, pred_grade, acq_reps from discrete_log where user_id="%s" and grade=%d' % (user, i))
		x_train = np.array(c.fetchall())
		c.execute('SELECT interval_bucket from discrete_log where user_id="%s" and grade=%d' % (user, i))
		y_train = np.array(c.fetchall())[:,0]
		clf = SVC()
		clf.fit(x_train, y_train)
		print clf.score(x_train, y_train)
		models[i] = clf
	print "====================="
	c.execute('SELECT user_id from (select user_id, count(distinct grade) as cnt from discrete_log group by user_id) where cnt = 6 limit 5')
	users = [row[0] for row in c.fetchall()]
	scores = [0, 0, 0, 0, 0, 0]
	for user in users:
		for i in range(6):
			c.execute('SELECT easiness, ret_reps, ret_reps_since_lapse, lapses, pred_grade, acq_reps from discrete_log where user_id="%s" and grade=%d' % (user, i))
			x_train = np.array(c.fetchall())
			c.execute('SELECT interval_bucket from discrete_log where user_id="%s" and grade=%d' % (user, i))
			y_train = np.array(c.fetchall())[:,0]
			scores[i] += models[i].score(x_train, y_train)
	for i in range(6):
		scores[i] /= len(users)
		print scores[i]
Example No. 23
	def makeHist(self, normalize = True, doPMF = True):
		if self.isDataPickled:
			return

		if not self.Dim == 1:
			raise TypeError('Variable # mismatch')

		z = self.z
		Nframes = len(z)
		bin_min = 0.98 * z.min(); bin_max = 1.02*z.max()
		delta = (bin_max - bin_min)/float(self.nbins)
		bin_centers = np.zeros(self.nbins)
		bin_vals = np.zeros(self.nbins)
		pmf = np.zeros(self.nbins)
		for i in range(self.nbins):
			bin_centers[i] = bin_min + (i+0.5) * delta
			
		frameStatus = pb(Text = 'Binning frame by frame', Steps = Nframes)
		for i in range(Nframes):
		
			assignment = int((z[i] - bin_min)/delta)
			bin_vals[assignment] += 1.0
		
			frameStatus.Update(i)
		
		if normalize:
			#bin_vals /= (np.sum(bin_vals) * delta)
			bin_vals /= np.trapz(bin_vals, bin_centers, dx = delta)
		if doPMF:
			pmf = - np.log(bin_vals)
		

		hist = {'bin_centers': bin_centers, 'bin_vals': bin_vals, 'pmf' : pmf}
		pickle.dump(hist, open(self.data, 'w'))
		self.isDataPickled = True
Example No. 24
    def solve(self, board):
        """
        :type board: List[List[str]]
        :rtype: void Do not return anything, modify board in-place instead.
        """
        if len(board) == 0 or len(board[0]) == 0:
            return
        
        saves = deque()
        
        for r in range(len(board)):
            saves.append((r, 0))
            saves.append((r, len(board[0])-1))
            
        for c in range(len(board[0])):  
            saves.append((0, c))
            saves.append((len(board)-1, c))

        while len(saves) != 0:
            r, c = saves.popleft()
            if 0 <= r < len(board) and 0 <= c < len(board[0]) and board[r][c] == 'O':
                board[r][c] = '#'
                saves.append((r-1, c))
                saves.append((r, c-1))
                saves.append((r+1, c))
                saves.append((r, c+1))
                
        board[:] = [[ 'X' if board[r][c] != '#' else 'O' for c in range(len(board[0]))] for r in range(len(board))]
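A quick in-place check of the border flood fill above, using the classic "Surrounded Regions" sample input and assuming the method lives in a LeetCode-style Solution class with deque imported from collections: only the 'O' connected to the border survives.

board = [list("XXXX"),
         list("XOOX"),
         list("XXOX"),
         list("XOXX")]
Solution().solve(board)
print(board)  # [['X','X','X','X'], ['X','X','X','X'], ['X','X','X','X'], ['X','O','X','X']]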
        
Example No. 25
    def step(self):
        self.gridLastState = copy.deepcopy(self.grid)
        
        for row in range(self.gridSize):
            for col in range(self.gridSize): 
                aliveNeighbors = 0
                if self.gridLastState[row][col] == True:
                    aliveNeighbors -= 1
                for xOffset in [-1, 0, 1]:
                    for yOffset in [-1, 0, 1]:
                        checkXLoc = row + xOffset
                        checkYLoc = col + yOffset
                        if not (-1 < checkXLoc < self.gridSize) or not (-1 < checkYLoc < self.gridSize):
                            if self.gridWrap ==  False:
                                continue
                            else:
                                checkXLoc = checkXLoc % self.gridSize
                                checkYLoc = checkYLoc % self.gridSize
                             
                        if self.gridLastState[checkXLoc][checkYLoc] == True:
                            aliveNeighbors += 1
                self.grid[row][col] = self.calcNextState(self.gridLastState[row][col], aliveNeighbors)

        if self.grid == self.gridLastState:
            return False 
        else:
            return True
Example No. 26
def ind_complement(v, ind):
    if isinstance(ind, _INDEXTYPES):
        ind = [ind]
    elif type(ind) is slice:
        ind = range(*ind.indices(len(v)))
    l = len(v)
    return sorted(set(range(l)) - set(i if i >= 0 else l+i for i in ind))
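ind_complement returns the positions of v that are not selected by ind, where ind can be a single index (any type in _INDEXTYPES, presumably including int), a list of indices, or a slice; negative indices are normalized against len(v). Two hedged usage examples, assuming _INDEXTYPES covers plain int:

v = ['a', 'b', 'c', 'd', 'e']
print(ind_complement(v, slice(1, 3)))  # [0, 3, 4]
print(ind_complement(v, [-1, 0]))      # [1, 2, 3]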
Example No. 27
def label_nodes_with_class(nodes_xyt, class_maps, pix):
  """
  Returns:
    class_maps__: one-hot class_map for each class.
    node_class_label: one-hot class_map for each class, nodes_xyt.shape[0] x n_classes
  """
  # Assign each pixel to a node.
  selem = skimage.morphology.disk(pix)
  class_maps_ = class_maps*1.
  for i in range(class_maps.shape[2]):
    class_maps_[:,:,i] = skimage.morphology.dilation(class_maps[:,:,i]*1, selem)
  class_maps__ = np.argmax(class_maps_, axis=2)
  class_maps__[np.max(class_maps_, axis=2) == 0] = -1

  # For each node pick out the label from this class map.
  x = np.round(nodes_xyt[:,[0]]).astype(np.int32)
  y = np.round(nodes_xyt[:,[1]]).astype(np.int32)
  ind = np.ravel_multi_index((y,x), class_maps__.shape)
  node_class_label = class_maps__.ravel()[ind][:,0]

  # Convert to one hot versions.
  class_maps_one_hot = np.zeros(class_maps.shape, dtype=np.bool)
  node_class_label_one_hot = np.zeros((node_class_label.shape[0], class_maps.shape[2]), dtype=np.bool)
  for i in range(class_maps.shape[2]):
    class_maps_one_hot[:,:,i] = class_maps__ == i
    node_class_label_one_hot[:,i] = node_class_label == i
  return class_maps_one_hot, node_class_label_one_hot
Example No. 28
def f(n):
    dict = {}
    for i in range(n):
        s = int("".join(raw_input().split()))
        if s in dict:
            dict[s] += 1
        else:
            dict[s] = 1
    k = raw_input()
    # print dict
    l = dict.keys()
    l.sort()
    # print l
    for i in l:
        temp = str(i)
        count = 26 - len(temp)
        for j in range(count):
            temp = "0" + temp
        temp = list(temp)
        temp.insert(2, " ")
        temp.insert(11, " ")
        temp.insert(16, " ")
        temp.insert(21, " ")
        temp.insert(26, " ")
        temp = "".join(temp)
        temp = temp + " " + str(dict[i])
        print temp
Example No. 29
def calculateSparseDictCOO(data_set, data_label_hash, jump=1, valid_flag=False):
	row = []
	col = []
	data = []
	row_valid = []
	col_valid = []
	data_valid = []

	doc_ids = set(sorted(map(lambda row:int(row[0]), data_set)))
	base_ids_list = filter(lambda ids: ids % jump == 0, doc_ids)
	train_ids = base_ids_list
	valid_ids = set()
	if valid_flag:
		valid_index = filter(lambda ids: ids % validation_perc == 0, range(len(base_ids_list)))
		valid_ids = [base_ids_list[i] for i in valid_index]
		base_ids = set(base_ids_list)
		train_ids = sorted(base_ids - set(valid_ids))

	labels = map(lambda trid: int(data_label_hash[trid]), train_ids)
	labels_valid = map(lambda vlid: int(data_label_hash[vlid]), valid_ids)
	for i in range(len(data_set)):
		if int(data_set[i][0]) in train_ids:
			row.append(int(data_set[i][0]))
			col.append(int(data_set[i][1])-1)
			data.append(int(data_set[i][2]))
			# labels.append(int(data_label_hash[int(data_set[i][0])]))
		elif int(data_set[i][0]) in valid_ids:
			row_valid.append(int(data_set[i][0]))
			col_valid.append(int(data_set[i][1])-1)
			data_valid.append(int(data_set[i][2]))
			# labels_valid.append(int(data_label_hash[int(data_set[i][0])]))

	train = translate(row), col, data, labels
	valid = translate(row_valid), col_valid, data_valid, labels_valid
	return train, valid
Example No. 30
def display(matrix, s):
	print '\nXX ' + ' '.join([("%-4d" % i) for i in range(1, len(matrix))])
	for i in range(len(matrix)-1):
		print "%d " % i,
		for j in range(1, len(matrix)):
				print "%-4s" % matrix[i][j],
		print
Example No. 31
def loop():
    global atraffic, btraffic, ctraffic, dtraffic
    acm = 10
    bcm = 10
    ccm = 10
    dcm = 10
    acount = 10
    bcount = 10
    ccount = 10
    dcount = 10
    numbypass = 0

    decisionmake(atraffic, btraffic, ctraffic, dtraffic)

    for i in range(3):
        for loopcounter in range(10):

            if (acm == 0 or bcm == 0 or ccm == 0 or dcm == 0):
                continue
            loopcounter += 1

            if not (ctraffic == 0 and btraffic == 0 and dtraffic == 0):
                ayellowpin.write(0)
                turngreen(agreenpin)

            bypass_and_count_increase(acm, bcm, ccm, dcm)
            time.sleep(.500)

        for loopcounter in range(10):

            if (acm == 0 or ccm == 0 or ccm == 0 or dcm == 0):
                continue
            loopcounter += 1

            if not (ctraffic == 0 and atraffic == 0 and dtraffic == 0):
                byellowpin.write(0)
                turngreen(bgreenpin)

            bypass_and_count_increase(acm, bcm, ccm, dcm)

            time.sleep(.500)

        for loopcounter in range(10):

            if (acm == 0 or bcm == 0 or ccm == 0 or dcm == 0):
                continue
            loopcounter += 1

            if not (btraffic == 0 and atraffic == 0 and dtraffic == 0):
                cyellowpin.write(0)
                turngreen(cgreenpin)

            bypass_and_count_increase(acm, bcm, ccm, dcm)

            time.sleep(.500)

        for loopcounter in range(10):
            if (acm == 0 or bcm == 0 or ccm == 0 or dcm == 0):
                continue
            loopcounter += 1

            if not (btraffic == 0 and atraffic == 0 and ctraffic == 0):
                dyellowpin.write(0)
                turngreen(dgreenpin)

            bypass_and_count_increase(acm, bcm, ccm, dcm)

            time.sleep(.500)

    if (acount < 10):
        atraffic = 0
    else:
        atraffic = 1
    if (bcount < 10):
        btraffic = 0
    else:
        btraffic = 1
    if (ccount < 10):
        ctraffic = 0
    else:
        ctraffic = 1
    if (dcount < 10):
        dtraffic = 0
    else:
        dtraffic = 1

    if (atraffic == 1 and btraffic == 1 and dtraffic == 1):
        ctraffic = 1
    elif (btraffic == 1 and ctraffic == 1 and dtraffic == 1):
        atraffic = 1
    elif (ctraffic == 1 and atraffic == 1 and dtraffic == 1):
        btraffic = 1
    elif (btraffic == 1 and atraffic == 1 and ctraffic == 1):
        dtraffic = 1
Example No. 32
 def createTargetArray(self, nums: List[int],
                       index: List[int]) -> List[int]:
     target = list()
     for i in range(len(nums)):
         target.insert(index[i], nums[i])
     return target
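This is the direct list.insert solution to "Create Target Array in the Given Order": each nums[i] is inserted at position index[i], pushing later elements to the right. A worked example with the usual sample input, assuming the method is wrapped in a LeetCode-style Solution class and that List is imported from typing:

print(Solution().createTargetArray([0, 1, 2, 3, 4], [0, 1, 2, 2, 1]))
# builds [0] -> [0, 1] -> [0, 1, 2] -> [0, 1, 3, 2] -> [0, 4, 1, 3, 2]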
Example No. 33
"""

Comparison between

* `filter.rank.maximum`
* `morphology.dilate`

on increasing structuring element size:

"""

a = data.camera()

rec = []
e_range = range(1, 20, 2)
for r in e_range:
    elem = disk(r + 1)
    rc, ms_rc = cr_max(a, elem)
    rcm, ms_rcm = cm_dil(a, elem)
    rec.append((ms_rc, ms_rcm))

rec = np.asarray(rec)

plt.figure()
plt.title('Performance with respect to element size')
plt.ylabel('Time (ms)')
plt.xlabel('Element radius')
plt.plot(e_range, rec)
plt.legend(['filter.rank.maximum', 'morphology.dilate'])
"""
Example No. 34
    G_optimizer = torch.optim.Adam(G.parameters(), lr=1e-3)

    pathlib.Path(sample_output).mkdir(parents=True, exist_ok=True)
    pathlib.Path(os.path.join(sample_output, "images")).mkdir(parents=True, exist_ok=True)
    d_loss = 0
    g_loss = 0
    
    d_to_g_threshold = 0.5
    g_to_d_threshold = 0.3

    train_d = True
    train_g = True

    conditional_training = False
    _si = 1
    for epoch in range(epochs):
        for i, (image, label) in enumerate(data_loader):
            if conditional_training:
                if d_loss - g_loss > d_to_g_threshold:
                    train_d = True
                    train_g = False
                elif g_loss - d_loss > g_to_d_threshold:
                    train_g = True
                    train_d = False
                else:
                    train_d = True
                    train_g = True

            D_optimizer.zero_grad()
            G_optimizer.zero_grad()
Example No. 35
q = []
t = []
rep = int(input())
for i in range(rep):
    quality, time = (input()).split()
    q.append(quality)
    t.append(time)

index = 0
s = []
while index < len(q):
    ans = float(q[index]) * float(t[index])
    s.append(ans)
    index += 1

Sum = sum(s)
pass
print('%.3f' % Sum)
Example No. 36
def visualize_2D_AE(model,
                    training_df,
                    validation_df,
                    example_data,
                    num_examples,
                    batch_size,
                    num_gpus,
                    dims,
                    iter_,
                    n_cols=4,
                    std_to_plot=2.5,
                    summary_density=50,
                    save_loc=False,
                    n_samps_per_dim=8):
    """
    Visualization of AE as it trains in 2D space
    """
    # choose a color palette
    current_palette = sns.color_palette()
    # summarize training
    bins = [0] + np.unique(
        np.logspace(0,
                    np.log2(np.max(training_df.batch + [100])),
                    num=summary_density,
                    base=2).astype('int'))
    training_df['batch_bin'] = pd.cut(training_df.batch + 1,
                                      bins,
                                      labels=bins[:-1])
    training_summary = training_df.groupby(['batch_bin']).describe()
    validation_df['batch_bin'] = pd.cut(validation_df.batch + 1,
                                        bins,
                                        labels=bins[:-1])
    validation_summary = validation_df.groupby(['batch_bin']).describe()
    validation_df[:3]

    # get reconstructions of example data
    example_recon, z = model.sess.run((model.x_tilde, model.z_x),
                                      {model.x_input: example_data})
    # get Z representations of data
    z = np.array(
        generate_manifold(model, dims, iter_, num_examples, batch_size,
                          num_gpus))

    if np.shape(z)[1] == 2:
        # generate volumetric data
        x_bounds = [
            -inv_z_score(std_to_plot, z[:, 0]),
            inv_z_score(std_to_plot, z[:, 0])
        ]
        y_bounds = [
            -inv_z_score(std_to_plot, z[:, 1]),
            inv_z_score(std_to_plot, z[:, 1])
        ]
        maxx, maxy, hx, hy, pts = make_grid(x_bounds,
                                            y_bounds,
                                            maxx=int(n_samps_per_dim),
                                            maxy=int(n_samps_per_dim))

        dets = metric_and_volume(model, maxx, maxy, hx, hy, pts, dims,
                                 batch_size)

    fig = plt.figure(figsize=(10, 10))
    outer = gridspec.GridSpec(2, 2, wspace=0.2, hspace=0.2)

    scatter_ax = plt.Subplot(fig, outer[0])
    scatter_ax.scatter(z_score(z[:, 0]),
                       z_score(z[:, 1]),
                       alpha=.1,
                       s=3,
                       color='k')
    scatter_ax.axis('off')
    scatter_ax.set_xlim([-std_to_plot, std_to_plot])
    scatter_ax.set_ylim([-std_to_plot, std_to_plot])
    fig.add_subplot(scatter_ax)

    if np.shape(z)[1] == 2:
        volume_ax = plt.Subplot(fig, outer[1])
        volume_ax.axis('off')
        volume_ax.matshow(np.log2(dets), cmap=plt.cm.viridis)
        fig.add_subplot(volume_ax)

    recon_ax = gridspec.GridSpecFromSubplotSpec(int(n_cols),
                                                int(n_cols / 2),
                                                subplot_spec=outer[2],
                                                wspace=0.1,
                                                hspace=0.1)

    for axi in range(int(n_cols) * int(n_cols / 2)):
        recon_sub_ax = gridspec.GridSpecFromSubplotSpec(
            1, 2, subplot_spec=recon_ax[axi], wspace=0.1, hspace=0.1)
        orig_ax = plt.Subplot(fig, recon_sub_ax[0])
        orig_ax.matshow(np.squeeze(example_data[axi].reshape(dims)),
                        origin='lower')
        orig_ax.axis('off')
        rec_ax = plt.Subplot(fig, recon_sub_ax[1])
        rec_ax.matshow(np.squeeze(example_recon[axi].reshape(dims)),
                       origin='lower')
        rec_ax.axis('off')
        fig.add_subplot(orig_ax)
        fig.add_subplot(rec_ax)

    error_ax = plt.Subplot(fig, outer[3])
    #error_ax.plot(training_df.batch, training_df.recon_loss)
    training_plt, = error_ax.plot(
        training_summary.recon_loss['mean'].index.astype('int').values,
        training_summary.recon_loss['mean'].values,
        alpha=1,
        color=current_palette[0],
        label='training')

    error_ax.fill_between(
        training_summary.recon_loss['mean'].index.astype('int').values,
        training_summary.recon_loss['mean'].values -
        training_summary.recon_loss['std'].values,
        training_summary.recon_loss['mean'].values +
        training_summary.recon_loss['std'].values,
        alpha=.25,
        color=current_palette[0])

    error_ax.fill_between(
        validation_summary.recon_loss['mean'].index.astype('int').values,
        validation_summary.recon_loss['mean'].values -
        validation_summary.recon_loss['std'].values,
        validation_summary.recon_loss['mean'].values +
        validation_summary.recon_loss['std'].values,
        alpha=.25,
        color=current_palette[1])
    validation_plt, = error_ax.plot(
        validation_summary.recon_loss['mean'].index.astype('int').values,
        validation_summary.recon_loss['mean'].values -
        validation_summary.recon_loss['std'].values,
        alpha=1,
        color=current_palette[1],
        label='validation')

    error_ax.legend(handles=[validation_plt, training_plt], loc=1)
    error_ax.set_yscale("log")
    error_ax.set_xscale("log")
    fig.add_subplot(error_ax)
    if save_loc != False:
        if not os.path.exists('/'.join(save_loc.split('/')[:-1])):
            os.makedirs('/'.join(save_loc.split('/')[:-1]))
        plt.savefig(save_loc)
    plt.show()
Example No. 37
class Customer(object):
    def __init__(self, i, t, start=None, end=None, A=None, B=None):
        self.i = i
        self.t = t
        self.start = start
        self.end = end
        self.A = A
        self.B = B


T = int(input())
for test_case in range(1, T + 1):
    N, M, K, A, B = map(int, input().split())
    As = list(map(int, input().split()))
    Bs = list(map(int, input().split()))
    customers = list(map(int, input().split()))
    wait_rec = [Customer(i + 1, t) for i, t in enumerate(customers)]
    print(wait_rec)
    wait_rep = []
    fin = []
    rec, rep = [0] * N, [0] * M
    t = 0

    while len(fin) < K:
        for i in range(N):
            if rec[i] == 0 and wait_rec:
                if t >= wait_rec[0].t:
                    C = wait_rec.pop(0)
                    C.start, C.end, C.A = t, t + As[i], i + 1
                    rec[i] = C
            elif rec[i] != 0:
Example No. 38
def generateES(icls, scls, size, imin, imax, smin, smax):
    ind = icls(random.uniform(imin, imax) for _ in range(size))
    ind.strategy = scls(random.uniform(smin, smax) for _ in range(size))
    return ind
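generateES is the individual initializer used in DEAP's evolution-strategy examples: icls and scls are the individual and strategy container classes, and every gene and strategy value is drawn uniformly from its range. A minimal registration sketch, assuming DEAP's creator/base API and illustrative bounds (not taken from the original project):

import array
import random  # used by generateES above
from deap import base, creator

creator.create("FitnessMin", base.Fitness, weights=(-1.0,))
creator.create("Individual", array.array, typecode="d",
               fitness=creator.FitnessMin, strategy=None)
creator.create("Strategy", array.array, typecode="d")

toolbox = base.Toolbox()
toolbox.register("individual", generateES, creator.Individual, creator.Strategy,
                 size=10, imin=-5.0, imax=5.0, smin=0.1, smax=1.0)
ind = toolbox.individual()
print(ind, ind.strategy)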
Example No. 39
    print("Usage- python %s COM_PORT note1 [note2 note3 ...]"%(argv[0]))
    exit(0)
print "Using COM port- " + com_port
notes=[]
for note in argv[2:]:       # append all notes (params) passed in command line
    notes.append(note)
print "Notes- "+ str(notes)

ser = serial.Serial(com_port, 9600)
ser.reset_input_buffer()

accelerometer_range=23.0    #range of values which adxl sends = 11 - (-11) +1
no_bins=len(argv)-2         #remove script name and com port number
bin_size = accelerometer_range/no_bins 
decision_boundary=[]       
for i in range(0,no_bins):
    decision_boundary.append((-11+(bin_size*i),-11+(bin_size*(i+1)),i))  #list of all boundaries according to number of notes
print "number of bins- " + str(no_bins)
print "bin size- " + str(bin_size)
print "Decision Boundaries- " + str(decision_boundary)
time.sleep(1)

#To read from a file (if you don't have an Arduino and wish to simulate)

#with open("arduino","w") as arduino_readings:
#    for i in range(1,10):
#        reading=uniform(-11,11)
#        arduino_readings.write(str(reading)+"\r\n")

#with open("arduino","r") as arduino_readings:
#    for reading in arduino_readings:
Example No. 40
 def run(self):
     for i in range(1, self.range):
         self.add(self._enum, i)
     self.wait()
Example No. 41
File: lvutil.py Project: xandrus/sm
    try:
        util.pread(cmd_st, expect_rc=1)
    except util.CommandException, e:
        if e.code == 0:
            nodeExists = True

    if not util.pathexists(mapperPath) and not nodeExists:
        return

    util.SMlog("_lvmBugCleanup: seeing dm file %s" % mapperPath)

    # destroy the dm device
    if nodeExists:
        util.SMlog("_lvmBugCleanup: removing dm device %s" % mapperDevice)
        for i in range(LVM_FAIL_RETRIES):
            try:
                util.pread2(cmd_rm)
                break
            except util.CommandException, e:
                if i < LVM_FAIL_RETRIES - 1:
                    util.SMlog("Failed on try %d, retrying" % i)
                    try:
                        util.pread(cmd_st, expect_rc=1)
                        util.SMlog("_lvmBugCleanup: dm device {}"
                                   " removed".format(mapperDevice)
                                   )
                        break
                    except:
                        cmd_rm = cmd_rf
                        time.sleep(1)
Example No. 42
for number in range(1, 101, 1):
    if number % 3 == 0 and number % 5 == 0:
        print('FizzBuzz')
    elif number % 3 == 0:
        print('Fizz')
    elif number % 5 == 0:
        print('Buzz')
    else:
        print(number)
Example No. 43
def thread_way():
    workers = 10
    with futures.ThreadPoolExecutor(workers) as executor:
        res = list(executor.submit(blocking_way) for i in range(10))
    return len([fut.result() for fut in res])
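thread_way fans ten calls to blocking_way out over a ThreadPoolExecutor (it also assumes "from concurrent import futures"); blocking_way itself is not shown in this example. A plausible sketch of it, assuming the usual blocking-socket fetch of http://example.com/ that the related examples in this collection point to (the function body is an assumption, not the original):

import socket

def blocking_way():
    # plain blocking HTTP GET against example.com
    sock = socket.socket()
    sock.connect(('example.com', 80))
    sock.send(b'GET / HTTP/1.0\r\nHost: example.com\r\n\r\n')
    response = b''
    chunk = sock.recv(4096)
    while chunk:
        response += chunk
        chunk = sock.recv(4096)
    return response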
Example No. 44
# Problem No. 1546

case = int(input())
score = list(map(int, input().split()))

good = max(score)
new_score = 0
for i in range(len(score)):
    new_score += score[i] / good * 100

print(new_score / case) 
Example No. 45
        global COUNT
        response = b''
        chunk = sock.recv(4096)
        # it should be fine to read the whole response in one go
        if chunk:
            response += chunk
        else:
            selector.unregister(key.fd)
            COUNT += 1

    try:
        sock.connect(('example.com', 80))
    except BlockingIOError:
        pass
    selector.register(sock.fileno(), EVENT_WRITE, connect)


if __name__ == '__main__':
    current = float(time.time())
    # sync_way()
    # process_way()
    # thread_way()
    for i in range(10):
        non_blocking_way2()
    while COUNT < 9:
        events = selector.select()
        for event_key, event_mask in events:
            callback = event_key.data
            callback(event_key, event_mask)
    print('{:.2f}'.format(float(time.time()) - current))
Example No. 46
n=int(input())
coke=input().split()
capacity=input().split()
for i in range(len(coke)):
    coke[i]=int(coke[i])
for i in range(len(capacity)):
    capacity[i]=int(capacity[i])
capacity.sort()
if (capacity[-1]+capacity[-2])>sum(coke):
    print('YES')
else:
    print('NO')
Example No. 47
def Q_learning_metrics(mdp: Tictactoe(),
                       mode,
                       signature,
                       step_performance_list,
                       epsilon=0.1,
                       n_episodes=500,
                       alpha=0.1,
                       big=False):
    gamma = mdp.gamma
    for key, val in mdp.states_manager.states.items():
        val['Q'] = np.zeros(len(val['actions']))
        if (len(val['actions']) == 0):
            val['Q'] = np.zeros(1)
        val['actions_explored'] = np.ones(len(val['actions']))
        val['explored'] = 0
        if not val['terminal']:
            val['policy'] = np.random.choice(val['actions'])
    score_random = []
    score_intelligent = []
    step_iteration = []

    time_step_iteration = []
    s_init = mdp.states_manager.get_state(0)
    Q_values_init_iterations = [s_init['Q']]
    unexplored = len(mdp.states_manager.states) - 1
    unexplored_iterations = [unexplored]
    print("problem initialized")
    start = time.time()
    k = 1
    step_performance = step_performance_list[k]
    for episode in range(n_episodes):
        if (not big):
            Q_values_init_iterations.append(s_init['Q'])

        if (episode == step_performance):
            step = step_performance_list[k] - step_performance_list[k - 1]
            time_elapsed = time.time() - start
            print("episode : " + str(episode))
            print("time last it : " + str(time_elapsed))
            time_step_iteration.append(time_elapsed)
            current_time = time.time()
            if (big):
                Q_values_init_iterations.append(s_init['Q'])
                unexplored_iterations.append(unexplored)
            #update_policy(mdp)
            scores = performance_measure_dict(mdp, repeat=100000)
            score_random.append(scores[0])
            score_intelligent.append(scores[1])
            step_iteration.append(episode)
            print("time performance step : " + str(time.time() - current_time))
            k = k + 1
            if (k < len(step_performance_list)):
                step_performance = step_performance_list[k]
            start = time.time() - time_elapsed

        state = mdp.states_manager.get_state(0)
        state['explored'] += 1
        while not state['terminal']:
            chosen_action_index = epsilon_greedy_exploration_metrics(
                epsilon, state)
            agent_action = state['actions'][chosen_action_index]
            state['actions_explored'][chosen_action_index] += 1
            potential_actions = mdp.T(state['number'],
                                      agent_action,
                                      state['actions'],
                                      mode=mode)
            final_state_number = mdp.choose_action(potential_actions)
            final_state = mdp.states_manager.get_state(final_state_number)
            state['Q'][chosen_action_index] = state['Q'][
                chosen_action_index] + alpha * (
                    final_state['reward'] + gamma *
                    (final_state['Q'].max()) - state['Q'][chosen_action_index])
            state['policy'] = state['actions'][np.argmax(state['Q'])]
            state = final_state
            state['explored'] += 1
            if (state['explored'] == 1):
                unexplored -= 1
        if (not big):
            unexplored_iterations.append(unexplored)

    policy = create_policy(mdp)
    exploration_stats = exploration_statitics(mdp)
    variables = [
        score_random, score_intelligent, step_iteration, time_step_iteration,
        Q_values_init_iterations, unexplored_iterations, policy,
        exploration_stats
    ]
    names = [
        "score_random", "score_intelligent", "step_iteration",
        "time_step_iteration", "Q_values_init_iterations",
        "unexplored_iterations", "policy", "exploration_stats"
    ]

    for k in range(len(variables)):
        pickle.dump(
            variables[k],
            open(
                'assignement_4/q_learning_results/' + signature + "_epsilon_" +
                str(epsilon) + "_alpha_" + str(alpha) + "_gamma_" +
                str(gamma) + "_mode_" + mode + "_" + names[k] + ".p", "wb"))

    return mdp
Example No. 48
def sync_way():
    res = []
    for i in range(10):
        # res.append(blocking_way())
        res.append(non_blocking_way())
    return len(res)
Example No. 49
def main():
    parser = ArgumentParser()
    parser.add_argument('--pregenerated_data', type=Path, required=True)
    parser.add_argument('--output_dir', type=Path, required=True)
    parser.add_argument("--bert_model", type=str, required=True, help="Bert pre-trained model selected in the list: bert-base-uncased, "
                             "bert-large-uncased, bert-base-cased, bert-base-multilingual, bert-base-chinese.")
    parser.add_argument("--do_lower_case", action="store_true")
    parser.add_argument("--reduce_memory", action="store_true",
                        help="Store training data as on-disc memmaps to massively reduce memory usage")

    parser.add_argument("--epochs", type=int, default=3, help="Number of epochs to train for")
    parser.add_argument("--local_rank",
                        type=int,
                        default=-1,
                        help="local_rank for distributed training on gpus")
    parser.add_argument("--no_cuda",
                        action='store_true',
                        help="Whether not to use CUDA when available")
    parser.add_argument('--gradient_accumulation_steps',
                        type=int,
                        default=1,
                        help="Number of updates steps to accumulate before performing a backward/update pass.")
    parser.add_argument("--train_batch_size",
                        default=32,
                        type=int,
                        help="Total batch size for training.")
    parser.add_argument('--fp16',
                        action='store_true',
                        help="Whether to use 16-bit float precision instead of 32-bit")
    parser.add_argument('--loss_scale',
                        type=float, default=0,
                        help="Loss scaling to improve fp16 numeric stability. Only used when fp16 set to True.\n"
                        "0 (default value): dynamic loss scaling.\n"
                        "Positive power of 2: static loss scaling value.\n")
    parser.add_argument("--warmup_steps", 
                        default=0, 
                        type=int,
                        help="Linear warmup over warmup_steps.")
    parser.add_argument("--adam_epsilon", 
                        default=1e-8, 
                        type=float,
                        help="Epsilon for Adam optimizer.")
    parser.add_argument("--learning_rate",
                        default=3e-5,
                        type=float,
                        help="The initial learning rate for Adam.")
    parser.add_argument('--seed',
                        type=int,
                        default=42,
                        help="random seed for initialization")
    args = parser.parse_args()

    assert args.pregenerated_data.is_dir(), \
        "--pregenerated_data should point to the folder of files made by pregenerate_training_data.py!"

    samples_per_epoch = []
    for i in range(args.epochs):
        epoch_file = args.pregenerated_data / f"epoch_0.json" #f"epoch_{i}.json"
        metrics_file = args.pregenerated_data / f"epoch_0_metrics.json" #f"epoch_{i}_metrics.json"
        if epoch_file.is_file() and metrics_file.is_file():
            metrics = json.loads(metrics_file.read_text())
            samples_per_epoch.append(metrics['num_training_examples'])
        else:
            if i == 0:
                exit("No training data was found!")
            print(f"Warning! There are fewer epochs of pregenerated data ({i}) than training epochs ({args.epochs}).")
            print("This script will loop over the available data, but training diversity may be negatively impacted.")
            num_data_epochs = i
            break
    else:
        num_data_epochs = args.epochs

    if args.local_rank == -1 or args.no_cuda:
        device = torch.device("cuda" if torch.cuda.is_available() and not args.no_cuda else "cpu")
        n_gpu = torch.cuda.device_count()
    else:
        torch.cuda.set_device(args.local_rank)
        device = torch.device("cuda", args.local_rank)
        n_gpu = 1
        # Initializes the distributed backend which will take care of sychronizing nodes/GPUs
        torch.distributed.init_process_group(backend='nccl')
    logging.info("device: {} n_gpu: {}, distributed training: {}, 16-bits training: {}".format(
        device, n_gpu, bool(args.local_rank != -1), args.fp16))

    if args.gradient_accumulation_steps < 1:
        raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
                            args.gradient_accumulation_steps))

    args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps

    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if n_gpu > 0:
        torch.cuda.manual_seed_all(args.seed)

    if args.output_dir.is_dir() and list(args.output_dir.iterdir()):
        logging.warning(f"Output directory ({args.output_dir}) already exists and is not empty!")
    args.output_dir.mkdir(parents=True, exist_ok=True)

    tokenizer = BertTokenizer.from_pretrained(args.bert_model, do_lower_case=args.do_lower_case)

    total_train_examples = 0
    for i in range(args.epochs):
        # The modulo takes into account the fact that we may loop over limited epochs of data
        total_train_examples += samples_per_epoch[i % len(samples_per_epoch)]

    num_train_optimization_steps = int(
        total_train_examples / args.train_batch_size / args.gradient_accumulation_steps)
    if args.local_rank != -1:
        num_train_optimization_steps = num_train_optimization_steps // torch.distributed.get_world_size()

    # Prepare model
    model = BertForPreTraining.from_pretrained(args.bert_model)
    if args.fp16:
        model.half()
    model.to(device)
    if args.local_rank != -1:
        try:
            from apex.parallel import DistributedDataParallel as DDP
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")
        model = DDP(model)
    elif n_gpu > 1:
        model = torch.nn.DataParallel(model)

    # Prepare optimizer
    param_optimizer = list(model.named_parameters())
    no_decay = ['bias', 'LayerNorm.bias', 'LayerNorm.weight']
    optimizer_grouped_parameters = [
        {'params': [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
         'weight_decay': 0.01},
        {'params': [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
    ]

    if args.fp16:
        try:
            from apex.fp16_utils import FP16_Optimizer
            from apex.optimizers import FusedAdam
        except ImportError:
            raise ImportError(
                "Please install apex from https://www.github.com/nvidia/apex to use distributed and fp16 training.")

        optimizer = FusedAdam(optimizer_grouped_parameters,
                              lr=args.learning_rate,
                              bias_correction=False)
        # get_linear_schedule_with_warmup only
        # accepts unwrapped optimizer
        scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=num_train_optimization_steps)        
        if args.loss_scale == 0:
            optimizer = FP16_Optimizer(optimizer, dynamic_loss_scale=True)
        else:
            optimizer = FP16_Optimizer(optimizer, static_loss_scale=args.loss_scale)
    else:
        optimizer = AdamW(optimizer_grouped_parameters, lr=args.learning_rate, eps=args.adam_epsilon)
        scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=args.warmup_steps, num_training_steps=num_train_optimization_steps)

    
    global_step = 0
    logging.info("***** Running training *****")
    logging.info(f"  Num examples = {total_train_examples}")
    logging.info("  Batch size = %d", args.train_batch_size)
    logging.info("  Num steps = %d", num_train_optimization_steps)
    model.train()
    for epoch in range(args.epochs):
        epoch_dataset = PregeneratedDataset(epoch=epoch, training_path=args.pregenerated_data, tokenizer=tokenizer,
                                            num_data_epochs=num_data_epochs, reduce_memory=args.reduce_memory)
        if args.local_rank == -1:
            train_sampler = RandomSampler(epoch_dataset)
        else:
            train_sampler = DistributedSampler(epoch_dataset)
        train_dataloader = DataLoader(epoch_dataset, sampler=train_sampler, batch_size=args.train_batch_size)
        tr_loss = 0
        nb_tr_examples, nb_tr_steps = 0, 0
        with tqdm(total=len(train_dataloader), desc=f"Epoch {epoch}") as pbar: # tqdm: progress bar
            for step, batch in enumerate(train_dataloader):
                batch = tuple(t.to(device) for t in batch)
                input_ids, input_mask, segment_ids, lm_label_ids, is_next = batch
                outputs = model(input_ids, masked_lm_labels=lm_label_ids, next_sentence_label=is_next)
                loss = outputs[0]
                if n_gpu > 1:
                    loss = loss.mean() # mean() to average on multi-gpu.
                if args.gradient_accumulation_steps > 1:
                    loss = loss / args.gradient_accumulation_steps
                if args.fp16:
                    optimizer.backward(loss)
                else:
                    loss.backward()
                tr_loss += loss.item()
                nb_tr_examples += input_ids.size(0)
                nb_tr_steps += 1
                pbar.update(1)
                mean_loss = tr_loss * args.gradient_accumulation_steps / nb_tr_steps
                pbar.set_postfix_str(f"Loss: {mean_loss:.5f}")
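                # Step the optimizer and LR scheduler only once every
                # gradient_accumulation_steps mini-batches (simulating a larger batch).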
                if (step + 1) % args.gradient_accumulation_steps == 0:
                    optimizer.step()
                    scheduler.step()  # Update learning rate schedule
                    optimizer.zero_grad()
                    global_step += 1

    # Save a trained model
    if (n_gpu > 1 and torch.distributed.get_rank() == 0) or n_gpu <= 1:
        logging.info("** ** * Saving fine-tuned model ** ** * ")
        model.save_pretrained(args.output_dir)
        tokenizer.save_pretrained(args.output_dir)
Ejemplo n.º 50
0
def run(sysdict):
    start_time = time.time()
    answer = Verify(sysdict, vtaloc)
    end_time = time.time()
    total_time = end_time - start_time
    return (bound, answer, total_time)


if step == 1:
    #### 1] Increase activation count for each node ####
    print("Running max instantiations from " + str(LB1) + " to " + str(UB1))

    # (BOUND, ANSWER, TIME)
    results1 = []

    for bound in range(LB1, UB1+1):
        print("Max insantiations: " + str(bound))
        sysdict = LoadSystem(instanceName)
        UE = sysdict['UserEquipments']
        for ue in UE:
            ue.maxInst = bound

        sysdict['Executors'] = bound*len(UE)
        sysdict['QueueLength'] = bound*len(UE)

        results1.append(run(sysdict))
    results.append(results1)

if step == 2:

    ##### 2] Increase number of user equipment
Ejemplo n.º 51
0
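# Reads n and prints the square of the sum of 1..n, the sum of squares of 1..n,
# and the difference between the two.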
n = int(input("n="))
s1 = 0
for i in range(n + 1):
    s1 += i
s1 = s1**2
s2 = 0
for i in range(n + 1):
    s2 += i**2
print(s1)
print(s2)
print(s1 - s2)
Ejemplo n.º 52
0
    def start_new_nodes(self, processors, peering, schedulers,
                        start_nodes_per_round, round_, poet_kwargs):
        start = round_ * start_nodes_per_round
        for num in range(start, start + start_nodes_per_round):
            self.start_node(num, processors, peering, schedulers, poet_kwargs)
Ejemplo n.º 53
0
    return (((x2-x1)**2 + (y2-y1)**2)**0.5)

# Hamilton correlation function estimator:
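# Hamilton-style estimator of the two-point correlation function,
# xi = DD*RR / DR**2 - 1, with an extra normalisation factor N applied here.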


def W_ham(N,DD,DR,RR):
    return (N*(( DD* RR) / (DR)**2) ) -1



'''
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
Begin main loop
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
'''
for i in range(0,loops):
    # Swift telescope values from Dai et al. (2015)
    a = 1.34
    b = 2.37 # +/- 0.01
    f_b = 3.67 * 10 ** (-15) # erg  cm^-2 s^-1
    k = 531.91*10**14 # +/- 250.04; (deg^-2 (erg cm^-2 s^-1)^-1)
    s_ref = 10**-14 # erg cm^-2 s^-1

    '''
    def f3(x):
        return (1/(-a+1))*(1/s_ref)**(-a)*k*(x**(-a+1))

    def f4(x):
        return (1/s_ref)**(-b)*k*(f_b/s_ref)**(b-a)*(1/(-b+1))*(x**(-b+1))
    '''
Ejemplo n.º 54
0
    def windowControllerDidLoadNib_(self, controller):
        self._resultStores = [[] for _ in range(eventformatter.kLanguageCount)]
        self.currentStyle = _userDefaults.integerForKey_(
            u'defaultOutputLanguage')
Ejemplo n.º 55
0
def get_helmert_test():
    dirs = "../data/csv_Helmert_30_non_mat/"
    if not os.path.exists(dirs):
        os.makedirs(dirs)

    y_list = [
        "03", "04", "05", "06", "07", "08", "09", "10", "13", "14", "15", "16"
    ]
    month_list = [
        "01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12"
    ]

    for year in y_list:
        for month in month_list:
            print(year + month)
            gw_file_list = sorted(
                glob.glob("../data/csv_w/ecm" + year + month + "*.csv"))
            iw_file_list = sorted(
                glob.glob("../data/csv_iw/" + year + month + "*.csv"))
            ic0_file_list = sorted(
                glob.glob("../data/csv_ic0/IC0_20" + year + month + "*.csv"))
            sit_file_list = sorted(
                glob.glob("../data/csv_sit/SIT_20" + year + month + "*.csv"))
            gw_list = []
            iw_list = []
            ic0_list = []
            sit_list = []

            L_gw = len(gw_file_list)
            L_iw = len(iw_file_list)
            if L_gw != L_iw:
                print("L_gw != L_iw")
                continue
            grid_data = pd.read_csv(grid900to145_file_name, header=None)
            grid145 = np.array(grid_data, dtype='int64').ravel()
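            # grid900to145 maps the 900x900 product grid onto the 145x145 analysis grid
            # (only referenced by the commented-out IC0/SIT block below).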
            for gw_fname in gw_file_list:
                df_wind = pd.read_csv(gw_fname, header=None)
                wind = np.array(df_wind, dtype='float32')
                gw_list.append(wind[:, [0, 1]])
            for iw_fname in iw_file_list:
                df_ice_wind = pd.read_csv(iw_fname, header=None)
                df_ice_wind[df_ice_wind == 999.] = np.nan
                ice_wind = np.array(df_ice_wind, dtype='float32') / 100
                iw_list.append(ice_wind[:, [0, 1]])
            """
			for ic0_fname in ic0_file_list:
				ic0_data = pd.read_csv(ic0_fname, header=None)
				ic0 = np.array(ic0_data, dtype='float32')
				ic0_145 = ic0[grid145]
				ic0_list.append(ic0_145)
			for sit_fname in sit_file_list:
				sit_data = pd.read_csv(sit_fname, header=None)
				sit = np.array(sit_data, dtype='float32')
				sit[sit>=10001] = np.nan
				sit_145 = sit[grid145]
				sit_list.append(sit_145)
			"""

            gw_array = np.array(gw_list)
            iw_array = np.array(iw_list)
            gw_array = np.where(np.isnan(iw_array), np.nan, gw_array)

            gw_ave = np.nanmean(gw_array, axis=0)
            iw_ave = np.nanmean(iw_array, axis=0)

            gw_minus_ave = gw_array - np.tile(gw_ave, (L_gw, 1, 1))
            iw_minus_ave = iw_array - np.tile(iw_ave, (L_iw, 1, 1))

            param_list = []
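            # Per-grid Helmert (complex linear) regression of ice drift on wind:
            #   iw = (a_1 + i*a_2) + (b_1 + i*b_2) * gw, treating (u, v) as a complex number.
            # A = |b_1 + i*b_2| is the coefficient magnitude, theta its rotation angle;
            # grids with 20 or fewer valid samples in the month are skipped.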
            for j in range(145**2):
                #print(j)
                N_c = np.sum(~np.isnan(iw_minus_ave[:, j, 0]))
                if N_c <= 20:
                    param_list.append(
                        [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, N_c])
                    continue
                gw_u = gw_minus_ave[:, j, 0]
                gw_v = gw_minus_ave[:, j, 1]
                iw_u = iw_minus_ave[:, j, 0]
                iw_v = iw_minus_ave[:, j, 1]
                b_1 = (np.nansum(gw_u * iw_u) + np.nansum(gw_v * iw_v)) / (
                    np.nansum(gw_u**2) + np.nansum(gw_v**2))
                b_2 = (np.nansum(gw_u * iw_v) - np.nansum(gw_v * iw_u)) / (
                    np.nansum(gw_u**2) + np.nansum(gw_v**2))
                a_1 = iw_ave[j, 0] - b_1 * gw_ave[j, 0] + b_2 * gw_ave[j, 1]
                a_2 = iw_ave[j, 1] - b_1 * gw_ave[j, 1] - b_2 * gw_ave[j, 0]
                R_denominator = np.nansum(iw_u**2 + iw_v**2)
                R_numerator = np.nansum((iw_array[:,j,0] - (a_1 + b_1*gw_array[:,j,0] - b_2*gw_array[:,j,1]))**2) + \
                 np.nansum((iw_array[:,j,1] - (a_2 + b_2*gw_array[:,j,0] + b_1*gw_array[:,j,1]))**2)
                R2 = 1 - R_numerator / R_denominator
                A = np.sqrt(b_1**2 + b_2**2)
                theta = np.arctan2(b_2, b_1) * 180 / np.pi
                #print(a_1, a_2, b_1, b_2)
                #print("A: {}\ntheta: {}\na_1: {}\na_2: {}\nR2: {}\ne2: {}\nN_c: {}".format(A, theta, a_1, a_2, R2, R_numerator, N_c))
                param_list.append([A, theta, a_1, a_2, R2, R_numerator, N_c])
            param_array = np.array(param_list)

            data_array = np.hstack((param_array, iw_ave, gw_ave))
            data = pd.DataFrame(data_array)
            data.columns = [
                "A", "theta", "ocean_u", "ocean_v", "R2", "epsilon2", "N_c",
                "mean_iw_u", "mean_iw_v", "mean_w_u", "mean_w_v"
            ]
            save_name = dirs + "Helmert_30_non_mat_20" + year + month + ".csv"
            print(save_name)
            data.to_csv(save_name, index=False)
Ejemplo n.º 56
0
for value in range(1, 6):
    print(value)

numbers = list(range(0, 7, 3))
print(numbers)
Ejemplo n.º 57
0
def map_corr_slide_month(slide):
    dirs = "../result_h/corr_map/corr_slide_month/"
    if not os.path.exists(dirs):
        os.makedirs(dirs)

    y_list = [
        "03", "04", "05", "06", "07", "08", "09", "10", "13", "14", "15", "16"
    ]
    month_list = [
        "01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12"
    ]
    product_ym = list(itertools.product(y_list, month_list))
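    # Pair each (year, month) with the entry `slide` months later: A is taken from the
    # earlier Helmert file and ic0_30 from the later one, keyed by the earlier year/month.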

    data_dict = {}
    for month in month_list:
        data_dict[month] = {}
        for year in y_list:
            data_dict[month][year] = {}
            data_dict[month][year]["ym_A"] = np.array([np.nan] * (145**2))
            data_dict[month][year]["ym_ic0"] = np.array([np.nan] * (145**2))
            data_dict[month][year]["data_A"] = np.array([np.nan] * (145**2))
            data_dict[month][year]["data_ic0"] = np.array([np.nan] * (145**2))

    for k, item in enumerate(product_ym):
        if k + slide > len(product_ym) - 1:
            break
        x_ym = product_ym[k][0] + product_ym[k][1]
        y_ym = product_ym[k + slide][0] + product_ym[k + slide][1]
        print(x_ym, y_ym)
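        # Skip pairs that straddle the gap between the 2003-2010 and 2013-2016 periods
        # (index 95 is the last month of year "10" in product_ym).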
        if (k <= 95) & (k + slide > 95):
            print("\tcontinue: {}".format((x_ym, y_ym)))
            data_dict[product_ym[k][1]][product_ym[k][0]]["ym_A"] = x_ym
            data_dict[product_ym[k][1]][product_ym[k][0]]["ym_ic0"] = y_ym
            data_dict[product_ym[k][1]][product_ym[k][0]]["data_A"] = np.array(
                [np.nan] * (145**2))
            data_dict[product_ym[k][1]][
                product_ym[k][0]]["data_ic0"] = np.array([np.nan] * (145**2))
            continue
        else:
            helmert_axis_x_file = "../data/csv_Helmert_both_30/Helmert_both_30_20" + x_ym + ".csv"
            helmert_axis_y_file = "../data/csv_Helmert_both_30/Helmert_both_30_20" + y_ym + ".csv"

            data_A = pd.read_csv(helmert_axis_x_file)["A"]
            data_ic0 = pd.read_csv(helmert_axis_y_file)["ic0_30"]

            data_dict[product_ym[k][1]][product_ym[k][0]]["ym_A"] = x_ym
            data_dict[product_ym[k][1]][product_ym[k][0]]["ym_ic0"] = y_ym
            data_dict[product_ym[k][1]][product_ym[k][0]]["data_A"] = np.array(
                data_A)
            data_dict[product_ym[k][1]][product_ym[k][0]]["data_ic0"] = np.array(
                data_ic0)

    for month in month_list:
        accumulate_data_A = []
        accumulate_data_ic0 = []
        for year in y_list:
            #print(month, year)
            accumulate_data_A.append(data_dict[month][year]["data_A"])
            accumulate_data_ic0.append(data_dict[month][year]["data_ic0"])
        accumulate_data_A = np.array(accumulate_data_A)
        accumulate_data_ic0 = np.array(accumulate_data_ic0)

        corr_list = []
        for i in range(145**2):
            plot_data_A = accumulate_data_A[:, i]
            #data_A = data_A[~np.isnan(data_A)]
            plot_data_ic0 = accumulate_data_ic0[:, i]
            tmp_df = pd.DataFrame({
                "data_A": plot_data_A,
                "data_ic0": plot_data_ic0
            })
            if len(tmp_df.dropna()) <= 5:
                corr = np.nan
            else:
                corr = tmp_df.dropna().corr()
                corr = np.array(corr)[0, 1]
            corr_list.append(corr)
        corr_array = np.array(corr_list)

        save_name = dirs + "ic0_A_start_month_" + month + "_slide_" + str(
            slide) + ".png"
        print("\t{}".format(save_name))
        visualize.plot_map_once(corr_array,
                                data_type="type_non_wind",
                                show=False,
                                save_name=save_name,
                                vmax=1,
                                vmin=-1,
                                cmap=plt.cm.jet)
Ejemplo n.º 58
0
def search_corr_map_30():
    dirs = "../result_h/corr_map_search_grid/"
    if not os.path.exists(dirs):
        os.makedirs(dirs)

    data_ex_dir = "../data/csv_Helmert_ex/Helmert_ex_200301.csv"
    data_ex = pd.read_csv(data_ex_dir)

    y_list = [
        "03", "04", "05", "06", "07", "08", "09", "10", "13", "14", "15", "16"
    ]
    month_list = [
        "01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12"
    ]

    #corr_all = []
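    # For each calendar month, stack the yearly Helmert files and compute the per-grid
    # correlation between A and ic0_30; grids with |corr| >= 0.7 in selected area labels
    # are sampled and shown as joint plots and on a polar-stereographic location map.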
    for month in month_list:
        file_list = sorted(
            glob.glob("../data/csv_Helmert_both_30/Helmert_both_30_*" + month +
                      ".csv"))
        accumulate_data = []
        for file in file_list:
            data = pd.read_csv(file)
            data = data.loc[:, [
                "A", "theta", "R2", "epsilon2", "ic0_30", "sit_30"
            ]]
            print(data.columns)
            print(data.dropna().head(2))
            print(np.array(data.dropna())[0:2, :])
            accumulate_data.append(np.array(data))
        accumulate_data = np.array(accumulate_data)
        #data_A_ic0 = accumulate_data[:, :, [0,4]]

        corr_list = []
        for i in range(145**2):
            data_A = accumulate_data[:, i, 0]
            #data_A = data_A[~np.isnan(data_A)]
            data_ic0 = accumulate_data[:, i, 4]
            tmp_df = pd.DataFrame({"data_A": data_A, "data_ic0": data_ic0})
            if len(tmp_df.dropna()) <= 5:
                corr = np.nan
            else:
                corr = tmp_df.dropna().corr()
                corr = np.array(corr)[0, 1]
                #print(i, corr)
            corr_list.append(corr)
        #corr_all.append(corr_list)
        df_corr = pd.DataFrame({"corr": corr_list})
        df_corr = pd.concat([
            latlon_ex, df_corr,
            data_ex.loc[:,
                        ["coastal_region_1", "coastal_region_2", "area_label"]]
        ],
                            axis=1)
        corr_grid_pos = df_corr.loc[(df_corr["corr"] >= 0.7) & (
            df_corr["area_label"].isin([0, 1, 4, 5, 7, 8, 10, 12, 16])
        ), :].dropna().index
        corr_grid_neg = df_corr.loc[(df_corr["corr"] <= -0.7) & (
            df_corr["area_label"].isin([0, 1, 4, 5, 7, 8, 10, 12, 16])
        ), :].dropna().index
        try:
            plot_grids_pos = random.sample(
                np.array(corr_grid_pos).tolist(), 15)
            plot_grids_neg = random.sample(
                np.array(corr_grid_neg).tolist(), 15)
        except ValueError:
            # Fewer than 15 qualifying grids: fall back to a smaller sample.
            plot_grids_pos = random.sample(np.array(corr_grid_pos).tolist(), 5)
            plot_grids_neg = random.sample(np.array(corr_grid_neg).tolist(), 5)
        for grid in plot_grids_pos:
            plot_A = accumulate_data[:, grid, 0]
            plot_ic0 = accumulate_data[:, grid, 4]
            sns.set_style("darkgrid")
            sns.jointplot(x=plot_ic0, y=plot_A, kind="reg")
            save_name = dirs + "ic0_A_pos_grid_" + str(grid) + ".png"
            plt.savefig(save_name)
            plt.close()
        for grid in plot_grids_neg:
            plot_A = accumulate_data[:, grid, 0]
            plot_ic0 = accumulate_data[:, grid, 4]
            sns.set_style("darkgrid")
            sns.jointplot(x=plot_ic0, y=plot_A, kind="reg")
            save_name = dirs + "ic0_A_neg_grid_" + str(grid) + ".png"
            plt.savefig(save_name)
            plt.close()

        m = Basemap(lon_0=180,
                    boundinglat=65,
                    resolution='l',
                    projection='npstere')
        m.drawcoastlines(color='0.15')
        m.fillcontinents(color='#555555')
        lon = np.array(latlon_ex.Lon)
        lat = np.array(latlon_ex.Lat)
        x, y = m(lon, lat)
        m.scatter(x[plot_grids_pos],
                  y[plot_grids_pos],
                  marker='o',
                  color="r",
                  s=2,
                  alpha=0.9)
        m.scatter(x[plot_grids_neg],
                  y[plot_grids_neg],
                  marker='o',
                  color="b",
                  s=2,
                  alpha=0.9)
        for grid in plot_grids_pos:
            plt.annotate(str(grid),
                         xy=(x[grid], y[grid]),
                         xycoords='data',
                         xytext=(x[grid], y[grid]),
                         textcoords='data',
                         color='r')
        for grid in plot_grids_neg:
            plt.annotate(str(grid),
                         xy=(x[grid], y[grid]),
                         xycoords='data',
                         xytext=(x[grid], y[grid]),
                         textcoords='data',
                         color='b')
        plt.savefig(dirs + "ic0_A_grid_info_" + month + ".png", dpi=300)
        plt.close()
Ejemplo n.º 59
0
def ocean_30_vs_90_with_colorbar():
    dirs = "../result_h/mean_vector/ocean_currents_30_vs_90_with_colorbar/"
    if not os.path.exists(dirs):
        os.makedirs(dirs)

    start_list.pop()
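    # Compare the estimated ocean-current offsets (ocean_u, ocean_v) from the 30-day
    # and 90-day Helmert fits: each field is drawn as quiver arrows over a pcolormesh
    # of the current speed.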
    for i, start in enumerate(start_list):
        print("******************  {}/{}  *******************".format(
            i + 1, M - 1))
        helmert_30_30_fname = "../data/csv_Helmert_30/Helmert_30_" + str(
            start)[:6] + ".csv"
        data_30 = pd.read_csv(helmert_30_30_fname)
        data_30_vec = [
            np.array(data_30["ocean_u"]),
            np.array(data_30["ocean_v"])
        ]
        helmert_90_90_fname = "../data/csv_Helmert_both_90/Helmert_both_90_" + str(
            start)[:6] + ".csv"
        data_90 = pd.read_csv(helmert_90_90_fname)
        data_90_vec = [
            np.array(data_90["ocean_u_90"]),
            np.array(data_90["ocean_v_90"])
        ]

        vector_list = [data_30_vec, data_90_vec]
        name_list = ["_ocean_30", "_ocean_90"]
        for j in range(2):
            m = Basemap(lon_0=180,
                        boundinglat=50,
                        resolution='l',
                        projection='npstere')
            m.drawcoastlines(color='0.15')
            m.fillcontinents(color='#555555')
            lon = np.array(latlon_ex.Lon)
            lat = np.array(latlon_ex.Lat)
            x, y = m(lon, lat)
            x1 = np.reshape(x, (145, 145), order='F')
            y1 = np.reshape(y, (145, 145), order='F')
            dx1 = (x1[1, 0] - x1[0, 0]) / 2
            dy1 = (y1[0, 1] - y1[0, 0]) / 2

            x2 = np.linspace(x1[0, 0], x1[144, 0], 145)
            y2 = np.linspace(y1[0, 0], y1[0, 144], 145)
            xx, yy = np.meshgrid(x2, y2)
            xx, yy = xx.T, yy.T

            vector_u = np.ma.masked_invalid(vector_list[j][0])
            vector_v = np.ma.masked_invalid(vector_list[j][1])
            vector_speed = np.sqrt(vector_u * vector_u + vector_v * vector_v)

            data_non_wind = vector_speed
            data_non_wind = np.ma.masked_invalid(data_non_wind)
            data1 = np.reshape(data_non_wind, (145, 145), order='F')

            xx = np.hstack([xx, xx[:, 0].reshape(145, 1)])
            xx_ex = np.vstack([xx, (xx[144, :] + (xx[1, 0] - xx[0, 0]))])
            yy = np.vstack([yy, yy[0, :]])
            yy_ex = np.hstack([
                (yy[:, 0].reshape(146, 1) + (yy[0, 0] - yy[0, 1])), yy
            ])

            m.pcolormesh(xx_ex - dx1,
                         yy_ex + dy1,
                         data1,
                         cmap=plt.cm.jet,
                         vmax=0.2,
                         vmin=0)
            m.colorbar(location='bottom')
            m.quiver(x, y, vector_u, vector_v, color="k")
            save_name = dirs + str(start)[:6] + name_list[j] + ".png"
            print(save_name)
            plt.savefig(save_name, dpi=450)
            plt.close()
Ejemplo n.º 60
0
def ts_30_by_year():
    dirs = "../result_h/ts_30_by_year/"
    if not os.path.exists(dirs):
        os.makedirs(dirs)

    y_list = [
        "03", "04", "05", "06", "07", "08", "09", "10", "13", "14", "15", "16"
    ]
    month_list = [
        "01", "02", "03", "04", "05", "06", "07", "08", "09", "10", "11", "12"
    ]

    corr_all = []
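    # For each calendar month and each of the 19 area labels, plot yearly time series
    # of A, theta, R2 and epsilon2 with a shaded band per area, plus a panel showing
    # the number of grid cells contributing to the statistics.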
    for month in month_list:
        print("*************** " + month + " ***************")
        data_A_year, data_theta_year, data_R2_year, data_e2_year = [], [], [], []
        for year in y_list:
            file_list = "../data/csv_Helmert_both_30/Helmert_both_30_20" + year + month + ".csv"
            df = pd.read_csv(file_list)
            data = df.groupby("area_label")[["A", "theta", "R2",
                                             "epsilon2"]].describe()

            print((data.loc[:, ("A", "std")] < 0).sum())
            for item in ["A", "theta", "R2", "epsilon2"]:
                data.loc[:, (item, "1sigma_pos")] = data.loc[:, (
                    item, "mean")] + data.loc[:, (item, "std")]
                data.loc[:, (item, "1sigma_neg")] = data.loc[:, (
                    item, "mean")] - data.loc[:, (item, "std")]
                data.loc[:, (item, "2sigma_pos")] = data.loc[:, (
                    item, "mean")] + 2 * data.loc[:, (item, "std")]
                data.loc[:, (item, "2sigma_neg")] = data.loc[:, (
                    item, "mean")] - 2 * data.loc[:, (item, "std")]

            data_A = data.loc[:, (
                "A", ["mean", "1sigma_pos", "1sigma_neg", "count"])].values
            data_theta = data.loc[:, (
                "theta", ["mean", "1sigma_pos", "1sigma_neg", "count"])].values
            data_R2 = data.loc[:, (
                "R2", ["mean", "1sigma_pos", "1sigma_neg", "count"])].values
            data_e2 = data.loc[:, (
                "epsilon2",
                ["mean", "1sigma_pos", "1sigma_neg", "count"])].values
            """
			if year == "03":
				tmp = data.loc[:, ("A", ["mean", "1sigma_pos", "1sigma_neg", "count"])].dropna()
				print(tmp.head())
				print(np.array(tmp)[:3,:])
			"""
            data_A_year.append(data_A)
            data_theta_year.append(data_theta)
            data_R2_year.append(data_R2)
            data_e2_year.append(data_e2)

        data_A_year = np.array(data_A_year)
        data_theta_year = np.array(data_theta_year)
        data_R2_year = np.array(data_R2_year)
        data_e2_year = np.array(data_e2_year)

        dates1 = pd.date_range("2003", "2011", freq='YS')[:-1]
        dates2 = pd.date_range("2013", "2017", freq='YS')[:-1]
        #print(dates1)
        N_dates1 = len(dates1)

        #print(data_A_year[0,np.array(tmp.index),:])
        for i in range(19):
            print("\tarea: {}".format(i))
            plt.figure(figsize=(6, 4))
            gs = gridspec.GridSpec(3, 2)
            #gs.tight_layout(plt.figure(figsize=(6, 4)))

            plt.subplot(gs[0, 0])
            plt.plot(dates1, data_A_year[:N_dates1, i, 1], '-', color="k")
            plt.plot(dates2, data_A_year[N_dates1:, i, 1], '-', color="k")
            plt.fill_between(dates1,
                             data_A_year[:N_dates1, i, 2],
                             data_A_year[:N_dates1, i, 3],
                             facecolor='green',
                             alpha=0.3)
            plt.fill_between(dates2,
                             data_A_year[N_dates1:, i, 2],
                             data_A_year[N_dates1:, i, 3],
                             facecolor='green',
                             alpha=0.3)
            plt.ylim([0, 0.025])
            plt.ylabel('A')
            plt.subplot(gs[0, 0]).get_xaxis().set_major_formatter(
                mdates.DateFormatter('%y'))

            plt.subplot(gs[1, 0])
            plt.plot(dates1, data_theta_year[:N_dates1, i, 1], '-', color="k")
            plt.plot(dates2, data_theta_year[N_dates1:, i, 1], '-', color="k")
            plt.fill_between(dates1,
                             data_theta_year[:N_dates1, i, 2],
                             data_theta_year[:N_dates1, i, 3],
                             facecolor='lightskyblue',
                             alpha=0.3)
            plt.fill_between(dates2,
                             data_theta_year[N_dates1:, i, 2],
                             data_theta_year[N_dates1:, i, 3],
                             facecolor='lightskyblue',
                             alpha=0.3)
            plt.ylim([-60, 60])
            plt.yticks([-60, -40, -20, 0, 20, 40, 60])
            plt.ylabel(r'$\theta$')
            plt.subplot(gs[1, 0]).get_xaxis().set_major_formatter(
                mdates.DateFormatter('%y'))

            plt.subplot(gs[0, 1])
            plt.plot(dates1, data_R2_year[:N_dates1, i, 1], '-', color="k")
            plt.plot(dates2, data_R2_year[N_dates1:, i, 1], '-', color="k")
            plt.fill_between(dates1,
                             data_R2_year[:N_dates1, i, 2],
                             data_R2_year[:N_dates1, i, 3],
                             facecolor='coral',
                             alpha=0.3)
            plt.fill_between(dates2,
                             data_R2_year[N_dates1:, i, 2],
                             data_R2_year[N_dates1:, i, 3],
                             facecolor='coral',
                             alpha=0.3)
            plt.ylim([0, 1])
            plt.yticks([0, .2, .4, .6, .8, 1])
            plt.ylabel(r'$R^{2}$')
            plt.subplot(gs[0, 1]).get_xaxis().set_major_formatter(
                mdates.DateFormatter('%y'))

            plt.subplot(gs[1, 1])
            plt.plot(dates1, data_e2_year[:N_dates1, i, 1], '-', color="k")
            plt.plot(dates2, data_e2_year[N_dates1:, i, 1], '-', color="k")
            plt.fill_between(dates1,
                             data_e2_year[:N_dates1, i, 2],
                             data_e2_year[:N_dates1, i, 3],
                             facecolor='silver',
                             alpha=0.3)
            plt.fill_between(dates2,
                             data_e2_year[N_dates1:, i, 2],
                             data_e2_year[N_dates1:, i, 3],
                             facecolor='silver',
                             alpha=0.3)
            plt.ylim([0, 1.5])
            plt.yticks([0, .5, 1, 1.5])
            plt.ylabel(r'$e^{2}$')
            plt.subplot(gs[1, 1]).get_xaxis().set_major_formatter(
                mdates.DateFormatter('%y'))

            plt.subplot(gs[2, :])
            y1 = data_A_year[:N_dates1, i, 0]
            y2 = data_A_year[N_dates1:, i, 0]
            plt.plot(dates1, y1, '-', color="k")
            plt.plot(dates2, y2, '-', color="k")
            y_lim_min = max(y1.min() - 5, 0)
            y_lim_max = y1.max() + 5
            plt.ylim([y_lim_min, y_lim_max])
            #print(int(y_lim_max-y_lim_min+1))
            #plt.yticks(y_lim_min, y_lim_max, int(y_lim_max-y_lim_min+1))
            plt.ylabel("number of data")
            plt.subplot(gs[2, :]).get_xaxis().set_major_formatter(
                mdates.DateFormatter('%y'))
            plt.grid(True)

            try:
                plt.tight_layout()
            except:
                print("tight layout passed...")

            save_name = dirs + "all_area_" + str(i) + "_" + month + ".png"
            try:
                plt.savefig(save_name, dpi=300)
            except:
                print("save passed...")
            plt.close()