Example #1
	def test_read_weighted(self):
		"""
		Test to verify that weights are properly added to the graph.
		Copy of test1.elist with floating-point weights between -125000 and 125000.
		"""
		fname = path.join(path.dirname(__file__),'test2_w.memlist')
		G = memlist.read(fname,directed=True,ignore_duplicate_edges=True,weighted=True)

		self.assertEqual(len(G),4)
		self.assertEqual(G.size(),4)

		self.assertEqual(G.weight_(G.edge_idx_(0,1)), 28299.769933)
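These methods presumably belong to a unittest.TestCase subclass. A minimal, hypothetical harness is sketched below; the class name and the zen.io.memlist import path are assumptions, and the fixture file test2_w.memlist is expected to sit next to the test module.

import unittest
from os import path

from zen.io import memlist  # assumed import path for the memlist reader/writer


class MemlistIOTest(unittest.TestCase):  # hypothetical class name

    def test_read_weighted(self):
        # Mirrors the example above: read a weighted, directed edge list
        # and verify the node and edge counts.
        fname = path.join(path.dirname(__file__), 'test2_w.memlist')
        G = memlist.read(fname, directed=True, ignore_duplicate_edges=True, weighted=True)
        self.assertEqual(len(G), 4)
        self.assertEqual(G.size(), 4)


if __name__ == '__main__':
    unittest.main()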
Example #2
	def test_read_directed(self):
		"""
		Test to make sure the last character of the last line is read when a file does not
		end with a \n character.
		"""
		fname = path.join(path.dirname(__file__),'test2.memlist')
		G = memlist.read(fname,directed=True)
		
		self.assertEqual(G.size(),4)
		
		# The last line was read correctly. Node existence can't be checked directly
		# because G.node_object will fail when the graph has no node objects.
		self.assertTrue(G.has_edge_(2,3)) # Will raise Exception otherwise.
Example #3
    def test_read_directed(self):
        """
        Test to make sure the last character of the last line is read when a file does not
        end with a \n character.
        """
        fname = path.join(path.dirname(__file__), 'test2.memlist')
        G = memlist.read(fname, directed=True)

        self.assertEqual(G.size(), 4)

        # The last line was read correctly. Node existence can't be checked directly
        # because G.node_object will fail when the graph has no node objects.
        self.assertTrue(G.has_edge_(2, 3))  # Will raise Exception otherwise.
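The fixture behind this test is simply an edge list whose final line has no terminating newline. If such a fixture ever needs to be regenerated without hand-writing the memlist syntax, one hedged approach (a sketch, not the project's actual fixture code) is to let memlist.write produce the file and then strip the trailing newline before reading it back; the zen.io.memlist import path is an assumption.

import os
import tempfile

import zen
from zen.io import memlist  # assumed import path

# Build a small graph and write it out; memlist.write is assumed to terminate
# the file with a newline that we can strip for this scenario.
G = zen.Graph()
G.add_nodes(4)
G.add_edge_(0, 1)
G.add_edge_(2, 3)

fd, fname = tempfile.mkstemp()
os.close(fd)
memlist.write(G, fname)

# Remove any trailing newline so the final record ends exactly at EOF.
with open(fname, 'rb') as f:
    data = f.read().rstrip(b'\n')
with open(fname, 'wb') as f:
    f.write(data)

# Reading it back should still pick up the last edge (assuming node indices
# survive a write/read round trip).
G2 = memlist.read(fname, directed=False)
assert G2.has_edge_(2, 3)
os.remove(fname)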
Example #4
    def test_read_weighted(self):
        """
        Test to verify that weights are properly added to the graph.
        Copy of test1.elist with floating-point weights between -125000 and 125000.
        """
        fname = path.join(path.dirname(__file__), 'test2_w.memlist')
        G = memlist.read(fname, directed=True,
                         ignore_duplicate_edges=True, weighted=True)

        self.assertEqual(len(G), 4)
        self.assertEqual(G.size(), 4)

        self.assertEqual(G.weight_(G.edge_idx_(0, 1)), 28299.769933)
Example #5
	def test_write_undirected(self):
		
		G = zen.Graph()
		G.add_nodes(5)
		G.add_edge_(0,1)
		G.add_edge_(1,2)
		G.add_edge_(3,4)
		
		fd,fname = tempfile.mkstemp()
		os.close(fd)
		memlist.write(G,fname)
		
		G2 = memlist.read(fname,directed=False)
		
		self.assertEqual(len(G2),len(G))
		self.assertEqual(G2.size(),G.size())
Example #6
    def test_write_undirected(self):

        G = zen.Graph()
        G.add_nodes(5)
        G.add_edge_(0, 1)
        G.add_edge_(1, 2)
        G.add_edge_(3, 4)

        fd, fname = tempfile.mkstemp()
        os.close(fd)
        memlist.write(G, fname)

        G2 = memlist.read(fname, directed=False)

        self.assertEqual(len(G2), len(G))
        self.assertEqual(G2.size(), G.size())
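A natural extension of this round-trip test, sketched here under the assumption that memlist preserves node indices when a graph is written and re-read, is to confirm that the individual edges survive and to remove the temporary file afterwards; the zen.io.memlist import path is again assumed.

import os
import tempfile

import zen
from zen.io import memlist  # assumed import path

G = zen.Graph()
G.add_nodes(5)
G.add_edge_(0, 1)
G.add_edge_(1, 2)
G.add_edge_(3, 4)

fd, fname = tempfile.mkstemp()
os.close(fd)
memlist.write(G, fname)

G2 = memlist.read(fname, directed=False)

# Node and edge counts match, and (assuming stable node indices) every edge
# written out comes back.
assert len(G2) == len(G) and G2.size() == G.size()
assert G2.has_edge_(0, 1) and G2.has_edge_(1, 2) and G2.has_edge_(3, 4)

os.remove(fname)  # mkstemp does not delete the file for us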
Example #7
def profile_graph(type):
    global max_size,increment
    print 'Profiling ' + type + " graphs!"
    file = open('csv/' + type + '_graphs_profile.csv', 'w')
    file.write("Nodes FileSize LoadTime VM RAM SP NCC LCC GCC MST\n")
    profile.start_clock()
    for i in range(increment,max_size+1,increment):
        #We want to profile the time taken to load each graph into memory for each category. 
        #We use manual garbage collection to make sure we are only keeping the minimum number of 
        #objects within memory
        gc.collect()
        
        #Load the graph from disk
        filename = type + str(i) + ".graph"
        filesize = profile.filesize("storage/"+ type + "/" + filename)/1024
        
        #The operating system will kill the profiling process if there is not enough RAM to satisfy
        #the virtual memory requirements of the graph
        if not_enough_RAM("storage/"+ type + "/" + filename,ram_zen_python):
            print 'Graph is too big to be loaded in virtual memory, continuing to next graph...'
            file.write(str(i) + " " + str(filesize) + " 0 0 0 0 0 0 0 0\n")
            continue
        profile.start_clock()
        G = memlist.read("storage/" + type + "/" + filename)
        
        difftime = profile.get_time_from_clock()
        loadtime = "%d.%03d" % (difftime.seconds, difftime.microseconds // 1000)  #seconds.milliseconds, zero-padded
        vm_graph = round(profile.memory()/1024)
        ram_graph = round(profile.resident()/1024)
        #Using pickle measures the byte size of the graph object.
        
        print "Graph " + filename + " has taken " + loadtime + " to load. The graph is using " + str(vm_graph) + "kB of VM and " + str(ram_graph) + "kB of RAM"
        
        #Pre-allocate a list to hold the sampled query times
        sample = 20
        times = [0] * sample
        
        #Execute a few shortest paths and take the maximum value as a reference.
        for j in range(sample):
            index = random.randint(0, i - 1)  #valid node indices run from 0 to i-1
            #source = G.node_object(index)
            #zen.algorithms.shortest_path.single_source_shortest_path(G, index)
            #zen.algorithms.shortest_path.dijkstra_path(G,index)
            times[j] = profile.get_time_from_clock()
        difftime = max(times)
        shortestpathtime = "%d.%03d" % (difftime.seconds, difftime.microseconds // 1000)
        
        #Execute a few clustering computations and take the maximum value as a reference.
        #zen.algorithms.clustering.ncc(G)
        difftime = profile.get_time_from_clock()
        ncctime = "%d.%03d" % (difftime.seconds, difftime.microseconds // 1000)
        
        #zen.algorithms.clustering.lcc(G)
        difftime = profile.get_time_from_clock()
        lcctime = "%d.%03d" % (difftime.seconds, difftime.microseconds // 1000)
        
        #zen.algorithms.clustering.gcc(G)
        difftime = profile.get_time_from_clock()
        gcctime = "%d.%03d" % (difftime.seconds, difftime.microseconds // 1000)
        
        #zen.algorithms.spanning.minimum_spanning_tree(G)
        difftime = profile.get_time_from_clock()    
        msttime = "%d.%03d" % (difftime.seconds, difftime.microseconds // 1000)
        
        print "Time for queries : SP=" + shortestpathtime + "seconds, NCC=" + ncctime + "seconds, LCC=" + lcctime + "seconds, GCC=" + gcctime + "seconds, MST=" + msttime
        file.write(str(i) + " " + str(filesize) + " " + loadtime + " " + str(vm_graph) + " " + str(ram_graph) + " " + shortestpathtime + " " + ncctime + " " + lcctime + " " + gcctime + " " + msttime + "\n")
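profile_graph relies on module-level globals (max_size, increment) and on helpers defined elsewhere in the script (the profile module, not_enough_RAM, ram_zen_python). A hypothetical driver is sketched below; the sizes and graph category names are illustrative placeholders rather than values from the original benchmark, and the csv/ output directory is created if it is missing.

import os

# Illustrative placeholder values; the original script sets these globals elsewhere.
max_size = 100000
increment = 10000

if __name__ == '__main__':
    if not os.path.isdir('csv'):
        os.mkdir('csv')
    # Hypothetical graph categories; each needs matching files under storage/<type>/.
    for graph_type in ('erdos_renyi', 'barabasi_albert', 'watts_strogatz'):
        profile_graph(graph_type)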