def reduce(self, uid, values):
        values = [eval(text_dict) for text_dict in values]
        c = Cluster()
        c.uid = uid
        c_total = len(values)
        sqdist = 0.0

        # set cluster center to sum of members
        for doc in values:
            for tokenid in doc:
                if tokenid in c.tfidf:
                    c.tfidf[tokenid] += doc[tokenid]
                else:
                    c.tfidf[tokenid] = doc[tokenid]


        # set cluster center, currently the sum, to the mean
        for tokenid in c.tfidf:
            c.tfidf[tokenid] = c.tfidf[tokenid] / float(c_total)

        # set sqdist to the squared sum of deviations from mean
        for doc in values:
            sqdist += MathUtil.compute_distance(c.tfidf, doc, squared=True)

        # Output the cluster center into file: clusteri
        self.emit("cluster" + str(c.uid), str(c))
        # Output the within distance into file: distancei
        self.emit("distance" + str(c.uid), str(c.uid) + "|" + str(sqdist))
    def reduce(self, uid, values):
        c = Cluster()
        c.uid = uid
        sqdist = 0.0
        # accumulate member tf-idf vectors, then set the cluster center to their mean
        instances = []
        for value in values:
            doc = Document(value)
            instances.append(doc)
            for token in doc.tfidf:
                if token in c.tfidf:
                    c.tfidf[token] += doc.tfidf[token]
                else:
                    c.tfidf[token] = doc.tfidf[token]
                
        size = float(len(values))
        for token in c.tfidf.keys():
            c.tfidf[token] = c.tfidf[token]/size
        #compute the distance    
        for instance in instances:
            sqdist += MathUtil.compute_distance(map1 = c.tfidf, map2 = instance.tfidf) 

        # Output the cluster center into file: clusteri
        self.emit("cluster" + str(c.uid), str(c))
        # Output the within distance into file: distancei
        self.emit("distance" + str(c.uid), str(c.uid) + "|" + str(sqdist))
def construct(options, system, ruby_system):
    if buildEnv['PROTOCOL'] not in ('GPU_VIPER', 'GPU_VIPER_Region',
                                    'GPU_VIPER_Baseline'):
        panic("This script requires VIPER based protocols to be built.")
    cpu_sequencers = []
    cpuCluster = Cluster(name="CPU Cluster", extBW = 8, intBW = 8) # 16 GB/s
    for i in xrange((options.num_cpus + 1) / 2):

        cp_cntrl = CPCntrl()
        cp_cntrl.create(options, ruby_system, system)

        # Connect the CP controllers to the ruby network
        cp_cntrl.requestFromCore = ruby_system.network.slave
        cp_cntrl.responseFromCore = ruby_system.network.slave
        cp_cntrl.unblockFromCore = ruby_system.network.slave
        cp_cntrl.probeToCore = ruby_system.network.master
        cp_cntrl.responseToCore = ruby_system.network.master

        exec("system.cp_cntrl%d = cp_cntrl" % i)
        #
        # Add controllers and sequencers to the appropriate lists
        #
        cpu_sequencers.extend([cp_cntrl.sequencer, cp_cntrl.sequencer1])
        cpuCluster.add(cp_cntrl)
    return cpu_sequencers, cpuCluster
Example #4
    def __init__(self, tf, k, capacity):
        self.ccnt = k           # Total number of clusters. 
        self.capacity = capacity
        self.tf = open(tf)
        self.itemset = set([])  # keep uniqueness. 
        n=0                     # number of occurrences.
        cnt =0                  # number of trans. 
        for line in self.tf:
            items = line.strip().split(' ')
            self.itemset.update(items)
            n += len(items)
            cnt +=1
        n /= cnt
        
        # Create k clusters and 
        # Create list for all items appear in the trans. 
        root = Cluster(0)
        root.leaf = False
        self.clusters = [root]
        self.itemset = list(self.itemset)        
        for i in range(2):
            c = Cluster(i)
            #c.rand_init(self.itemset,int(n) )
            self.clusters.append(c)
            c.pprint()

        #self.clusters[0].add_trans(['a','b','c'])
        #self.clusters[1].add_trans(['x','y','z'])
        self.members = []
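        # Note: n is the average number of items per transaction; it appears
        # intended for the commented-out rand_init call above.  Under Python 2,
        # `n /= cnt` is integer (floor) division, since both operands are ints.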
 def testCluster(self):
     cluster_test = Cluster([point])
     self.assertEquals(cluster_test.dimension, 2)
     self.assertFalse(cluster_test.converge)
     np.testing.assert_array_equal(cluster_test.centroid, np.array([2, 2]))
     cluster_test.update_cluster(list_points)
     self.assertEquals(cluster_test.dimension, 2)
     self.assertTrue(cluster_test.converge)
     np.testing.assert_array_equal(cluster_test.centroid, np.array([2, 2]))
def checkDataPoint(dataPoint):
    for cluster in clusterDb.clusters:
        if cluster.checkDataPoint(dataPoint):
            print 'Add point to: ' + cluster.toString()
            cluster.addDataPoint(dataPoint)
            print 'and gives: ' + cluster.toString()
            return

    newCluster = Cluster()
    newCluster.addDataPoint(dataPoint)
    clusterDb.addCluster(newCluster)
    print 'Made new cluster: ' + newCluster.toString()
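# The Cluster and clusterDb objects used above are defined elsewhere.  Below
# is a minimal sketch of a cluster that would satisfy this interface; the
# distance threshold, the running-mean centroid, and the assumption that data
# points are numeric sequences are all illustrative choices, not the original
# implementation.
import math

class Cluster(object):
    def __init__(self, threshold=1.0):
        self.points = []
        self.centroid = None
        self.threshold = threshold

    def checkDataPoint(self, dataPoint):
        # Accept the point if it lies within `threshold` of the centroid.
        if self.centroid is None:
            return False
        d = math.sqrt(sum((a - b) ** 2 for a, b in zip(self.centroid, dataPoint)))
        return d <= self.threshold

    def addDataPoint(self, dataPoint):
        self.points.append(dataPoint)
        # Recompute the centroid as the coordinate-wise mean of all points.
        self.centroid = [sum(c) / float(len(self.points)) for c in zip(*self.points)]

    def toString(self):
        return "Cluster(centroid=%s, size=%d)" % (self.centroid, len(self.points))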
 def test_cluster(self):
     cluster_test = Cluster([point], 1)
     self.assertEquals(cluster_test.dimension, 2)
     self.assertFalse(cluster_test.converge)
     np.testing.assert_array_equal(cluster_test.mean, np.array([2, 2]))
     np.testing.assert_array_equal(cluster_test.std, np.array([1, 1]))
     self.assertEquals(cluster_test.cluster_probability, 1)
     cluster_test.update_cluster(listPoints, 4)
     self.assertEquals(cluster_test.dimension, 2)
     self.assertTrue(cluster_test.converge)
     np.testing.assert_array_equal(cluster_test.mean, np.array([2, 2]))
     self.assertEquals(cluster_test.std[0] - cluster_test.std[1], 0)
     self.assertEquals(cluster_test.cluster_probability, 1)
Example #8
def kmeansClustering(cluster_list, k, iterations, shuffle = True):
    """
    Compute the k-means clustering of a set of clusters (reads/kmers)
    Note: the function may not mutate cluster_list
    
    Input: List of clusters, k number of clusters, iterations, 
    select initial clusters: randomly or by size?
    Output: List of clusters.
    """
    kclusters = [] # this list to store k clusters to compare with (non-mutable)
    centroids = [] # this list to store the initial k centroids (average stats vectors)
    
    if shuffle:
        # shuffle cluster list
        random.shuffle(cluster_list) 
    else:
        # sort by size
        cluster_list.sort(key = lambda cluster: cluster.getSize(), reverse = True)

    # k initial clusters to define initial centroids
    for cluster in cluster_list[:k]:
        kclusters.append(cluster.copy())
        centroids.append(cluster.getAvgAbundance())
        
    for iteration in range(iterations):
        clusters = []
        # initialize new empty cluster objects at the centroids
        for idx in range(k):
            cluster = Cluster([])
            cluster.avg_abundance_vectors = list(centroids[idx])
            clusters.append(cluster)
        
        # for every cluster in cluster_list
        for num in range(len(cluster_list)):
            best = (float('inf'), -1)
            # compare distance to every centroid at kclusters
            for idx in range(k):
                temp = cluster_list[num].distance(kclusters[idx])
                if temp < best[0]:
                    best = (temp, idx)
            # merge cluster to best centroid in list of mutable clusters
            clusters[best[1]].mergeClusters(cluster_list[num])
        
        # make a copy of re-computed centroids: kclusters and centroids.
        for idx in range(k):
            kclusters[idx] = clusters[idx].copy()
            centroids[idx] = (clusters[idx].getAvgAbundance())
    
    return kclusters
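# Hedged usage sketch: `reads` is assumed to be a list of Cluster objects
# (built elsewhere) exposing getSize, getAvgAbundance, distance,
# mergeClusters and copy, as kmeansClustering requires.
#
#   final_clusters = kmeansClustering(reads, k=5, iterations=10, shuffle=False)
#   for c in final_clusters:
#       print c.getSize(), c.getAvgAbundance()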
 def load_cluster(self, path, initial=False):
     """
     Load the cluster centers from hdfs path.
     @param path
     """
     clusters = []
     for line in HDFSUtil.read_lines(path, hadoop_prefix=HADOOP_PREFIX):
         if not initial:
             if line.startswith("cluster"):
                 line = line.split("\t", 1)[1]
             else:
                 continue
         c = Cluster()
         c.read(line)
         clusters.append(c)
     return clusters
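      # Note on the input format read above: in the non-initial case each
      # useful line looks like "cluster<uid>\t<serialized cluster>", matching
      # what the reducers earlier in this listing emit (assuming Hadoop
      # streaming's tab-separated key/value output).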
 def main(self):
     if self.iteration == 1:
         path = self.kmeans_hdfs_path + "/cluster0/cluster0.txt"
     else:
         path = self.kmeans_hdfs_path + "/output/cluster" + str(self.iteration - 1) + "/part-00000"
     for line in HDFSUtil.read_lines(path, hadoop_prefix=self.hadoop_prefix):
         if self.iteration > 1:
             if line.startswith("cluster"):
                 line = line.split("\t", 1)[1]
             else:
                 continue
         c = Cluster()
         c.read(line)
         self.clusters.append(c)
     data = self.read_input(sys.stdin)
     for line in data:
         self.map(line)
Example #11
 def printSystemInfo(prefix):
     """Print system information to stdout. Print prefix first."""
     if prefix:
         Utils.Print(str(prefix))
     clientVersion=Cluster.getClientVersion()
     Utils.Print("UTC time: %s" % str(datetime.utcnow()))
     Utils.Print("EOS Client version: %s" % (clientVersion))
     Utils.Print("Processor: %s" % (platform.processor()))
     Utils.Print("OS name: %s" % (platform.platform()))
class ClusterSolution():
    def __init__(self, cluster_id, customers, optimal_solution = None, sum_cargo = None, radius = None, center = None, calculate = True):
        self.customers = customers
        self.sum_cargo = sum_cargo
        self.optimal_solution = optimal_solution
        self.cluster_id = cluster_id
        self.radius = radius
        self.center = center
        self.cluster = Cluster(customers)
        if calculate:
            self.custom_solution()

    def custom_solution(self):
        self.optimal_solution = self.cluster.get_solution(self.customers)
        self.sum_cargo = self.cluster.get_cargo()
        self.radius = self.cluster.get_radius()
        self.center = self.cluster.get_center()

    def next_to_visit_ids(self, customer):
        """Given a customer ID, returns the customer ID of the next customer on the optimal solution"""
        index = self.optimal_solution.route.index( filter( lambda c: c.number == customer, self.optimal_solution.route )[0] )
        return self.optimal_solution.route[( index + 1 ) % len( self.customers )].number
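    # Example (hypothetical): if the optimal route visits customers 3 -> 7 -> 5
    # and the route covers every customer, next_to_visit_ids(5) wraps around
    # and returns 3.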
Example #14
def zookeeper(restart, start, stop, force_stop, clean, zk_servers, wait_before_start, wait_before_kill):
  cluster = Cluster()
  if(restart or stop):
    logging.debug("Shutting down Zookeeper")
    cluster.shutdownZookeeper()
  
  if(force_stop):
    logging.debug("Waiting for "+str(wait_before_kill)+" seconds")
    sleep(wait_before_kill)
    logging.debug("Force Killing Zookeeper")
    cluster.killZookeeper()
  
  if(clean):
    logging.debug("Cleaning Zookeeper")
    cluster.cleanZookeeper()
  
  if(restart or start):
    logging.debug("Waiting for "+str(wait_before_start)+" seconds")
    sleep(wait_before_start)
    logging.debug("Starting Zookeeper")
    cluster.startZookeeper()
  
  click.echo(cluster.getReport())
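# The parameters of zookeeper() suggest it is exposed as a click command.  The
# wrapper below is a sketch of decorators that could produce those parameters;
# the option names, flags, and defaults are assumptions, not the project's
# actual CLI definition.
import click

@click.command()
@click.option('--restart', is_flag=True)
@click.option('--start', is_flag=True)
@click.option('--stop', is_flag=True)
@click.option('--force-stop', is_flag=True)
@click.option('--clean', is_flag=True)
@click.option('--zk-servers', default=None)
@click.option('--wait-before-start', default=0, type=int)
@click.option('--wait-before-kill', default=0, type=int)
def zookeeper_cmd(restart, start, stop, force_stop, clean, zk_servers,
                  wait_before_start, wait_before_kill):
    zookeeper(restart, start, stop, force_stop, clean, zk_servers,
              wait_before_start, wait_before_kill)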
Example #15
    def getAmenities(self):
        """Extract the required map features from the database
        for each map square and store the data in the 'amenities'
        dictionary.
        An index of the street names is stored in self.streetIndex dictionary.
        
        FIXME:  This is very inefficient - it queries the database
        for the bounding box and each required feature in turn.
        It should be more efficient to get all of the data in the bounding
        box then query that, but I haven't done it.

        01oct2009 GJ ORIGINAL VERSION
        04oct2009  GJ Added use of expandWhereClause to allow grouping
                      of different key values under the same heading
                      in the features page.
        11oct2009 GJ  Added support for areas (polygons) as well as nodes.
        """
        # Extract the data for each 1km square.
        if (self.debug): print "getAmenities()"
        c0 = self.c0                    # lon, lat
        streetList = {}
        streetIndexSorted = {}
        print c0

        # Get all the features and then create clusters ===
        if is_true(self.preferences_list['clusters']):
            print "........... Trying out something .........."
            tilesize = self.preferences_list['tilesize']
            map_width = self.map_size_x
            map_height = self.preferences_list['map_size_y']

            min_lon = c0.x
            min_lat = c0.y
            max_lon = c0.x + (tilesize * map_width)
            max_lat = c0.y + (tilesize * map_height)

            # Experimental cluster markers
            # tolerance to become a cluster should change according to the zooming factor
            tolerance = 20 * self.map_size_x

            # k-means first iteration
            clusters = {}
            if HAS_MAPNIK2:
                bbox = mapnik.Box2d(min_lon, min_lat, max_lon, max_lat)
            else:
                bbox = mapnik.Envelope(min_lon, min_lat, max_lon, max_lat)

            for featureStr in self.features:
                title,wc = self.expandWhereClause(featureStr)
                print "::::", title, wc
                feature = title
                if self.debug: print "Extracting feature %s using %s." %\
                   (feature,featureStr)            
                pois = self.getBBContents(bbox,wc)

                # poi[6]: lat
                # poi[7]: lon
                print "Current Contents for ", featureStr, pois

                for poi in pois:
                    if not self.clusters:
                        print "POI:", poi
                        c = Cluster(feature)
                        c.add_poi(poi)
                        self.clusters.append(c)
                        print "Created the First cluster: ", c.centroid
                    else:
                        # Look for the cluster where the POI should be inserted
                        # by noticing the difference with that cluster's centroid

                        # If the distance to that cluster's centroid is less than
                        # the tolerance it will be inserted there.                    
                        # This should work like a non-optimal first iteration of k-means
                        CLUSTER_FOUND = False
                        for (scounter, cluster) in enumerate(self.clusters):
                            distance = cluster.distance_from_poi(poi)
                            print "This cluster", cluster.pois
                            print distance
                            if distance < tolerance:
                                # Add POI to cluster
                                print "--- POI should get in this cluster"
                                cluster.add_poi(poi)
                                CLUSTER_FOUND = True
                                print "BREAK!!!"
                                break
                        if not CLUSTER_FOUND:
                            print "_____ Could not find a suited cluster _____"
                            # Create a new cluster
                            print "------ Creating a new cluster for this poi"
                            new_cluster = Cluster(feature)
                            new_cluster.add_poi(poi)
                            self.clusters.append(new_cluster)

            for kluster in self.clusters:
                print len(kluster.pois), vars(kluster)

            print "............................................."
            #=========================================================== 

        for tx in range(0,self.map_size_x):
            minx = c0.x + self.preferences_list['tilesize'] * tx
            for ty in range(0,self.preferences_list['map_size_y']):
                # sys.stdout.write("%s" % self.cellLabel(tx,ty))
                # sys.stdout.flush()
                print "%s " % self.cellLabel(tx,ty)
                miny = c0.y + self.preferences_list['tilesize'] * ty

                if HAS_MAPNIK2:
                    bbox = mapnik.Box2d(minx,
                                        miny,
                                        minx + self.preferences_list['tilesize'],
                                        miny + self.preferences_list['tilesize'])
                else:
                    bbox = mapnik.Envelope(minx,
                                           miny,
                                           minx + self.preferences_list['tilesize'],
                                           miny + self.preferences_list['tilesize'])
                
                fname = "image_%02d_%02d.png" % (tx,ty)
                #if self.debug: print bbox

                ########################################################
                # Extract points of interest into amenities dictionary #
                ########################################################
                
                for featureStr in self.features:
                    title,wc = self.expandWhereClause(featureStr)
                    # print "::::", title, wc
                    feature = title
                    if self.debug: print "Extracting feature %s using %s." %\
                       (feature,featureStr)
                    
                    # FIXME: This needs to be optimized for clustering
                    pois = self.getBBContents(bbox,wc)
                    for poi in pois:
                        if feature in self.amenities:
                            self.amenities[feature].append((tx,ty,poi))
                        else:
                            self.amenities[feature]=[(tx,ty,poi)]
                    # print self.amenities
                    # print len(self.amenities)
                    # print "__________________________________"

                ##############################################
                # Extract all of the streetnames in the cell #
                ##############################################
                bbStreets = self.getBBStreets(bbox)
                streetList[self.cellLabel(tx,ty)] = bbStreets

                # Render a high resolution tile of this cell.
                #fname = "%s/%s.png" % (self.outdir,self.cellLabel(tx,ty))
                #self.drawTile(bbox,1000,1000,fname)


        #####################################################
        # Sort the amenities list to remove duplicates      #
        # The result is self.amenitiesSorted which is a     #
        # dictionary of features.  Each feature contains    #
        # a dictionary of amenity names.   Each amenity     #
        # name entry contains a list of cell IDs as strings #
        #                                                   #
        #####################################################
        for feature in self.amenities.keys():
            # poi = (tx,ty,(id, name, operator))
            # example:
            # (0, 0, (663528594, 'Tangerine', None, 'restaurant', None, None, 4546182.5512522003, -13628929.6157306))
            for poi in self.amenities[feature]:
                cellId = self.cellLabel(poi[0],poi[1])
                osm_id = poi[2][0]      # 663528594
                name = poi[2][1]        # 'Tangerine'
                operator = poi[2][2]    # Company that runs the place
                amenityVal = poi[2][3]  # Type of amenity
                shopVal = poi[2][4]      
                landuse = poi[2][5]
                if name == None:
                    if amenityVal == None:
                        if shopVal == None:
                            if landuse == None:
                                name = "Unidentified thing - osm_id=%d" % osm_id
                            else:
                                name = "Unnamed %s" % landuse
                        else:
                            name = "Unnamed %s" % shopVal
                    else:
                        name = "Unnamed %s" % amenityVal
                        
                print "%s,  %s, %s" % (feature, name, cellId)
                # Entertainment,  Castro Theater,  A1
                if not feature in self.amenitiesSorted:
                    print "creating dictionary for feature %s" % feature
                    self.amenitiesSorted[feature]={}
                if name in self.amenitiesSorted[feature]:
                    print "dictionary %s already exists in feature %s" % (name,feature)
                    if not cellId in self.amenitiesSorted[feature][name]:
                        print "appending cellid"
                        self.amenitiesSorted[feature][name].append(cellId)
                    else:
                        print "skipping duplicate feature %s %s in %s\n" % (feature,name,cellId)
                else:
                    print "adding cell id to list"
                    self.amenitiesSorted[feature][name] = [cellId]
                    print "self.amenitiesSorted=", self.amenitiesSorted[feature][name]


        #########################################################
        # Sort the street index into a simple dictionary of     #
        # street name mapped to a list of which cells contain   #
        # parts of the street of that name                      #
        #########################################################

        # In the following, streetList is a dictionary with keys which
        # are cell IDs - the contents of the dictionary is a list of all
        # of the streets in a given cell.

        # StreeetIndexSorted is a dictionary with streetnames as the keys
        # The contents is a list of all of the cells that contain a way
        # of the given name.

        # self.StreetIndex is a tidied up version of StreetIndexSorted - the
        # contents is a displayable string showing which cells contain the
        # given street name.
        

        # Now we need to sort the street index so that we have a schedule
        # of streets identifying which cells the street is in.
        for cell in streetList:
            for street in streetList[cell]:
                streetName = street[1]
                if streetName != 'None':
                    if streetName in streetIndexSorted:
                        # Avoid multiple entries of the same cell for a
                        # given street name - if it exists in the list,
                        # reject it, otherwise add it to the list.
                        try:
                            i = streetIndexSorted[streetName].index(cell)
                            if self.debug:
                                print "Rejected Duplicate Entry ",\
                                      streetName,cell,i
                        except ValueError:
                            streetIndexSorted[streetName].append(cell)
                    else:
                        streetIndexSorted[streetName] = [cell]


        streets = streetIndexSorted.keys()
        streets.sort()
        for street in streets:
            cellstr=""
            first = True
            cells = streetIndexSorted[street]
            cells.sort()
            for cell in cells:
                if first==True:
                    cellstr = "%s" % cell
                    first=False
                else:
                    cellstr = "%s, %s" % (cellstr,cell)
            self.streetIndex[street]=cellstr
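# The Cluster used in the marker-clustering pass above (add_poi,
# distance_from_poi, centroid, pois) is defined elsewhere.  The sketch below
# is only consistent with how it is used here; it assumes poi[6] and poi[7]
# hold the point's coordinates (as the inline comments state) and that the
# centroid is the running mean of member positions.
import math

class Cluster(object):
    def __init__(self, feature):
        self.feature = feature
        self.pois = []
        self.centroid = None

    def add_poi(self, poi):
        self.pois.append(poi)
        # Centroid is the mean position of all POIs currently in the cluster.
        ys = [p[6] for p in self.pois]
        xs = [p[7] for p in self.pois]
        self.centroid = (sum(ys) / float(len(ys)), sum(xs) / float(len(xs)))

    def distance_from_poi(self, poi):
        # Euclidean distance from the POI to the current centroid.
        return math.hypot(poi[7] - self.centroid[1], poi[6] - self.centroid[0])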
Example #16
            "ERROR: eosio-blocklog didn't return block output")
        block_num = block['block_num']
        assert block_num == expected_block_num
        expected_block_num += 1
    Print("Block_log contiguous from block number %d to %d" %
          (firstBlockNum, expected_block_num - 1))


appArgs = AppArgs()
args = TestHelper.parse_args({
    "--dump-error-details", "--keep-logs", "-v", "--leave-running",
    "--clean-run"
})
Utils.Debug = args.v
pnodes = 2
cluster = Cluster(walletd=True)
dumpErrorDetails = args.dump_error_details
keepLogs = args.keep_logs
dontKill = args.leave_running
prodCount = 2
killAll = args.clean_run
walletPort = TestHelper.DEFAULT_WALLET_PORT
totalNodes = pnodes + 1

walletMgr = WalletMgr(True, port=walletPort)
testSuccessful = False
killEosInstances = not dontKill
killWallet = not dontKill

WalletdName = Utils.EosWalletName
ClientName = "cleos"
Example #17
def create_system(options, full_system, system, dma_devices, ruby_system):
    if buildEnv['PROTOCOL'] != 'GPU_VIPER':
        panic("This script requires the GPU_VIPER protocol to be built.")

    cpu_sequencers = []

    #
    # The ruby network creation expects the list of nodes in the system to be
    # consistent with the NetDest list.  Therefore the l1 controller nodes
    # must be listed before the directory nodes and directory nodes before
    # dma nodes, etc.
    #
    cp_cntrl_nodes = []
    tcp_cntrl_nodes = []
    sqc_cntrl_nodes = []
    tcc_cntrl_nodes = []
    dir_cntrl_nodes = []
    l3_cntrl_nodes = []

    #
    # Must create the individual controllers before the network to ensure the
    # controller constructors are called before the network constructor
    #

    # For an odd number of CPUs, still create the right number of controllers
    TCC_bits = int(math.log(options.num_tccs, 2))

    # This is the base crossbar that connects the L3s, Dirs, and cpu/gpu
    # Clusters
    crossbar_bw = None
    mainCluster = None
    if hasattr(options, 'bw_scalor') and options.bw_scalor > 0:
        #Assuming a 2GHz clock
        crossbar_bw = 16 * options.num_compute_units * options.bw_scalor
        mainCluster = Cluster(intBW=crossbar_bw)
    else:
        mainCluster = Cluster(intBW=8) # 16 GB/s
    for i in xrange(options.num_dirs):

        dir_cntrl = DirCntrl(noTCCdir = True, TCC_select_num_bits = TCC_bits)
        dir_cntrl.create(options, ruby_system, system)
        dir_cntrl.number_of_TBEs = options.num_tbes
        dir_cntrl.useL3OnWT = options.use_L3_on_WT
        # the number_of_TBEs is inclusive of TBEs below

        # Connect the Directory controller to the ruby network
        dir_cntrl.requestFromCores = MessageBuffer(ordered = True)
        dir_cntrl.requestFromCores.slave = ruby_system.network.master

        dir_cntrl.responseFromCores = MessageBuffer()
        dir_cntrl.responseFromCores.slave = ruby_system.network.master

        dir_cntrl.unblockFromCores = MessageBuffer()
        dir_cntrl.unblockFromCores.slave = ruby_system.network.master

        dir_cntrl.probeToCore = MessageBuffer()
        dir_cntrl.probeToCore.master = ruby_system.network.slave

        dir_cntrl.responseToCore = MessageBuffer()
        dir_cntrl.responseToCore.master = ruby_system.network.slave

        dir_cntrl.triggerQueue = MessageBuffer(ordered = True)
        dir_cntrl.L3triggerQueue = MessageBuffer(ordered = True)
        dir_cntrl.responseFromMemory = MessageBuffer()

        exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
        dir_cntrl_nodes.append(dir_cntrl)

        mainCluster.add(dir_cntrl)

    cpuCluster = None
    if hasattr(options, 'bw_scalor') and options.bw_scalor > 0:
        cpuCluster = Cluster(extBW = crossbar_bw, intBW = crossbar_bw)
    else:
        cpuCluster = Cluster(extBW = 8, intBW = 8) # 16 GB/s
    for i in xrange((options.num_cpus + 1) / 2):

        cp_cntrl = CPCntrl()
        cp_cntrl.create(options, ruby_system, system)

        exec("ruby_system.cp_cntrl%d = cp_cntrl" % i)
        #
        # Add controllers and sequencers to the appropriate lists
        #
        cpu_sequencers.extend([cp_cntrl.sequencer, cp_cntrl.sequencer1])

        # Connect the CP controllers and the network
        cp_cntrl.requestFromCore = MessageBuffer()
        cp_cntrl.requestFromCore.master = ruby_system.network.slave

        cp_cntrl.responseFromCore = MessageBuffer()
        cp_cntrl.responseFromCore.master = ruby_system.network.slave

        cp_cntrl.unblockFromCore = MessageBuffer()
        cp_cntrl.unblockFromCore.master = ruby_system.network.slave

        cp_cntrl.probeToCore = MessageBuffer()
        cp_cntrl.probeToCore.slave = ruby_system.network.master

        cp_cntrl.responseToCore = MessageBuffer()
        cp_cntrl.responseToCore.slave = ruby_system.network.master

        cp_cntrl.mandatoryQueue = MessageBuffer()
        cp_cntrl.triggerQueue = MessageBuffer(ordered = True)

        cpuCluster.add(cp_cntrl)

    gpuCluster = None
    if hasattr(options, 'bw_scalor') and options.bw_scalor > 0:
        gpuCluster = Cluster(extBW = crossbar_bw, intBW = crossbar_bw)
    else:
        gpuCluster = Cluster(extBW = 8, intBW = 8) # 16 GB/s
    for i in xrange(options.num_compute_units):

        tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
                             issue_latency = 1,
                             number_of_TBEs = 2560)
        # TBEs set to max outstanding requests
        tcp_cntrl.create(options, ruby_system, system)
        tcp_cntrl.WB = options.WB_L1
        tcp_cntrl.disableL1 = options.noL1
        tcp_cntrl.L1cache.tagAccessLatency = options.TCP_latency
        tcp_cntrl.L1cache.dataAccessLatency = options.TCP_latency

        exec("ruby_system.tcp_cntrl%d = tcp_cntrl" % i)
        #
        # Add controllers and sequencers to the appropriate lists
        #
        cpu_sequencers.append(tcp_cntrl.coalescer)
        tcp_cntrl_nodes.append(tcp_cntrl)

        # Connect the TCP controller to the ruby network
        tcp_cntrl.requestFromTCP = MessageBuffer(ordered = True)
        tcp_cntrl.requestFromTCP.master = ruby_system.network.slave

        tcp_cntrl.responseFromTCP = MessageBuffer(ordered = True)
        tcp_cntrl.responseFromTCP.master = ruby_system.network.slave

        tcp_cntrl.unblockFromCore = MessageBuffer()
        tcp_cntrl.unblockFromCore.master = ruby_system.network.slave

        tcp_cntrl.probeToTCP = MessageBuffer(ordered = True)
        tcp_cntrl.probeToTCP.slave = ruby_system.network.master

        tcp_cntrl.responseToTCP = MessageBuffer(ordered = True)
        tcp_cntrl.responseToTCP.slave = ruby_system.network.master

        tcp_cntrl.mandatoryQueue = MessageBuffer()

        gpuCluster.add(tcp_cntrl)

    for i in xrange(options.num_sqc):

        sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
        sqc_cntrl.create(options, ruby_system, system)

        exec("ruby_system.sqc_cntrl%d = sqc_cntrl" % i)
        #
        # Add controllers and sequencers to the appropriate lists
        #
        cpu_sequencers.append(sqc_cntrl.sequencer)

        # Connect the SQC controller to the ruby network
        sqc_cntrl.requestFromSQC = MessageBuffer(ordered = True)
        sqc_cntrl.requestFromSQC.master = ruby_system.network.slave

        sqc_cntrl.probeToSQC = MessageBuffer(ordered = True)
        sqc_cntrl.probeToSQC.slave = ruby_system.network.master

        sqc_cntrl.responseToSQC = MessageBuffer(ordered = True)
        sqc_cntrl.responseToSQC.slave = ruby_system.network.master

        sqc_cntrl.mandatoryQueue = MessageBuffer()

        # SQC also in GPU cluster
        gpuCluster.add(sqc_cntrl)

    for i in xrange(options.num_cp):

        tcp_ID = options.num_compute_units + i
        sqc_ID = options.num_sqc + i

        tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
                             issue_latency = 1,
                             number_of_TBEs = 2560)
        # TBEs set to max outstanding requests
        tcp_cntrl.createCP(options, ruby_system, system)
        tcp_cntrl.WB = options.WB_L1
        tcp_cntrl.disableL1 = options.noL1
        tcp_cntrl.L1cache.tagAccessLatency = options.TCP_latency
        tcp_cntrl.L1cache.dataAccessLatency = options.TCP_latency

        exec("ruby_system.tcp_cntrl%d = tcp_cntrl" % tcp_ID)
        #
        # Add controllers and sequencers to the appropriate lists
        #
        cpu_sequencers.append(tcp_cntrl.sequencer)
        tcp_cntrl_nodes.append(tcp_cntrl)

        # Connect the CP (TCP) controllers to the ruby network
        tcp_cntrl.requestFromTCP = MessageBuffer(ordered = True)
        tcp_cntrl.requestFromTCP.master = ruby_system.network.slave

        tcp_cntrl.responseFromTCP = MessageBuffer(ordered = True)
        tcp_cntrl.responseFromTCP.master = ruby_system.network.slave

        tcp_cntrl.unblockFromCore = MessageBuffer(ordered = True)
        tcp_cntrl.unblockFromCore.master = ruby_system.network.slave

        tcp_cntrl.probeToTCP = MessageBuffer(ordered = True)
        tcp_cntrl.probeToTCP.slave = ruby_system.network.master

        tcp_cntrl.responseToTCP = MessageBuffer(ordered = True)
        tcp_cntrl.responseToTCP.slave = ruby_system.network.master

        tcp_cntrl.mandatoryQueue = MessageBuffer()

        gpuCluster.add(tcp_cntrl)

        sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
        sqc_cntrl.create(options, ruby_system, system)

        exec("ruby_system.sqc_cntrl%d = sqc_cntrl" % sqc_ID)
        #
        # Add controllers and sequencers to the appropriate lists
        #
        cpu_sequencers.append(sqc_cntrl.sequencer)

        # SQC also in GPU cluster
        gpuCluster.add(sqc_cntrl)

    for i in xrange(options.num_tccs):

        tcc_cntrl = TCCCntrl(l2_response_latency = options.TCC_latency)
        tcc_cntrl.create(options, ruby_system, system)
        tcc_cntrl.l2_request_latency = options.gpu_to_dir_latency
        tcc_cntrl.l2_response_latency = options.TCC_latency
        tcc_cntrl_nodes.append(tcc_cntrl)
        tcc_cntrl.WB = options.WB_L2
        tcc_cntrl.number_of_TBEs = 2560 * options.num_compute_units
        # the number_of_TBEs is inclusive of TBEs below

        # Connect the TCC controllers to the ruby network
        tcc_cntrl.requestFromTCP = MessageBuffer(ordered = True)
        tcc_cntrl.requestFromTCP.slave = ruby_system.network.master

        tcc_cntrl.responseToCore = MessageBuffer(ordered = True)
        tcc_cntrl.responseToCore.master = ruby_system.network.slave

        tcc_cntrl.probeFromNB = MessageBuffer()
        tcc_cntrl.probeFromNB.slave = ruby_system.network.master

        tcc_cntrl.responseFromNB = MessageBuffer()
        tcc_cntrl.responseFromNB.slave = ruby_system.network.master

        tcc_cntrl.requestToNB = MessageBuffer(ordered = True)
        tcc_cntrl.requestToNB.master = ruby_system.network.slave

        tcc_cntrl.responseToNB = MessageBuffer()
        tcc_cntrl.responseToNB.master = ruby_system.network.slave

        tcc_cntrl.unblockToNB = MessageBuffer()
        tcc_cntrl.unblockToNB.master = ruby_system.network.slave

        tcc_cntrl.triggerQueue = MessageBuffer(ordered = True)

        exec("ruby_system.tcc_cntrl%d = tcc_cntrl" % i)

        # connect all of the wire buffers between L3 and dirs up
        # TCC cntrls added to the GPU cluster
        gpuCluster.add(tcc_cntrl)

    # Assuming no DMA devices
    assert(len(dma_devices) == 0)

    # Add cpu/gpu clusters to main cluster
    mainCluster.add(cpuCluster)
    mainCluster.add(gpuCluster)

    ruby_system.network.number_of_virtual_networks = 10

    return (cpu_sequencers, dir_cntrl_nodes, mainCluster)
args = TestHelper.parse_args({
    "--defproducera_prvt_key", "--dump-error-details", "--dont-launch",
    "--keep-logs", "-v", "--leave-running", "--clean-run", "--p2p-plugin"
})
debug = args.v
defproduceraPrvtKey = args.defproducera_prvt_key
dumpErrorDetails = args.dump_error_details
keepLogs = args.keep_logs
dontLaunch = args.dont_launch
dontKill = args.leave_running
killAll = args.clean_run
p2pPlugin = args.p2p_plugin

Utils.Debug = debug
cluster = Cluster(walletd=True, defproduceraPrvtKey=defproduceraPrvtKey)
walletMgr = WalletMgr(True)
testSuccessful = False
killEosInstances = not dontKill
killWallet = not dontKill

WalletdName = Utils.DADAPPSWalletName
ClientName = "cleos"
timeout = .5 * 12 * 2 + 60  # time for finalization with 1 producer + 60 seconds padding
Utils.setIrreversibleTimeout(timeout)

try:
    TestHelper.printSystemInfo("BEGIN")

    cluster.setWalletMgr(walletMgr)
Example #19
chainSyncStrategyStr = args.c
debug = args.v
total_nodes = pnodes
killCount = args.kill_count if args.kill_count > 0 else 1
killSignal = args.kill_sig
killLtnInstances = not args.leave_running
dumpErrorDetails = args.dump_error_details
keepLogs = args.keep_logs
killAll = args.clean_run

seed = 1
Utils.Debug = debug
testSuccessful = False

random.seed(seed)  # Use a fixed seed for repeatability.
cluster = Cluster(walletd=True)
walletMgr = WalletMgr(True)

try:
    TestHelper.printSystemInfo("BEGIN")
    cluster.setWalletMgr(walletMgr)

    cluster.setChainStrategy(chainSyncStrategyStr)
    cluster.setWalletMgr(walletMgr)

    cluster.killall(allInstances=killAll)
    cluster.cleanup()
    walletMgr.killall(allInstances=killAll)
    walletMgr.cleanup()

    Print(
    info1=prodNodes[1].getInfo(exitOnError=True)
    headBlockNum=min(int(info0["head_block_num"]),int(info1["head_block_num"]))
    libNum=min(int(info0["last_irreversible_block_num"]), int(info1["last_irreversible_block_num"]))
    return (headBlockNum, libNum)



args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run",
                              "--wallet-port"})
Utils.Debug=args.v
totalProducerNodes=2
totalNonProducerNodes=1
totalNodes=totalProducerNodes+totalNonProducerNodes
maxActiveProducers=21
totalProducers=maxActiveProducers
cluster=Cluster(walletd=True)
dumpErrorDetails=args.dump_error_details
keepLogs=args.keep_logs
dontKill=args.leave_running
prodCount=args.prod_count
killAll=args.clean_run
walletPort=args.wallet_port

walletMgr=WalletMgr(True, port=walletPort)
testSuccessful=False
killEosInstances=not dontKill
killWallet=not dontKill

WalletdName=Utils.EosWalletName
ClientName="cleos"
defproduceraPrvtKey=args.defproducera_prvt_key
defproducerbPrvtKey=args.defproducerb_prvt_key
dumpErrorDetails=args.dump_error_details
keepLogs=args.keep_logs
dontLaunch=args.dont_launch
dontKill=args.leave_running
prodCount=args.prod_count
onlyBios=args.only_bios
killAll=args.clean_run
sanityTest=args.sanity_test
p2pPlugin=args.p2p_plugin
walletPort=args.wallet_port

Utils.Debug=debug
localTest=True if server == TestHelper.LOCAL_HOST else False
cluster=Cluster(host=server, port=port, walletd=True, enableMongo=enableMongo, defproduceraPrvtKey=defproduceraPrvtKey, defproducerbPrvtKey=defproducerbPrvtKey)
walletMgr=WalletMgr(True, port=walletPort)
testSuccessful=False
killEosInstances=not dontKill
killWallet=not dontKill
dontBootstrap=sanityTest # intent is to limit the scope of the sanity test to just verifying that nodes can be started

WalletdName=Utils.EosWalletName
ClientName="cleon"
timeout = .5 * 12 * 2 + 60 # time for finalization with 1 producer + 60 seconds padding
Utils.setIrreversibleTimeout(timeout)

try:
    TestHelper.printSystemInfo("BEGIN")
    cluster.setWalletMgr(walletMgr)
    Print("SERVER: %s" % (server))
Example #22
# Parse command line arguments
args = TestHelper.parse_args({
    "-v", "--clean-run", "--dump-error-details", "--leave-running",
    "--keep-logs", "--alternate-version-labels-file"
})
Utils.Debug = args.v
killAll = args.clean_run
dumpErrorDetails = args.dump_error_details
dontKill = args.leave_running
killscsInstances = not dontKill
killWallet = not dontKill
keepLogs = args.keep_logs
alternateVersionLabelsFile = args.alternate_version_labels_file

walletMgr = WalletMgr(True)
cluster = Cluster(walletd=True)
cluster.setWalletMgr(walletMgr)


def restartNode(node: Node,
                nodeId,
                chainArg=None,
                addSwapFlags=None,
                nodscsPath=None):
    if not node.killed:
        node.kill(signal.SIGTERM)
    isRelaunchSuccess = node.relaunch(nodeId,
                                      chainArg,
                                      addSwapFlags=addSwapFlags,
                                      timeout=5,
                                      cachePopen=True,
Example #23
class DataAnalysis:
    def __init__(self, path_):
        self.path = path_
        self.data_object = None
        self.data = None
        self.forward_trips = {}
        self.backwards_trips = {}
        self.sort_ft = []
        self.sort_bt = []
        self.forward_split = {}
        self.backward_split = {}
        self.outliers = {}
        self.c_object = None

    def count_items(self, dict_):
        num = 0
        for items in dict_:
            num += len(dict_[items])
        return num

    def load_data(self, file_):
        """Loads sorted valid trips from DataProcess class"""
        self.data_object = RawDataManager(self.path)
        self.data_object.load_sorted_trips(file_)
        self.data = self.data_object.get_valid_trips()

    def get_trips(self, sequence):
        """finds trips in all valid trips that contain the desired 
		sequence of sensors where sequence = [sensor1, sensor2....]"""
        self.forward_trips = self.data_object.check_seq(sequence)
        print "Foward Trips:", len(self.forward_trips)
        reverse = sequence
        reverse.reverse()
        self.backwards_trips = self.data_object.check_seq(reverse)
        print "Backward Trips:", len(self.backwards_trips)

    def find_times(self, timelist):
        """finds the start time and end time in a list of sensor data, 
		where some sensors may have two readings. In this case the first sensor is taken"""
        if type(timelist[0][1]) == tuple:
            start_time = sorted(timelist[0], key=lambda x: x[-1])[0][0][0]
        else:
            start_time = timelist[0][0][0]
        if type(timelist[-1][1]) == tuple:
            end_time = sorted(timelist[-1], key=lambda x: x[-1])[0][0][0]
        else:
            end_time = timelist[-1][0][0]

        return (datetime.utcfromtimestamp(start_time),
                datetime.utcfromtimestamp(end_time))

    def list_times(self, group, type0_=None):
        """for a group of 50 trips, this finds the start time, end time and total time for each trip"""
        start_times = []
        end_times = []
        for trip in group:
            (start, end) = self.find_times(trip[1])
            if type0_ == 'time':
                start = start.time()
                end = end.time()

            start_times.append(start)
            end_times.append(end)

        return start_times, end_times

    def diff_time(self, time1, time2):
        """finds the difference in minutes between two timestamps"""
        start = datetime.utcfromtimestamp(time1)
        end = datetime.utcfromtimestamp(time2)
        total_time = end - start
        return total_time.total_seconds() / 60.

    def sort_trips(self, type_=None):
        """sorts trips containing desired sequnce by day and time or just time"""
        if type_ == 'day' or type_ == None:
            self.sort_ft = sorted(self.forward_trips.items(),
                                  key=lambda x: x[0][2])
            self.sort_bt = sorted(self.backwards_trips.items(),
                                  key=lambda x: x[0][2])

        if type_ == 'time':
            self.sort_ft = sorted(
                self.forward_trips.items(),
                key=lambda x: datetime.utcfromtimestamp(x[0][2]).time())
            self.sort_bt = sorted(
                self.backwards_trips.items(),
                key=lambda x: datetime.utcfromtimestamp(x[0][2]).time())

    def split_trips(self, group_number, overlap=None):
        """splits sorted trips into groups of fifty, if overlap it set to a number, groups will overlap by that number"""
        tripi = 0
        group = 1
        while tripi < len(self.sort_ft):
            i = 0
            trips = []
            while i < group_number:
                if tripi < len(self.sort_ft):
                    trips.append(self.sort_ft[tripi])
                    tripi = tripi + 1
                    i = i + 1
                else:
                    break

            if len(trips) < 10:
                old_trips = self.forward_split['Group ' + str(group - 1)]
                new_trips = old_trips + trips
                self.forward_split['Group ' + str(group - 1)] = new_trips
                break

            self.forward_split['Group ' + str(group)] = trips
            group = group + 1
            if overlap:
                if tripi < len(self.sort_ft):
                    if overlap < group_number:
                        tripi = tripi - overlap
                    else:
                        raise ValueError, 'overlap value too large'

        tripi = 0
        group = 1
        while tripi < len(self.sort_bt):
            i = 0
            trips = []
            while i < group_number:
                if tripi < len(self.sort_bt):
                    trips.append(self.sort_bt[tripi])
                    tripi = tripi + 1
                    i = i + 1
                else:
                    break

            if len(trips) < 10:
                old_trips = self.forward_split['Group ' + str(group - 1)]
                new_trips = old_trips + trips
                self.backward_split['Group ' + str(group - 1)] = new_trips
                break

            self.backward_split['Group ' + str(group)] = trips
            group = group + 1
            if overlap:
                if tripi < len(self.sort_bt):
                    if overlap < group_number:
                        tripi = tripi - overlap
                    else:
                        raise ValueError, 'overlap value too large'

    def get_avg_stdv(self, timelist):
        """returns average and stdv of a list of values (timelist)"""
        sum_ = 0
        var = 0.
        for times in timelist:
            sum_ = sum_ + times
        average = sum_ / len(timelist)

        for times in timelist:
            var = var + (times - average)**2
        var = var / len(timelist)
        std = np.sqrt(var)

        return average, std

    def get_time_value(self, info):
        """returns time of sensor detection from sensor list"""
        if type(info[1]) == tuple:
            time = sorted(info, key=lambda x: x[-1])[0][0][0]
        else:
            time = info[0][0]
        return time

    def find_R(self, X, Y):
        """finds the R value from two lists of link times"""

        if len(X) != len(Y):
            print X, Y
            raise ValueError, 'unequal length'

        sum_ = error = residual = 0.

        avgx, stdvx = self.get_avg_stdv(X)
        avgy, stdvy = self.get_avg_stdv(Y)

        n = float(len(X))

        for x, y in zip(X, Y):
            xin = (x - avgx) / stdvx
            yin = (y - avgy) / stdvy
            sum_ = sum_ + xin * yin

        R = sum_ / (n - 1)
        return R
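    # Note: find_R computes a Pearson-style correlation of the standardized
    # link times.  Because get_avg_stdv uses the population standard deviation
    # (divide by n) while the sum of products is divided by n - 1, the result
    # equals the textbook Pearson r scaled by n / (n - 1).
    # Hypothetical cross-check (X, Y are equal-length lists of link times):
    #   r = np.corrcoef(X, Y)[0, 1]
    #   assert abs(self.find_R(X, Y) - r * len(X) / (len(X) - 1.0)) < 1e-9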

    def find_sensor(self, tuple_):
        """used to find time traveled on a link, this finds the sensor_name in group sensor data"""
        if type(tuple_[1]) == tuple:
            sensors = []
            for tuples in tuple_:
                sensors.append(tuples[1])
            return sorted(sensors)[0]
        return tuple_[1]

    def split_sensors(self, list_):
        """splits the time data in a group of 50 into sensors instead of trips"""
        sensors = {}
        keys = []
        for signal in list_[0][1]:
            sensors[self.find_sensor(signal)] = []
            keys.append(self.find_sensor(signal))

        for trip in list_:
            i = 0
            for signal in trip[1]:
                long_time = self.get_time_value(signal)
                sensors[keys[i]].append(long_time)
                i = i + 1

        return sensors, keys

    def time_between(self, sensor_list):
        """finds the time between two sensors"""
        time_values = []
        sensors, keys = sensor_list
        for i in xrange(len(keys) - 1):
            times = []
            link = keys[i] + '_' + keys[i + 1]
            sensor1 = sensors[keys[i]]
            sensor2 = sensors[keys[i + 1]]

            for i in xrange(len(sensor1)):
                time = self.diff_time(sensor1[i], sensor2[i])
                times.append(time)
            time_values.append((link, times))

        return time_values

    def get_trip_times(self, time_list):
        trips = {}
        for i in range(len(time_list[0])):
            times = [link[i] for link in time_list]
            trips[i + 1] = times
        return trips

    def find_totals(self, sensor_list):
        all_totals = []
        #print sensor_list
        for j in range(len(sensor_list[0][1])):
            total = 0
            for i in range(len(sensor_list)):
                #		print sensor_list[i][j]
                total += sensor_list[i][1][j]
            all_totals.append(total)
        return all_totals

    def filter_data(self, sensors, group):
        sensor_data = [sensor_dt[0] for sensor_dt in sensors]
        times = [sensor_dt[1] for sensor_dt in sensors]
        info = self.get_trip_times(times)
        #	print "Info", info
        fi = Filter.Final(info)
        trips_new, self.outliers[group] = fi.get_new_trips()
        #print "New", trips_new
        #print len(trips_new)
        print "removed: ", len(self.outliers[group])
        temp = []
        for sensor in sensor_data:
            temp.append((sensor, []))
        #print temp
        for trip in sorted(trips_new.keys()):
            for i in range(len(trips_new[trip])):
                temp[i][1].append(trips_new[trip][i])
        return temp

    def get_excel_data(self):
        new_data = {}
        sensor_times = {}
        total_time = {}

        book = open_workbook("I-5_S_BlueStats 39-9-10-11.xls")
        sheet = book.sheet_by_name('Data-1')
        mac_ids = sheet.col_values(0, start_rowx=1)
        starttime = sheet.col_values(4, start_rowx=1)
        tt_39_09 = sheet.col_values(7, start_rowx=1)
        tt_09_10 = sheet.col_values(11, start_rowx=1)
        tt_10_11 = sheet.col_values(15, start_rowx=1)
        totals = sheet.col_values(17, start_rowx=1)
        go = True
        last = False
        start = 0
        end = 50
        g_index = 0

        while go:
            if last:
                go = False

            g_index += 1
            group = "Group " + str(g_index)
            stimes = starttime[start:end]
            t1 = tt_39_09[start:end]
            t2 = tt_09_10[start:end]
            t3 = tt_10_11[start:end]
            total = totals[start:end]
            sensors = [('39_09', t1), ('09_10', t2), ('10_11', t3)]
            sorted_sensors = self.filter_data(sensors, group)

            Rvalues = []
            j = 0
            while j < len(sorted_sensors) - 1:
                (link1, times1) = sorted_sensors[j]
                (link2, times2) = sorted_sensors[j + 1]
                link = 'tt_' + link1 + '/' + link2
                #print link
                R = self.find_R(times1, times2)
                Rvalues.append((link, R))
                j = j + 1

            new_data[group] = [r[1] for r in Rvalues]
            sensor_times[group] = sorted_sensors
            total_time[group] = total
            start = end
            end += 50

            if end >= len(mac_ids):
                end = len(mac_ids)
                last = True

        return new_data, None, sensor_times, total_time

    def compile_data(self, dict_, type_=None, filter_=True):
        """compiles all data ("R" value, Timespan of group (first starttime: laststarttime)),
		 the average time and stdv for each link, the average trip time, trip stdv) and 
		returns in dictionary. Also returns sensor times and total times"""
        data = {}
        sensor_times = {}
        total_time = {}
        for group in dict_:
            Rvalues = []
            j = 0
            sensorlist = self.split_sensors(dict_[group])
            sensors = self.time_between(sensorlist)
            #print "sensors" , sensors
            if filter_:
                sensors = self.filter_data(sensors, group)

            sensor_times[group] = sensors
            totals = self.find_totals(sensors)
            total_time[group] = totals

            while j < len(sensors) - 1:
                (link1, times1) = sensors[j]
                (link2, times2) = sensors[j + 1]
                link = 'tt_' + link1 + '/' + link2
                #print link
                R = self.find_R(times1, times2)
                Rvalues.append((link, R))
                j = j + 1

            start, end = self.list_times(dict_[group], type_)
            avgtimes = []
            for sensor in sensors:
                avg, stdv = self.get_avg_stdv(sensor[1])
                avgtimes.append((sensor[0], avg, stdv))

            timespan = self.timespan(start)
            average, stdv = self.get_avg_stdv(totals)

            #, 'Start,End,Total(min)':times,
            data[group] = {
                'R': Rvalues,
                'Timespan': timespan,
                'Segment:(Average time(sec), Average stdv)': avgtimes,
                'Average_trip(min)': average,
                'Stdv(min)': stdv
            }
        print "points removed:", self.count_items(self.outliers)
        return data, sensor_times, total_time

    def get_group_rvalues(self, sensors, type_='time', reverse=False):
        long_, longtime, l_times = self.get_long(sensors,
                                                 type_=type_,
                                                 reverse=reverse)
        GR = {}
        for group in long_:
            rlist = long_[group]['R']
            rs = [r[1] for r in rlist]
            #print rs
            GR[group] = rs

        return GR, long_, longtime, l_times

    def cluster_data(self, sensors, type_='time', reverse=False):
        GR, long_, longtime, l_times = self.get_group_rvalues(sensors,
                                                              type_=None,
                                                              reverse=reverse)
        #GR, long_, longtime, l_times = self.get_excel_data()
        self.c_object = Cluster(GR)
        gc = self.c_object.main()
        total_times = {}
        link_times = {}
        for cluster in gc:
            link_times[cluster] = {}
            total_times[cluster] = []
            for i in range(len(longtime[longtime.keys()[0]])):
                link_times[cluster][i + 1] = []
            for group in gc[cluster]:
                for i in range(len(longtime[group])):
                    ti = longtime[group][i][1]
                    link_times[cluster][i + 1].extend(ti)
                total_times[cluster].extend(l_times[group])
            print "trips in cluster " + str(cluster), len(total_times[cluster])
        #print "c_object_1", c_object
        return gc, link_times, total_times

    def load_segment(self, sensors, type0_='time'):
        """loads a segment of grid"""
        self.load_data('SortedTrips.csv')
        self.get_trips(sensors)
        self.sort_trips(type_=type0_)
        self.split_trips(20)
        foward, forward_time, tottimes_f = self.compile_data(
            self.forward_split, type_=type0_)
        backward, backward_time, tottimes_b = self.compile_data(
            self.backward_split, type_=type0_)
        return foward, backward, forward_time, backward_time, tottimes_f, tottimes_b

    def get_long(self, sensors, type_=None, reverse=False):
        """get the direction of segment with the most trips"""
        forward, backward, forward_time, backward_time, tottimes_f, tottimes_b = self.load_segment(
            sensors, type0_=type_)
        if len(forward) >= len(backward):
            long_ = forward
            longtime = forward_time
            l_times = tottimes_f
            short = backward
            shorttime = backward_time
            s_times = tottimes_b
        else:
            long_ = backward
            longtime = backward_time
            l_times = tottimes_b
            short = forward
            shorttime = forward_time
            s_times = tottimes_f

        if reverse:
            long_ = short
            longtime = shorttime
            l_times = s_times
        return long_, longtime, l_times

    """finds begining and end of list of times"""

    def timespan(self, times):
        s_times = sorted(times)
        return (s_times[0].isoformat(), s_times[-1].isoformat())

    def save_file(self, filename, dict_, del_):
        with open(filename + ".csv", 'wb') as f:
            writer = csv.writer(f, delimiter=del_)
            for row in dict_.iteritems():
                writer.writerow(row)

    def save_R_values(self, dicts):
        with open('Rvalues.csv', 'wb') as f:
            for direction in dicts:
                for group in direction.keys():
                    writer = csv.writer(f)
                    writer.writerow('')
                    writer.writerow(direction[group]['Start,End,Total(min)'])
                    fieldnames = direction[group]['R'].keys()
                    writer = csv.DictWriter(f, fieldnames=fieldnames)
                    writer.writeheader()
                    writer.writerow(direction[group]['R'])

    def main(self, streets, type_='time'):
        """processes a dictionary of streets, {segment_name: [sensors, ...]}, and saves the longest direction of each segment to a CSV file"""
        for street in streets:
            da = DataAnalysis(pathname)
            forward, backward, forward_time, backward_time, tottimes_f, tottimes_b = da.load_segment(
                streets[street], type0_=type_)
            if len(forward) >= len(backward):
                longest = forward
            else:
                longest = backward
            print street, len(longest)
            da.save_file(street, longest, ';')
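
# A minimal driver sketch for the DataAnalysis class above (hypothetical
# street-to-sensor map; pathname as used in main above):
#   streets = {'MainSt': ['s01', 's02', 's03'], 'SecondAve': ['s04', 's05']}
#   DataAnalysis(pathname).main(streets, type_='time')
# main saves the longest direction of each segment to '<street>.csv'.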
Exemple #24
0
from datetime import datetime

# Parse command line arguments
args = TestHelper.parse_args({"-v","--clean-run","--dump-error-details","--leave-running",
                              "--keep-logs", "--alternate-version-labels-file"})
Utils.Debug=args.v
killAll=args.clean_run
dumpErrorDetails=args.dump_error_details
dontKill=args.leave_running
killLtnInstances=not dontKill
killWallet=not dontKill
keepLogs=args.keep_logs
alternateVersionLabelsFile=args.alternate_version_labels_file

walletMgr=WalletMgr(True)
cluster=Cluster(walletd=True)
cluster.setWalletMgr(walletMgr)

def restartNode(node: Node, nodeId, chainArg=None, addOrSwapFlags=None, nodltnPath=None):
    if not node.killed:
        node.kill(signal.SIGTERM)
    isRelaunchSuccess = node.relaunch(nodeId, chainArg, addOrSwapFlags=addOrSwapFlags,
                                      timeout=5, cachePopen=True, nodltnPath=nodltnPath)
    assert isRelaunchSuccess, "Fail to relaunch"

def shouldNodeContainPreactivateFeature(node):
    preactivateFeatureDigest = node.getSupportedProtocolFeatureDict()["PREACTIVATE_FEATURE"]["feature_digest"]
    assert preactivateFeatureDigest
    blockHeaderState = node.getLatestBlockHeaderState()
    activatedProtocolFeatures = blockHeaderState["activated_protocol_features"]["protocol_features"]
    return preactivateFeatureDigest in activatedProtocolFeatures
def create_system(options, full_system, system, dma_devices, ruby_system):

    if not buildEnv['GPGPU_SIM']:
        m5.util.panic("This script requires GPGPU-Sim integration to be built.")

    # Run the protocol script to setup CPU cluster, directory and DMA
    (all_sequencers, dir_cntrls, dma_cntrls, cpu_cluster) = \
                                        VI_hammer.create_system(options,
                                                                full_system,
                                                                system,
                                                                dma_devices,
                                                                ruby_system)

    cpu_cntrl_count = len(cpu_cluster) + len(dir_cntrls)

    #
    # Build GPU cluster
    #
    # Empirically, Fermi per-core bandwidth peaks at roughly 23GB/s
    # (32B/cycle @ 772MHz). Use ~16B per Ruby cycle to match this. Maxwell
    # per-core bandwidth peaks at 40GB/s (42B/cycle @ 1029MHz). Use ~24B per
    # Ruby cycle to match this.
    if options.gpu_core_config == 'Fermi':
        l1_cluster_bw = 16
    elif options.gpu_core_config == 'Maxwell':
        l1_cluster_bw = 24
    elif options.gpu_core_config == 'Tegra':
        #FIXME using Fermi for now
        l1_cluster_bw = 16
    else:
        m5.util.fatal("Unknown GPU core config: %s" % options.gpu_core_config)

    gpu_cluster = Cluster(intBW = l1_cluster_bw, extBW = l1_cluster_bw)
    gpu_cluster.disableConnectToParent()

    l2_bits = int(math.log(options.gpu_num_l2caches, 2))
    block_size_bits = int(math.log(options.cacheline_size, 2))
    # This represents the L1 to L2 interconnect latency
    # NOTES! 1) This latency is in Ruby (cache) cycles, not SM cycles
    #        2) Since the cluster interconnect doesn't model multihop latencies,
    #           model these latencies with the controller latency variables. If
    #           the interconnect model is changed, latencies will need to be
    #           adjusted for reasonable total memory access delay.
    per_hop_interconnect_latency = 45 # ~15 GPU cycles
    num_dance_hall_hops = int(math.log(options.num_sc, 2))
    if num_dance_hall_hops == 0:
        num_dance_hall_hops = 1
    l1_to_l2_noc_latency = per_hop_interconnect_latency * num_dance_hall_hops
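    # For example (assuming options.num_sc = 16): num_dance_hall_hops =
    # log2(16) = 4, so l1_to_l2_noc_latency = 45 * 4 = 180 Ruby cycles
    # (roughly 60 GPU cycles at the ~3:1 ratio implied above).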

    #
    # Caches for GPU cores
    #
    for i in xrange(options.num_sc):
        #
        # First create the Ruby objects associated with the GPU cores
        #
        data_cache = L1Cache(size = options.sc_l1_size,
                            assoc = options.sc_l1_assoc,
                            replacement_policy = LRUReplacementPolicy(),
                            start_index_bit = block_size_bits,
                            dataArrayBanks = 4,
                            tagArrayBanks = 4,
                            dataAccessLatency = 4,
                            tagAccessLatency = 4,
                            resourceStalls = False)

        data_l1_cntrl = GPUL1Cache_Controller(version = i*2,
                                  cache = data_cache,
                                  l2_select_num_bits = l2_bits,
                                  num_l2 = options.gpu_num_l2caches,
                                  transitions_per_cycle = options.ports,
                                  issue_latency = l1_to_l2_noc_latency,
                                  number_of_TBEs = options.gpu_l1_buf_depth,
                                  ruby_system = ruby_system)

        data_gpu_seq = RubySequencer(version = options.num_cpus + i*2,
                            icache = data_cache,
                            dcache = data_cache,
                            max_outstanding_requests = options.gpu_l1_buf_depth,
                            ruby_system = ruby_system,
                            deadlock_threshold = 2000000,
                            connect_to_io = False)

        tex_cache = L1Cache(size = options.sc_tl1_size,
                            assoc = options.sc_tl1_assoc,
                            replacement_policy = LRUReplacementPolicy(),
                            start_index_bit = block_size_bits,
                            dataArrayBanks = 4,
                            tagArrayBanks = 4,
                            dataAccessLatency = 4,
                            tagAccessLatency = 4,
                            resourceStalls = False)

        tex_l1_cntrl = GPUL1Cache_Controller(version = i*2+1,
                                  cache = tex_cache,
                                  l2_select_num_bits = l2_bits,
                                  num_l2 = options.gpu_num_l2caches,
                                  transitions_per_cycle = options.ports,
                                  issue_latency = l1_to_l2_noc_latency,
                                  number_of_TBEs = options.gpu_tl1_buf_depth,
                                  ruby_system = ruby_system)

        tex_gpu_seq = RubySequencer(version = options.num_cpus + i*2+1,
                            icache = tex_cache,
                            dcache = tex_cache,
                            max_outstanding_requests = options.gpu_tl1_buf_depth,
                            ruby_system = ruby_system,
                            deadlock_threshold = 2000000,
                            connect_to_io = False)

        data_l1_cntrl.sequencer = data_gpu_seq
        tex_l1_cntrl.sequencer = tex_gpu_seq

        data_i = i*2
        tex_i = i*2 + 1
        exec("ruby_system.l1_cntrl_sp%02d = data_l1_cntrl" % data_i)
        exec("ruby_system.l1_cntrl_sp%02d = tex_l1_cntrl" % tex_i)

        #
        # Add controllers and sequencers to the appropriate lists
        #
        all_sequencers.append(data_gpu_seq)
        all_sequencers.append(tex_gpu_seq)
        gpu_cluster.add(data_l1_cntrl)
        gpu_cluster.add(tex_l1_cntrl)

        # Connect the controllers to the network
        data_l1_cntrl.requestFromL1Cache = MessageBuffer(ordered = True)
        data_l1_cntrl.requestFromL1Cache.master = ruby_system.network.slave
        data_l1_cntrl.responseToL1Cache = MessageBuffer(ordered = True)
        data_l1_cntrl.responseToL1Cache.slave = ruby_system.network.master
        data_l1_cntrl.mandatoryQueue = MessageBuffer()

        tex_l1_cntrl.requestFromL1Cache = MessageBuffer(ordered = True)
        tex_l1_cntrl.requestFromL1Cache.master = ruby_system.network.slave
        tex_l1_cntrl.responseToL1Cache = MessageBuffer(ordered = True)
        tex_l1_cntrl.responseToL1Cache.slave = ruby_system.network.master
        tex_l1_cntrl.mandatoryQueue = MessageBuffer()


    l2_index_start = block_size_bits + l2_bits
    # Use L2 cache and interconnect latencies to calculate protocol latencies
    # NOTES! 1) These latencies are in Ruby (cache) cycles, not SM cycles
    #        2) Since the cluster interconnect doesn't model multihop latencies,
    #           model these latencies with the controller latency variables. If
    #           the interconnect model is changed, latencies will need to be
    #           adjusted for reasonable total memory access delay.
    l2_cache_access_latency = 30 # ~10 GPU cycles
    l2_to_l1_noc_latency = per_hop_interconnect_latency * num_dance_hall_hops
    l2_to_mem_noc_latency = 125 # ~40 GPU cycles
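    # For example (again assuming options.num_sc = 16, so the 180-cycle NoC
    # latency computed above): l2_response_latency below becomes 30 + 180 = 210
    # Ruby cycles, while l2_request_latency is the fixed 125-cycle path toward
    # memory.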
    # Empirically, Fermi per-L2 bank bandwidth peaks at roughly 66GB/s
    # (92B/cycle @ 772MHz). Use ~34B per Ruby cycle to match this. Maxwell
    # per-L2 bank bandwidth peaks at 123GB/s (128B/cycle @ 1029MHz). Use ~64B
    # per Ruby cycle to match this.
    if options.gpu_core_config == 'Fermi':
        l2_cluster_bw = 34
    elif options.gpu_core_config == 'Maxwell':
        l2_cluster_bw = 68
    elif options.gpu_core_config == 'Tegra':
        #FIXME using Fermi configs for now
        l2_cluster_bw = 34
    else:
        m5.util.fatal("Unknown GPU core config: %s" % options.gpu_core_config)

    l2_clusters = []
    for i in xrange(options.gpu_num_l2caches):
        #
        # First create the Ruby objects associated with this cpu
        #
        l2_cache = L2Cache(size = options.sc_l2_size,
                           assoc = options.sc_l2_assoc,
                           start_index_bit = l2_index_start,
                           replacement_policy = LRUReplacementPolicy(),
                           dataArrayBanks = 4,
                           tagArrayBanks = 4,
                           dataAccessLatency = 4,
                           tagAccessLatency = 4,
                           resourceStalls = options.gpu_l2_resource_stalls)

        l2_cntrl = GPUL2Cache_Controller(version = i,
                                L2cache = l2_cache,
                                transitions_per_cycle = options.ports,
                                l2_response_latency = l2_cache_access_latency +
                                                      l2_to_l1_noc_latency,
                                l2_request_latency = l2_to_mem_noc_latency,
                                cache_response_latency = l2_cache_access_latency,
                                ruby_system = ruby_system)

        exec("ruby_system.l2_cntrl%d = l2_cntrl" % i)
        l2_cluster = Cluster(intBW = l2_cluster_bw, extBW = l2_cluster_bw)
        l2_cluster.add(l2_cntrl)
        gpu_cluster.add(l2_cluster)
        l2_clusters.append(l2_cluster)

        # Connect the controller to the network
        l2_cntrl.responseToL1Cache = MessageBuffer(ordered = True)
        l2_cntrl.responseToL1Cache.master = ruby_system.network.slave
        l2_cntrl.requestFromCache = MessageBuffer()
        l2_cntrl.requestFromCache.master = ruby_system.network.slave
        l2_cntrl.responseFromCache = MessageBuffer()
        l2_cntrl.responseFromCache.master = ruby_system.network.slave
        l2_cntrl.unblockFromCache = MessageBuffer()
        l2_cntrl.unblockFromCache.master = ruby_system.network.slave

        l2_cntrl.requestFromL1Cache = MessageBuffer(ordered = True)
        l2_cntrl.requestFromL1Cache.slave = ruby_system.network.master
        l2_cntrl.forwardToCache = MessageBuffer()
        l2_cntrl.forwardToCache.slave = ruby_system.network.master
        l2_cntrl.responseToCache = MessageBuffer()
        l2_cntrl.responseToCache.slave = ruby_system.network.master

        l2_cntrl.triggerQueue = MessageBuffer()

    ############################################################################
    # Pagewalk cache
    # NOTE: We use a CPU L1 cache controller here. This is to facilitate MMU
    #       cache coherence (as the GPU L1 caches are incoherent without
    #       flushes). The L2 cache is small and should have minimal effect on
    #       performance (see Section 6.2 of Power et al. HPCA 2014).
    pwd_cache = L1Cache(size = options.pwc_size,
                            assoc = options.pwc_assoc, 
                            replacement_policy = options.pwc_policy,
                            start_index_bit = block_size_bits,
                            resourceStalls = False)
    # Small cache since CPU L1 requires I and D
    pwi_cache = L1Cache(size = "512B",
                            assoc = 2,
                            replacement_policy = LRUReplacementPolicy(),
                            start_index_bit = block_size_bits,
                            resourceStalls = False)

    # Small cache since CPU L1 controller requires L2
    l2_cache = L2Cache(size = "512B",
                           assoc = 2,
                           start_index_bit = block_size_bits,
                           resourceStalls = False)

    l1_cntrl = L1Cache_Controller(version = options.num_cpus,
                                  L1Icache = pwi_cache,
                                  L1Dcache = pwd_cache,
                                  L2cache = l2_cache,
                                  send_evictions = False,
                                  transitions_per_cycle = options.ports,
                                  issue_latency = l1_to_l2_noc_latency,
                                  cache_response_latency = 1,
                                  l2_cache_hit_latency = 1,
                                  number_of_TBEs = options.gpu_l1_buf_depth,
                                  ruby_system = ruby_system)

    cpu_seq = RubySequencer(version = options.num_cpus + options.num_sc*2,
                            icache = pwd_cache, # Never get data from pwi_cache
                            dcache = pwd_cache,
                            dcache_hit_latency = 8,
                            icache_hit_latency = 8,
                            max_outstanding_requests = options.gpu_l1_buf_depth,
                            ruby_system = ruby_system,
                            deadlock_threshold = 2000000,
                            connect_to_io = False)

    l1_cntrl.sequencer = cpu_seq


    ruby_system.l1_pw_cntrl = l1_cntrl
    all_sequencers.append(cpu_seq)

    gpu_cluster.add(l1_cntrl)

    # Connect the L1 controller and the network
    # Connect the buffers from the controller to network
    l1_cntrl.requestFromCache = MessageBuffer()
    l1_cntrl.requestFromCache.master = ruby_system.network.slave
    l1_cntrl.responseFromCache = MessageBuffer()
    l1_cntrl.responseFromCache.master = ruby_system.network.slave
    l1_cntrl.unblockFromCache = MessageBuffer()
    l1_cntrl.unblockFromCache.master = ruby_system.network.slave

    # Connect the buffers from the network to the controller
    l1_cntrl.forwardToCache = MessageBuffer()
    l1_cntrl.forwardToCache.slave = ruby_system.network.master
    l1_cntrl.responseToCache = MessageBuffer()
    l1_cntrl.responseToCache.slave = ruby_system.network.master

    l1_cntrl.mandatoryQueue = MessageBuffer()
    l1_cntrl.triggerQueue = MessageBuffer()


    #
    # Create controller for the copy engine to connect to in GPU cluster
    # Cache is unused by controller
    #
    cache = L1Cache(size = "4096B", assoc = 2)

    # Setting options.ce_buffering = 0 indicates that the CE can use infinite
    # buffering, but we need to specify a finite number of outstanding accesses
    # that the CE is allowed to issue. Just set it to some large number, greater
    # than normal memory access latencies, to ensure that the sequencer can
    # service one access per cycle.
    max_out_reqs = options.ce_buffering
    if max_out_reqs == 0:
        max_out_reqs = 1024
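    # e.g. if options.ce_buffering is left at 0, the copy engine sequencer may
    # keep up to 1024 requests in flight, well above typical memory access
    # latencies as the comment above intends.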

    gpu_ce_seq = RubySequencer(version = options.num_cpus + options.num_sc*2 +1,
                               icache = cache,
                               dcache = cache,
                               max_outstanding_requests = max_out_reqs,
                               support_inst_reqs = False,
                               ruby_system = ruby_system,
                               connect_to_io = False)

    gpu_ce_cntrl = GPUCopyDMA_Controller(version = 0,
                                  sequencer = gpu_ce_seq,
                                  transitions_per_cycle = options.ports,
                                  number_of_TBEs = max_out_reqs,
                                  ruby_system = ruby_system)

    gpu_ce_cntrl.responseFromDir = MessageBuffer(ordered = True)
    gpu_ce_cntrl.responseFromDir.slave = ruby_system.network.master
    gpu_ce_cntrl.reqToDirectory = MessageBuffer(ordered = True)
    gpu_ce_cntrl.reqToDirectory.master = ruby_system.network.slave

    gpu_ce_cntrl.mandatoryQueue = MessageBuffer()

    ruby_system.ce_cntrl = gpu_ce_cntrl

    all_sequencers.append(gpu_ce_seq)

    # To limit the copy engine's bandwidth, we add it to a limited bandwidth
    # cluster. Approximate settings are as follows (assuming 2GHz Ruby clock):
    #   PCIe v1.x x16 effective bandwidth ~= 4GB/s: intBW = 3, extBW = 3
    #   PCIe v2.x x16 effective bandwidth ~= 8GB/s: intBW = 5, extBW = 5
    #   PCIe v3.x x16 effective bandwidth ~= 16GB/s: intBW = 10, extBW = 10
    #   PCIe v4.x x16 effective bandwidth ~= 32GB/s: intBW = 21, extBW = 21
    # NOTE: Bandwidth may bottleneck at other parts of the memory hierarchy,
    # so bandwidth considerations should be made in other parts of the memory
    # hierarchy also.
    gpu_ce_cluster = Cluster(intBW = 10, extBW = 10)
    gpu_ce_cluster.add(gpu_ce_cntrl)
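    # To model a different interconnect, only the cluster bandwidths change;
    # e.g. a PCIe v2.x x16 link (per the table above) would be approximated by:
    #   gpu_ce_cluster = Cluster(intBW = 5, extBW = 5)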


    #z cache
    z_cache = L1Cache(size = options.sc_zl1_size,
          assoc = options.sc_zl1_assoc,
          replacement_policy = LRUReplacementPolicy(),
          start_index_bit = block_size_bits,
          dataArrayBanks = 8,
          tagArrayBanks = 8,
          dataAccessLatency = 1,
          tagAccessLatency = 1,
          resourceStalls = False)

    z_cntrl = GPUL1Cache_Controller(version = options.num_sc*2,
          cache = z_cache,
          l2_select_num_bits = l2_bits,
          num_l2 = options.gpu_num_l2caches,
          issue_latency = l1_to_l2_noc_latency,
          number_of_TBEs = options.gpu_zl1_buf_depth,
          ruby_system = ruby_system)

    z_seq = RubySequencer(version = options.num_cpus + options.num_sc*2+2,
          icache = z_cache,
          dcache = z_cache,
          max_outstanding_requests = options.gpu_zl1_buf_depth,
          ruby_system = ruby_system,
          deadlock_threshold = 2000000,
          connect_to_io = False)

    z_cntrl.sequencer = z_seq
    ruby_system.l1z_cntrl = z_cntrl

    all_sequencers.append(z_seq)
    gpu_cluster.add(z_cntrl)
   
    z_cntrl.requestFromL1Cache = MessageBuffer(ordered = True)
    z_cntrl.requestFromL1Cache.master = ruby_system.network.slave
    z_cntrl.responseToL1Cache = MessageBuffer(ordered = True)
    z_cntrl.responseToL1Cache.slave = ruby_system.network.master

    z_cntrl.mandatoryQueue = MessageBuffer()
    #z cache

    acl_cntrls = []
    if options.accel_cfg_file:
        for idx, datapath in enumerate(system.datapaths):
          acl_cache = L1Cache(size = str(datapath.cacheSize),
                assoc = datapath.cacheAssoc,
                replacement_policy = LRUReplacementPolicy(),
                start_index_bit = block_size_bits,
                dataAccessLatency = datapath.cacheHitLatency)
          acli_cache = L1Cache(size = "512B",
                assoc = 2,
                replacement_policy = LRUReplacementPolicy(),
                start_index_bit = block_size_bits,
                dataAccessLatency = datapath.cacheHitLatency)

          #l2 cache to satisfy ruby
          l2_cache = L2Cache(size = "512B",
                           #size = str(datapath.cacheSize),
                           assoc = 2,
                           #assoc = datapath.cacheAssoc,
                           start_index_bit = block_size_bits)
         
          assert (not options.is_perfect_cache) #TODO: handle this option

          acl_cntrl = L1Cache_Controller(version = options.num_cpus+idx+1,
                L1Dcache = acl_cache,
                L1Icache = acli_cache, #never used
                L2cache = l2_cache,
                no_mig_atomic = not options.allow_atomic_migration,
                send_evictions = send_evicts(options),
                transitions_per_cycle = options.ports,
                ruby_system = ruby_system)

          acl_seq = RubySequencer(version = options.num_cpus + options.num_sc*2+3+idx,
                icache = acl_cache,
                dcache = acl_cache,
                ruby_system = ruby_system,
                deadlock_threshold = 2000000)
        
          # Connect the L1 controller and the network
          # Connect the buffers from the controller to network
          acl_cntrl.requestFromCache = MessageBuffer()
          acl_cntrl.requestFromCache.master = ruby_system.network.slave
          acl_cntrl.responseFromCache = MessageBuffer()
          acl_cntrl.responseFromCache.master = ruby_system.network.slave
          acl_cntrl.unblockFromCache = MessageBuffer()
          acl_cntrl.unblockFromCache.master = ruby_system.network.slave

          # Connect the buffers from the network to the controller
          acl_cntrl.forwardToCache = MessageBuffer()
          acl_cntrl.forwardToCache.slave = ruby_system.network.master
          acl_cntrl.responseToCache = MessageBuffer()
          acl_cntrl.responseToCache.slave = ruby_system.network.master

          acl_cntrl.mandatoryQueue = MessageBuffer()
          acl_cntrl.triggerQueue = MessageBuffer()

          acl_cntrl.sequencer = acl_seq
          exec("ruby_system.acl_cntrl%02d = acl_cntrl" % idx)
          all_sequencers.append(acl_seq)
          acl_cntrls.append(acl_cntrl)


    complete_cluster = Cluster(intBW = 32, extBW = 32)
    complete_cluster.add(gpu_ce_cluster)
    complete_cluster.add(cpu_cluster)
    complete_cluster.add(gpu_cluster)

    for cntrl in dir_cntrls:
        complete_cluster.add(cntrl)

    for cntrl in dma_cntrls:
        complete_cluster.add(cntrl)

    for cntrl in acl_cntrls:
        complete_cluster.add(cntrl)

    for cluster in l2_clusters:
        complete_cluster.add(cluster)

    return (all_sequencers, dir_cntrls, complete_cluster)
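
# A minimal invocation sketch for create_system above (argument names assumed
# to follow the surrounding gem5 Ruby config style):
#   (all_sequencers, dir_cntrls, complete_cluster) = \
#       create_system(options, full_system, system, dma_devices, ruby_system)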
dumpErrorDetails = args.dump_error_details
onlyBios = args.only_bios
killAll = args.clean_run

Utils.Debug = debug

killSeatInstances = not dontKill
topo = "mesh"
delay = 1
prodCount = 1  # producers per producer node
pnodes = 1
total_nodes = pnodes
actualTest = "tests/nodeseat_run_test.py"
testSuccessful = False

cluster = Cluster()
try:
    Print("BEGIN")
    cluster.killall(allInstances=killAll)
    cluster.cleanup()

    Print(
        "producing nodes: %s, non-producing nodes: %d, topology: %s, delay between nodes launch(seconds): %d"
        % (pnodes, total_nodes - pnodes, topo, delay))
    Print("Stand up cluster")
    if cluster.launch(pnodes,
                      total_nodes,
                      prodCount,
                      topo,
                      delay,
                      onlyBios=onlyBios,
Exemple #27
0
Print = Utils.Print
errorExit = Utils.errorExit
cmdError = Utils.cmdError

args = TestHelper.parse_args(
    {"-v", "--clean-run", "--dump-error-details", "--keep-logs"})
debug = args.v
killAll = args.clean_run
killEosInstances = True
keepLogs = args.keep_logs
dumpErrorDetails = args.dump_error_details

Utils.Debug = debug
https_port = 5555
cluster = Cluster(walletd=True)

testSuccessful = False

ClientName = "cleos"
timeout = .5 * 12 * 2 + 60  # time for finalization with 1 producer + 60 seconds padding
Utils.setIrreversibleTimeout(timeout)

try:
    TestHelper.printSystemInfo("BEGIN")

    Print("Stand up cluster")
    # standup cluster with HTTPS enabled, but not configured
    # HTTP should still work
    extraArgs = {0: "--https-server-address 127.0.0.1:5555"}
    # specificExtraNodeosArgs=extraArgs
Exemple #28
0
import pandas as pd
from Cluster import Cluster


def normalize(data, min_tab, max_tab):
    for index, row in min_tab.iteritems():
        data.at[index] = (data.at[index] - min_tab.at[index]) / (
            max_tab.at[index] - min_tab.at[index])
    return data
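
# A worked example of the min-max scaling above (hypothetical values): a
# feature with min = 2 and max = 10 maps a raw value of 4 to
# (4 - 2) / (10 - 2) = 0.25.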


input_file = "./zoo.data"

df = pd.read_csv(input_file, header=None)
dcat = df.describe().transpose()

mins = dcat['min']
maxs = dcat['max']

clusters = []

for index, row in df.iterrows():
    cluster = Cluster(data1=normalize(row, mins, maxs))
    clusters.append(cluster)
nodesFile=args.nodes_file
seed=args.seed
dontKill=args.leave_running
dumpErrorDetails=args.dump_error_details
killAll=args.clean_run

killWallet=not dontKill
killEosInstances=not dontKill
if nodesFile is not None:
    killEosInstances=False

Utils.Debug=debug
testSuccessful=False

random.seed(seed) # Use a fixed seed for repeatability.
cluster=Cluster(walletd=True)
walletMgr=WalletMgr(True)

try:
    cluster.setWalletMgr(walletMgr)

    if nodesFile is not None:
        jsonStr=None
        with open(nodesFile, "r") as f:
            jsonStr=f.read()
        if not cluster.initializeNodesFromJson(jsonStr):
            errorExit("Failed to initilize nodes from Json string.")
        total_nodes=len(cluster.getNodes())
    else:
        cluster.killall(allInstances=killAll)
        cluster.cleanup()
Exemple #30
0
        
        return 0 if profit2 == 0 else float(profit1) / profit2
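        # e.g. (hypothetical values) profit1 = 3, profit2 = 2 yields 1.5; note
        # that float(profit1) / profit2 avoids Python 2 integer truncation.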

    def toString(self):
        i = 0
        s = ''
        for cluster in self.clusters:
            s += '\t' + str(i) + ' => ' + cluster.toString() + ',\n'
            i += 1
        
        return '{\n%s}' % s
    
if __name__ == "__main__":
    t1 = Transaction('a,b').getTransaction()
    t2 = Transaction('a,b,c').getTransaction()    
    cluster1 = Cluster(t1)
    cluster1.addTransaction(t2)
    print 'cluster1:'
    print cluster1.getCluster()    
    
    t3 = Transaction('e,f').getTransaction() 
    t4 = Transaction('e,g,f').getTransaction() 
    cluster2 = Cluster(t3)
    cluster2.addTransaction(t4)
    print 'cluster2:'
    print cluster2.getCluster()
    
    print "clusters:"
    clope = Clope()
    
    clusters = []
Exemple #31
0
debug = args.v
total_nodes = pnodes
killCount = args.kill_count if args.kill_count > 0 else 1
killSignal = args.kill_sig
killRsnInstances = not args.leave_running
dumpErrorDetails = args.dump_error_details
keepLogs = args.keep_logs
killAll = args.clean_run
p2pPlugin = args.p2p_plugin

seed = 1
Utils.Debug = debug
testSuccessful = False

random.seed(seed)  # Use a fixed seed for repeatability.
cluster = Cluster(walletd=True)
walletMgr = WalletMgr(True)

try:
    TestHelper.printSystemInfo("BEGIN")

    cluster.setChainStrategy(chainSyncStrategyStr)
    cluster.setWalletMgr(walletMgr)

    cluster.killall(allInstances=killAll)
    cluster.cleanup()
    walletMgr.killall(allInstances=killAll)
    walletMgr.cleanup()

    Print(
        "producing nodes: %d, topology: %s, delay between nodes launch(seconds): %d, chain sync strategy: %s"
Exemple #32
0
def status(role):
  cluster = Cluster()
  if len(role) > 0 :
    cluster = cluster.getServersByRole(role)
  
  click.echo(cluster.getReport())
Exemple #33
0
    def testGetNearestCluster(self):
        self.assertEquals(
            KMeans.get_nearest_cluster(
                [cluster, Cluster([Point(np.array([8, 8]))])], point), 0)
Exemple #34
0
# Parse command line arguments
args = TestHelper.parse_args({
    "-v", "--clean-run", "--dump-error-details", "--leave-running",
    "--keep-logs"
})
Utils.Debug = args.v
killAll = args.clean_run
dumpErrorDetails = args.dump_error_details
dontKill = args.leave_running
killrsnInstances = not dontKill
killWallet = not dontKill
keepLogs = args.keep_logs

walletMgr = WalletMgr(True)
cluster = Cluster(walletd=True)
cluster.setWalletMgr(walletMgr)

testSuccessful = False
try:
    TestHelper.printSystemInfo("BEGIN")
    cluster.killall(allInstances=killAll)
    cluster.cleanup()

    # The following is the list of chainbase objects that need to be verified:
    # - account_object (bootstrap)
    # - code_object (bootstrap)
    # - generated_transaction_object
    # - global_property_object
    # - key_value_object (bootstrap)
    # - protocol_state_object (bootstrap)
Exemple #35
0
# The following test case will test the Protocol Feature JSON reader of the blockchain


def restartNode(node: Node, nodeId, chainArg=None, addOrSwapFlags=None):
    if not node.killed:
        node.kill(signal.SIGTERM)
    isRelaunchSuccess = node.relaunch(nodeId,
                                      chainArg,
                                      addOrSwapFlags=addOrSwapFlags,
                                      timeout=5,
                                      cachePopen=True)
    assert isRelaunchSuccess, "Fail to relaunch"


walletMgr = WalletMgr(True)
cluster = Cluster(walletd=True)
cluster.setWalletMgr(walletMgr)

# List to contain the test result message
testSuccessful = False
try:
    TestHelper.printSystemInfo("BEGIN")
    cluster.killall(allInstances=killAll)
    cluster.cleanup()
    cluster.launch(extraNodroxeArgs=" --plugin roxe::producer_api_plugin ",
                   dontBootstrap=True,
                   pfSetupPolicy=PFSetupPolicy.NONE)
    biosNode = cluster.biosNode

    # Modify the JSON file and then restart the node so it updates the internal state
    newSubjectiveRestrictions = {
Exemple #36
0
killEosInstances=not args.leave_running
dumpErrorDetails=args.dump_error_details
keepLogs=args.keep_logs
killAll=args.clean_run
relaunchTimeout=10
# Don't set this too large (to keep test time down), but it needs to be large enough
# for the test to finish before the restart re-creates this many blocks.
numBlocksToProduceBeforeRelaunch=80
numBlocksToWaitBeforeChecking=20

Utils.Debug=debug
testSuccessful=False

seed=1
random.seed(seed) # Use a fixed seed for repeatability.
cluster=Cluster(walletd=True)
walletMgr=WalletMgr(True)
cluster.setWalletMgr(walletMgr)

def relaunchNode(node: Node, nodeId, chainArg="", skipGenesis=True, relaunchAssertMessage="Fail to relaunch"):
    isRelaunchSuccess=node.relaunch(nodeId, chainArg=chainArg, timeout=relaunchTimeout, skipGenesis=skipGenesis, cachePopen=True)
    time.sleep(1) # Give a second to replay or resync if needed
    assert isRelaunchSuccess, relaunchAssertMessage
    return isRelaunchSuccess

try:
    TestHelper.printSystemInfo("BEGIN")

    cluster.killall(allInstances=killAll)
    cluster.cleanup()
    walletMgr.killall(allInstances=killAll)
Exemple #37
0
def create_system(options, full_system, system, dma_devices, ruby_system):
    if buildEnv['PROTOCOL'] != 'GPU_RfO':
        panic("This script requires the GPU_RfO protocol to be built.")

    cpu_sequencers = []

    #
    # The ruby network creation expects the list of nodes in the system to be
    # consistent with the NetDest list.  Therefore the l1 controller nodes
    # must be listed before the directory nodes and directory nodes before
    # dma nodes, etc.
    #
    cp_cntrl_nodes = []
    tcp_cntrl_nodes = []
    sqc_cntrl_nodes = []
    tcc_cntrl_nodes = []
    tccdir_cntrl_nodes = []
    dir_cntrl_nodes = []
    l3_cntrl_nodes = []

    #
    # Must create the individual controllers before the network to ensure the
    # controller constructors are called before the network constructor
    #

    TCC_bits = int(math.log(options.num_tccs, 2))

    # This is the base crossbar that connects the L3s, Dirs, and cpu/gpu
    # Clusters
    mainCluster = Cluster(extBW = 512, intBW = 512) # 1 TB/s
    for i in xrange(options.num_dirs):

        dir_cntrl = DirCntrl(TCC_select_num_bits = TCC_bits)
        dir_cntrl.create(options, ruby_system, system)
        dir_cntrl.number_of_TBEs = 2560 * options.num_compute_units
        #Enough TBEs for all TCP TBEs

        # Connect the Directory controller to the ruby network
        dir_cntrl.requestFromCores = MessageBuffer(ordered = True)
        dir_cntrl.requestFromCores.slave = ruby_system.network.master

        dir_cntrl.responseFromCores = MessageBuffer()
        dir_cntrl.responseFromCores.slave = ruby_system.network.master

        dir_cntrl.unblockFromCores = MessageBuffer()
        dir_cntrl.unblockFromCores.slave = ruby_system.network.master

        dir_cntrl.probeToCore = MessageBuffer()
        dir_cntrl.probeToCore.master = ruby_system.network.slave

        dir_cntrl.responseToCore = MessageBuffer()
        dir_cntrl.responseToCore.master = ruby_system.network.slave

        dir_cntrl.triggerQueue = MessageBuffer(ordered = True)
        dir_cntrl.L3triggerQueue = MessageBuffer(ordered = True)
        dir_cntrl.responseFromMemory = MessageBuffer()

        exec("system.dir_cntrl%d = dir_cntrl" % i)
        dir_cntrl_nodes.append(dir_cntrl)

        mainCluster.add(dir_cntrl)

    # For an odd number of CPUs, still create the right number of controllers
    cpuCluster = Cluster(extBW = 512, intBW = 512)  # 1 TB/s
    for i in xrange((options.num_cpus + 1) / 2):

        cp_cntrl = CPCntrl()
        cp_cntrl.create(options, ruby_system, system)

        exec("system.cp_cntrl%d = cp_cntrl" % i)
        #
        # Add controllers and sequencers to the appropriate lists
        #
        cpu_sequencers.extend([cp_cntrl.sequencer, cp_cntrl.sequencer1])

        # Connect the CP controllers and the network
        cp_cntrl.requestFromCore = MessageBuffer()
        cp_cntrl.requestFromCore.master = ruby_system.network.slave

        cp_cntrl.responseFromCore = MessageBuffer()
        cp_cntrl.responseFromCore.master = ruby_system.network.slave

        cp_cntrl.unblockFromCore = MessageBuffer()
        cp_cntrl.unblockFromCore.master = ruby_system.network.slave

        cp_cntrl.probeToCore = MessageBuffer()
        cp_cntrl.probeToCore.slave = ruby_system.network.master

        cp_cntrl.responseToCore = MessageBuffer()
        cp_cntrl.responseToCore.slave = ruby_system.network.master

        cp_cntrl.mandatoryQueue = MessageBuffer()
        cp_cntrl.triggerQueue = MessageBuffer(ordered = True)

        cpuCluster.add(cp_cntrl)

    gpuCluster = Cluster(extBW = 512, intBW = 512)  # 1 TB/s

    for i in xrange(options.num_compute_units):

        tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
                             number_of_TBEs = 2560) # max outstanding requests
        tcp_cntrl.create(options, ruby_system, system)

        exec("system.tcp_cntrl%d = tcp_cntrl" % i)
        #
        # Add controllers and sequencers to the appropriate lists
        #
        cpu_sequencers.append(tcp_cntrl.coalescer)
        tcp_cntrl_nodes.append(tcp_cntrl)

        # Connect the TCP controller to the ruby network
        tcp_cntrl.requestFromTCP = MessageBuffer(ordered = True)
        tcp_cntrl.requestFromTCP.master = ruby_system.network.slave

        tcp_cntrl.responseFromTCP = MessageBuffer(ordered = True)
        tcp_cntrl.responseFromTCP.master = ruby_system.network.slave

        tcp_cntrl.unblockFromCore = MessageBuffer(ordered = True)
        tcp_cntrl.unblockFromCore.master = ruby_system.network.slave

        tcp_cntrl.probeToTCP = MessageBuffer(ordered = True)
        tcp_cntrl.probeToTCP.slave = ruby_system.network.master

        tcp_cntrl.responseToTCP = MessageBuffer(ordered = True)
        tcp_cntrl.responseToTCP.slave = ruby_system.network.master

        tcp_cntrl.mandatoryQueue = MessageBuffer()

        gpuCluster.add(tcp_cntrl)

    for i in xrange(options.num_sqc):

        sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
        sqc_cntrl.create(options, ruby_system, system)

        exec("system.sqc_cntrl%d = sqc_cntrl" % i)
        #
        # Add controllers and sequencers to the appropriate lists
        #
        cpu_sequencers.append(sqc_cntrl.sequencer)

        # Connect the SQC controller to the ruby network
        sqc_cntrl.requestFromSQC = MessageBuffer(ordered = True)
        sqc_cntrl.requestFromSQC.master = ruby_system.network.slave

        sqc_cntrl.responseFromSQC = MessageBuffer(ordered = True)
        sqc_cntrl.responseFromSQC.master = ruby_system.network.slave

        sqc_cntrl.unblockFromCore = MessageBuffer(ordered = True)
        sqc_cntrl.unblockFromCore.master = ruby_system.network.slave

        sqc_cntrl.probeToSQC = MessageBuffer(ordered = True)
        sqc_cntrl.probeToSQC.slave = ruby_system.network.master

        sqc_cntrl.responseToSQC = MessageBuffer(ordered = True)
        sqc_cntrl.responseToSQC.slave = ruby_system.network.master

        sqc_cntrl.mandatoryQueue = MessageBuffer()

        # SQC also in GPU cluster
        gpuCluster.add(sqc_cntrl)

    for i in xrange(options.num_cp):

        tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
                             number_of_TBEs = 2560) # max outstanding requests
        tcp_cntrl.createCP(options, ruby_system, system)

        exec("system.tcp_cntrl%d = tcp_cntrl" % (options.num_compute_units + i))
        #
        # Add controllers and sequencers to the appropriate lists
        #
        cpu_sequencers.append(tcp_cntrl.sequencer)
        tcp_cntrl_nodes.append(tcp_cntrl)

        # Connect the TCP controller to the ruby network
        tcp_cntrl.requestFromTCP = MessageBuffer(ordered = True)
        tcp_cntrl.requestFromTCP.master = ruby_system.network.slave

        tcp_cntrl.responseFromTCP = MessageBuffer(ordered = True)
        tcp_cntrl.responseFromTCP.master = ruby_system.network.slave

        tcp_cntrl.unblockFromCore = MessageBuffer(ordered = True)
        tcp_cntrl.unblockFromCore.master = ruby_system.network.slave

        tcp_cntrl.probeToTCP = MessageBuffer(ordered = True)
        tcp_cntrl.probeToTCP.slave = ruby_system.network.master

        tcp_cntrl.responseToTCP = MessageBuffer(ordered = True)
        tcp_cntrl.responseToTCP.slave = ruby_system.network.master

        tcp_cntrl.mandatoryQueue = MessageBuffer()

        gpuCluster.add(tcp_cntrl)

        sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
        sqc_cntrl.createCP(options, ruby_system, system)

        exec("system.sqc_cntrl%d = sqc_cntrl" % (options.num_compute_units + i))
        #
        # Add controllers and sequencers to the appropriate lists
        #
        cpu_sequencers.append(sqc_cntrl.sequencer)

        # Connect the SQC controller to the ruby network
        sqc_cntrl.requestFromSQC = MessageBuffer(ordered = True)
        sqc_cntrl.requestFromSQC.master = ruby_system.network.slave

        sqc_cntrl.responseFromSQC = MessageBuffer(ordered = True)
        sqc_cntrl.responseFromSQC.master = ruby_system.network.slave

        sqc_cntrl.unblockFromCore = MessageBuffer(ordered = True)
        sqc_cntrl.unblockFromCore.master = ruby_system.network.slave

        sqc_cntrl.probeToSQC = MessageBuffer(ordered = True)
        sqc_cntrl.probeToSQC.slave = ruby_system.network.master

        sqc_cntrl.responseToSQC = MessageBuffer(ordered = True)
        sqc_cntrl.responseToSQC.slave = ruby_system.network.master

        sqc_cntrl.mandatoryQueue = MessageBuffer()

        # SQC also in GPU cluster
        gpuCluster.add(sqc_cntrl)

    for i in xrange(options.num_tccs):

        tcc_cntrl = TCCCntrl(TCC_select_num_bits = TCC_bits,
                             number_of_TBEs = options.num_compute_units * 2560)
        #Enough TBEs for all TCP TBEs
        tcc_cntrl.create(options, ruby_system, system)
        tcc_cntrl_nodes.append(tcc_cntrl)

        tccdir_cntrl = TCCDirCntrl(TCC_select_num_bits = TCC_bits,
                              number_of_TBEs = options.num_compute_units * 2560)
        #Enough TBEs for all TCP TBEs
        tccdir_cntrl.create(options, ruby_system, system)
        tccdir_cntrl_nodes.append(tccdir_cntrl)

        exec("system.tcc_cntrl%d = tcc_cntrl" % i)
        exec("system.tccdir_cntrl%d = tccdir_cntrl" % i)

        # connect all of the wire buffers between L3 and dirs up
        req_to_tccdir = RubyWireBuffer()
        resp_to_tccdir = RubyWireBuffer()
        tcc_unblock_to_tccdir = RubyWireBuffer()
        req_to_tcc = RubyWireBuffer()
        probe_to_tcc = RubyWireBuffer()
        resp_to_tcc = RubyWireBuffer()

        tcc_cntrl.connectWireBuffers(req_to_tccdir, resp_to_tccdir,
                                     tcc_unblock_to_tccdir, req_to_tcc,
                                     probe_to_tcc, resp_to_tcc)
        tccdir_cntrl.connectWireBuffers(req_to_tccdir, resp_to_tccdir,
                                        tcc_unblock_to_tccdir, req_to_tcc,
                                        probe_to_tcc, resp_to_tcc)

        # Connect the TCC controller to the ruby network
        tcc_cntrl.responseFromTCC = MessageBuffer(ordered = True)
        tcc_cntrl.responseFromTCC.master = ruby_system.network.slave

        tcc_cntrl.responseToTCC = MessageBuffer(ordered = True)
        tcc_cntrl.responseToTCC.slave = ruby_system.network.master

        # Connect the TCC Dir controller to the ruby network
        tccdir_cntrl.requestFromTCP = MessageBuffer(ordered = True)
        tccdir_cntrl.requestFromTCP.slave = ruby_system.network.master

        tccdir_cntrl.responseFromTCP = MessageBuffer(ordered = True)
        tccdir_cntrl.responseFromTCP.slave = ruby_system.network.master

        tccdir_cntrl.unblockFromTCP = MessageBuffer(ordered = True)
        tccdir_cntrl.unblockFromTCP.slave = ruby_system.network.master

        tccdir_cntrl.probeToCore = MessageBuffer(ordered = True)
        tccdir_cntrl.probeToCore.master = ruby_system.network.slave

        tccdir_cntrl.responseToCore = MessageBuffer(ordered = True)
        tccdir_cntrl.responseToCore.master = ruby_system.network.slave

        tccdir_cntrl.probeFromNB = MessageBuffer()
        tccdir_cntrl.probeFromNB.slave = ruby_system.network.master

        tccdir_cntrl.responseFromNB = MessageBuffer()
        tccdir_cntrl.responseFromNB.slave = ruby_system.network.master

        tccdir_cntrl.requestToNB = MessageBuffer()
        tccdir_cntrl.requestToNB.master = ruby_system.network.slave

        tccdir_cntrl.responseToNB = MessageBuffer()
        tccdir_cntrl.responseToNB.master = ruby_system.network.slave

        tccdir_cntrl.unblockToNB = MessageBuffer()
        tccdir_cntrl.unblockToNB.master = ruby_system.network.slave

        tccdir_cntrl.triggerQueue = MessageBuffer(ordered = True)

        # TCC cntrls added to the GPU cluster
        gpuCluster.add(tcc_cntrl)
        gpuCluster.add(tccdir_cntrl)

    # Assuming no DMA devices
    assert(len(dma_devices) == 0)

    # Add cpu/gpu clusters to main cluster
    mainCluster.add(cpuCluster)
    mainCluster.add(gpuCluster)

    ruby_system.network.number_of_virtual_networks = 10

    return (cpu_sequencers, dir_cntrl_nodes, mainCluster)
Exemple #38
0
# The following test case will test the Protocol Feature JSON reader of the blockchain


def restartNode(node: Node, nodeId, chainArg=None, addSwapFlags=None):
    if not node.killed:
        node.kill(signal.SIGTERM)
    isRelaunchSuccess = node.relaunch(nodeId,
                                      chainArg,
                                      addSwapFlags=addSwapFlags,
                                      timeout=5,
                                      cachePopen=True)
    assert isRelaunchSuccess, "Fail to relaunch"


walletMgr = WalletMgr(True)
cluster = Cluster(walletd=True)
cluster.setWalletMgr(walletMgr)

testSuccessful = False
try:
    TestHelper.printSystemInfo("BEGIN")
    cluster.killall(allInstances=killAll)
    cluster.cleanup()
    cluster.launch(
        extraNodeosArgs=
        " --plugin eosio::producer_api_plugin  --http-max-response-time-ms 990000 ",
        dontBootstrap=True,
        pfSetupPolicy=PFSetupPolicy.NONE)
    biosNode = cluster.biosNode

    # Modify the JSON file and then restart the node so it updates the internal state
def create_system(options, full_system, system, dma_devices, ruby_system):

    if not buildEnv['GPGPU_SIM']:
        m5.util.panic("This script requires GPGPU-Sim integration to be built.")

    # Run the protocol script to setup CPU cluster, directory and DMA
    (all_sequencers, dir_cntrls, dma_cntrls, cpu_cluster) = \
                                        VI_hammer.create_system(options,
                                                                full_system,
                                                                system,
                                                                dma_devices,
                                                                ruby_system)

    # If we're going to split the directories/memory controllers
    if options.num_dev_dirs > 0:
        cpu_cntrl_count = len(cpu_cluster)
    else:
        cpu_cntrl_count = len(cpu_cluster) + len(dir_cntrls)

    #
    # Create controller for the copy engine to connect to in CPU cluster
    # Cache is unused by controller
    #
    cache = L1Cache(size = "4096B", assoc = 2)

    cpu_ce_seq = RubySequencer(version = options.num_cpus + options.num_sc,
                               icache = cache,
                               dcache = cache,
                               max_outstanding_requests = 64,
                               ruby_system = ruby_system,
                               connect_to_io = False)

    cpu_ce_cntrl = GPUCopyDMA_Controller(version = 0,
                                         sequencer = cpu_ce_seq,
                                         number_of_TBEs = 256,
                                         ruby_system = ruby_system)

    cpu_cntrl_count += 1

    cpu_ce_cntrl.responseFromDir = ruby_system.network.master
    cpu_ce_cntrl.reqToDirectory = ruby_system.network.slave

    #
    # Build GPU cluster
    #
    gpu_cluster = Cluster(intBW = 32, extBW = 32)
    gpu_cluster.disableConnectToParent()

    l2_bits = int(math.log(options.num_l2caches, 2))
    block_size_bits = int(math.log(options.cacheline_size, 2))
    # This represents the L1 to L2 interconnect latency
    # NOTE! This latency is in Ruby (cache) cycles, not SM cycles
    per_hop_interconnect_latency = 45 # ~15 GPU cycles
    num_dance_hall_hops = int(math.log(options.num_sc, 2))
    if num_dance_hall_hops == 0:
        num_dance_hall_hops = 1
    l1_to_l2_noc_latency = per_hop_interconnect_latency * num_dance_hall_hops

    #
    # Caches for GPU cores
    #
    for i in xrange(options.num_sc):
        #
        # First create the Ruby objects associated with the GPU cores
        #
        cache = L1Cache(size = options.sc_l1_size,
                            assoc = options.sc_l1_assoc,
                            replacement_policy = "LRU",
                            start_index_bit = block_size_bits,
                            dataArrayBanks = 4,
                            tagArrayBanks = 4,
                            dataAccessLatency = 4,
                            tagAccessLatency = 4,
                            resourceStalls = False)

        l1_cntrl = GPUL1Cache_Controller(version = i,
                                  cache = cache,
                                  l2_select_num_bits = l2_bits,
                                  num_l2 = options.num_l2caches,
                                  issue_latency = l1_to_l2_noc_latency,
                                  number_of_TBEs = options.gpu_l1_buf_depth,
                                  ruby_system = ruby_system)

        gpu_seq = RubySequencer(version = options.num_cpus + i,
                            icache = cache,
                            dcache = cache,
                            max_outstanding_requests = options.gpu_l1_buf_depth,
                            ruby_system = ruby_system,
                            deadlock_threshold = 2000000,
                            connect_to_io = False)

        l1_cntrl.sequencer = gpu_seq

        exec("ruby_system.l1_cntrl_sp%02d = l1_cntrl" % i)

        #
        # Add controllers and sequencers to the appropriate lists
        #
        all_sequencers.append(gpu_seq)
        gpu_cluster.add(l1_cntrl)

        # Connect the controller to the network
        l1_cntrl.requestFromL1Cache = ruby_system.network.slave
        l1_cntrl.responseToL1Cache = ruby_system.network.master

    l2_index_start = block_size_bits + l2_bits
    # Use L2 cache and interconnect latencies to calculate protocol latencies
    # NOTE! These latencies are in Ruby (cache) cycles, not SM cycles
    l2_cache_access_latency = 30 # ~10 GPU cycles
    l2_to_l1_noc_latency = per_hop_interconnect_latency * num_dance_hall_hops
    l2_to_mem_noc_latency = 125 # ~40 GPU cycles

    l2_clusters = []
    for i in xrange(options.num_l2caches):
        #
        # First create the Ruby objects associated with this cpu
        #
        l2_cache = L2Cache(size = options.sc_l2_size,
                           assoc = options.sc_l2_assoc,
                           start_index_bit = l2_index_start,
                           replacement_policy = "LRU",
                           dataArrayBanks = 4,
                           tagArrayBanks = 4,
                           dataAccessLatency = 4,
                           tagAccessLatency = 4,
                           resourceStalls = options.gpu_l2_resource_stalls)

        region_buffer = regionBuffer_Obj(size = "8MB",
                           assoc = 2**16,
                           start_index_bit = l2_index_start,
                           replacement_policy = "LRU",
                           dataArrayBanks = 4,
                           tagArrayBanks = 4,
                           dataAccessLatency = 4,
                           tagAccessLatency = 4,
                           resourceStalls = options.gpu_l2_resource_stalls,
                           regionSize = options.region_size)



        l2_cntrl = GPUL2Cache_Controller(version = i,
                                L2cache = l2_cache,
                                regionBuffer = region_buffer,
                                l2_response_latency = l2_cache_access_latency +
                                                      l2_to_l1_noc_latency,
                                l2_request_latency = l2_to_mem_noc_latency,
                                cache_response_latency = l2_cache_access_latency,
                                ruby_system = ruby_system)

        exec("ruby_system.l2_cntrl%d = l2_cntrl" % i)
        l2_cluster = Cluster(intBW = 32, extBW = 32)
        l2_cluster.add(l2_cntrl)
        gpu_cluster.add(l2_cluster)
        l2_clusters.append(l2_cluster)

        # Connect the controller to the network
        l2_cntrl.responseToL1Cache = ruby_system.network.slave
        l2_cntrl.requestFromCache = ruby_system.network.slave
        l2_cntrl.responseFromCache = ruby_system.network.slave
        l2_cntrl.unblockFromCache = ruby_system.network.slave

        l2_cntrl.requestFromL1Cache = ruby_system.network.master
        l2_cntrl.forwardToCache = ruby_system.network.master
        l2_cntrl.responseToCache = ruby_system.network.master

    gpu_phys_mem_size = system.gpu.gpu_memory_range.size()

    if options.num_dev_dirs > 0:
        mem_module_size = gpu_phys_mem_size / options.num_dev_dirs

        #
        # determine size and index bits for probe filter
        # By default, the probe filter size is configured to be twice the
        # size of the L2 cache.
        #
        pf_size = MemorySize(options.sc_l2_size)
        pf_size.value = pf_size.value * 2
        dir_bits = int(math.log(options.num_dev_dirs, 2))
        pf_bits = int(math.log(pf_size.value, 2))
        if options.numa_high_bit:
            if options.pf_on or options.dir_on:
                # if numa high bit explicitly set, make sure it does not overlap
                # with the probe filter index
                assert(options.numa_high_bit - dir_bits > pf_bits)

            # set the probe filter start bit to just above the block offset
            pf_start_bit = block_size_bits
        else:
            if dir_bits > 0:
                pf_start_bit = dir_bits + block_size_bits - 1
            else:
                pf_start_bit = block_size_bits
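        # Worked example (all values hypothetical): with sc_l2_size = "1MB" the
        # probe filter is sized to 2MB, so pf_bits = log2(2 * 2**20) = 21; with
        # num_dev_dirs = 4 and a 128B cache line, dir_bits = 2,
        # block_size_bits = 7, and (numa_high_bit unset)
        # pf_start_bit = 2 + 7 - 1 = 8.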

        dev_dir_cntrls = []
        dev_mem_ctrls = []
        num_cpu_dirs = len(dir_cntrls)
        for i in xrange(options.num_dev_dirs):
            #
            # Create the Ruby objects associated with the directory controller
            #

            dir_version = i + num_cpu_dirs

            dir_size = MemorySize('0B')
            dir_size.value = mem_module_size

            pf = ProbeFilter(size = pf_size, assoc = 4,
                             start_index_bit = pf_start_bit)

            dev_dir_cntrl = Directory_Controller(version = dir_version,
                                 directory = \
                                 RubyDirectoryMemory( \
                                            version = dir_version,
                                            size = dir_size,
                                            numa_high_bit = \
                                            options.numa_high_bit,
                                            device_directory = True),
                                 probeFilter = pf,
                                 probe_filter_enabled = options.pf_on,
                                 full_bit_dir_enabled = options.dir_on,
                                 ruby_system = ruby_system)

            if options.recycle_latency:
                dev_dir_cntrl.recycle_latency = options.recycle_latency

            exec("ruby_system.dev_dir_cntrl%d = dev_dir_cntrl" % i)
            dev_dir_cntrls.append(dev_dir_cntrl)

            # Connect the directory controller to the network
            dev_dir_cntrl.forwardFromDir = ruby_system.network.slave
            dev_dir_cntrl.responseFromDir = ruby_system.network.slave
            dev_dir_cntrl.dmaResponseFromDir = ruby_system.network.slave

            dev_dir_cntrl.unblockToDir = ruby_system.network.master
            dev_dir_cntrl.responseToDir = ruby_system.network.master
            dev_dir_cntrl.requestToDir = ruby_system.network.master
            dev_dir_cntrl.dmaRequestToDir = ruby_system.network.master

            dev_mem_ctrl = MemConfig.create_mem_ctrl(
                MemConfig.get(options.mem_type), system.gpu.gpu_memory_range,
                i, options.num_dev_dirs, int(math.log(options.num_dev_dirs, 2)),
                options.cacheline_size)
            dev_mem_ctrl.port = dev_dir_cntrl.memory
            dev_mem_ctrls.append(dev_mem_ctrl)

        system.dev_mem_ctrls = dev_mem_ctrls
    else:
        # Since there are no device directories, use CPU directories
        # Fix up the memory sizes of the CPU directories
        num_dirs = len(dir_cntrls)
        add_gpu_mem = gpu_phys_mem_size / num_dirs
        for cntrl in dir_cntrls:
            new_size = cntrl.directory.size.value + add_gpu_mem
            cntrl.directory.size.value = new_size

    #
    # Create controller for the copy engine to connect to in GPU cluster
    # Cache is unused by controller
    #
    cache = L1Cache(size = "4096B", assoc = 2)

    gpu_ce_seq = RubySequencer(version = options.num_cpus + options.num_sc + 1,
                               icache = cache,
                               dcache = cache,
                               max_outstanding_requests = 64,
                               support_inst_reqs = False,
                               ruby_system = ruby_system,
                               connect_to_io = False)

    gpu_ce_cntrl = GPUCopyDMA_Controller(version = 1,
                                  sequencer = gpu_ce_seq,
                                  number_of_TBEs = 256,
                                  ruby_system = ruby_system)

    ruby_system.l1_cntrl_ce = gpu_ce_cntrl

    all_sequencers.append(cpu_ce_seq)
    all_sequencers.append(gpu_ce_seq)

    gpu_ce_cntrl.responseFromDir = ruby_system.network.master
    gpu_ce_cntrl.reqToDirectory = ruby_system.network.slave

    complete_cluster = Cluster(intBW = 32, extBW = 32)
    complete_cluster.add(cpu_ce_cntrl)
    complete_cluster.add(gpu_ce_cntrl)
    complete_cluster.add(cpu_cluster)
    complete_cluster.add(gpu_cluster)

    for cntrl in dir_cntrls:
        complete_cluster.add(cntrl)

    for cntrl in dev_dir_cntrls:
        complete_cluster.add(cntrl)

    for cntrl in dma_cntrls:
        complete_cluster.add(cntrl)

    for cluster in l2_clusters:
        complete_cluster.add(cluster)

    return (all_sequencers, dir_cntrls, complete_cluster)
Exemple #40
def create_system(options, full_system, system, dma_devices, ruby_system):
    if buildEnv['PROTOCOL'] != 'MOESI_AMD_Base':
        panic("This script requires the MOESI_AMD_Base protocol.")

    cpu_sequencers = []

    #
    # The ruby network creation expects the list of nodes in the system to
    # be consistent with the NetDest list.  Therefore the l1 controller
    # nodes must be listed before the directory nodes and directory nodes
    # before dma nodes, etc.
    #
    l1_cntrl_nodes = []
    l3_cntrl_nodes = []
    dir_cntrl_nodes = []

    control_count = 0

    #
    # Must create the individual controllers before the network to ensure
    # the controller constructors are called before the network constructor
    #

    # This is the base crossbar that connects the L3s, Dirs, and cpu
    # Cluster
    mainCluster = Cluster(extBW = 512, intBW = 512) # 1 TB/s
    for i in xrange(options.num_dirs):

        dir_cntrl = DirCntrl(TCC_select_num_bits = 0)
        dir_cntrl.create(options, ruby_system, system)

        # Connect the Directory controller to the ruby network
        dir_cntrl.requestFromCores = MessageBuffer(ordered = True)
        dir_cntrl.requestFromCores.slave = ruby_system.network.master

        dir_cntrl.responseFromCores = MessageBuffer()
        dir_cntrl.responseFromCores.slave = ruby_system.network.master

        dir_cntrl.unblockFromCores = MessageBuffer()
        dir_cntrl.unblockFromCores.slave = ruby_system.network.master

        dir_cntrl.probeToCore = MessageBuffer()
        dir_cntrl.probeToCore.master = ruby_system.network.slave

        dir_cntrl.responseToCore = MessageBuffer()
        dir_cntrl.responseToCore.master = ruby_system.network.slave

        dir_cntrl.triggerQueue = MessageBuffer(ordered = True)
        dir_cntrl.L3triggerQueue = MessageBuffer(ordered = True)
        dir_cntrl.responseFromMemory = MessageBuffer()

        exec("system.dir_cntrl%d = dir_cntrl" % i)
        dir_cntrl_nodes.append(dir_cntrl)

        mainCluster.add(dir_cntrl)

    # Technically this config can support an odd number of cpus, but the top
    # level config files, such as the ruby_random_tester, will get confused if
    # the number of cpus does not equal the number of sequencers.  Thus make
    # sure that an even number of cpus is specified.
    assert((options.num_cpus % 2) == 0)

    # For an odd number of CPUs, still create the right number of controllers
    cpuCluster = Cluster(extBW = 512, intBW = 512)  # 1 TB/s
    for i in xrange((options.num_cpus + 1) / 2):

        cp_cntrl = CPCntrl()
        cp_cntrl.create(options, ruby_system, system)

        exec("system.cp_cntrl%d = cp_cntrl" % i)
        #
        # Add controllers and sequencers to the appropriate lists
        #
        cpu_sequencers.extend([cp_cntrl.sequencer, cp_cntrl.sequencer1])

        # Connect the CP controllers and the network
        cp_cntrl.requestFromCore = MessageBuffer()
        cp_cntrl.requestFromCore.master = ruby_system.network.slave

        cp_cntrl.responseFromCore = MessageBuffer()
        cp_cntrl.responseFromCore.master = ruby_system.network.slave

        cp_cntrl.unblockFromCore = MessageBuffer()
        cp_cntrl.unblockFromCore.master = ruby_system.network.slave

        cp_cntrl.probeToCore = MessageBuffer()
        cp_cntrl.probeToCore.slave = ruby_system.network.master

        cp_cntrl.responseToCore = MessageBuffer()
        cp_cntrl.responseToCore.slave = ruby_system.network.master

        cp_cntrl.mandatoryQueue = MessageBuffer()
        cp_cntrl.triggerQueue = MessageBuffer(ordered = True)

        cpuCluster.add(cp_cntrl)

    # Assuming no DMA devices
    assert(len(dma_devices) == 0)

    # Add cpu/gpu clusters to main cluster
    mainCluster.add(cpuCluster)

    ruby_system.network.number_of_virtual_networks = 10

    return (cpu_sequencers, dir_cntrl_nodes, mainCluster)
        Utils.cmdError("only saw %d of the expected 21 producers. At block number %s only the following producers were seen: %s" % (len(prodsSeenKeys), blockNum, ",".join(prodsSeenKeys)))
        Utils.errorExit("Failed because of missing block producers")

    Utils.Debug=temp


Print=Utils.Print
errorExit=Utils.errorExit

from core_symbol import CORE_SYMBOL

args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run",
                              "--p2p-plugin","--wallet-port"})
Utils.Debug=args.v
totalNodes=4
cluster=Cluster(walletd=True)
dumpErrorDetails=args.dump_error_details
keepLogs=args.keep_logs
dontKill=args.leave_running
prodCount=args.prod_count
killAll=args.clean_run
p2pPlugin=args.p2p_plugin
walletPort=args.wallet_port

walletMgr=WalletMgr(True, port=walletPort)
testSuccessful=False
killEosInstances=not dontKill
killWallet=not dontKill

WalletdName=Utils.EosWalletName
ClientName="cleon"
        ResourceManager._cluster_dict = {}
        try:
            exist_cluster = ResourceManager._db.sync_from_db()
            for cluster in exist_cluster:
                ResourceManager.create_cluster(
                    cluster["cluster_name"],
                    cluster["protected_layers_string"])
                if cluster["node_list"] != []:
                    ResourceManager.add_node(cluster["cluster_name"],
                                             cluster["node_list"])
                for instance in cluster["instance_list"]:
                    ResourceManager.add_instance(cluster["cluster_name"],
                                                 instance)
            logging.info("ResourceManager--sync from DB finished")
        except Exception as e:
            print(str(e))
            logging.error("ResourceManager--sync from DB failed")

    @staticmethod
    def sync_to_database():
        cluster_list = ResourceManager._cluster_dict
        ResourceManager._db.sync_to_db(cluster_list)


if __name__ == "__main__":
    cluster_name = 'test'
    ResourceManager.init()
    b = Cluster(cluster_name)
    ResourceManager._cluster_dict[cluster_name] = b
    print(ResourceManager._cluster_dict)
Exemple #43
def doMonitor(interval):
  cluster = Cluster()
  while True:
    click.echo(cluster.getReport())
    click.echo("\n\n")
    sleep(interval)
Exemple #44
"""
Solves the N-body problem using RungeKutta or Velocity-Verlet
"""
from Cluster import Cluster
if __name__ == "__main__":

    test = Cluster(N=4, n_steps=5)
    test.initialize(radius=100)
    test.animate()

Exemple #45
def kafka(restart, start, stop, force_stop, clean, brokers, wait_before_start, wait_before_kill):
  cluster = Cluster()
  
  if len(brokers) > 0 :
    brokerList = brokers.split(",")
    cluster = cluster.getServersByHostname(brokerList)
  
  if(restart or stop):
    logging.debug("Shutting down Kafka")
    cluster.shutdownKafka()
  
  if(force_stop):
    logging.debug("Waiting for "+str(wait_before_kill)+" seconds")
    sleep(wait_before_kill)
    logging.debug("Force Killing Kafka")
    cluster.killKafka()
  
  if(clean):
    logging.debug("Cleaning Kafka")
    cluster.cleanKafka()
  
  if(restart or start):
    logging.debug("Waiting for "+str(wait_before_start)+" seconds")
    sleep(wait_before_start)
    logging.debug("Starting Kafka")
    cluster.startKafka()
  click.echo(cluster.getReport())  
defproduceraPrvtKey = args.defproducera_prvt_key
defproducerbPrvtKey = args.defproducerb_prvt_key
dumpErrorDetails = args.dump_error_details
keepLogs = args.keep_logs
dontLaunch = args.dont_launch
dontKill = args.leave_running
prodCount = args.prod_count
onlyBios = args.only_bios
killAll = args.clean_run
sanityTest = args.sanity_test
p2pPlugin = args.p2p_plugin

Utils.Debug = debug
localTest = (server == TestHelper.LOCAL_HOST)
cluster = Cluster(walletd=True,
                  enableMongo=enableMongo,
                  defproduceraPrvtKey=defproduceraPrvtKey,
                  defproducerbPrvtKey=defproducerbPrvtKey)
walletMgr = WalletMgr(True)
testSuccessful = False
killSeatInstances = not dontKill
killWallet = not dontKill
dontBootstrap = sanityTest

WalletdName = "keyseatd"
ClientName = "cliseat"
timeout = .5 * 12 * 2 + 60  # time for finalization with 1 producer (.5 * 12 * 2 = 12 s) + 60 seconds padding = 72 s total
Utils.setIrreversibleTimeout(timeout)

try:
    TestHelper.printSystemInfo("BEGIN")
    Print("SERVER: %s" % (server))
Exemple #47
  file.close()

def load_data(filename):
  """
  Returns the object stored in the file
  @param filename: the name of the file
  @return the object stored in the file
  """
  file = open(filename, "r")
  data = pickle.load(file)
  file.close()
  return data


"""
Example use of Advise.py

scrapper.query(QUERY='university', OBJECT_TYPE='page')
university_pages = scrapper.get_pages(QUERY='university', OBJECT_TYPE='page')
scrapper.dump("query_data")
store_data(university_pages, "university_pages")
clusters = Cluster.kmeans(data=university_pages, k=20)
"""
if __name__ == "__main__":
  scrapper = Scrapper()
  scrapper.load("query_data")
  university_pages = load_data("university_pages")
  clusters = Cluster.kmeans(data=university_pages)
  visualizer = Visualizer(data=university_pages, clusters=clusters)

Exemple #48
                        help="How many catchup-nodes to launch",
                        default=10)
extraArgs = appArgs.add(flag="--txn-gen-nodes",
                        type=int,
                        help="How many transaction generator nodes",
                        default=2)
args = TestHelper.parse_args(
    {
        "--prod-count", "--dump-error-details", "--keep-logs", "-v",
        "--leave-running", "--clean-run", "-p", "--wallet-port"
    },
    applicationSpecificArgs=appArgs)
Utils.Debug = args.v
pnodes = args.p if args.p > 0 else 1
startedNonProdNodes = args.txn_gen_nodes if args.txn_gen_nodes >= 2 else 2
cluster = Cluster(walletd=True)
dumpErrorDetails = args.dump_error_details
keepLogs = args.keep_logs
dontKill = args.leave_running
prodCount = args.prod_count if args.prod_count > 1 else 2
killAll = args.clean_run
walletPort = args.wallet_port
catchupCount = args.catchup_count if args.catchup_count > 0 else 1
totalNodes = startedNonProdNodes + pnodes + catchupCount

walletMgr = WalletMgr(True, port=walletPort)
testSuccessful = False
killscsInstances = not dontKill
killWallet = not dontKill

WalletdName = Utils.scsWalletName
Exemple #49
"""
Solves the N-body problem using RungeKutta or Velocity-Verlet
"""
from Cluster import Cluster
if __name__ == "__main__":
    
    test = Cluster(N=40, n_steps=100)
    test.initialize(radius=20)
    test.animate()
    info1=prodNodes[1].getInfo(exitOnError=True)
    headBlockNum=min(int(info0["head_block_num"]),int(info1["head_block_num"]))
    libNum=min(int(info0["last_irreversible_block_num"]), int(info1["last_irreversible_block_num"]))
    return (headBlockNum, libNum)



args = TestHelper.parse_args({"--prod-count","--dump-error-details","--keep-logs","-v","--leave-running","--clean-run",
                              "--wallet-port"})
Utils.Debug=args.v
totalProducerNodes=2
totalNonProducerNodes=1
totalNodes=totalProducerNodes+totalNonProducerNodes
maxActiveProducers=3
totalProducers=maxActiveProducers
cluster=Cluster(walletd=True)
dumpErrorDetails=args.dump_error_details
keepLogs=args.keep_logs
dontKill=args.leave_running
killAll=args.clean_run
walletPort=args.wallet_port

walletMgr=WalletMgr(True, port=walletPort)
testSuccessful=False
killscsInstances=not dontKill
killWallet=not dontKill

WalletdName=Utils.scsWalletName
ClientName="clscs"

try:
dumpErrorDetails=args.dump_error_details
onlyBios=args.only_bios
killAll=args.clean_run

Utils.Debug=debug

killEosInstances=not dontKill
topo="mesh"
delay=1
prodCount=1 # producers per producer node
pnodes=1
total_nodes=pnodes
actualTest="tests/nodeos_run_test.py"
testSuccessful=False

cluster=Cluster()
try:
    Print("BEGIN")
    cluster.killall(allInstances=killAll)
    cluster.cleanup()

    Print ("producing nodes: %s, non-producing nodes: %d, topology: %s, delay between nodes launch(seconds): %d" %
           (pnodes, total_nodes-pnodes, topo, delay))
    Print("Stand up cluster")
    if cluster.launch(pnodes, total_nodes, prodCount, topo, delay, onlyBios=onlyBios, dontKill=dontKill) is False:
        errorExit("Failed to stand up eos cluster.")

    Print ("Wait for Cluster stabilization")
    # wait for cluster to start producing blocks
    if not cluster.waitOnClusterBlockNumSync(3):
        errorExit("Cluster never stabilized")
testSuccessful = False

clusterMapJsonTemplate = """{
    "keys": {
        "defproduceraPrivateKey": "%s",
        "defproducerbPrivateKey": "%s"
    },
    "nodes": [
        {"port": 8888, "host": "localhost"},
        {"port": 8889, "host": "localhost"},
        {"port": 8890, "host": "localhost"}
    ]
}
"""

cluster = Cluster(walletd=True)

(fd, nodesFile) = tempfile.mkstemp()
try:
    TestHelper.printSystemInfo("BEGIN")
    cluster.killall(allInstances=killAll)
    cluster.cleanup()

    Print(
        "producing nodes: %s, non-producing nodes: %d, topology: %s, delay between nodes launch(seconds): %d"
        % (pnodes, total_nodes - pnodes, topo, delay))
    Print("Stand up cluster")
    if cluster.launch(pnodes=pnodes,
                      totalNodes=total_nodes,
                      prodCount=prodCount,
                      topo=topo,
Exemple #53
def create_system(options, full_system, system, dma_ports, ruby_system):

    if 'VI_hammer' not in buildEnv['PROTOCOL']:
        panic("This script requires the VI_hammer protocol to be built.")

    options.access_backing_store = True

    cpu_sequencers = []

    topology = Cluster(intBW = 32, extBW = 32)

    #
    # Must create the individual controllers before the network to ensure the
    # controller constructors are called before the network constructor
    #
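    # l2_bits rounds log2(num_l2caches) up to a whole number of L2 select bits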
    l2_bits_float = math.log(options.num_l2caches, 2)
    l2_bits = int(l2_bits_float)
    if l2_bits_float > l2_bits:
        l2_bits += 1
    block_size_bits = int(math.log(options.cacheline_size, 2))
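    # e.g. block_size_bits == 6 for a 64-byte cache line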

    for i in xrange(options.num_cpus):
        #
        # First create the Ruby objects associated with this cpu
        #
        l1i_cache = L1Cache(size = options.l1i_size,
                            assoc = options.l1i_assoc,
                            start_index_bit = block_size_bits,
                            is_icache = True)
        l1d_cache = L1Cache(size = options.l1d_size,
                            assoc = options.l1d_assoc,
                            start_index_bit = block_size_bits)
        l2_cache = L2Cache(size = "2MB",
                           assoc = options.l2_assoc,
                           start_index_bit = block_size_bits)
	
        # region_buffer = regionBuffer_Obj(size = "4MB",
        #                                  assoc = 32768,
        #                                  start_index_bit = l2_index_start,
        #                                  replacement_policy = "LRU",
        #                                  dataArrayBanks = 4,
        #                                  tagArrayBanks = 4,
        #                                  dataAccessLatency = 4,
        #                                  tagAccessLatency = 4,
        #                                  resourceStalls = options.gpu_l2_resource_stalls,
        #                                  regionSize = options.region_size)


        l1_cntrl = L1Cache_Controller(version = i,
                                      L1Icache = l1i_cache,
                                      L1Dcache = l1d_cache,
                                      L2cache = l2_cache,
                                      no_mig_atomic = not \
                                        options.allow_atomic_migration,
                                      send_evictions = (
                                         options.cpu_type == "detailed"),
                                      ruby_system = ruby_system)

        cpu_seq = RubySequencer(version = i,
                                icache = l1i_cache,
                                dcache = l1d_cache,
                                ruby_system = ruby_system)

        l1_cntrl.sequencer = cpu_seq

        if options.recycle_latency:
            l1_cntrl.recycle_latency = options.recycle_latency

        exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
        #
        # Add controllers and sequencers to the appropriate lists
        #
        cpu_sequencers.append(cpu_seq)
        topology.add(l1_cntrl)

        # Connect the L1 controller and the network
        # Connect the buffers from the controller to network
        l1_cntrl.requestFromCache = ruby_system.network.slave
        l1_cntrl.responseFromCache = ruby_system.network.slave
        l1_cntrl.unblockFromCache = ruby_system.network.slave

        # Connect the buffers from the network to the controller
        l1_cntrl.forwardToCache = ruby_system.network.master
        l1_cntrl.responseToCache = ruby_system.network.master

    cpu_mem_range = AddrRange(options.total_mem_size)
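    # Each directory controller backs an equal slice of the CPU memory range.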
    mem_module_size = cpu_mem_range.size() / options.num_dirs

    #
    # determine size and index bits for probe filter
    # By default, the probe filter size is configured to be twice the
    # size of the L2 cache.
    #
    pf_size = MemorySize(options.l2_size)
    pf_size.value = pf_size.value * 2
    dir_bits = int(math.log(options.num_dirs, 2))
    pf_bits = int(math.log(pf_size.value, 2))
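    # Example (assuming l2_size = 512kB and num_dirs = 4): pf_size doubles to
    # 1MB, so pf_bits = 20 and dir_bits = 2.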
    if options.numa_high_bit:
        if options.pf_on or options.dir_on:
            # if numa high bit explicitly set, make sure it does not overlap
            # with the probe filter index
            assert(options.numa_high_bit - dir_bits > pf_bits)

        # set the probe filter start bit to just above the block offset
        pf_start_bit = block_size_bits
    else:
        if dir_bits > 0:
            pf_start_bit = dir_bits + block_size_bits - 1
        else:
            pf_start_bit = block_size_bits

    dir_cntrl_nodes = []
    for i in xrange(options.num_dirs):
        #
        # Create the Ruby objects associated with the directory controller
        #

        dir_size = MemorySize('0B')
        dir_size.value = mem_module_size

        pf = ProbeFilter(size = pf_size, assoc = 4,
                         start_index_bit = pf_start_bit)

        dir_cntrl = Directory_Controller(version = i,
                                         directory = \
                                         RubyDirectoryMemory( \
                                                    version = i,
                                                    size = dir_size,
                                                    numa_high_bit = \
                                                      options.numa_high_bit),
                                         probeFilter = pf,
                                         probe_filter_enabled = options.pf_on,
                                         full_bit_dir_enabled = options.dir_on,
                                         ruby_system = ruby_system)

        if options.recycle_latency:
            dir_cntrl.recycle_latency = options.recycle_latency

        exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
        dir_cntrl_nodes.append(dir_cntrl)

        # Connect the directory controller to the network
        dir_cntrl.forwardFromDir = ruby_system.network.slave
        dir_cntrl.responseFromDir = ruby_system.network.slave
        dir_cntrl.dmaResponseFromDir = ruby_system.network.slave

        dir_cntrl.unblockToDir = ruby_system.network.master
        dir_cntrl.responseToDir = ruby_system.network.master
        dir_cntrl.requestToDir = ruby_system.network.master
        dir_cntrl.dmaRequestToDir = ruby_system.network.master

    dma_cntrl_nodes = []
    for i, dma_port in enumerate(dma_ports):
        #
        # Create the Ruby objects associated with the dma controller
        #
        dma_seq = DMASequencer(version = i,
                               ruby_system = ruby_system)

        dma_cntrl = DMA_Controller(version = i,
                                   dma_sequencer = dma_seq,
                                   ruby_system = ruby_system)

        exec("ruby_system.dma_cntrl%d = dma_cntrl" % i)
        exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
        dma_cntrl_nodes.append(dma_cntrl)

        if options.recycle_latency:
            dma_cntrl.recycle_latency = options.recycle_latency

        # Connect the dma controller to the network
        dma_cntrl.responseFromDir = ruby_system.network.master
        dma_cntrl.reqToDirectory = ruby_system.network.slave

    # Create the io controller and the sequencer
    if full_system:
        io_seq = DMASequencer(version=len(dma_ports), ruby_system=ruby_system)
        ruby_system._io_port = io_seq
        io_controller = DMA_Controller(version = len(dma_ports),
                                       dma_sequencer = io_seq,
                                       ruby_system = ruby_system)
        ruby_system.io_controller = io_controller

        # Connect the dma controller to the network
        io_controller.responseFromDir = ruby_system.network.master
        io_controller.reqToDirectory = ruby_system.network.slave

        dma_cntrl_nodes.append(io_controller)

    return (cpu_sequencers, dir_cntrl_nodes, dma_cntrl_nodes, topology)
nodesFile = args.nodes_file
seed = args.seed
dontKill = args.leave_running
dumpErrorDetails = args.dump_error_details
killAll = args.clean_run

killWallet = not dontKill
killEnuInstances = not dontKill
if nodesFile is not None:
    killEnuInstances = False

Utils.Debug = debug
testSuccessful = False

random.seed(seed)  # Use a fixed seed for repeatability.
cluster = Cluster(enuwalletd=True)
walletMgr = WalletMgr(True)

try:
    cluster.setWalletMgr(walletMgr)

    if nodesFile is not None:
        jsonStr = None
        with open(nodesFile, "r") as f:
            jsonStr = f.read()
        if not cluster.initializeNodesFromJson(jsonStr):
            errorExit("Failed to initilize nodes from Json string.")
        total_nodes = len(cluster.getNodes())
    else:
        cluster.killall(allInstances=killAll)
        cluster.cleanup()
Exemple #55
        return retStr


###############################################################
# nodeos_voting_test
# --dump-error-details <Upon error print etc/yosemite/node_*/config.ini and var/lib/node_*/stderr.log to stdout>
# --keep-logs <Don't delete var/lib/node_* folders upon test completion>
###############################################################
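# Example invocation (illustrative, using the flags parsed below):
#   nodeos_voting_test.py -v --clean-run --dump-error-details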

args = TestHelper.parse_args({
    "--dump-error-details", "--keep-logs", "-v", "--leave-running",
    "--clean-run", "--wallet-port"
})
Utils.Debug = args.v
totalNodes = 4
cluster = Cluster(walletd=True)
dumpErrorDetails = args.dump_error_details
keepLogs = args.keep_logs
dontKill = args.leave_running
killAll = args.clean_run
walletPort = args.wallet_port

walletMgr = WalletMgr(True, port=walletPort)
testSuccessful = False
killEosInstances = not dontKill
killWallet = not dontKill

WalletdName = Utils.EosWalletName
ClientName = "cleos"

try:
            (len(prodsSeenKeys), prodsSize, blockNum, ",".join(prodsSeenKeys)))
        Utils.errorExit("Failed because of missing block producers")

    Utils.Debug = temp


Print = Utils.Print
errorExit = Utils.errorExit

args = TestHelper.parse_args({
    "--prod-count", "--dump-error-details", "--keep-logs", "-v",
    "--leave-running", "--clean-run", "--wallet-port"
})
Utils.Debug = args.v
totalNodes = 3
cluster = Cluster(walletd=True)
dumpErrorDetails = args.dump_error_details
keepLogs = args.keep_logs
dontKill = args.leave_running
prodCount = args.prod_count
killAll = args.clean_run
walletPort = args.wallet_port

walletMgr = WalletMgr(True, port=walletPort)
testSuccessful = False
killEosInstances = not dontKill
killWallet = not dontKill

WalletdName = Utils.EosWalletName
ClientName = "clio"
Exemple #57
def create_system(options, full_system, system, dma_devices, ruby_system):
    if buildEnv['PROTOCOL'] != 'GPU_VIPER_Region':
        panic("This script requires the GPU_VIPER_Region protocol to be built.")

    cpu_sequencers = []

    #
    # The ruby network creation expects the list of nodes in the system to be
    # consistent with the NetDest list.  Therefore the l1 controller nodes
    # must be listed before the directory nodes and directory nodes before
    # dma nodes, etc.
    #
    dir_cntrl_nodes = []

    # For an odd number of CPUs, still create the right number of controllers
    TCC_bits = int(math.log(options.num_tccs, 2))

    #
    # Must create the individual controllers before the network to ensure the
    # controller constructors are called before the network constructor
    #

    # For an odd number of CPUs, still create the right number of controllers
    crossbar_bw = 16 * options.num_compute_units  # assuming a 2GHz clock
    cpuCluster = Cluster(extBW = (crossbar_bw), intBW=crossbar_bw)
    for i in xrange((options.num_cpus + 1) / 2):

        cp_cntrl = CPCntrl()
        cp_cntrl.create(options, ruby_system, system)

        rb_cntrl = RBCntrl()
        rb_cntrl.create(options, ruby_system, system)
        rb_cntrl.number_of_TBEs = 256
        rb_cntrl.isOnCPU = True

        cp_cntrl.regionBufferNum = rb_cntrl.version

        exec("system.cp_cntrl%d = cp_cntrl" % i)
        exec("system.rb_cntrl%d = rb_cntrl" % i)
        #
        # Add controllers and sequencers to the appropriate lists
        #
        cpu_sequencers.extend([cp_cntrl.sequencer, cp_cntrl.sequencer1])

        # Connect the CP controllers and the network
        cp_cntrl.requestFromCore = MessageBuffer()
        cp_cntrl.requestFromCore.master = ruby_system.network.slave

        cp_cntrl.responseFromCore = MessageBuffer()
        cp_cntrl.responseFromCore.master = ruby_system.network.slave

        cp_cntrl.unblockFromCore = MessageBuffer()
        cp_cntrl.unblockFromCore.master = ruby_system.network.slave

        cp_cntrl.probeToCore = MessageBuffer()
        cp_cntrl.probeToCore.slave = ruby_system.network.master

        cp_cntrl.responseToCore = MessageBuffer()
        cp_cntrl.responseToCore.slave = ruby_system.network.master

        cp_cntrl.mandatoryQueue = MessageBuffer()
        cp_cntrl.triggerQueue = MessageBuffer(ordered = True)

        # Connect the RB controllers to the ruby network
        rb_cntrl.requestFromCore = MessageBuffer(ordered = True)
        rb_cntrl.requestFromCore.slave = ruby_system.network.master

        rb_cntrl.responseFromCore = MessageBuffer()
        rb_cntrl.responseFromCore.slave = ruby_system.network.master

        rb_cntrl.requestToNetwork = MessageBuffer()
        rb_cntrl.requestToNetwork.master = ruby_system.network.slave

        rb_cntrl.notifyFromRegionDir = MessageBuffer()
        rb_cntrl.notifyFromRegionDir.slave = ruby_system.network.master

        rb_cntrl.probeFromRegionDir = MessageBuffer()
        rb_cntrl.probeFromRegionDir.slave = ruby_system.network.master

        rb_cntrl.unblockFromDir = MessageBuffer()
        rb_cntrl.unblockFromDir.slave = ruby_system.network.master

        rb_cntrl.responseToRegDir = MessageBuffer()
        rb_cntrl.responseToRegDir.master = ruby_system.network.slave

        rb_cntrl.triggerQueue = MessageBuffer(ordered = True)

        cpuCluster.add(cp_cntrl)
        cpuCluster.add(rb_cntrl)

    gpuCluster = Cluster(extBW = (crossbar_bw), intBW = crossbar_bw)
    for i in xrange(options.num_compute_units):

        tcp_cntrl = TCPCntrl(TCC_select_num_bits = TCC_bits,
                             issue_latency = 1,
                             number_of_TBEs = 2560)
        # TBEs set to max outstanding requests
        tcp_cntrl.create(options, ruby_system, system)
        tcp_cntrl.WB = options.WB_L1
        tcp_cntrl.disableL1 = False

        exec("system.tcp_cntrl%d = tcp_cntrl" % i)
        #
        # Add controllers and sequencers to the appropriate lists
        #
        cpu_sequencers.append(tcp_cntrl.coalescer)

        # Connect the CP (TCP) controllers to the ruby network
        tcp_cntrl.requestFromTCP = MessageBuffer(ordered = True)
        tcp_cntrl.requestFromTCP.master = ruby_system.network.slave

        tcp_cntrl.responseFromTCP = MessageBuffer(ordered = True)
        tcp_cntrl.responseFromTCP.master = ruby_system.network.slave

        tcp_cntrl.unblockFromCore = MessageBuffer()
        tcp_cntrl.unblockFromCore.master = ruby_system.network.slave

        tcp_cntrl.probeToTCP = MessageBuffer(ordered = True)
        tcp_cntrl.probeToTCP.slave = ruby_system.network.master

        tcp_cntrl.responseToTCP = MessageBuffer(ordered = True)
        tcp_cntrl.responseToTCP.slave = ruby_system.network.master

        tcp_cntrl.mandatoryQueue = MessageBuffer()

        gpuCluster.add(tcp_cntrl)

    for i in xrange(options.num_sqc):

        sqc_cntrl = SQCCntrl(TCC_select_num_bits = TCC_bits)
        sqc_cntrl.create(options, ruby_system, system)

        exec("system.sqc_cntrl%d = sqc_cntrl" % i)
        #
        # Add controllers and sequencers to the appropriate lists
        #
        cpu_sequencers.append(sqc_cntrl.sequencer)

        # Connect the SQC controller to the ruby network
        sqc_cntrl.requestFromSQC = MessageBuffer(ordered = True)
        sqc_cntrl.requestFromSQC.master = ruby_system.network.slave

        sqc_cntrl.probeToSQC = MessageBuffer(ordered = True)
        sqc_cntrl.probeToSQC.slave = ruby_system.network.master

        sqc_cntrl.responseToSQC = MessageBuffer(ordered = True)
        sqc_cntrl.responseToSQC.slave = ruby_system.network.master

        sqc_cntrl.mandatoryQueue = MessageBuffer()

        # SQC also in GPU cluster
        gpuCluster.add(sqc_cntrl)

    numa_bit = 6

    for i in xrange(options.num_tccs):

        tcc_cntrl = TCCCntrl()
        tcc_cntrl.create(options, ruby_system, system)
        tcc_cntrl.l2_request_latency = 1
        tcc_cntrl.l2_response_latency = options.TCC_latency
        tcc_cntrl.WB = options.WB_L2
        tcc_cntrl.number_of_TBEs = 2560 * options.num_compute_units

        # Connect the TCC controllers to the ruby network
        tcc_cntrl.requestFromTCP = MessageBuffer(ordered = True)
        tcc_cntrl.requestFromTCP.slave = ruby_system.network.master

        tcc_cntrl.responseToCore = MessageBuffer(ordered = True)
        tcc_cntrl.responseToCore.master = ruby_system.network.slave

        tcc_cntrl.probeFromNB = MessageBuffer()
        tcc_cntrl.probeFromNB.slave = ruby_system.network.master

        tcc_cntrl.responseFromNB = MessageBuffer()
        tcc_cntrl.responseFromNB.slave = ruby_system.network.master

        tcc_cntrl.requestToNB = MessageBuffer(ordered = True)
        tcc_cntrl.requestToNB.master = ruby_system.network.slave

        tcc_cntrl.responseToNB = MessageBuffer()
        tcc_cntrl.responseToNB.master = ruby_system.network.slave

        tcc_cntrl.unblockToNB = MessageBuffer()
        tcc_cntrl.unblockToNB.master = ruby_system.network.slave

        tcc_cntrl.triggerQueue = MessageBuffer(ordered = True)

        rb_cntrl = RBCntrl()
        rb_cntrl.create(options, ruby_system, system)
        rb_cntrl.number_of_TBEs = 2560 * options.num_compute_units
        rb_cntrl.isOnCPU = False

        # Connect the RB controllers to the ruby network
        rb_cntrl.requestFromCore = MessageBuffer(ordered = True)
        rb_cntrl.requestFromCore.slave = ruby_system.network.master

        rb_cntrl.responseFromCore = MessageBuffer()
        rb_cntrl.responseFromCore.slave = ruby_system.network.master

        rb_cntrl.requestToNetwork = MessageBuffer()
        rb_cntrl.requestToNetwork.master = ruby_system.network.slave

        rb_cntrl.notifyFromRegionDir = MessageBuffer()
        rb_cntrl.notifyFromRegionDir.slave = ruby_system.network.master

        rb_cntrl.probeFromRegionDir = MessageBuffer()
        rb_cntrl.probeFromRegionDir.slave = ruby_system.network.master

        rb_cntrl.unblockFromDir = MessageBuffer()
        rb_cntrl.unblockFromDir.slave = ruby_system.network.master

        rb_cntrl.responseToRegDir = MessageBuffer()
        rb_cntrl.responseToRegDir.master = ruby_system.network.slave

        rb_cntrl.triggerQueue = MessageBuffer(ordered = True)

        tcc_cntrl.regionBufferNum = rb_cntrl.version

        exec("system.tcc_cntrl%d = tcc_cntrl" % i)
        exec("system.tcc_rb_cntrl%d = rb_cntrl" % i)

        # TCC cntrls added to the GPU cluster
        gpuCluster.add(tcc_cntrl)
        gpuCluster.add(rb_cntrl)

    # Because of wire buffers, num_l3caches must equal num_dirs
    # Region coherence only works with 1 dir
    assert(options.num_l3caches == options.num_dirs == 1)

    # This is the base crossbar that connects the L3s, Dirs, and cpu/gpu
    # Clusters
    mainCluster = Cluster(intBW = crossbar_bw)

    dir_cntrl = DirCntrl()
    dir_cntrl.create(options, ruby_system, system)
    dir_cntrl.number_of_TBEs = 2560 * options.num_compute_units
    dir_cntrl.useL3OnWT = options.use_L3_on_WT

    # Connect the Directory controller to the ruby network
    dir_cntrl.requestFromCores = MessageBuffer()
    dir_cntrl.requestFromCores.slave = ruby_system.network.master

    dir_cntrl.responseFromCores = MessageBuffer()
    dir_cntrl.responseFromCores.slave = ruby_system.network.master

    dir_cntrl.unblockFromCores = MessageBuffer()
    dir_cntrl.unblockFromCores.slave = ruby_system.network.master

    dir_cntrl.probeToCore = MessageBuffer()
    dir_cntrl.probeToCore.master = ruby_system.network.slave

    dir_cntrl.responseToCore = MessageBuffer()
    dir_cntrl.responseToCore.master = ruby_system.network.slave

    dir_cntrl.reqFromRegBuf = MessageBuffer()
    dir_cntrl.reqFromRegBuf.slave = ruby_system.network.master

    dir_cntrl.reqToRegDir = MessageBuffer(ordered = True)
    dir_cntrl.reqToRegDir.master = ruby_system.network.slave

    dir_cntrl.reqFromRegDir = MessageBuffer(ordered = True)
    dir_cntrl.reqFromRegDir.slave = ruby_system.network.master

    dir_cntrl.unblockToRegDir = MessageBuffer()
    dir_cntrl.unblockToRegDir.master = ruby_system.network.slave

    dir_cntrl.triggerQueue = MessageBuffer(ordered = True)
    dir_cntrl.L3triggerQueue = MessageBuffer(ordered = True)
    dir_cntrl.responseFromMemory = MessageBuffer()

    exec("system.dir_cntrl%d = dir_cntrl" % i)
    dir_cntrl_nodes.append(dir_cntrl)

    mainCluster.add(dir_cntrl)

    reg_cntrl = RegionCntrl(noTCCdir=True,TCC_select_num_bits = TCC_bits)
    reg_cntrl.create(options, ruby_system, system)
    reg_cntrl.number_of_TBEs = options.num_tbes
    reg_cntrl.cpuRegionBufferNum = system.rb_cntrl0.version
    reg_cntrl.gpuRegionBufferNum = system.tcc_rb_cntrl0.version

    # Connect the Region Dir controllers to the ruby network
    reg_cntrl.requestToDir = MessageBuffer(ordered = True)
    reg_cntrl.requestToDir.master = ruby_system.network.slave

    reg_cntrl.notifyToRBuffer = MessageBuffer()
    reg_cntrl.notifyToRBuffer.master = ruby_system.network.slave

    reg_cntrl.probeToRBuffer = MessageBuffer()
    reg_cntrl.probeToRBuffer.master = ruby_system.network.slave

    reg_cntrl.responseFromRBuffer = MessageBuffer()
    reg_cntrl.responseFromRBuffer.slave = ruby_system.network.master

    reg_cntrl.requestFromRegBuf = MessageBuffer()
    reg_cntrl.requestFromRegBuf.slave = ruby_system.network.master

    reg_cntrl.triggerQueue = MessageBuffer(ordered = True)

    exec("system.reg_cntrl%d = reg_cntrl" % i)

    mainCluster.add(reg_cntrl)

    # Assuming no DMA devices
    assert(len(dma_devices) == 0)

    # Add cpu/gpu clusters to main cluster
    mainCluster.add(cpuCluster)
    mainCluster.add(gpuCluster)

    ruby_system.network.number_of_virtual_networks = 10

    return (cpu_sequencers, dir_cntrl_nodes, mainCluster)
        output["stdout"] = outs.decode("utf-8")
        output["stderr"] = errs.decode("utf-8")
        output["returncode"] = proc.returncode
    except (subprocess.TimeoutExpired) as _:
        Print(
            "ERROR: Nodpico is running beyond the defined wait time. Hard killing nodpico instance."
        )
        proc.send_signal(signal.SIGKILL)
        return (False, None)

    if debug: Print("Returning success.")
    return (True, output)


random.seed(seed)  # Use a fixed seed for repeatability.
cluster = Cluster(walletd=True)

try:
    TestHelper.printSystemInfo("BEGIN")

    cluster.setChainStrategy(chainSyncStrategyStr)

    cluster.killall(allInstances=killAll)
    cluster.cleanup()

    Print(
        "producing nodes: %d, topology: %s, delay between nodes launch(seconds): %d, chain sync strategy: %s"
        % (pnodes, topo, delay, chainSyncStrategyStr))

    Print("Stand up cluster")
    if cluster.launch(pnodes=pnodes,
Exemple #59
        if debug: Print("Setting nodeos process timeout.")
        outs,errs = proc.communicate(timeout=myTimeout)
        if debug: Print("Nodeos process has exited.")
        output["stdout"] = outs.decode("utf-8")
        output["stderr"] = errs.decode("utf-8")
        output["returncode"] = proc.returncode
    except (subprocess.TimeoutExpired) as _:
        Print("ERROR: Nodeos is running beyond the defined wait time. Hard killing nodeos instance.")
        proc.send_signal(signal.SIGKILL)
        return (False, None)

    if debug: Print("Returning success.")
    return (True, output)

random.seed(seed) # Use a fixed seed for repeatability.
cluster=Cluster(walletd=True)

try:
    TestHelper.printSystemInfo("BEGIN")

    cluster.setChainStrategy(chainSyncStrategyStr)

    cluster.killall(allInstances=killAll)
    cluster.cleanup()

    Print ("producing nodes: %d, topology: %s, delay between nodes launch(seconds): %d, chain sync strategy: %s" % (
        pnodes, topo, delay, chainSyncStrategyStr))

    Print("Stand up cluster")
    if cluster.launch(pnodes, total_nodes, topo=topo, delay=delay, dontBootstrap=True) is False:
        errorExit("Failed to stand up eos cluster.")
Exemple #60
# Keenan Albee and Antonio Teran

from Cluster import Cluster
import math
import numpy as np
import pandas as pd
from cities import City, get_cities_table, parse_cities

# Get the full city information as a table.
# data = get_cities_table("1000-largest-us-cities.csv")
# cities = parse_cities("1000-largest-us-cities.csv") # get an array with city objects.

# for c in cities:
#     print(c)

# Run a single city simulation
dt = 0.1  # days
sim_iters = int(math.floor(180 / dt))
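# 180 simulated days at dt = 0.1 day per step gives 1800 iterations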

# boston = Cluster(sim_iters, 10000, 1, 0)
boston = Cluster(sim_iters, 4500000, 10000, 0)

for i in range(sim_iters):
    boston.increment_model(dt)

boston.plot_cluster()