def test02(self):
     (distance,idx1,idx2) = closest_pair_strip([Cluster(set([]), 0.32, 0.16, 1, 0), Cluster(set([]), 0.39, 0.4, 1, 0), Cluster(set([]), 0.54, 0.8, 1, 0), Cluster(set([]), 0.61, 0.8, 1, 0), Cluster(set([]), 0.76, 0.94, 1, 0)], 0.46500000000000002, 0.070000000000000007)
     
     self.assertAlmostEqual(distance, float('inf'))
     self.assertEqual(idx1, -1)
     self.assertEqual(idx2, -1)
 def __get_cluster_nodes_num(self):
     return Cluster().get_nodes_num()
Example #3
def createClusters(centroids):
    clusters = []
    for centroid in centroids:
        clusters.append(Cluster(centroid=centroid))
    return clusters
debug = args.v
total_nodes = pnodes
killCount = args.kill_count if args.kill_count > 0 else 1
killSignal = args.kill_sig
killEnuInstances = not args.leave_running
dumpErrorDetails = args.dump_error_details
keepLogs = args.keep_logs
killAll = args.clean_run
p2pPlugin = args.p2p_plugin

seed = 1
Utils.Debug = debug
testSuccessful = False

random.seed(seed)  # Use a fixed seed for repeatability.
cluster = Cluster(mykeosdd=True)
walletMgr = WalletMgr(True)

try:
    cluster.setChainStrategy(chainSyncStrategyStr)
    cluster.setWalletMgr(walletMgr)

    cluster.killall(allInstances=killAll)
    cluster.cleanup()
    walletMgr.killall(allInstances=killAll)
    walletMgr.cleanup()

    Print(
        "producing nodes: %d, topology: %s, delay between nodes launch(seconds): %d, chain sync strategy: %s"
        % (pnodes, topo, delay, chainSyncStrategyStr))
 def __get_cluster_name(self):
     return Cluster().get_name()
Example #6
def create_system(options, full_system, system, dma_devices, ruby_system):

    if not buildEnv['GPGPU_SIM']:
        m5.util.panic(
            "This script requires GPGPU-Sim integration to be built.")

    # Run the protocol script to set up the CPU cluster, directory and DMA
    (all_sequencers, dir_cntrls, dma_cntrls, cpu_cluster) = \
                                        VI_hammer.create_system(options,
                                                                full_system,
                                                                system,
                                                                dma_devices,
                                                                ruby_system)

    cpu_cntrl_count = len(cpu_cluster) + len(dir_cntrls)

    #
    # Build GPU cluster
    #
    gpu_cluster = Cluster(intBW=32, extBW=32)
    gpu_cluster.disableConnectToParent()

    l2_bits = int(math.log(options.num_l2caches, 2))
    block_size_bits = int(math.log(options.cacheline_size, 2))
    # This represents the L1 to L2 interconnect latency
    # NOTES! 1) This latency is in Ruby (cache) cycles, not SM cycles
    #        2) Since the cluster interconnect doesn't model multihop latencies,
    #           model these latencies with the controller latency variables. If
    #           the interconnect model is changed, latencies will need to be
    #           adjusted for reasonable total memory access delay.
    per_hop_interconnect_latency = 45  # ~15 GPU cycles
    num_dance_hall_hops = int(math.log(options.num_sc, 2))
    if num_dance_hall_hops == 0:
        num_dance_hall_hops = 1
    l1_to_l2_noc_latency = per_hop_interconnect_latency * num_dance_hall_hops
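    # Worked example (hypothetical option value): with options.num_sc = 16 shader
    # cores, num_dance_hall_hops = log2(16) = 4, so
    # l1_to_l2_noc_latency = 45 * 4 = 180 Ruby cycles (~60 GPU cycles at the
    # ~3:1 Ruby-to-GPU cycle ratio implied by the note above).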

    #
    # Caches for GPU cores
    #
    for i in xrange(options.num_sc):
        #
        # First create the Ruby objects associated with the GPU cores
        #
        cache = L1Cache(size=options.sc_l1_size,
                        assoc=options.sc_l1_assoc,
                        replacement_policy=LRUReplacementPolicy(),
                        start_index_bit=block_size_bits,
                        dataArrayBanks=4,
                        tagArrayBanks=4,
                        dataAccessLatency=4,
                        tagAccessLatency=4,
                        resourceStalls=False)

        l1_cntrl = GPUL1Cache_Controller(
            version=i,
            cache=cache,
            l2_select_num_bits=l2_bits,
            num_l2=options.num_l2caches,
            transitions_per_cycle=options.ports,
            issue_latency=l1_to_l2_noc_latency,
            number_of_TBEs=options.gpu_l1_buf_depth,
            ruby_system=ruby_system)

        gpu_seq = RubySequencer(
            version=options.num_cpus + i,
            icache=cache,
            dcache=cache,
            max_outstanding_requests=options.gpu_l1_buf_depth,
            ruby_system=ruby_system,
            deadlock_threshold=2000000,
            connect_to_io=False)

        l1_cntrl.sequencer = gpu_seq

        exec("ruby_system.l1_cntrl_sp%02d = l1_cntrl" % i)

        #
        # Add controllers and sequencers to the appropriate lists
        #
        all_sequencers.append(gpu_seq)
        gpu_cluster.add(l1_cntrl)

        # Connect the controller to the network
        l1_cntrl.requestFromL1Cache = MessageBuffer(ordered=True)
        l1_cntrl.requestFromL1Cache.master = ruby_system.network.slave
        l1_cntrl.responseToL1Cache = MessageBuffer(ordered=True)
        l1_cntrl.responseToL1Cache.slave = ruby_system.network.master

        l1_cntrl.mandatoryQueue = MessageBuffer()

    l2_index_start = block_size_bits + l2_bits
    # Use L2 cache and interconnect latencies to calculate protocol latencies
    # NOTES! 1) These latencies are in Ruby (cache) cycles, not SM cycles
    #        2) Since the cluster interconnect doesn't model multihop latencies,
    #           model these latencies with the controller latency variables. If
    #           the interconnect model is changed, latencies will need to be
    #           adjusted for reasonable total memory access delay.
    l2_cache_access_latency = 30  # ~10 GPU cycles
    l2_to_l1_noc_latency = per_hop_interconnect_latency * num_dance_hall_hops
    l2_to_mem_noc_latency = 125  # ~40 GPU cycles

    l2_clusters = []
    for i in xrange(options.num_l2caches):
        #
        # First create the Ruby objects associated with this L2 cache
        #
        l2_cache = L2Cache(size=options.sc_l2_size,
                           assoc=options.sc_l2_assoc,
                           start_index_bit=l2_index_start,
                           replacement_policy=LRUReplacementPolicy(),
                           dataArrayBanks=4,
                           tagArrayBanks=4,
                           dataAccessLatency=4,
                           tagAccessLatency=4,
                           resourceStalls=options.gpu_l2_resource_stalls)

        l2_cntrl = GPUL2Cache_Controller(
            version=i,
            L2cache=l2_cache,
            transitions_per_cycle=options.ports,
            l2_response_latency=l2_cache_access_latency + l2_to_l1_noc_latency,
            l2_request_latency=l2_to_mem_noc_latency,
            cache_response_latency=l2_cache_access_latency,
            ruby_system=ruby_system)
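        # Worked example (hypothetical, continuing options.num_sc = 16 from above):
        # l2_response_latency = 30 + 45 * 4 = 210 Ruby cycles and
        # l2_request_latency = 125 Ruby cycles, folding the NoC hop latency into
        # the controller latency variables as the notes above describe.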

        exec("ruby_system.l2_cntrl%d = l2_cntrl" % i)
        l2_cluster = Cluster(intBW=32, extBW=32)
        l2_cluster.add(l2_cntrl)
        gpu_cluster.add(l2_cluster)
        l2_clusters.append(l2_cluster)

        # Connect the controller to the network
        l2_cntrl.responseToL1Cache = MessageBuffer(ordered=True)
        l2_cntrl.responseToL1Cache.master = ruby_system.network.slave
        l2_cntrl.requestFromCache = MessageBuffer()
        l2_cntrl.requestFromCache.master = ruby_system.network.slave
        l2_cntrl.responseFromCache = MessageBuffer()
        l2_cntrl.responseFromCache.master = ruby_system.network.slave
        l2_cntrl.unblockFromCache = MessageBuffer()
        l2_cntrl.unblockFromCache.master = ruby_system.network.slave

        l2_cntrl.requestFromL1Cache = MessageBuffer(ordered=True)
        l2_cntrl.requestFromL1Cache.slave = ruby_system.network.master
        l2_cntrl.forwardToCache = MessageBuffer()
        l2_cntrl.forwardToCache.slave = ruby_system.network.master
        l2_cntrl.responseToCache = MessageBuffer()
        l2_cntrl.responseToCache.slave = ruby_system.network.master

        l2_cntrl.triggerQueue = MessageBuffer()

    ############################################################################
    # Pagewalk cache
    # NOTE: We use a CPU L1 cache controller here. This is to facilitate MMU
    #       cache coherence (as the GPU L1 caches are incoherent without flushes).
    #       The L2 cache is small, and should have minimal effect on
    #       performance (see Section 6.2 of Power et al. HPCA 2014).
    pwd_cache = L1Cache(
        size=options.pwc_size,
        assoc=16,  # 64 is fully associative @ 8kB
        replacement_policy=LRUReplacementPolicy(),
        start_index_bit=block_size_bits,
        resourceStalls=False)
    # Small cache since CPU L1 requires I and D
    pwi_cache = L1Cache(size="512B",
                        assoc=2,
                        replacement_policy=LRUReplacementPolicy(),
                        start_index_bit=block_size_bits,
                        resourceStalls=False)

    # Small cache since CPU L1 controller requires L2
    l2_cache = L2Cache(size="512B",
                       assoc=2,
                       start_index_bit=block_size_bits,
                       resourceStalls=False)

    l1_cntrl = L1Cache_Controller(version=options.num_cpus,
                                  L1Icache=pwi_cache,
                                  L1Dcache=pwd_cache,
                                  L2cache=l2_cache,
                                  send_evictions=False,
                                  transitions_per_cycle=options.ports,
                                  issue_latency=l1_to_l2_noc_latency,
                                  cache_response_latency=1,
                                  l2_cache_hit_latency=1,
                                  number_of_TBEs=options.gpu_l1_buf_depth,
                                  ruby_system=ruby_system)

    cpu_seq = RubySequencer(
        version=options.num_cpus + options.num_sc,
        icache=pwd_cache,  # Never get data from pwi_cache
        dcache=pwd_cache,
        dcache_hit_latency=8,
        icache_hit_latency=8,
        max_outstanding_requests=options.gpu_l1_buf_depth,
        ruby_system=ruby_system,
        deadlock_threshold=2000000,
        connect_to_io=False)

    l1_cntrl.sequencer = cpu_seq

    ruby_system.l1_pw_cntrl = l1_cntrl
    all_sequencers.append(cpu_seq)

    gpu_cluster.add(l1_cntrl)

    # Connect the L1 controller and the network
    # Connect the buffers from the controller to network
    l1_cntrl.requestFromCache = MessageBuffer()
    l1_cntrl.requestFromCache.master = ruby_system.network.slave
    l1_cntrl.responseFromCache = MessageBuffer()
    l1_cntrl.responseFromCache.master = ruby_system.network.slave
    l1_cntrl.unblockFromCache = MessageBuffer()
    l1_cntrl.unblockFromCache.master = ruby_system.network.slave

    # Connect the buffers from the network to the controller
    l1_cntrl.forwardToCache = MessageBuffer()
    l1_cntrl.forwardToCache.slave = ruby_system.network.master
    l1_cntrl.responseToCache = MessageBuffer()
    l1_cntrl.responseToCache.slave = ruby_system.network.master

    l1_cntrl.mandatoryQueue = MessageBuffer()
    l1_cntrl.triggerQueue = MessageBuffer()

    #
    # Create controller for the copy engine to connect to in GPU cluster
    # Cache is unused by controller
    #
    cache = L1Cache(size="4096B", assoc=2)

    # Setting options.ce_buffering = 0 indicates that the CE can use infinite
    # buffering, but we need to specify a finite number of outstanding accesses
    # that the CE is allowed to issue. Just set it to some large number, greater
    # than normal memory access latencies, to ensure that the sequencer can
    # service one access per cycle.
    max_out_reqs = options.ce_buffering
    if max_out_reqs == 0:
        max_out_reqs = 1024

    gpu_ce_seq = RubySequencer(version=options.num_cpus + options.num_sc + 1,
                               icache=cache,
                               dcache=cache,
                               max_outstanding_requests=max_out_reqs,
                               support_inst_reqs=False,
                               ruby_system=ruby_system,
                               connect_to_io=False)

    gpu_ce_cntrl = GPUCopyDMA_Controller(version=0,
                                         sequencer=gpu_ce_seq,
                                         transitions_per_cycle=options.ports,
                                         number_of_TBEs=max_out_reqs,
                                         ruby_system=ruby_system)

    gpu_ce_cntrl.responseFromDir = MessageBuffer(ordered=True)
    gpu_ce_cntrl.responseFromDir.slave = ruby_system.network.master
    gpu_ce_cntrl.reqToDirectory = MessageBuffer(ordered=True)
    gpu_ce_cntrl.reqToDirectory.master = ruby_system.network.slave

    gpu_ce_cntrl.mandatoryQueue = MessageBuffer()

    ruby_system.ce_cntrl = gpu_ce_cntrl

    all_sequencers.append(gpu_ce_seq)

    # To limit the copy engine's bandwidth, we add it to a limited bandwidth
    # cluster. Approximate settings are as follows (assuming 2GHz Ruby clock):
    #   PCIe v1.x x16 effective bandwidth ~= 4GB/s: intBW = 3, extBW = 3
    #   PCIe v2.x x16 effective bandwidth ~= 8GB/s: intBW = 5, extBW = 5
    #   PCIe v3.x x16 effective bandwidth ~= 16GB/s: intBW = 10, extBW = 10
    #   PCIe v4.x x16 effective bandwidth ~= 32GB/s: intBW = 21, extBW = 21
    # NOTE: Bandwidth may bottleneck at other parts of the memory hierarchy,
    # so bandwidth considerations should be made in other parts of the memory
    # hierarchy also.
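    # Sanity check under the assumption that intBW/extBW are in bytes per Ruby
    # cycle: at a 2GHz Ruby clock, intBW = 10 is ~20GB/s of raw link bandwidth,
    # which the table above treats as ~16GB/s of effective PCIe v3.x x16 bandwidth.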
    gpu_ce_cluster = Cluster(intBW=10, extBW=10)
    gpu_ce_cluster.add(gpu_ce_cntrl)

    complete_cluster = Cluster(intBW=32, extBW=32)
    complete_cluster.add(gpu_ce_cluster)
    complete_cluster.add(cpu_cluster)
    complete_cluster.add(gpu_cluster)

    for cntrl in dir_cntrls:
        complete_cluster.add(cntrl)

    for cntrl in dma_cntrls:
        complete_cluster.add(cntrl)

    for cluster in l2_clusters:
        complete_cluster.add(cluster)

    return (all_sequencers, dir_cntrls, complete_cluster)
Example #7
def miqu(U, V, candU, candV, _type, g, type_of_vertices, di=0):
    global c
    global check
    c += 1
    if config['DebugOption']['expansion'].lower() == "true":
        print(_type, U, V, "Cand_sets = ", candU, candV, "-*-")

    if len(U) >= msu and len(V) >= msv:
        # Pruning candidates when we have reached the minimum size constraint
        try:
            # check for connectedness of U+V, since a QBC must be connected
            # G = nx.Graph(g.to_dict_of_lists(di))
            H = g.subgraph(U + V)
            if not nx.is_connected(H):
                raise Exception(
                    "Vertices U and V are not connected, so they cannot be part of an interesting QBC"
                )

            candU, candV, fail_flag = PruneTechniques.prune_vertices(
                U, V, candU, candV, g, [g_min, l_min], config)

            if fail_flag:
                # something went wrong when pruning. e.g. a node is disconnected from G
                raise Exception("The current node in SET won't form a cluster")

            check += 1
            # print("\tLooking for cluster in: ", U, V)

            #  First check
            u_min_edges = round(
                len(V) * l_min, 0
            )  # every u in U must have at least this many edges to be in a QBC
            v_min_edges = round(
                len(U) * g_min,
                0)  # likewise, the minimum number of edges for every v in V
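            # Worked example (hypothetical sizes): with len(V) = 5 and
            # l_min = 0.6, u_min_edges = round(5 * 0.6) = 3, i.e. every u in U
            # needs at least 3 edges into V for (U, V) to qualify as a QBC.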

            for u in U:
                u_edges = 0
                for v in V:
                    if g.number_of_edges(u, v) >= 1:
                        u_edges += 1
                    if u_edges >= u_min_edges:  # reached the ideal number of edges for the vertex u
                        break  # optimization, no need to check further if curr. belongs to a QBC
                if u_edges < u_min_edges:
                    raise Exception("One vertex from U (", u,
                                    ") w/o enough edges to form a QBC with", v,
                                    " ---- edges", u_min_edges)
            # at this point,
            # u and v are in G or CC
            for v in V:
                v_edges = 0
                for u in U:
                    if g.number_of_edges(u, v) >= 1:
                        v_edges += 1
                    if v_edges >= v_min_edges:  # reached the ideal number of edges for vertex v
                        break
                if v_edges < v_min_edges:  # if v has fewer than v_min_edges edges, it cannot be part of a QBC
                    raise Exception("One vertex from V (", v,
                                    ") w/o enough edges to form a QBC with ",
                                    u)
            # if u_edges >= gamma_min_edges and v_edges >= lambda_min_edges:
            # at this point there is no way U, V are not a cluster
            if config['DebugOption']['cluster'].lower() == "true":
                print("\tCluster found! ")
            # clusterList.append([U, V])
            clusterList.append(Cluster(U, V))
            # at this point we are sure that u,v are in the graph
        except Exception as er:
            if config['DebugOption']['exception'].lower() == "true":
                print("\t Exp: ", er, "!!!!")
            pass
        finally:
            pass

    if len(U) >= len(A) and len(V) >= len(B):
        print("No more U or V expansion: Max. QB in G")
        return
    # U-expansion
    if len(V) >= msv:
        i = 0
        while i < len(candU):
            if U[-1] >= max(candU):  # or U[-1] >= candU[0]:
                break
            copyOfCandU = list(candU)
            copyOfCandU.pop(i)
            copyOfU = list(U)
            copyOfU.append(candU[i])
            i += 1
            if copyOfU[-1] < copyOfU[-2]:
                continue
            miqu(copyOfU, V, copyOfCandU, [], "U", g, g_reader.vertex_type_dic)
    # V-expansion
    if len(U) >= msu:
        i = 0
        while i < len(candV):
            if V[-1] >= max(candV):  # or V[-1] >= candV[0]:
                break
            copyOfCandV = list(candV)
            copyOfCandV.pop(i)
            copyOfV = list(V)
            copyOfV.append(candV[i])
            i += 1
            if copyOfV[-1] < copyOfV[-2]:
                continue
            miqu(U, copyOfV, [], copyOfCandV, "V", g, g_reader.vertex_type_dic)
    i = 0
    while i < len(candU):
        j = 0
        while j < len(candV):
            copyOfU = list(U)
            copyOfV = list(V)
            copyOfU.append(candU[i])
            copyOfV.append(candV[j])
            if len(U) > 0:
                if copyOfU[-1] < copyOfU[-2]:
                    # copyOfU = copyOfU[:-1]
                    j += 1
                    continue
            if len(V) > 0:
                if copyOfV[-1] < copyOfV[-2]:
                    # copyOfV = copyOfV[:-1]
                    j += 1
                    continue
            copyOfCandU = candU[:]
            copyOfCandU.pop(i)

            copyOfCandV = candV[:]
            copyOfCandV.pop(j)

            if len(U) > 0 and len(V) > 0:
                if V[-1] >= max(candV) or U[-1] >= max(candU):
                    print("Not expanding:  ", U, V, candU, candV)
                    break
            # print "----> ", candU, candV
            miqu(copyOfU, copyOfV, copyOfCandU, copyOfCandV, "U-V", g,
                 g_reader.vertex_type_dic)
            j += 1
        i -= 1
        candU.pop(0)
        i += 1
Example #8
    def create(self,
               clustername="",
               domainname="",
               ipaddresses=[],
               superadminpassword="",
               superadminpasswords=[]):
        # TODO
        # j.console.echo("*** Ignore master for now, it's not yet implemented yet, just pick any node of the cluster.. ***")
        # j.console.echo("*** After specifying the information for a cluster, the information gets written to disk but is not used by the program, instead it tries to guess the information by probing the network (mostly wrong), in case of problems just restart the shell, your cluster will be there. .. ***")
        """
        domainname needs to be unique
        clustername is only a name which makes it easy for you to remember and used to store in config file
        """
        if superadminpasswords == []:
            superadminpasswords = [superadminpassword]
        if clustername != "":
            # fill in cluster configuration file with information already known
            ipaddresses2 = string.join(
                [str(ipaddr).strip() for ipaddr in ipaddresses], ",")

            if clustername not in j.remote.cluster.config.list():
                j.remote.cluster.config.add(
                    clustername, {
                        'domain': domainname,
                        'ip': ipaddresses2,
                        'rootpasswd': superadminpassword
                    })
            else:
                j.remote.cluster.config.configure(
                    clustername, {
                        'domain': domainname,
                        'ip': ipaddresses2,
                        'rootpasswd': superadminpassword
                    })
        if j.application.shellconfig.interactive:
            if domainname == "" or ipaddresses == [] or superadminpassword == "":
                if clustername == "":
                    #import pdb
                    # pdb.set_trace()
                    # How do I get the IP addresses?
                    # Get the IP addresses and put them in ipaddresses
                    # so the constructor of Cluster does not use avahi, because its results are wrong!
                    clustername = j.gui.dialog.askString(
                        'Name for the cluster', 'myCluster')
                    j.remote.cluster.config.add(itemname=clustername)
                else:
                    j.remote.cluster.config.review(clustername)
                self.__init__()
                return self.clusters[clustername]

        else:
            if ipaddresses == []:
                raise RuntimeError(
                    "Please specify ipaddresses of nodes you would like to add to cluster"
                )
            if superadminpasswords == []:
                raise RuntimeError(
                    "Please specify password(s) of nodes you would like to add to cluster"
                )
            if domainname == "":
                raise RuntimeError("Please specify domainname for cluster")
            if clustername == "":
                raise RuntimeError("Please specify short name for cluster")

        # at this point we know ipaddresses & possible superadminpasswords
        cl = Cluster(clustername=clustername,
                     domainname=domainname,
                     ipaddresses=ipaddresses,
                     superadminpasswords=superadminpasswords,
                     superadminpassword=superadminpassword)
        self.clusters[clustername] = cl
        return cl
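    # Usage sketch (hypothetical receiver and values): something like
    #   cl = cluster_manager.create(clustername="myCluster",
    #                               domainname="cluster.local",
    #                               ipaddresses=["10.0.0.1", "10.0.0.2"],
    #                               superadminpassword="rootpass")
    # registers the new Cluster under self.clusters["myCluster"] and returns it.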


def plot_clusters(clusters):
    # Draw each cluster's line segments as arrows, one colormap color per cluster.
    for i, cluster in enumerate(clusters):
        if len(cluster) == 0:
            continue
        color_val = scalar_map.to_rgba(i)
        for line in cluster:
            plt.arrow(line.a[0],
                      line.a[1],
                      line.vector[0],
                      line.vector[1],
                      color=color_val)


"""
Setup! Processes the first n lines of the csv to begin the clustering.
"""
partitioner = Partition(LIKELIHOOD_THRES, MIN_VELOCITY)
clusterer = Cluster(EPSILON, MIN_LINES)
partitioner.pre_process(FILE_NAME, 0, 4000)
partitions = partition(partitioner)
clusters = clusterer.segment_cluster(partitions[0])
for cluster in clusterer.segment_cluster(partitions[1]):
    clusters.append(cluster)
fig = plt.figure()
cmap = plt.cm.jet
# plt.ion()
img = mpimg.imread("ref.png")
plt.imshow(img)
plot_clusters(clusters)
# plt.draw()
"""
Real time!
"""
Example #10
class TraceApiPluginTest(unittest.TestCase):
    sleep_s = 1
    cluster=Cluster(walletd=True, defproduceraPrvtKey=None)
    walletMgr=WalletMgr(True)
    accounts = []
    cluster.setWalletMgr(walletMgr)

    # kill nodeos and keosd and clean up dir
    def cleanEnv(self, shouldCleanup: bool) :
        self.cluster.killall(allInstances=True)
        if shouldCleanup:
            self.cluster.cleanup()
        self.walletMgr.killall(allInstances=True)
        if shouldCleanup:
            self.walletMgr.cleanup()

    # start keosd and nodeos
    def startEnv(self) :
        account_names = ["alice", "bob", "charlie"]
        traceNodeosArgs = " --plugin eosio::trace_api_plugin --trace-no-abis --trace-dir=."
        self.cluster.launch(totalNodes=1, extraNodeosArgs=traceNodeosArgs)
        self.walletMgr.launch()
        testWalletName="testwallet"
        testWallet=self.walletMgr.create(testWalletName, [self.cluster.eosioAccount, self.cluster.defproduceraAccount])
        self.cluster.validateAccounts(None)
        self.accounts=Cluster.createAccountKeys(len(account_names))
        node = self.cluster.getNode(0)
        for idx in range(len(account_names)):
            self.accounts[idx].name =  account_names[idx]
            self.walletMgr.importKey(self.accounts[idx], testWallet)
        for account in self.accounts:
            node.createInitializeAccount(account, self.cluster.eosioAccount, buyRAM=1000000, stakedDeposit=5000000, waitForTransBlock=True, exitOnError=True)
        time.sleep(self.sleep_s)

    def get_block(self, params: str, node: Node) -> json:
        base_cmd_str = ("curl http://%s:%s/v1/") % (TestHelper.LOCAL_HOST, node.port)
        cmd_str = base_cmd_str + "trace_api/get_block  -X POST -d " + ("'{\"block_num\":%s}'") % params
        return Utils.runCmdReturnJson(cmd_str)
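        # For example, with params="123" and a node listening on localhost:8888
        # (hypothetical host/port), cmd_str becomes:
        #   curl http://localhost:8888/v1/trace_api/get_block  -X POST -d '{"block_num":123}'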

    def test_TraceApi(self) :
        node = self.cluster.getNode(0)
        for account in self.accounts:
            self.assertIsNotNone(node.verifyAccount(account))

        expectedAmount = Node.currencyIntToStr(5000000, CORE_SYMBOL)
        account_balances = []
        for account in self.accounts:
            amount = node.getAccountEosBalanceStr(account.name)
            self.assertEqual(amount, expectedAmount)
            account_balances.append(amount)

        xferAmount = Node.currencyIntToStr(123456, CORE_SYMBOL)
        trans = node.transferFunds(self.accounts[0], self.accounts[1], xferAmount, "test transfer a->b")
        transId = Node.getTransId(trans)
        blockNum = Node.getTransBlockNum(trans)

        self.assertEqual(node.getAccountEosBalanceStr(self.accounts[0].name), Utils.deduceAmount(expectedAmount, xferAmount))
        self.assertEqual(node.getAccountEosBalanceStr(self.accounts[1].name), Utils.addAmount(expectedAmount, xferAmount))
        time.sleep(self.sleep_s)

        # verify trans via node api before calling trace_api RPC
        blockFromNode = node.getBlock(blockNum)
        self.assertIn("transactions", blockFromNode)
        isTrxInBlockFromNode = False
        for trx in blockFromNode["transactions"]:
            self.assertIn("trx", trx)
            self.assertIn("id", trx["trx"])
            if (trx["trx"]["id"] == transId) :
                isTrxInBlockFromNode = True
                break
        self.assertTrue(isTrxInBlockFromNode)

        # verify trans via trace_api by calling get_block RPC
        blockFromTraceApi = self.get_block(blockNum, node)
        self.assertIn("transactions", blockFromTraceApi)
        isTrxInBlockFromTraceApi = False
        for trx in blockFromTraceApi["transactions"]:
            self.assertIn("id", trx)
            if (trx["id"] == transId) :
                isTrxInBlockFromTraceApi = True
                break
        self.assertTrue(isTrxInBlockFromTraceApi)

    @classmethod
    def setUpClass(self):
        self.cleanEnv(self, shouldCleanup=True)
        self.startEnv(self)

    @classmethod
    def tearDownClass(self):
        self.cleanEnv(self, shouldCleanup=False)   # do not clean up, so logs are kept for further investigation
Example #11
def create_system(options, system, piobus, dma_ports, ruby_system):

    if 'VI_hammer' not in buildEnv['PROTOCOL']:
        panic("This script requires the VI_hammer protocol to be built.")

    cpu_sequencers = []

    topology = Cluster(intBW = 32, extBW = 32)

    #
    # Must create the individual controllers before the network to ensure the
    # controller constructors are called before the network constructor
    #
    l2_bits_float = math.log(options.num_l2caches, 2)
    l2_bits = int(l2_bits_float)
    if l2_bits_float > l2_bits:
        l2_bits += 1
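    # e.g. (hypothetical) options.num_l2caches = 6 gives l2_bits_float ~= 2.58,
    # so l2_bits is rounded up to 3 bits of L2 bank-select address.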
    block_size_bits = int(math.log(options.cacheline_size, 2))

    cntrl_count = 0

    for i in xrange(options.num_cpus):
        #
        # First create the Ruby objects associated with this cpu
        #
        l1i_cache = L1Cache(size = options.l1i_size,
                            assoc = options.l1i_assoc,
                            start_index_bit = block_size_bits,
                            is_icache = True)
        l1d_cache = L1Cache(size = options.l1d_size,
                            assoc = options.l1d_assoc,
                            start_index_bit = block_size_bits)
        l2_cache = L2Cache(size = options.l2_size,
                           assoc = options.l2_assoc,
                           start_index_bit = block_size_bits)

        l1_cntrl = L1Cache_Controller(version = i,
                                      cntrl_id = cntrl_count,
                                      L1Icache = l1i_cache,
                                      L1Dcache = l1d_cache,
                                      L2cache = l2_cache,
                                      no_mig_atomic = not \
                                        options.allow_atomic_migration,
                                      send_evictions = (
                                         options.cpu_type == "detailed"),
                                      ruby_system = ruby_system)

        cpu_seq = RubySequencer(version = i,
                                icache = l1i_cache,
                                dcache = l1d_cache,
                                access_phys_mem = True,
                                ruby_system = ruby_system)

        l1_cntrl.sequencer = cpu_seq

        if piobus != None:
            cpu_seq.pio_port = piobus.slave

        if options.recycle_latency:
            l1_cntrl.recycle_latency = options.recycle_latency

        exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
        #
        # Add controllers and sequencers to the appropriate lists
        #
        cpu_sequencers.append(cpu_seq)
        topology.add(l1_cntrl)

        cntrl_count += 1

    cpu_mem_range = AddrRange(options.total_mem_size)
    mem_module_size = cpu_mem_range.size() / options.num_dirs

    #
    # determine size and index bits for probe filter
    # By default, the probe filter size is configured to be twice the
    # size of the L2 cache.
    #
    pf_size = MemorySize(options.l2_size)
    pf_size.value = pf_size.value * 2
    dir_bits = int(math.log(options.num_dirs, 2))
    pf_bits = int(math.log(pf_size.value, 2))
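    # e.g. (hypothetical) options.l2_size = '512kB' doubles to a 1MB probe
    # filter, so pf_bits = log2(1048576) = 20.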
    if options.numa_high_bit:
        if options.pf_on or options.dir_on:
            # if numa high bit explicitly set, make sure it does not overlap
            # with the probe filter index
            assert(options.numa_high_bit - dir_bits > pf_bits)

        # set the probe filter start bit to just above the block offset
        pf_start_bit = block_size_bits
    else:
        if dir_bits > 0:
            pf_start_bit = dir_bits + block_size_bits - 1
        else:
            pf_start_bit = block_size_bits

    dir_cntrl_nodes = []
    for i in xrange(options.num_dirs):
        #
        # Create the Ruby objects associated with the directory controller
        #

        mem_cntrl = RubyMemoryControl(version = i, ruby_system = ruby_system)

        dir_size = MemorySize('0B')
        dir_size.value = mem_module_size

        pf = ProbeFilter(size = pf_size, assoc = 4,
                         start_index_bit = pf_start_bit)

        dir_cntrl = Directory_Controller(version = i,
                                         cntrl_id = cntrl_count,
                                         directory = \
                                         RubyDirectoryMemory( \
                                                    version = i,
                                                    size = dir_size,
                                                    use_map = options.use_map,
                                                    map_levels = \
                                                    options.map_levels,
                                                    numa_high_bit = \
                                                      options.numa_high_bit),
                                         probeFilter = pf,
                                         memBuffer = mem_cntrl,
                                         probe_filter_enabled = options.pf_on,
                                         full_bit_dir_enabled = options.dir_on,
                                         ruby_system = ruby_system)

        if options.recycle_latency:
            dir_cntrl.recycle_latency = options.recycle_latency

        exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
        dir_cntrl_nodes.append(dir_cntrl)

        cntrl_count += 1

    dma_cntrl_nodes = []
    for i, dma_port in enumerate(dma_ports):
        #
        # Create the Ruby objects associated with the dma controller
        #
        dma_seq = DMASequencer(version = i,
                               ruby_system = ruby_system)

        dma_cntrl = DMA_Controller(version = i,
                                   cntrl_id = cntrl_count,
                                   dma_sequencer = dma_seq,
                                   ruby_system = ruby_system)

        exec("ruby_system.dma_cntrl%d = dma_cntrl" % i)
        exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
        dma_cntrl_nodes.append(dma_cntrl)

        if options.recycle_latency:
            dma_cntrl.recycle_latency = options.recycle_latency

        cntrl_count += 1

    return (cpu_sequencers, dir_cntrl_nodes, dma_cntrl_nodes, topology)
Example #12
    exit(-1)

# normalize the data #
dataset = StandardScaler().fit_transform(X=dataset)

# plotting the original data
(fig, ax1) = original_plotting(dataset, dataset.shape[1])

# creating collection of all the points #
points = []
for row in dataset:
    points.append(Point(row))

# running the DBSCAN algorithm #
t1 = time.time()
cluster = [Cluster('#000000')]
visited = 0
while visited < len(points):
    visited = DBSCAN(points, eps, minPnt, cluster, color_gen, visited)

t2 = time.time()
print("runing time: {} sec".format((t2 - t1)))

# printing the results:
total_len = len(points)
print("\nnumber of cluster counts: {}".format(len(cluster) - 1))
for i in range(1, len(cluster)):
    print("cluster number {}: {}% of the points".format(
        i, round(((len(cluster[i].points)) / total_len) * 100), ndigit=2))
print("noise cluster have {}% of the points".format(round(
    ((len(cluster[0].points)) / total_len) * 100),
 def test10(self):
     (distance,idx1,idx2) = fast_closest_pair([Cluster(set(['06081']), 52.6171444847, 262.707477827, 707161, 5.6e-05), Cluster(set(['06075']), 52.7404001225, 254.517429395, 776733, 8.4e-05), Cluster(set(['06001']), 61.782098866, 259.312457296, 1443741, 7e-05), Cluster(set(['06085']), 63.1509653633, 270.516712105, 1682585, 6.3e-05), Cluster(set(['06021']), 65.2043358182, 213.245337355, 26453, 6.9e-05), Cluster(set(['06113']), 68.2602083189, 236.862609218, 168660, 5.9e-05), Cluster(set(['06101']), 74.2003718491, 229.646592975, 78930, 5.6e-05), Cluster(set(['06067']), 74.3547338322, 245.49501455, 1223499, 6.1e-05), Cluster(set(['06083']), 76.0382837186, 340.420376302, 399347, 6.4e-05), Cluster(set(['06089']), 77.359494209, 188.945068958, 163256, 5.7e-05), Cluster(set(['41067']), 92.2254623376, 76.2593957841, 445342, 7.3e-05), Cluster(set(['06111']), 93.4973310868, 344.590570899, 753197, 5.8e-05), Cluster(set(['06019']), 95.6093812211, 290.162708843, 799407, 6.4e-05), Cluster(set(['06039']), 97.2145136451, 278.975077449, 123109, 6e-05), Cluster(set(['41051']), 103.293707198, 79.5194104381, 660486, 9.3e-05), Cluster(set(['41005']), 103.421444616, 88.318590492, 338391, 6.6e-05), Cluster(set(['06029']), 103.787886113, 326.006585349, 661645, 9.7e-05), Cluster(set(['53011']), 104.00046468, 74.0182527309, 345238, 6.4e-05), Cluster(set(['06037']), 105.369854549, 359.050126004, 9519338, 0.00011), Cluster(set(['06107']), 108.085024898, 306.351832438, 368021, 5.8e-05), Cluster(set(['06059']), 113.997715586, 368.503452566, 2846289, 9.8e-05), Cluster(set(['53033']), 125.27486023, 39.1497730391, 1737034, 5.8e-05), Cluster(set(['06073']), 129.2075529, 387.064888184, 2813833, 6.6e-05), Cluster(set(['06065']), 146.410389633, 374.21707964, 1545387, 6.1e-05), Cluster(set(['06071']), 148.402461892, 350.061039619, 1709434, 7.7e-05), Cluster(set(['06025']), 156.397958859, 393.161127277, 142361, 5.6e-05), Cluster(set(['04013']), 214.128077618, 396.893960776, 3072149, 6.8e-05), Cluster(set(['08031']), 371.038986573, 266.847932979, 554636, 7.9e-05), Cluster(set(['08001']), 379.950978294, 265.078784954, 363857, 6.6e-05), Cluster(set(['08005']), 380.281283151, 270.268826873, 487967, 5.9e-05), Cluster(set(['31109']), 516.78216337, 250.188023316, 250291, 6.1e-05), Cluster(set(['31055']), 525.799353573, 238.14275337, 463585, 6.2e-05), Cluster(set(['48201']), 540.54731652, 504.62993865, 3400578, 6e-05), Cluster(set(['48245']), 565.746895809, 504.541799993, 252051, 5.7e-05), Cluster(set(['27053']), 570.131597541, 151.403325043, 1116200, 5.8e-05), Cluster(set(['22017']), 570.826412839, 442.202574191, 252161, 6.2e-05), Cluster(set(['27123']), 576.516685202, 151.219277482, 511035, 5.6e-05), Cluster(set(['19163']), 621.490118929, 227.666851619, 158668, 5.6e-05), Cluster(set(['29189']), 629.170659449, 297.571839563, 1016315, 6e-05), Cluster(set(['28027']), 631.700027283, 400.68741948, 30622, 6e-05), Cluster(set(['29510']), 632.327321169, 297.184524592, 348189, 6.9e-05), Cluster(set(['28049']), 638.051593606, 445.785870317, 250800, 6e-05), Cluster(set(['22071']), 651.338581076, 496.465402252, 484674, 6.4e-05), Cluster(set(['28159']), 663.514261498, 425.274137823, 20160, 5.9e-05), Cluster(set(['55079']), 664.855000617, 192.484141264, 940164, 7.4e-05), Cluster(set(['17031']), 668.978975824, 219.400257219, 5376741, 6.1e-05), Cluster(set(['47037']), 700.009323976, 350.107265446, 569891, 6.1e-05), Cluster(set(['01073']), 704.191210749, 411.014665198, 662047, 7.3e-05), Cluster(set(['01117']), 709.193528999, 417.394467797, 143293, 
5.6e-05), Cluster(set(['21111']), 715.347723878, 301.167740487, 693604, 5.9e-05), Cluster(set(['01101']), 720.281573781, 440.436162917, 223510, 5.7e-05), Cluster(set(['01015']), 723.907941153, 403.837487318, 112249, 5.6e-05), Cluster(set(['47065']), 732.643747577, 370.017730905, 307896, 6.1e-05), Cluster(set(['13313']), 737.308367745, 378.040993858, 83525, 5.6e-05), Cluster(set(['01113']), 740.385154867, 436.939588695, 49756, 5.6e-05), Cluster(set(['26125']), 743.036942153, 192.129690868, 1194156, 5.7e-05), Cluster(set(['13215']), 745.265661102, 430.987078939, 186291, 5.9e-05), Cluster(set(['26163']), 746.37046732, 200.570021537, 2061162, 6.4e-05), Cluster(set(['13067']), 747.238620236, 397.293799252, 607751, 6.4e-05), Cluster(set(['13121']), 750.160287596, 399.907752014, 816006, 7e-05), Cluster(set(['13063']), 752.853876848, 406.722877803, 236517, 6.6e-05), Cluster(set(['47093']), 753.012743594, 348.235180569, 382032, 5.6e-05), Cluster(set(['13089']), 754.465443436, 400.059456026, 665865, 6.8e-05), Cluster(set(['13151']), 756.589546538, 407.288873768, 119341, 5.6e-05), Cluster(set(['13135']), 758.038826857, 395.110327675, 588448, 6.3e-05), Cluster(set(['13247']), 758.37864157, 402.49780372, 70111, 5.6e-05), Cluster(set(['12073']), 762.463896365, 477.365342219, 239452, 6.1e-05), Cluster(set(['21019']), 768.726553092, 290.270551648, 49752, 5.8e-05), Cluster(set(['39035']), 776.351457758, 216.558042612, 1393978, 5.8e-05), Cluster(set(['51520']), 784.05333332, 328.847863787, 17367, 5.6e-05), Cluster(set(['13245']), 796.799727342, 404.391349655, 199775, 5.9e-05), Cluster(set(['54009']), 799.221537984, 240.153315109, 25447, 7.7e-05), Cluster(set(['42003']), 809.003419092, 233.899638663, 1281666, 6.1e-05), Cluster(set(['37119']), 813.724315147, 356.853362811, 695454, 5.6e-05), Cluster(set(['51775']), 820.111751617, 307.695502162, 24747, 5.8e-05), Cluster(set(['51770']), 821.912162221, 307.548990323, 94911, 6.5e-05), Cluster(set(['51680']), 835.264653899, 302.326633095, 65269, 5.8e-05), Cluster(set(['51820']), 837.346467474, 285.851438947, 19520, 5.8e-05), Cluster(set(['51840']), 845.843602685, 258.214178983, 23585, 7.1e-05), Cluster(set(['51059']), 863.064397845, 262.414412378, 969749, 5.7e-05), Cluster(set(['24031']), 863.180208628, 255.65657011, 873341, 6.5e-05), Cluster(set(['51610']), 864.078108667, 261.655667801, 10377, 6.9e-05), Cluster(set(['51760']), 865.424050159, 293.735963553, 197790, 8.6e-05), Cluster(set(['51013']), 865.681962839, 261.222875114, 189453, 7.7e-05), Cluster(set(['51087']), 866.389610525, 292.780704494, 262300, 6.3e-05), Cluster(set(['51510']), 866.572477724, 262.734686855, 128283, 6.8e-05), Cluster(set(['24027']), 867.127763298, 252.141340019, 247842, 6e-05), Cluster(set(['11001']), 867.470401202, 260.460974222, 572059, 7.7e-05), Cluster(set(['51570']), 868.048530719, 299.360459202, 16897, 5.6e-05), Cluster(set(['24033']), 870.786325575, 261.829970016, 801515, 6.4e-05), Cluster(set(['24005']), 871.921241442, 246.932531615, 754292, 6.1e-05), Cluster(set(['24510']), 872.946822486, 249.834427518, 651154, 7.4e-05), Cluster(set(['42101']), 894.72914873, 227.900547575, 1517550, 5.8e-05), Cluster(set(['34007']), 899.061431482, 232.054232622, 508932, 5.7e-05), Cluster(set(['34031']), 904.161746346, 201.712206531, 489049, 6.3e-05), Cluster(set(['34023']), 904.976453741, 215.001458637, 750162, 5.9e-05), Cluster(set(['34039']), 905.587082153, 210.045085725, 522541, 7.3e-05), Cluster(set(['34013']), 906.236730753, 206.977429459, 793633, 7.1e-05), Cluster(set(['34003']), 
907.896066895, 202.302470427, 884118, 6.9e-05), Cluster(set(['36085']), 908.749199508, 211.307161341, 443728, 7e-05), Cluster(set(['34017']), 909.08042421, 207.462937763, 608975, 9.1e-05), Cluster(set(['36061']), 911.072622034, 205.783086757, 1537195, 0.00015), Cluster(set(['36047']), 911.595580089, 208.928374072, 2465326, 9.8e-05), Cluster(set(['36119']), 912.141547823, 196.592589736, 923459, 6.5e-05), Cluster(set(['36005']), 912.315497328, 203.674106811, 1332650, 0.00011), Cluster(set(['36081']), 913.462051588, 207.615750359, 2229379, 8.9e-05), Cluster(set(['36059']), 917.384980291, 205.43647538, 1334544, 7.6e-05), Cluster(set(['09003']), 925.917212741, 177.152290276, 857183, 5.7e-05), Cluster(set(['36103']), 929.241649488, 199.278463003, 1419369, 6.3e-05), Cluster(set(['25017']), 943.405755498, 156.504310828, 1465396, 5.6e-05), Cluster(set(['25025']), 950.299079197, 158.007070966, 689807, 7e-05)])
     
     self.assertAlmostEqual(distance,1.2662160020181641)
     self.assertEqual(idx1, 79)
     self.assertEqual(idx2, 81)
 def test01(self):
     (distance,idx1,idx2) = fast_closest_pair([Cluster(set([]), 0, 0, 1, 0), Cluster(set([]), 1, 0, 1, 0), Cluster(set([]), 2, 0, 1, 0), Cluster(set([]), 3, 0, 1, 0)])
     
     self.assertAlmostEqual(distance, 1.0)
Example #15
from core_symbol import CORE_SYMBOL

args = TestHelper.parse_args({
    "--defproducera_prvt_key", "--dump-error-details", "--dont-launch",
    "--keep-logs", "-v", "--leave-running", "--clean-run"
})
debug = args.v
defproduceraPrvtKey = args.defproducera_prvt_key
dumpErrorDetails = args.dump_error_details
keepLogs = args.keep_logs
dontLaunch = args.dont_launch
dontKill = args.leave_running
killAll = args.clean_run

Utils.Debug = debug
cluster = Cluster(walletd=True, defproduceraPrvtKey=defproduceraPrvtKey)
walletMgr = WalletMgr(True)
testSuccessful = False
killscsInstances = not dontKill
killWallet = not dontKill

WalletdName = Utils.scsWalletName
ClientName = "clscs"
timeout = .5 * 12 * 2 + 60  # time for finalization with 1 producer + 60 seconds padding
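# i.e. presumably 0.5 s block interval x 12 blocks per round x 2 rounds = 12 s
# to finality with a single producer, plus 60 s of padding -> timeout = 72 s.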
Utils.setIrreversibleTimeout(timeout)

try:
    TestHelper.printSystemInfo("BEGIN")

    cluster.setWalletMgr(walletMgr)
Example #16
class Ui_MainWindow(QtWidgets.QMainWindow):
    # raw=0
    raw = pd.DataFrame(index=[],
                       columns=[])  # raw is initialized as a blank dataframe

    Language = ''

    app = QtWidgets.QApplication(sys.argv)
    myStyle = MyProxyStyle(
        'Fusion')  # The proxy style should be based on an existing style,
    # like 'Windows', 'Motif', 'Plastique', 'Fusion', ...
    app.setStyle(myStyle)

    trans = QtCore.QTranslator()

    talk = ''

    targetversion = '0'

    def __init__(self):

        super(Ui_MainWindow, self).__init__()
        self.setObjectName('MainWindow')
        self.resize(800, 600)

        _translate = QtCore.QCoreApplication.translate
        self.setWindowTitle(_translate('MainWindow', u'GeoPython'))
        self.setWindowIcon(QIcon(LocationOfMySelf + '/geopython.png'))
        self.talk = _translate(
            'MainWindow',
            'You are using GeoPython ') + version + '\n' + _translate(
                'MainWindow', 'released on ') + date

        self.model = PandasModel(self.raw)

        self.main_widget = QWidget(self)

        self.centralwidget = QtWidgets.QWidget()
        self.centralwidget.setObjectName('centralwidget')
        self.setCentralWidget(self.centralwidget)

        self.tableView = CustomQTableView(self.centralwidget)

        self.tableView.setObjectName('tableView')
        self.tableView.setSortingEnabled(True)

        self.pushButtonOpen = QtWidgets.QPushButton(self.centralwidget)
        self.pushButtonOpen.setObjectName('pushButtonOpen')

        self.pushButtonSave = QtWidgets.QPushButton(self.centralwidget)
        self.pushButtonSave.setObjectName('pushButtonSave')

        self.pushButtonSort = QtWidgets.QPushButton(self.centralwidget)
        self.pushButtonSort.setObjectName('pushButtonSort')

        self.pushButtonQuit = QtWidgets.QPushButton(self.centralwidget)
        self.pushButtonQuit.setObjectName('pushButtonQuit')

        self.pushButtonUpdate = QtWidgets.QPushButton(self.centralwidget)
        self.pushButtonUpdate.setObjectName('pushButtonUpdate')

        w = self.width()
        h = self.height()

        if h < 360:
            h = 360
            self.resize(w, h)

        if w < 640:
            w = 640
            self.resize(w, h)

        step = (w * 94 / 100) / 5
        foot = h * 3 / 48
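        # Layout sketch (hypothetical 800x600 window): step ~= (800 * 94 / 100) / 5
        # ~= 150 px per button across the row, and foot ~= 600 * 3 / 48 ~= 37 px tall.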

        #if foot<=10: foot=10

        self.tableView.setGeometry(
            QtCore.QRect(w / 100, h / 48, w * 98 / 100, h * 38 / 48))

        self.pushButtonOpen.setGeometry(
            QtCore.QRect(w / 100, h * 40 / 48, step, foot))

        self.pushButtonSave.setGeometry(
            QtCore.QRect(2 * w / 100 + step, h * 40 / 48, step, foot))

        self.pushButtonSort.setGeometry(
            QtCore.QRect(3 * w / 100 + step * 2, h * 40 / 48, step, foot))

        self.pushButtonQuit.setGeometry(
            QtCore.QRect(4 * w / 100 + step * 3, h * 40 / 48, step, foot))

        self.pushButtonUpdate.setGeometry(
            QtCore.QRect(5 * w / 100 + step * 4, h * 40 / 48, step, foot))

        self.menubar = QtWidgets.QMenuBar(self)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 1000, 22))
        self.menubar.setNativeMenuBar(True)
        self.menubar.setObjectName('menubar')

        self.menuFile = QtWidgets.QMenu(self.menubar)
        self.menuFile.setObjectName('menuFile')

        self.menuGeoChem = QtWidgets.QMenu(self.menubar)
        self.menuGeoChem.setObjectName('menuGeoChem')

        self.menuStructure = QtWidgets.QMenu(self.menubar)
        self.menuStructure.setObjectName('menuStructure')

        self.menuCalc = QtWidgets.QMenu(self.menubar)
        self.menuCalc.setObjectName('menuCalc')

        self.menuStat = QtWidgets.QMenu(self.menubar)
        self.menuStat.setObjectName('menuStat')

        self.menuMore = QtWidgets.QMenu(self.menubar)
        self.menuMore.setObjectName('menuMore')

        self.menuHelp = QtWidgets.QMenu(self.menubar)
        self.menuHelp.setObjectName('menuHelp')

        self.menuLanguage = QtWidgets.QMenu(self.menubar)
        self.menuLanguage.setObjectName('menuLanguage')

        self.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(self)
        self.statusbar.setObjectName('statusbar')
        self.setStatusBar(self.statusbar)

        self.actionOpen = QtWidgets.QAction(
            QIcon(LocationOfMySelf + '/open.png'), u'Open', self)
        self.actionOpen.setObjectName('actionOpen')
        self.actionOpen.setShortcut('Ctrl+O')

        self.actionSave = QtWidgets.QAction(
            QIcon(LocationOfMySelf + '/save.png'), u'Save', self)
        self.actionSave.setObjectName('actionSave')
        self.actionSave.setShortcut('Ctrl+S')

        self.actionCnWeb = QtWidgets.QAction(
            QIcon(LocationOfMySelf + '/forum.png'), u'Chinese Forum', self)
        self.actionCnWeb.setObjectName('actionCnWeb')

        self.actionEnWeb = QtWidgets.QAction(
            QIcon(LocationOfMySelf + '/forum.png'), u'English Forum', self)
        self.actionEnWeb.setObjectName('actionEnWeb')

        self.actionGoGithub = QtWidgets.QAction(
            QIcon(LocationOfMySelf + '/github.png'), u'GitHub', self)
        self.actionGoGithub.setObjectName('actionGoGithub')

        self.actionVersionCheck = QtWidgets.QAction(
            QIcon(LocationOfMySelf + '/update.png'), u'Version', self)
        self.actionVersionCheck.setObjectName('actionVersionCheck')

        self.actionCnS = QtWidgets.QAction(
            QIcon(LocationOfMySelf + '/cns.png'), u'Simplified Chinese', self)
        self.actionCnS.setObjectName('actionCnS')

        self.actionCnT = QtWidgets.QAction(
            QIcon(LocationOfMySelf + '/cnt.png'), u'Traditional Chinese', self)
        self.actionCnT.setObjectName('actionCnT')

        self.actionEn = QtWidgets.QAction(QIcon(LocationOfMySelf + '/en.png'),
                                          u'English', self)
        self.actionEn.setObjectName('actionEn')

        self.actionLoadLanguage = QtWidgets.QAction(
            QIcon(LocationOfMySelf + '/lang.png'), u'Load Language', self)
        self.actionLoadLanguage.setObjectName('actionLoadLanguage')

        self.actionTAS = QtWidgets.QAction(QIcon(LocationOfMySelf + '/xy.png'),
                                           u'TAS', self)
        self.actionTAS.setObjectName('actionTAS')

        self.actionTrace = QtWidgets.QAction(
            QIcon(LocationOfMySelf + '/spider2.png'), u'Trace', self)
        self.actionTrace.setObjectName('actionTrace')

        self.actionRee = QtWidgets.QAction(
            QIcon(LocationOfMySelf + '/spider2.png'), u'Ree', self)
        self.actionRee.setObjectName('actionRee')

        self.actionPearce = QtWidgets.QAction(
            QIcon(LocationOfMySelf + '/spider.png'), u'Pearce', self)
        self.actionPearce.setObjectName('actionPearce')

        self.actionHarker = QtWidgets.QAction(
            QIcon(LocationOfMySelf + '/spider.png'), u'Harker', self)
        self.actionHarker.setObjectName('actionHarker')

        self.actionStereo = QtWidgets.QAction(
            QIcon(LocationOfMySelf + '/structure.png'), u'Stereo', self)
        self.actionStereo.setObjectName('actionStereo')

        self.actionRose = QtWidgets.QAction(
            QIcon(LocationOfMySelf + '/rose.png'), u'Rose', self)
        self.actionRose.setObjectName('actionRose')

        self.actionQFL = QtWidgets.QAction(
            QIcon(LocationOfMySelf + '/triangular.png'), u'QFL', self)
        self.actionQFL.setObjectName('actionQFL')

        self.actionQmFLt = QtWidgets.QAction(
            QIcon(LocationOfMySelf + '/triangular.png'), u'QmFLt', self)
        self.actionQmFLt.setObjectName('actionQmFLt')

        self.actionCIPW = QtWidgets.QAction(
            QIcon(LocationOfMySelf + '/calc.png'), u'CIPW', self)
        self.actionCIPW.setObjectName('actionCIPW')

        self.actionZirconCe = QtWidgets.QAction(
            QIcon(LocationOfMySelf + '/calc.png'), u'ZirconCe', self)
        self.actionZirconCe.setObjectName('actionZirconCe')

        self.actionZirconTiTemp = QtWidgets.QAction(
            QIcon(LocationOfMySelf + '/temperature.png'), u'ZirconTiTemp',
            self)
        self.actionZirconTiTemp.setObjectName('actionZirconTiTemp')

        self.actionRutileZrTemp = QtWidgets.QAction(
            QIcon(LocationOfMySelf + '/temperature.png'), u'RutileZrTemp',
            self)
        self.actionRutileZrTemp.setObjectName('actionRutileZrTemp')

        self.actionCluster = QtWidgets.QAction(
            QIcon(LocationOfMySelf + '/cluster.png'), u'Cluster', self)
        self.actionCluster.setObjectName('actionCluster')

        self.actionMultiDimention = QtWidgets.QAction(
            QIcon(LocationOfMySelf + '/multiple.png'), u'MultiDimention', self)
        self.actionMultiDimention.setObjectName('actionMultiDimention')

        self.actionQAPF = QtWidgets.QAction(
            QIcon(LocationOfMySelf + '/qapf.png'), u'QAPF', self)
        self.actionQAPF.setObjectName('actionQAPF')

        self.actionMudStone = QtWidgets.QAction(
            QIcon(LocationOfMySelf + '/triangular.png'), u'MudStone', self)
        self.actionMudStone.setObjectName('actionMudStone')

        self.actionXY = QtWidgets.QAction(QIcon(LocationOfMySelf + '/xy.png'),
                                          u'X-Y', self)
        self.actionXY.setObjectName('actionXY')

        self.actionXYZ = QtWidgets.QAction(
            QIcon(LocationOfMySelf + '/triangular.png'), u'Triangular', self)
        self.actionXYZ.setObjectName('actionXYZ')

        self.actionMagic = QtWidgets.QAction(
            QIcon(LocationOfMySelf + '/magic.png'), u'Magic', self)
        self.actionMagic.setObjectName('actionMagic')

        self.menuFile.addAction(self.actionOpen)
        self.menuFile.addAction(self.actionSave)

        self.menuGeoChem.addAction(self.actionTAS)
        self.menuGeoChem.addAction(self.actionTrace)
        self.menuGeoChem.addAction(self.actionRee)
        self.menuGeoChem.addAction(self.actionPearce)
        self.menuGeoChem.addAction(self.actionHarker)

        self.menuStructure.addAction(self.actionStereo)
        self.menuStructure.addAction(self.actionRose)
        self.menuStructure.addAction(self.actionQFL)
        self.menuStructure.addAction(self.actionQmFLt)

        self.menuCalc.addAction(self.actionCIPW)
        self.menuCalc.addAction(self.actionZirconCe)
        self.menuCalc.addAction(self.actionZirconTiTemp)
        self.menuCalc.addAction(self.actionRutileZrTemp)

        self.menuStat.addAction(self.actionCluster)
        self.menuStat.addAction(self.actionMultiDimention)

        self.menuMore.addAction(self.actionMudStone)
        self.menuMore.addAction(self.actionQAPF)

        self.menuMore.addAction(self.actionXY)
        self.menuMore.addAction(self.actionXYZ)
        self.menuMore.addAction(self.actionMagic)

        self.menuHelp.addAction(self.actionCnWeb)
        self.menuHelp.addAction(self.actionEnWeb)

        self.menuHelp.addAction(self.actionGoGithub)
        self.menuHelp.addAction(self.actionVersionCheck)

        self.menuLanguage.addAction(self.actionCnS)
        self.menuLanguage.addAction(self.actionCnT)
        self.menuLanguage.addAction(self.actionEn)
        self.menuLanguage.addAction(self.actionLoadLanguage)

        self.menubar.addAction(self.menuFile.menuAction())
        self.menubar.addSeparator()

        self.menubar.addAction(self.menuGeoChem.menuAction())
        self.menubar.addSeparator()

        self.menubar.addAction(self.menuStructure.menuAction())
        self.menubar.addSeparator()

        self.menubar.addAction(self.menuCalc.menuAction())
        self.menubar.addSeparator()

        self.menubar.addAction(self.menuStat.menuAction())
        self.menubar.addSeparator()

        self.menubar.addAction(self.menuMore.menuAction())
        self.menubar.addSeparator()

        self.menubar.addAction(self.menuHelp.menuAction())
        self.menubar.addSeparator()

        self.menubar.addAction(self.menuLanguage.menuAction())
        self.menubar.addSeparator()

        self.actionTAS.triggered.connect(self.TAS)
        self.actionTrace.triggered.connect(self.Trace)
        self.actionRee.triggered.connect(self.REE)
        self.actionPearce.triggered.connect(self.Pearce)
        self.actionHarker.triggered.connect(self.Harker)
        self.actionQAPF.triggered.connect(self.QAPF)

        self.actionStereo.triggered.connect(self.Stereo)
        self.actionRose.triggered.connect(self.Rose)
        self.actionQFL.triggered.connect(self.QFL)
        self.actionQmFLt.triggered.connect(self.QmFLt)

        self.actionCIPW.triggered.connect(self.CIPW)
        self.actionZirconCe.triggered.connect(self.ZirconCe)
        self.actionZirconTiTemp.triggered.connect(self.ZirconTiTemp)
        self.actionRutileZrTemp.triggered.connect(self.RutileZrTemp)
        self.actionCluster.triggered.connect(self.Cluster)
        self.actionMultiDimention.triggered.connect(self.MultiDimension)

        self.actionOpen.triggered.connect(self.getDataFile)
        self.actionSave.triggered.connect(self.saveDataFile)

        self.actionCnWeb.triggered.connect(self.goCnBBS)
        self.actionEnWeb.triggered.connect(self.goEnBBS)
        self.actionGoGithub.triggered.connect(self.goGitHub)
        self.actionVersionCheck.triggered.connect(self.checkVersion)

        self.actionCnS.triggered.connect(self.to_ChineseS)
        self.actionCnT.triggered.connect(self.to_ChineseT)
        self.actionEn.triggered.connect(self.to_English)
        self.actionLoadLanguage.triggered.connect(self.to_LoadLanguage)

        self.actionXY.triggered.connect(self.XY)
        self.actionXYZ.triggered.connect(self.XYZ)
        self.actionMagic.triggered.connect(self.Magic)
        self.actionMudStone.triggered.connect(self.Mud)

        self.pushButtonOpen.clicked.connect(self.getDataFile)
        self.pushButtonSave.clicked.connect(self.saveDataFile)
        self.pushButtonSort.clicked.connect(self.SetUpDataFile)
        self.pushButtonQuit.clicked.connect(qApp.quit)
        self.pushButtonUpdate.clicked.connect(self.checkVersion)

        self.actionQuit = QtWidgets.QAction('Quit', self)
        self.actionQuit.setShortcut('Ctrl+Q')
        self.actionQuit.setObjectName('actionQuit')
        self.actionQuit.triggered.connect(qApp.quit)

        self.pushButtonOpen.setText(_translate('MainWindow', u'Open Data'))
        self.pushButtonSave.setText(_translate('MainWindow', u'Save Data'))
        self.pushButtonSort.setText(_translate('MainWindow', u'Set Format'))
        self.pushButtonQuit.setText(_translate('MainWindow', u'Quit App'))
        self.pushButtonUpdate.setText(_translate('MainWindow',
                                                 u'Check Update'))

        self.pushButtonOpen.setIcon(QtGui.QIcon(LocationOfMySelf +
                                                '/open.png'))
        self.pushButtonSave.setIcon(QtGui.QIcon(LocationOfMySelf +
                                                '/save.png'))
        self.pushButtonSort.setIcon(QtGui.QIcon(LocationOfMySelf + '/set.png'))
        self.pushButtonQuit.setIcon(QtGui.QIcon(LocationOfMySelf +
                                                '/quit.png'))
        self.pushButtonUpdate.setIcon(
            QtGui.QIcon(LocationOfMySelf + '/update.png'))

        self.menuFile.setTitle(_translate('MainWindow', u'Data File'))

        self.menuGeoChem.setTitle(_translate('MainWindow', u'Geochemistry'))

        self.menuStructure.setTitle(_translate('MainWindow', u'Structure'))

        self.menuCalc.setTitle(_translate('MainWindow', u'Calculation'))

        self.menuStat.setTitle(_translate('MainWindow', u'Statistics'))

        self.menuMore.setTitle(_translate('MainWindow', u'Others'))

        self.menuHelp.setTitle(_translate('MainWindow', u'Help'))

        self.menuLanguage.setTitle(_translate('MainWindow', u'Language'))

        self.actionOpen.setText(_translate('MainWindow', u'Open Data'))
        self.actionSave.setText(_translate('MainWindow', u'Save Data'))

        self.actionTAS.setText(_translate('MainWindow', u'TAS'))
        self.actionTrace.setText(_translate('MainWindow', u'Trace'))
        self.actionRee.setText(_translate('MainWindow', u'REE'))
        self.actionPearce.setText(_translate('MainWindow', u'Pearce'))
        self.actionHarker.setText(_translate('MainWindow', u'Harker'))

        self.actionQAPF.setText(_translate('MainWindow', u'QAPF'))

        self.actionStereo.setText(_translate('MainWindow', u'Stereo'))
        self.actionRose.setText(_translate('MainWindow', u'Rose'))
        self.actionQFL.setText(_translate('MainWindow', u'QFL'))
        self.actionQmFLt.setText(_translate('MainWindow', u'QmFLt'))

        self.actionCIPW.setText(_translate('MainWindow', u'CIPW'))

        self.actionZirconCe.setText(_translate('MainWindow', u'ZirconCe'))
        self.actionZirconTiTemp.setText(
            _translate('MainWindow', u'ZirconTiTemp'))
        self.actionRutileZrTemp.setText(
            _translate('MainWindow', u'RutileZrTemp'))
        self.actionCluster.setText(_translate('MainWindow', u'Cluster'))
        self.actionMultiDimention.setText(
            _translate('MainWindow', u'MultiDimention'))

        self.actionXY.setText(_translate('MainWindow', u'X-Y plot'))
        self.actionXYZ.setText(_translate('MainWindow', u'X-Y-Z plot'))

        self.actionMagic.setText(_translate('MainWindow', u'Magic'))

        self.actionMudStone.setText(_translate('MainWindow', u'Sand-Silt-Mud'))

        self.actionVersionCheck.setText(_translate('MainWindow', u'Version'))
        self.actionCnWeb.setText(_translate('MainWindow', u'Chinese Forum'))
        self.actionEnWeb.setText(_translate('MainWindow', u'English Forum'))
        self.actionGoGithub.setText(_translate('MainWindow', u'Github'))

        self.actionCnS.setText(_translate('MainWindow', u'Simplified Chinese'))
        self.actionCnT.setText(_translate('MainWindow',
                                          u'Traditional Chinese'))
        self.actionEn.setText(_translate('MainWindow', u'English'))
        self.actionLoadLanguage.setText(
            _translate('MainWindow', u'Load Language'))

        self.ReadConfig()

        self.trans.load(LocationOfMySelf + '/' + self.Language)
        self.app.installTranslator(self.trans)
        self.retranslateUi()

    def retranslateUi(self):

        _translate = QtCore.QCoreApplication.translate

        self.talk = _translate(
            'MainWindow',
            'You are using GeoPython ') + version + '\n' + _translate(
                'MainWindow', 'released on ') + date + '\n'

        self.pushButtonOpen.setText(_translate('MainWindow', u'Open Data'))
        self.pushButtonSave.setText(_translate('MainWindow', u'Save Data'))
        self.pushButtonSort.setText(_translate('MainWindow', u'Set Format'))
        self.pushButtonQuit.setText(_translate('MainWindow', u'Quit App'))
        self.pushButtonUpdate.setText(_translate('MainWindow',
                                                 u'Check Update'))

        self.menuFile.setTitle(_translate('MainWindow', u'Data File'))

        self.menuGeoChem.setTitle(_translate('MainWindow', u'Geochemistry'))

        self.menuStructure.setTitle(_translate('MainWindow', u'Structure'))

        self.menuCalc.setTitle(_translate('MainWindow', u'Calculation'))

        self.menuStat.setTitle(_translate('MainWindow', u'Statistics'))

        self.menuMore.setTitle(_translate('MainWindow', u'Others'))

        self.menuHelp.setTitle(_translate('MainWindow', u'Help'))
        self.menuLanguage.setTitle(_translate('MainWindow', u'Language'))

        self.actionOpen.setText(_translate('MainWindow', u'Open Data'))
        self.actionSave.setText(_translate('MainWindow', u'Save Data'))

        self.actionTAS.setText(_translate('MainWindow', u'TAS'))
        self.actionTrace.setText(_translate('MainWindow', u'Trace'))
        self.actionRee.setText(_translate('MainWindow', u'REE'))
        self.actionPearce.setText(_translate('MainWindow', u'Pearce'))
        self.actionHarker.setText(_translate('MainWindow', u'Harker'))

        self.actionQAPF.setText(_translate('MainWindow', u'QAPF'))

        self.actionStereo.setText(_translate('MainWindow', u'Stereo'))
        self.actionRose.setText(_translate('MainWindow', u'Rose'))
        self.actionQFL.setText(_translate('MainWindow', u'QFL'))
        self.actionQmFLt.setText(_translate('MainWindow', u'QmFLt'))

        self.actionCIPW.setText(_translate('MainWindow', u'CIPW'))

        self.actionZirconCe.setText(_translate('MainWindow', u'ZirconCe'))
        self.actionZirconTiTemp.setText(
            _translate('MainWindow', u'ZirconTiTemp'))
        self.actionRutileZrTemp.setText(
            _translate('MainWindow', u'RutileZrTemp'))
        self.actionCluster.setText(_translate('MainWindow', u'Cluster'))
        self.actionMultiDimention.setText(
            _translate('MainWindow', u'MultiDimention'))

        self.actionXY.setText(_translate('MainWindow', u'X-Y plot'))
        self.actionXYZ.setText(_translate('MainWindow', u'X-Y-Z plot'))

        self.actionMagic.setText(_translate('MainWindow', u'Magic'))

        self.actionMudStone.setText(_translate('MainWindow', u'Sand-Silt-Mud'))

        self.actionVersionCheck.setText(
            _translate('MainWindow', u'Check Update'))
        self.actionCnWeb.setText(_translate('MainWindow', u'Chinese Forum'))
        self.actionEnWeb.setText(_translate('MainWindow', u'English Forum'))
        self.actionGoGithub.setText(_translate('MainWindow', u'Github'))

        self.actionCnS.setText(_translate('MainWindow', u'Simplified Chinese'))
        self.actionCnT.setText(_translate('MainWindow',
                                          u'Traditional Chinese'))
        self.actionEn.setText(_translate('MainWindow', u'English'))
        self.actionLoadLanguage.setText(
            _translate('MainWindow', u'Load Language'))

    def resizeEvent(self, evt=None):

        w = self.width()
        h = self.height()
        '''
        if h<=360:
            h=360
            self.resize(w,h)
        if w<=640:
            w = 640
            self.resize(w, h)
        '''

        step = (w * 94 / 100) / 5
        foot = h * 3 / 48

        #if foot<=10: foot=10
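        # Layout sketch: the table fills roughly the top 80% of the window
        # (y from h/48 with height h*38/48), while the five buttons share one
        # row at the bottom, each about 1/5 of 94% of the width with 1% gaps.
        # Note: in Python 3 these ratios yield floats; depending on the PyQt
        # version, explicit int() conversion may be needed for QRect.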

        self.tableView.setGeometry(
            QtCore.QRect(w / 100, h / 48, w * 98 / 100, h * 38 / 48))

        self.pushButtonOpen.setGeometry(
            QtCore.QRect(w / 100, h * 40 / 48, step, foot))

        self.pushButtonSave.setGeometry(
            QtCore.QRect(2 * w / 100 + step, h * 40 / 48, step, foot))

        self.pushButtonSort.setGeometry(
            QtCore.QRect(3 * w / 100 + step * 2, h * 40 / 48, step, foot))

        self.pushButtonUpdate.setGeometry(
            QtCore.QRect(4 * w / 100 + step * 3, h * 40 / 48, step, foot))

        self.pushButtonQuit.setGeometry(
            QtCore.QRect(5 * w / 100 + step * 4, h * 40 / 48, step, foot))

    def getfile(self):
        _translate = QtCore.QCoreApplication.translate
        fileName, filetype = QFileDialog.getOpenFileName(
            self, _translate('MainWindow', u'Choose Data File'), '~/',
            'All Files (*);;Text Files (*.txt)')  # File extension filter; filters are separated by double semicolons

    def goGitHub(self):
        webbrowser.open('https://github.com/chinageology/GeoPython/wiki')

    def goCnBBS(self):
        webbrowser.open('http://bbs.geopython.com/-f2.html')

    def goEnBBS(self):
        webbrowser.open('http://bbs.geopython.com/English-Forum-f3.html')

    def checkVersion(self):

        #reply = QMessageBox.information(self, 'Version', self.talk)

        _translate = QtCore.QCoreApplication.translate

        url = 'https://raw.githubusercontent.com/chinageology/GeoPython/master/SourceCode/CustomClass.py'

        r = 0
        try:
            r = requests.get(url, allow_redirects=True)
            r.raise_for_status()
            NewVersion = 'self.target' + r.text.splitlines()[0]
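            # The remote CustomClass.py is expected to start with a line like
            # targetversion = '...'; prefixing it with 'self.' and exec-ing it
            # below stores the value as self.targetversion.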

        except requests.exceptions.ConnectionError as err:
            print(err)
            r = 0
            buttonReply = QMessageBox.information(
                self, _translate('MainWindow', u'NetWork Error'),
                _translate('MainWindow', u'Net work unavailable.'))
            NewVersion = "targetversion = '0'"

        except requests.exceptions.HTTPError as err:
            print(err)
            r = 0
            buttonReply = QMessageBox.information(
                self, _translate('MainWindow', u'NetWork Error'),
                _translate('MainWindow', u'Net work unavailable.'))
            NewVersion = "targetversion = '0'"

        exec(NewVersion)
        print('web is', self.targetversion)
        print(NewVersion)

        self.talk = _translate(
            'MainWindow',
            'Version Online is ') + self.targetversion + '\n' + _translate(
                'MainWindow',
                'You are using GeoPython ') + version + '\n' + _translate(
                    'MainWindow', 'released on ') + date + '\n'

        if r != 0:

            print('now is', version)
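            # NOTE: this is a plain string comparison, so e.g. '0.10' < '0.9';
            # comparing version tuples would be more robust.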
            if (version < self.targetversion):

                buttonReply = QMessageBox.question(
                    self, _translate('MainWindow', u'Version'),
                    self.talk + _translate(
                        'MainWindow',
                        'New version available.\n Download and update?'),
                    QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
                if buttonReply == QMessageBox.Yes:
                    print('Yes clicked.')
                    webbrowser.open(
                        'https://github.com/chinageology/GeoPython/blob/master/Download.md'
                    )
                else:
                    print('No clicked.')
            else:
                buttonReply = QMessageBox.information(
                    self, _translate('MainWindow', u'Version'), self.talk +
                    _translate('MainWindow', 'This is the latest version.'))

    def Update(self):
        webbrowser.open(
            'https://github.com/chinageology/GeoPython/wiki/Download')

    def ReadConfig(self):
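        # config.ini is expected to hold a single Python assignment such as
        # Language = 'cns'; it is executed below with a 'self.' prefix so that
        # self.Language ends up set on the window.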
        if os.path.isfile('config.ini'):

            try:
                with open('config.ini', 'rt') as f:
                    try:
                        data = f.read()
                    except Exception:
                        data = 'Language = \'en\''

                    print(data)
                    try:
                        print("self." + data)
                        exec("self." + data)
                    except Exception:
                        pass
                    print(self.Language)

            except (IOError, OSError):
                pass

    def WriteConfig(self, text=LocationOfMySelf + '/en'):
        try:
            with open('config.ini', 'wt') as f:
                f.write(text)
        except (IOError, OSError):
            pass

    def to_ChineseS(self):

        self.trans.load(LocationOfMySelf + '/cns')
        self.app.installTranslator(self.trans)
        self.retranslateUi()

        self.WriteConfig('Language = \'cns\'')

    def to_ChineseT(self):

        self.trans.load(LocationOfMySelf + '/cnt')
        self.app.installTranslator(self.trans)
        self.retranslateUi()

        self.WriteConfig('Language = \'cnt\'')

    def to_English(self):

        self.trans.load(LocationOfMySelf + '/en')
        self.app.installTranslator(self.trans)
        self.retranslateUi()
        self.WriteConfig('Language = \'en\'')

    def to_LoadLanguage(self):

        _translate = QtCore.QCoreApplication.translate
        fileName, filetype = QFileDialog.getOpenFileName(
            self, _translate('MainWindow', u'Choose Language File'), '~/',
            'Language Files (*.qm)')  # File extension filter; filters are separated by double semicolons

        print(fileName)

        self.trans.load(fileName)
        self.app.installTranslator(self.trans)
        self.retranslateUi()

    def ErrorEvent(self):

        _translate = QtCore.QCoreApplication.translate
        reply = QMessageBox.information(
            self, _translate('MainWindow', 'Warning'),
            _translate(
                'MainWindow',
                'Your data does not match this plot.\n Are some items missing?\n Are there blanks in item names?\n Are there non-numerical values?'
            ))

    def SetUpDataFile(self):

        _translate = QtCore.QCoreApplication.translate
        flag = 0
        ItemsAvailable = self.model._df.columns.values.tolist()

        ItemsToTest = [
            'Label', 'Marker', 'Color', 'Size', 'Alpha', 'Style', 'Width'
        ]

        LabelList = []
        MarkerList = []
        ColorList = []
        SizeList = []
        AlphaList = []
        StyleList = []
        WidthList = []

        for i in range(len(self.model._df)):
            LabelList.append('Group1')
            MarkerList.append('o')
            ColorList.append('red')
            SizeList.append(10)
            AlphaList.append(0.6)
            StyleList.append('-')
            WidthList.append(1)

        data = {
            'Label': LabelList,
            'Marker': MarkerList,
            'Color': ColorList,
            'Size': SizeList,
            'Alpha': AlphaList,
            'Style': StyleList,
            'Width': WidthList
        }
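        # For every required style column that is missing from the loaded
        # data, prepend a column of default values so the plotting routines
        # can rely on Label/Marker/Color/Size/Alpha/Style/Width being present.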

        for i in ItemsToTest:
            if i not in ItemsAvailable:
                # print(i)
                flag = flag + 1
                tmpdftoadd = pd.DataFrame({i: data[i]})

                self.model._df = pd.concat([tmpdftoadd, self.model._df],
                                           axis=1)

        self.model = PandasModel(self.model._df)

        self.tableView.setModel(self.model)

        if flag == 0:
            reply = QMessageBox.information(
                self, _translate('MainWindow', 'Ready'),
                _translate('MainWindow',
                           'Everything fine and no need to set up.'))

        else:
            reply = QMessageBox.information(
                self, _translate('MainWindow', 'Ready'),
                _translate(
                    'MainWindow',
                    'Items added, Modify in the Table to set up details.'))

    def getDataFile(self):
        _translate = QtCore.QCoreApplication.translate
        DataFileInput, filetype = QFileDialog.getOpenFileName(
            self, _translate('MainWindow', u'Choose Data File'), '~/',
            'Excel Files (*.xlsx);;Excel 2003 Files (*.xls);;CSV Files (*.csv)'
        )  # File extension filter; filters are separated by double semicolons

        # #print(DataFileInput,filetype)

        if DataFileInput == '':
            # Dialog was cancelled; keep the current data untouched.
            return

        if 'csv' in DataFileInput:
            self.raw = pd.read_csv(DataFileInput)
        elif 'xls' in DataFileInput:
            self.raw = pd.read_excel(DataFileInput)
        # #print(self.raw)

        self.model = PandasModel(self.raw)
        self.tableView.setModel(self.model)

    def saveDataFile(self):

        # if self.model._changed == True:
        # print('changed')
        # print(self.model._df)

        _translate = QtCore.QCoreApplication.translate
        DataFileOutput, ok2 = QFileDialog.getSaveFileName(
            self, _translate('MainWindow', u'Save Data File'), 'C:/',
            'Excel Files (*.xlsx);;CSV Files (*.csv)')  # Save the exported data file

        if (DataFileOutput != ''):

            if ('csv' in DataFileOutput):
                self.model._df.to_csv(DataFileOutput,
                                      sep=',',
                                      encoding='utf-8')

            elif ('xls' in DataFileOutput):
                self.model._df.to_excel(DataFileOutput, encoding='utf-8')

    def CIPW(self):
        self.cipwpop = CIPW(df=self.model._df)
        try:
            self.cipwpop.CIPW()
            self.cipwpop.show()
        except (KeyError):
            self.ErrorEvent()

    def ZirconTiTemp(self):
        self.ztpop = ZirconTiTemp(df=self.model._df)
        try:
            self.ztpop.ZirconTiTemp()
            self.ztpop.show()
        except (KeyError):
            self.ErrorEvent()

    def RutileZrTemp(self):
        self.rzpop = RutileZrTemp(df=self.model._df)
        try:
            self.rzpop.RutileZrTemp()
            self.rzpop.show()
        except (KeyError):
            self.ErrorEvent()

    def Cluster(self):

        self.clusterpop = Cluster(df=self.model._df)
        try:
            self.clusterpop.Cluster()
            self.clusterpop.show()
        except (KeyError):
            pass
            # self.ErrorEvent()

    def TAS(self):

        self.pop = TAS(df=self.model._df)
        try:
            self.pop.TAS()
            self.pop.show()
        except (KeyError):
            self.ErrorEvent()

    def REE(self):
        self.reepop = REE(df=self.model._df)
        try:
            self.reepop.REE()
            self.reepop.show()
        except (KeyError):
            self.ErrorEvent()

    def Trace(self):
        self.tracepop = Trace(df=self.model._df)
        try:
            self.tracepop.Trace()
            self.tracepop.show()
        except (KeyError):
            self.ErrorEvent()

    def Pearce(self):
        self.pearcepop = Pearce(df=self.model._df)

        try:
            self.pearcepop.Pearce()
            self.pearcepop.show()
        except (KeyError):
            self.ErrorEvent()

    def Harker(self):
        self.harkerpop = Harker(df=self.model._df)
        try:
            self.harkerpop.Harker()
            self.harkerpop.show()
        except (KeyError):
            self.ErrorEvent()

    def Stereo(self):
        self.stereopop = Stereo(df=self.model._df)
        try:
            self.stereopop.Stereo()
            self.stereopop.show()
        except (KeyError):
            self.ErrorEvent()

    def Rose(self):
        self.rosepop = Rose(df=self.model._df)
        try:
            self.rosepop.Rose()
            self.rosepop.show()
        except (KeyError):
            self.ErrorEvent()

    def QFL(self):
        self.qflpop = QFL(df=self.model._df)
        try:
            self.qflpop.Tri()
            self.qflpop.show()
        except (KeyError):
            self.ErrorEvent()

    def QmFLt(self):
        self.qmfltpop = QmFLt(df=self.model._df)
        try:
            self.qmfltpop.Tri()
            self.qmfltpop.show()
        except (KeyError):
            self.ErrorEvent()

    def QAPF(self):
        self.qapfpop = QAPF(df=self.model._df)
        try:
            self.qapfpop.QAPF()
            self.qapfpop.show()
        except (KeyError):
            self.ErrorEvent()

    def Mud(self):
        self.mudpop = MudStone(df=self.model._df)
        try:
            self.mudpop.Tri()
            self.mudpop.show()
        except (KeyError):
            self.ErrorEvent()

    def ZirconCe(self):
        # print('Opening a new popup window...')
        self.zirconpop = ZirconCe(df=self.model._df)
        try:
            self.zirconpop.MultiBallard()
            self.zirconpop.show()
        except (KeyError, ValueError):
            self.ErrorEvent()

    def XY(self):
        self.xypop = XY(df=self.model._df)
        try:
            self.xypop.Magic()
            self.xypop.show()
        except (KeyError):
            self.ErrorEvent()

    def XYZ(self):
        self.xyzpop = XYZ(df=self.model._df)
        try:
            self.xyzpop.Magic()
            self.xyzpop.show()
        except (KeyError):
            self.ErrorEvent()

    def Magic(self):
        self.magicpop = Magic(df=self.model._df)
        try:
            self.magicpop.Magic()
            self.magicpop.show()
        except (KeyError):
            self.ErrorEvent()

    def MultiDimension(self):
        self.mdpop = MultiDimension(df=self.model._df)
        try:
            self.mdpop.Magic()
            self.mdpop.show()
        except (KeyError):
            self.ErrorEvent()

    def Tri(self):
        pass

    def Auto(self):
        pass
Example #17
enableMongo = args.mongodb
defproduceraPrvtKey = args.defproducera_prvt_key
defproducerbPrvtKey = args.defproducerb_prvt_key
dumpErrorDetails = args.dump_error_details
keepLogs = args.keep_logs
dontLaunch = args.dont_launch
dontKill = args.leave_running
prodCount = args.prod_count
onlyBios = args.only_bios
killAll = args.clean_run
sanityTest = args.sanity_test

Utils.Debug = debug
localTest = True if server == TestHelper.LOCAL_HOST else False
cluster = Cluster(walletd=True,
                  enableMongo=enableMongo,
                  defproduceraPrvtKey=defproduceraPrvtKey,
                  defproducerbPrvtKey=defproducerbPrvtKey)
walletMgr = WalletMgr(True)
testSuccessful = False
killEnuInstances = not dontKill
killWallet = not dontKill
dontBootstrap = sanityTest

WalletdName = "enuwallet"
ClientName = "enucli"
timeout = .5 * 12 * 2 + 60  # time for finalization with 1 producer + 60 seconds padding
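# i.e. 0.5 s per block * 12 * 2 = 12 s until irreversibility with a single producer, plus 60 s of padding = 72 s total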
Utils.setIrreversibleTimeout(timeout)

try:
    TestHelper.printSystemInfo("BEGIN")
    Print("SERVER: %s" % (server))
Example #18
class MyTestCase(unittest.TestCase):
    global DATASET
    DATASET = "../dataSet/DSclustering/DS_3Clusters_999Points.txt"

    global point
    point = Point(np.array([2, 2]))

    global listPoints
    listPoints = [
        Point(np.array([1, 1])),
        Point(np.array([1, 3])),
        Point(np.array([3, 1])),
        Point(np.array([3, 3]))
    ]
    global cluster
    cluster = Cluster(listPoints, len(listPoints))
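    # The four points form a square centered on (2, 2), so the cluster mean is
    # (2, 2) and the standard deviation is identical in both dimensions.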

    # Check point dimension

    def test_dimension_point(self):
        self.assertEqual(point.dimension, 2)
        self.assertNotEqual(point.dimension, 1)

    # Check cluster dimension
    def test_dimension_cluster(self):
        self.assertEqual(cluster.dimension, 2)
        self.assertNotEqual(cluster.dimension, 3)

    # Check mean and std calculation
    def test_mean_std_cluster(self):
        mean = cluster.mean
        std = cluster.std
        self.assertEqual(mean[0], 2)
        self.assertEqual(mean[1], 2)
        self.assertEqual(std[0] - std[1], 0)

    # Check read data set file
    def test_read_file_points(self):
        points = EM.dataset_to_list_points(DATASET)
        self.assertTrue(len(points) > 0)
        self.assertTrue(points[0].dimension == 2)

    # Check probabilityCluster
    def test_get_probability_cluster(self):
        self.assertEqual(
            EM.get_probability_cluster(point, Cluster([point], 1)), 1)

    # Check cluster's method
    def test_cluster(self):
        cluster_test = Cluster([point], 1)
        self.assertEqual(cluster_test.dimension, 2)
        self.assertFalse(cluster_test.converge)
        np.testing.assert_array_equal(cluster_test.mean, np.array([2, 2]))
        np.testing.assert_array_equal(cluster_test.std, np.array([1, 1]))
        self.assertEqual(cluster_test.cluster_probability, 1)
        cluster_test.update_cluster(listPoints, 4)
        self.assertEqual(cluster_test.dimension, 2)
        self.assertTrue(cluster_test.converge)
        np.testing.assert_array_equal(cluster_test.mean, np.array([2, 2]))
        self.assertEqual(cluster_test.std[0] - cluster_test.std[1], 0)
        self.assertEqual(cluster_test.cluster_probability, 1)
Example #19
defproducerbPrvtKey = args.defproducerb_prvt_key
dumpErrorDetails = args.dump_error_details
keepLogs = args.keep_logs
dontLaunch = args.dont_launch
dontKill = args.leave_running
prodCount = 2
onlyBios = args.only_bios
killAll = args.clean_run
sanityTest = args.sanity_test
walletPort = args.wallet_port

Utils.Debug = debug
localTest = True
cluster = Cluster(host=server,
                  port=port,
                  walletd=True,
                  defproduceraPrvtKey=defproduceraPrvtKey,
                  defproducerbPrvtKey=defproducerbPrvtKey)
walletMgr = WalletMgr(True, port=walletPort)
testSuccessful = False
killEosInstances = not dontKill
killWallet = not dontKill
dontBootstrap = sanityTest

WalletdName = Utils.EosWalletName
ClientName = "cleos"

try:
    TestHelper.printSystemInfo("BEGIN prod_preactivation_test.py")
    cluster.setWalletMgr(walletMgr)
    Print("SERVER: %s" % (server))
Example #20
 def test_get_probability_cluster(self):
     self.assertEqual(
         EM.get_probability_cluster(point, Cluster([point], 1)), 1)
Example #21
def create_system(options, system, piobus, dma_devices, ruby_system):

    if not buildEnv['GPGPU_SIM']:
        m5.util.panic("This script requires GPGPU-Sim integration to be built.")

    # Run the protocol script to setup CPU cluster, directory and DMA
    (all_sequencers, dir_cntrls, dma_cntrls, cpu_cluster) = \
                                        VI_hammer.create_system(options,
                                                                system,
                                                                piobus,
                                                                dma_devices,
                                                                ruby_system)

    cpu_cntrl_count = len(cpu_cluster) + len(dir_cntrls)

    #
    # Build GPU cluster
    #
    gpu_cluster = Cluster(intBW = 32, extBW = 32)
    gpu_cluster.disableConnectToParent()

    l2_bits = int(math.log(options.num_l2caches, 2))
    block_size_bits = int(math.log(options.cacheline_size, 2))
    # This represents the L1 to L2 interconnect latency
    # NOTE! This latency is in Ruby (cache) cycles, not SM cycles
    per_hop_interconnect_latency = 45 # ~15 GPU cycles
    num_dance_hall_hops = int(math.log(options.num_sc, 2))
    if num_dance_hall_hops == 0:
        num_dance_hall_hops = 1
    l1_to_l2_noc_latency = per_hop_interconnect_latency * num_dance_hall_hops
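    # For example, with 16 shader cores num_dance_hall_hops = log2(16) = 4,
    # giving an L1-to-L2 latency of 4 * 45 = 180 Ruby cycles (~60 GPU cycles).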

    #
    # Caches for GPU cores
    #
    for i in xrange(options.num_sc):
        #
        # First create the Ruby objects associated with the GPU cores
        #
        cache = L1Cache(size = options.sc_l1_size,
                            assoc = options.sc_l1_assoc,
                            replacement_policy = "LRU",
                            start_index_bit = block_size_bits,
                            dataArrayBanks = 4,
                            tagArrayBanks = 4,
                            dataAccessLatency = 4,
                            tagAccessLatency = 4,
                            resourceStalls = False)

        l1_cntrl = GPUL1Cache_Controller(version = i,
                                  cntrl_id = cpu_cntrl_count + len(gpu_cluster),
                                  cache = cache,
                                  l2_select_num_bits = l2_bits,
                                  num_l2 = options.num_l2caches,
                                  issue_latency = l1_to_l2_noc_latency,
                                  number_of_TBEs = options.gpu_l1_buf_depth,
                                  ruby_system = ruby_system)

        gpu_seq = RubySequencer(version = options.num_cpus + i,
                            icache = cache,
                            dcache = cache,
                            access_phys_mem = True,
                            max_outstanding_requests = options.gpu_l1_buf_depth,
                            ruby_system = ruby_system,
                            deadlock_threshold = 2000000)

        l1_cntrl.sequencer = gpu_seq

        if piobus is not None:
            gpu_seq.pio_port = piobus.slave

        exec("ruby_system.l1_cntrl_sp%02d = l1_cntrl" % i)

        #
        # Add controllers and sequencers to the appropriate lists
        #
        all_sequencers.append(gpu_seq)
        gpu_cluster.add(l1_cntrl)

    l2_index_start = block_size_bits + l2_bits
    # Use L2 cache and interconnect latencies to calculate protocol latencies
    # NOTE! These latencies are in Ruby (cache) cycles, not SM cycles
    l2_cache_access_latency = 30 # ~10 GPU cycles
    l2_to_l1_noc_latency = per_hop_interconnect_latency * num_dance_hall_hops
    l2_to_mem_noc_latency = 125 # ~40 GPU cycles

    l2_clusters = []
    for i in xrange(options.num_l2caches):
        #
        # First create the Ruby objects associated with this cpu
        #
        l2_cache = L2Cache(size = options.sc_l2_size,
                           assoc = options.sc_l2_assoc,
                           start_index_bit = l2_index_start,
                           replacement_policy = "LRU",
                           dataArrayBanks = 4,
                           tagArrayBanks = 4,
                           dataAccessLatency = 4,
                           tagAccessLatency = 4,
                           resourceStalls = options.gpu_l2_resource_stalls)

        l2_cntrl = GPUL2Cache_Controller(version = i,
                                cntrl_id = cpu_cntrl_count + len(gpu_cluster),
                                L2cache = l2_cache,
                                l2_response_latency = l2_cache_access_latency +
                                                      l2_to_l1_noc_latency,
                                l2_request_latency = l2_to_mem_noc_latency,
                                ruby_system = ruby_system)

        exec("ruby_system.l2_cntrl%d = l2_cntrl" % i)
        l2_cluster = Cluster(intBW = 32, extBW = 32)
        l2_cluster.add(l2_cntrl)
        gpu_cluster.add(l2_cluster)
        l2_clusters.append(l2_cluster)

    ############################################################################
    # Pagewalk cache
    # NOTE: We use a CPU L1 cache controller here. This is to facilitate MMU
    #       cache coherence (as the GPU L1 caches are incoherent without
    #       flushes). The L2 cache is small, and should have minimal effect on
    #       the performance (see Section 6.2 of Power et al. HPCA 2014).
    pwd_cache = L1Cache(size = options.pwc_size,
                            assoc = 16, # 64 is fully associative @ 8kB
                            replacement_policy = "LRU",
                            start_index_bit = block_size_bits,
                            latency = 8,
                            resourceStalls = False)
    # Small cache since CPU L1 requires I and D
    pwi_cache = L1Cache(size = "512B",
                            assoc = 2,
                            replacement_policy = "LRU",
                            start_index_bit = block_size_bits,
                            latency = 8,
                            resourceStalls = False)

    # Small cache since CPU L1 controller requires L2
    l2_cache = L2Cache(size = "512B",
                           assoc = 2,
                           start_index_bit = block_size_bits,
                           latency = 1,
                           resourceStalls = False)

    l1_cntrl = L1Cache_Controller(version = options.num_cpus,
                                  cntrl_id = len(cpu_cluster)+len(gpu_cluster)+
                                             len(dir_cntrls),
                                  L1Icache = pwi_cache,
                                  L1Dcache = pwd_cache,
                                  L2cache = l2_cache,
                                  send_evictions = False,
                                  issue_latency = l1_to_l2_noc_latency,
                                  cache_response_latency = 1,
                                  l2_cache_hit_latency = 1,
                                  number_of_TBEs = options.gpu_l1_buf_depth,
                                  ruby_system = ruby_system)

    cpu_seq = RubySequencer(version = options.num_cpus + options.num_sc,
                            icache = pwd_cache, # Never get data from pwi_cache
                            dcache = pwd_cache,
                            access_phys_mem = True,
                            max_outstanding_requests = options.gpu_l1_buf_depth,
                            ruby_system = ruby_system,
                            deadlock_threshold = 2000000)

    l1_cntrl.sequencer = cpu_seq


    ruby_system.l1_pw_cntrl = l1_cntrl
    all_sequencers.append(cpu_seq)

    gpu_cluster.add(l1_cntrl)


    #
    # Create controller for the copy engine to connect to in GPU cluster
    # Cache is unused by controller
    #
    cache = L1Cache(size = "4096B", assoc = 2)

    gpu_ce_seq = RubySequencer(version = options.num_cpus + options.num_sc+1,
                               icache = cache,
                               dcache = cache,
                               access_phys_mem = True,
                               max_outstanding_requests = 64,
                               support_inst_reqs = False,
                               ruby_system = ruby_system)

    gpu_ce_cntrl = GPUCopyDMA_Controller(version = 0,
                                  cntrl_id = cpu_cntrl_count + len(gpu_cluster),
                                  sequencer = gpu_ce_seq,
                                  number_of_TBEs = 256,
                                  ruby_system = ruby_system)

    ruby_system.l1_cntrl_ce = gpu_ce_cntrl

    all_sequencers.append(gpu_ce_seq)

    complete_cluster = Cluster(intBW = 32, extBW = 32)
    complete_cluster.add(gpu_ce_cntrl)
    complete_cluster.add(cpu_cluster)
    complete_cluster.add(gpu_cluster)

    for cntrl in dir_cntrls:
        complete_cluster.add(cntrl)

    for cntrl in dma_cntrls:
        cntrl.cntrl_id = len(complete_cluster)
        complete_cluster.add(cntrl)

    for cluster in l2_clusters:
        complete_cluster.add(cluster)

    return (all_sequencers, dir_cntrls, complete_cluster)
Example #22
debug = args.v
enableMongo = args.mongodb
defproduceraPrvtKey = args.defproducera_prvt_key
defproducerbPrvtKey = args.defproducerb_prvt_key
dumpErrorDetails = args.dump_error_details
keepLogs = args.keep_logs
dontLaunch = args.dont_launch
dontKill = args.leave_running
prodCount = args.prod_count
onlyBios = args.only_bios
killAll = args.clean_run

Utils.Debug = debug
localTest = True if server == TestHelper.LOCAL_HOST else False
cluster = Cluster(mykeosdd=True,
                  enableMongo=enableMongo,
                  defproduceraPrvtKey=defproduceraPrvtKey,
                  defproducerbPrvtKey=defproducerbPrvtKey)
walletMgr = WalletMgr(True)
testSuccessful = False
killEnuInstances = not dontKill
killWallet = not dontKill

WalletdName = "mykeosd"
ClientName = "mycleos"
# Utils.setMongoSyncTime(50)

try:
    Print("BEGIN")
    Print("SERVER: %s" % (server))
    Print("PORT: %d" % (port))
Example #23
    return (headBlockNum, libNum)


args = TestHelper.parse_args({
    "--prod-count", "--dump-error-details", "--keep-logs", "-v",
    "--leave-running", "--clean-run", "--p2p-plugin", "--wallet-port",
    "--mongodb"
})
Utils.Debug = args.v
totalProducerNodes = 2
totalNonProducerNodes = 1
totalNodes = totalProducerNodes + totalNonProducerNodes
maxActiveProducers = 21
totalProducers = maxActiveProducers
enableMongo = args.mongodb
cluster = Cluster(walletd=True, enableMongo=enableMongo)
dumpErrorDetails = args.dump_error_details
keepLogs = args.keep_logs
dontKill = args.leave_running
prodCount = args.prod_count
killAll = args.clean_run
p2pPlugin = args.p2p_plugin
walletPort = args.wallet_port

walletMgr = WalletMgr(True, port=walletPort)
testSuccessful = False
killEosInstances = not dontKill
killWallet = not dontKill

WalletdName = Utils.EosWalletName
ClientName = "cleos"
Example #24
dumpErrorDetails = args.dump_error_details
onlyBios = args.only_bios
killAll = args.clean_run

Utils.Debug = debug

killEosInstances = not dontKill
topo = "mesh"
delay = 1
prodCount = 1  # producers per producer node
pnodes = 1
total_nodes = pnodes
actualTest = "tests/nodeos_run_test.py"
testSuccessful = False

cluster = Cluster()
try:
    Print("BEGIN")
    cluster.killall(allInstances=killAll)
    cluster.cleanup()

    Print(
        "producing nodes: %s, non-producing nodes: %d, topology: %s, delay between nodes launch(seconds): %d"
        % (pnodes, total_nodes - pnodes, topo, delay))
    Print("Stand up cluster")
    if cluster.launch(pnodes,
                      total_nodes,
                      prodCount,
                      topo,
                      delay,
                      onlyBios=onlyBios,
 def __get_cluster_lock_type(self):
     return Cluster().get_lock_type()
Example #26
def create_system(options, full_system, system, dma_ports, ruby_system):

    if 'VI_hammer' not in buildEnv['PROTOCOL']:
        panic("This script requires the VI_hammer protocol to be built.")

    options.access_backing_store = True

    cpu_sequencers = []

    topology = Cluster(intBW=32, extBW=32)

    #
    # Must create the individual controllers before the network to ensure the
    # controller constructors are called before the network constructor
    #
    l2_bits_float = math.log(options.num_l2caches, 2)
    l2_bits = int(l2_bits_float)
    if l2_bits_float > l2_bits:
        l2_bits += 1
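    # i.e. l2_bits = ceil(log2(num_l2caches)), so a non-power-of-two L2 cache
    # count still gets enough selector bits.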
    block_size_bits = int(math.log(options.cacheline_size, 2))

    for i in xrange(options.num_cpus):
        #
        # First create the Ruby objects associated with this cpu
        #
        l1i_cache = L1Cache(size=options.l1i_size,
                            assoc=options.l1i_assoc,
                            start_index_bit=block_size_bits,
                            is_icache=True)
        l1d_cache = L1Cache(size=options.l1d_size,
                            assoc=options.l1d_assoc,
                            start_index_bit=block_size_bits)
        l2_cache = L2Cache(size=options.l2_size,
                           assoc=options.l2_assoc,
                           start_index_bit=block_size_bits)

        l1_cntrl = L1Cache_Controller(version = i,
                                      L1Icache = l1i_cache,
                                      L1Dcache = l1d_cache,
                                      L2cache = l2_cache,
                                      no_mig_atomic = not \
                                        options.allow_atomic_migration,
                                      send_evictions = send_evicts(options),
                                      transitions_per_cycle = options.ports,
                                      ruby_system = ruby_system)

        cpu_seq = RubySequencer(version=i,
                                icache=l1i_cache,
                                dcache=l1d_cache,
                                ruby_system=ruby_system)

        l1_cntrl.sequencer = cpu_seq

        if options.recycle_latency:
            l1_cntrl.recycle_latency = options.recycle_latency

        exec("ruby_system.l1_cntrl%d = l1_cntrl" % i)
        #
        # Add controllers and sequencers to the appropriate lists
        #
        cpu_sequencers.append(cpu_seq)
        topology.add(l1_cntrl)

        # Connect the L1 controller and the network
        # Connect the buffers from the controller to network
        l1_cntrl.requestFromCache = MessageBuffer()
        l1_cntrl.requestFromCache.master = ruby_system.network.slave
        l1_cntrl.responseFromCache = MessageBuffer()
        l1_cntrl.responseFromCache.master = ruby_system.network.slave
        l1_cntrl.unblockFromCache = MessageBuffer()
        l1_cntrl.unblockFromCache.master = ruby_system.network.slave

        # Connect the buffers from the network to the controller
        l1_cntrl.forwardToCache = MessageBuffer()
        l1_cntrl.forwardToCache.slave = ruby_system.network.master
        l1_cntrl.responseToCache = MessageBuffer()
        l1_cntrl.responseToCache.slave = ruby_system.network.master

        l1_cntrl.mandatoryQueue = MessageBuffer()
        l1_cntrl.triggerQueue = MessageBuffer()

    cpu_mem_range = AddrRange(options.total_mem_size)
    mem_module_size = cpu_mem_range.size() / options.num_dirs

    #
    # determine size and index bits for probe filter
    # By default, the probe filter size is configured to be twice the
    # size of the L2 cache.
    #
    pf_size = MemorySize(options.l2_size)
    pf_size.value = pf_size.value * 2
    dir_bits = int(math.log(options.num_dirs, 2))
    pf_bits = int(math.log(pf_size.value, 2))
    if options.numa_high_bit:
        if options.pf_on or options.dir_on:
            # if numa high bit explicitly set, make sure it does not overlap
            # with the probe filter index
            assert (options.numa_high_bit - dir_bits > pf_bits)

        # set the probe filter start bit to just above the block offset
        pf_start_bit = block_size_bits
    else:
        if dir_bits > 0:
            pf_start_bit = dir_bits + block_size_bits - 1
        else:
            pf_start_bit = block_size_bits
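    # Example: with a 1 MB L2, pf_size = 2 MB; with 64 B lines and 4
    # directories, block_size_bits = 6, dir_bits = 2, and pf_start_bit = 7
    # when numa_high_bit is not set.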

    dir_cntrl_nodes = []
    for i in xrange(options.num_dirs):
        #
        # Create the Ruby objects associated with the directory controller
        #

        dir_size = MemorySize('0B')
        dir_size.value = mem_module_size

        pf = ProbeFilter(size=pf_size, assoc=4, start_index_bit=pf_start_bit)

        dir_cntrl = Directory_Controller(version = i,
                                         directory = \
                                         RubyDirectoryMemory( \
                                                    version = i,
                                                    size = dir_size,
                                                    numa_high_bit = \
                                                      options.numa_high_bit),
                                         probeFilter = pf,
                                         probe_filter_enabled = options.pf_on,
                                         full_bit_dir_enabled = options.dir_on,
                                         transitions_per_cycle = options.ports,
                                         ruby_system = ruby_system)

        if options.recycle_latency:
            dir_cntrl.recycle_latency = options.recycle_latency

        exec("ruby_system.dir_cntrl%d = dir_cntrl" % i)
        dir_cntrl_nodes.append(dir_cntrl)

        # Connect the directory controller to the network
        dir_cntrl.forwardFromDir = MessageBuffer()
        dir_cntrl.forwardFromDir.master = ruby_system.network.slave
        dir_cntrl.responseFromDir = MessageBuffer()
        dir_cntrl.responseFromDir.master = ruby_system.network.slave
        dir_cntrl.dmaResponseFromDir = MessageBuffer(ordered=True)
        dir_cntrl.dmaResponseFromDir.master = ruby_system.network.slave

        dir_cntrl.unblockToDir = MessageBuffer()
        dir_cntrl.unblockToDir.slave = ruby_system.network.master
        dir_cntrl.responseToDir = MessageBuffer()
        dir_cntrl.responseToDir.slave = ruby_system.network.master
        dir_cntrl.requestToDir = MessageBuffer()
        dir_cntrl.requestToDir.slave = ruby_system.network.master
        dir_cntrl.dmaRequestToDir = MessageBuffer(ordered=True)
        dir_cntrl.dmaRequestToDir.slave = ruby_system.network.master

        dir_cntrl.triggerQueue = MessageBuffer(ordered=True)
        dir_cntrl.responseFromMemory = MessageBuffer()

    dma_cntrl_nodes = []
    for i, dma_port in enumerate(dma_ports):
        #
        # Create the Ruby objects associated with the dma controller
        #
        dma_seq = DMASequencer(version=i, ruby_system=ruby_system)

        dma_cntrl = DMA_Controller(version=i,
                                   dma_sequencer=dma_seq,
                                   transitions_per_cycle=options.ports,
                                   ruby_system=ruby_system)

        exec("ruby_system.dma_cntrl%d = dma_cntrl" % i)
        exec("ruby_system.dma_cntrl%d.dma_sequencer.slave = dma_port" % i)
        dma_cntrl_nodes.append(dma_cntrl)

        if options.recycle_latency:
            dma_cntrl.recycle_latency = options.recycle_latency

        # Connect the dma controller to the network
        dma_cntrl.responseFromDir = MessageBuffer(ordered=True)
        dma_cntrl.responseFromDir.slave = ruby_system.network.master
        dma_cntrl.requestToDir = MessageBuffer()
        dma_cntrl.requestToDir.master = ruby_system.network.slave

        dma_cntrl.mandatoryQueue = MessageBuffer()

    # Create the io controller and the sequencer
    if full_system:
        io_seq = DMASequencer(version=len(dma_ports), ruby_system=ruby_system)
        ruby_system._io_port = io_seq
        io_controller = DMA_Controller(version=len(dma_ports),
                                       dma_sequencer=io_seq,
                                       transitions_per_cycle=options.ports,
                                       ruby_system=ruby_system)
        ruby_system.io_controller = io_controller

        # Connect the dma controller to the network
        io_controller.responseFromDir = MessageBuffer(ordered=True)
        io_controller.responseFromDir.slave = ruby_system.network.master
        io_controller.requestToDir = MessageBuffer()
        io_controller.requestToDir.master = ruby_system.network.slave

        io_controller.mandatoryQueue = MessageBuffer()

        dma_cntrl_nodes.append(io_controller)

    return (cpu_sequencers, dir_cntrl_nodes, dma_cntrl_nodes, topology)
 def __is_cluster_running(self, clustername):
     if clustername is None:
         return False
     return Cluster().running()
Example #28
"""
Solves the N-body problem using RungeKutta or Velocity-Verlet
"""
from Cluster import Cluster
if __name__ == "__main__":
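    # Build a cluster of 40 bodies, integrate it for 100 steps starting from a
    # radius-20 initial distribution, and animate the result.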

    test = Cluster(N=40, n_steps=100)
    test.initialize(radius=20)
    test.animate()
Example #29
dumpErrorDetails = args.dump_error_details
onlyBios = args.only_bios
killAll = args.clean_run

Utils.Debug = debug

killEosInstances = not dontKill
topo = "mesh"
delay = 1
prodCount = 1  # producers per producer node
pnodes = 1
total_nodes = pnodes
actualTest = "tests/nodeos_run_test.py"
testSuccessful = False

cluster = Cluster(walletd=True)
try:
    Print("BEGIN")
    cluster.killall(allInstances=killAll)
    cluster.cleanup()

    Print(
        "producing nodes: %s, non-producing nodes: %d, topology: %s, delay between nodes launch(seconds): %d"
        % (pnodes, total_nodes - pnodes, topo, delay))
    Print("Stand up cluster")

    if cluster.launch(pnodes=pnodes,
                      totalNodes=total_nodes,
                      prodCount=prodCount,
                      topo=topo,
                      delay=delay,
 def test01(self):
     (distance,idx1,idx2) = closest_pair_strip([Cluster(set([]), 0, 0, 1, 0), Cluster(set([]), 1, 0, 1, 0), Cluster(set([]), 2, 0, 1, 0), Cluster(set([]), 3, 0, 1, 0)], 1.5, 1.0)
     
     self.assertAlmostEqual(distance, 1.0)
     self.assertEqual(idx1, 1)
     self.assertEqual(idx2, 2)