Example #1
    def setUp(self):
        # Register three named execution profiles that differ in their load
        # balancing and speculative execution settings.
        spec_ep_brr = ExecutionProfile(
            load_balancing_policy=BadRoundRobinPolicy(),
            speculative_execution_policy=ConstantSpeculativeExecutionPolicy(
                .01, 20))
        spec_ep_rr = ExecutionProfile(
            speculative_execution_policy=ConstantSpeculativeExecutionPolicy(
                .01, 20))
        spec_ep_rr_lim = ExecutionProfile(
            load_balancing_policy=BadRoundRobinPolicy(),
            speculative_execution_policy=ConstantSpeculativeExecutionPolicy(
                .01, 1))

        self.cluster.add_execution_profile("spec_ep_brr", spec_ep_brr)
        self.cluster.add_execution_profile("spec_ep_rr", spec_ep_rr)
        self.cluster.add_execution_profile("spec_ep_rr_lim", spec_ep_rr_lim)
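Once registered under a name, a profile like the ones above is selected per request. A minimal usage sketch, assuming the test's self.cluster from above and using system.local as a stand-in query:

# Sketch (assumption): run one query through the "spec_ep_brr" profile
# registered in setUp; omitting execution_profile uses EXEC_PROFILE_DEFAULT.
session = self.cluster.connect()
rows = session.execute(
    "SELECT release_version FROM system.local",
    execution_profile="spec_ep_brr")
print(rows.one().release_version)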
Example #2
    def setUpClass(cls):
        cls.common_setup(1)

        spec_ep_brr = ExecutionProfile(
            load_balancing_policy=BadRoundRobinPolicy(),
            speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.01, 20))
        spec_ep_rr = ExecutionProfile(
            speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.01, 20))
        spec_ep_rr_lim = ExecutionProfile(
            load_balancing_policy=BadRoundRobinPolicy(),
            speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.01, 1))
        spec_ep_brr_lim = ExecutionProfile(
            load_balancing_policy=BadRoundRobinPolicy(),
            speculative_execution_policy=ConstantSpeculativeExecutionPolicy(0.4, 10))

        cls.cluster.add_execution_profile("spec_ep_brr", spec_ep_brr)
        cls.cluster.add_execution_profile("spec_ep_rr", spec_ep_rr)
        cls.cluster.add_execution_profile("spec_ep_rr_lim", spec_ep_rr_lim)
        cls.cluster.add_execution_profile("spec_ep_brr_lim", spec_ep_brr_lim)
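For reference, the two positional arguments to ConstantSpeculativeExecutionPolicy are the constant delay before each extra attempt and the cap on speculative attempts. A small sketch spelling that out, using the same values as spec_ep_brr_lim above:

# ConstantSpeculativeExecutionPolicy(delay, max_attempts):
#   delay        - seconds to wait before sending each speculative request
#   max_attempts - maximum number of speculative requests per query
policy = ConstantSpeculativeExecutionPolicy(0.4, 10)
spec_ep_brr_lim_sketch = ExecutionProfile(
    load_balancing_policy=BadRoundRobinPolicy(),
    speculative_execution_policy=policy)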
Example #3
import calendar
import decimal
import random
import time

from dse.cluster import Cluster, ExecutionProfile, EXEC_PROFILE_DEFAULT
from dse.policies import DCAwareRoundRobinPolicy, TokenAwarePolicy, ConstantSpeculativeExecutionPolicy
from dse import ConsistencyLevel

#Configuration
contactpoints = ['172.31.13.134', '172.31.4.17']
localDC = "dc1"
keyspace = "cme"
CL = ConsistencyLevel.ONE
profile1 = ExecutionProfile(
    load_balancing_policy=DCAwareRoundRobinPolicy(local_dc=localDC,
                                                  used_hosts_per_remote_dc=3),
    speculative_execution_policy=ConstantSpeculativeExecutionPolicy(.1, 20),
    consistency_level=CL)

print "Connecting to cluster"

cluster = Cluster(
    contact_points=contactpoints,
    execution_profiles={EXEC_PROFILE_DEFAULT: profile1},
)

session = cluster.connect(keyspace)

c = 0
x = 0
while True:
    quotetime = calendar.timegm(time.gmtime())
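The loop body is truncated in the source. As a separate illustration (not a reconstruction of the missing code), a minimal sketch that times one read executed under the default profile configured above; system.local is a stand-in for whatever table the original loop queried:

# Illustration only: measure the latency of one read under EXEC_PROFILE_DEFAULT.
start = time.time()
row = session.execute("SELECT release_version FROM system.local").one()
elapsed_ms = (time.time() - start) * 1000
print("version=%s latency=%.1f ms" % (row.release_version, elapsed_ms))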
Example #4
    def readStream():
        coordinator = dc
        last_c = coordinator
        used_dc = dc
        current = time.localtime()
        bucket = str(current.tm_year) + str(current.tm_mon) + str(
            current.tm_mday) + str(current.tm_hour) + str(current.tm_min)

        profile1 = ExecutionProfile(
            load_balancing_policy=DCAwareRoundRobinPolicy(
                local_dc=dc, used_hosts_per_remote_dc=3),
            speculative_execution_policy=ConstantSpeculativeExecutionPolicy(
                .05, 20),
            consistency_level=CL)

        print "Connecting to cluster"

        cluster = Cluster(
            contact_points=contactpoints,
            auth_provider=auth_provider,
            ssl_options=ssl_opts,
            execution_profiles={EXEC_PROFILE_DEFAULT: profile1},
        )

        session = cluster.connect()

        x = 0
        y = 0
        while x <= count:
            r = {}  #Results Dictionary
            current = time.localtime()
            bucket = str(current.tm_year) + str(current.tm_mon) + str(
                current.tm_mday) + str(current.tm_hour) + str(current.tm_min)
            #r["d"] = time.strftime('%Y-%m-%dT%H:%M:%S', current)
            query = """ select * from demo.table2 where bucket = '%s' limit 1 """ % (
                bucket)
            readfail = 0
            r["result"] = "Successful"
            try:
                results = session.execute(query)
            except Exception as e:
                print("Read failed.")
                readfail = 1
                errormsg = str(e).replace('"', '')
                r["count"] = x
                r["dc"] = used_dc
                r["result"] = errormsg
                r["d"] = "00:00:00"
                yield json.dumps(r) + "\r\n"
            if readfail == 1:
                cluster.shutdown()
                return

            for row in results:
                r["d"] = row.d

            if (y == rowcount):
                y = 0
                try:
                    future = session.execute_async(query, trace=True)
                    result = future.result()
                    try:
                        trace = future.get_query_trace(1)
                        coordinator = trace.coordinator
                    except Exception:
                        coordinator = last_c
                    for h in session.hosts:
                        if h.address == coordinator:
                            used_dc = h.datacenter
                    r["count"] = x
                    r["dc"] = used_dc
                    yield json.dumps(r) + "\r\n"
                except Exception as e:
                    errormsg = str(e).replace('"', '')
                    print("Read trace failed.")
                    r["count"] = x
                    r["dc"] = used_dc
                    r["result"] = errormsg
                    yield json.dumps(r) + "\r\n"
                    cluster.shutdown()

            time.sleep(.03)  # an artificial delay
            x = x + 1
            y = y + 1
        cluster.shutdown()
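Example #4 infers the coordinating node from a query trace. A standalone sketch of that technique, assuming an already connected cluster and session and using system.local as the traced query:

# Sketch: find which node coordinated a request and map it to a datacenter.
future = session.execute_async("SELECT release_version FROM system.local", trace=True)
future.result()                              # wait for the rows
trace = future.get_query_trace(max_wait=1)   # may raise if the trace is not ready yet
coordinator_ip = trace.coordinator
for host in cluster.metadata.all_hosts():
    if host.address == coordinator_ip:
        print("coordinator %s is in datacenter %s" % (host.address, host.datacenter))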
Example #5
    def writeStream(targetCluster):
        coordinator = dc
        last_c = coordinator
        used_dc = dc
        #current = time.localtime()

        profile1 = ExecutionProfile(
            load_balancing_policy=DCAwareRoundRobinPolicy(
                local_dc=dc, used_hosts_per_remote_dc=3),
            speculative_execution_policy=ConstantSpeculativeExecutionPolicy(
                .05, 20),
            consistency_level=CL)

        print("Connecting to cluster")

        if (targetCluster == "DDAC"):
            contactpoints = ddaccontactpoints
        else:
            contactpoints = osscontactpoints

        cluster = Cluster(
            contact_points=contactpoints,
            auth_provider=auth_provider,
            ssl_options=ssl_opts,
            execution_profiles={EXEC_PROFILE_DEFAULT: profile1},
        )

        session = cluster.connect()

        x = 0
        y = 0
        while x <= count:
            r = {}  #Results Dictionary
            current = time.localtime()
            bucket = str(current.tm_year) + str(current.tm_mon) + str(
                current.tm_mday) + str(current.tm_hour) + str(current.tm_min)
            r["d"] = time.strftime('%Y-%m-%dT%H:%M:%S', current)
            data1 = randint(1, 100)
            data2 = randint(1, 100)
            data3 = randint(1, 100)
            query = """ INSERT INTO demo.table2 (bucket, ts, d, data1, data2, data3) VALUES ('%s', now(), '%s', '%s', '%s', '%s') """ % (
                str(bucket), str(r["d"]), str(data1), str(data2), str(data3))
            writefail = 0
            r["result"] = "Successful"
            try:
                session.execute(query)
            except Exception as e:
                print("Write failed.")
                writefail = 1
                errormsg = str(e).replace('"', '')
                r["count"] = x
                r["dc"] = used_dc
                r["result"] = errormsg
                yield json.dumps(r) + "\r\n"
            if writefail == 1:
                cluster.shutdown()
                return
            if (y == rowcount):
                y = 0
                try:
                    future = session.execute_async(query, trace=True)
                    result = future.result()
                    try:
                        trace = future.get_query_trace(1)
                        coordinator = trace.coordinator
                    except Exception:
                        coordinator = last_c
                    for h in session.hosts:
                        if h.address == coordinator:
                            used_dc = h.datacenter
                    r["count"] = x
                    r["dc"] = used_dc
                    yield json.dumps(r) + "\r\n"
                except Exception as e:
                        errormsg = str(e).replace('"', '')
                    print("Trace failed.")
                    r["count"] = x
                    r["dc"] = used_dc
                    r["result"] = errormsg
                    yield json.dumps(r) + "\r\n"
                    cluster.shutdown()

            time.sleep(.03)  # an artificial delay
            x = x + 1
            y = y + 1
        cluster.shutdown()
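One caveat next to Example #5: the INSERT generates its timeuuid with now() and interpolates values into the query string, so it is not idempotent, and the driver only schedules speculative retries for statements marked idempotent. A minimal idempotent variant of the write, assuming the same demo.table2 schema and the dse-driver counterparts of SimpleStatement and uuid_from_time (the client-side timeuuid and bound parameters are the substitutions):

from dse.query import SimpleStatement
from dse.util import uuid_from_time

# Sketch: generate the timeuuid client-side, bind values instead of
# interpolating them, and mark the statement idempotent so
# ConstantSpeculativeExecutionPolicy is allowed to retry it.
insert = SimpleStatement(
    "INSERT INTO demo.table2 (bucket, ts, d, data1, data2, data3) "
    "VALUES (%s, %s, %s, %s, %s, %s)",
    is_idempotent=True)
session.execute(insert, (bucket, uuid_from_time(time.time()), r["d"],
                         str(data1), str(data2), str(data3)))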