Example #1
    def generateFromServerConfig(self):
        """
        Generate the client config file from the servers
        """
        clusterId = self._clusterId
        fn = '/'.join([X.cfgDir, 'arakoonclusters'])
        p = X.getConfig(fn)
        clusterExists = p.has_section(clusterId)

        if not clusterExists:
            X.raiseError("No server cluster '%s' is defined." % clusterId)

        serverConfigDir = p.get(clusterId, "path")
        serverConfigPath = '/'.join([serverConfigDir, clusterId])
        serverConfig = X.getConfig(serverConfigPath)
        if serverConfig.has_section("global"):
            nodes = self.__getNodes(serverConfig)

            for name in nodes:
                if name in self.getNodes():
                    self.removeNode(name)

                ips = serverConfig.get(name, 'ip')
                ip_list = map(lambda x: x.strip(), ips.split(','))
                self.addNode(name, ip_list,
                             serverConfig.get(name, "client_port"))
Example #2
    def generateFromServerConfig(self):
        """
        Generate the client config file from the servers
        """
        clusterId = self._clusterId
        fn = '/'.join([X.cfgDir, 'arakoonclusters'])
        p = X.getConfig(fn)
        clusterExists = p.has_section(clusterId)

        if not clusterExists:
            X.raiseError("No server cluster '%s' is defined." % clusterId)

        serverConfigDir = p.get(clusterId, "path")
        serverConfigPath = '/'.join([serverConfigDir, clusterId])
        serverConfig = X.getConfig(serverConfigPath)
        if serverConfig.has_section("global"):
            nodes = self.__getNodes(serverConfig)

            for name in nodes:
                if name in self.getNodes():
                    self.removeNode(name)

                ips = serverConfig.get(name, 'ip')
                ip_list = map(lambda x: x.strip(), ips.split(','))
                self.addNode(name,
                             ip_list,
                             serverConfig.get(name, "client_port"))
Example #3
def inspect_cluster(cluster):

    # cleanup inspect dumps
    cluster_id = cluster._getClusterId()
    node_names = cluster.listLocalNodes()
    for node_name in node_names:
        dn = '%s/%s/' % (
            cluster_id,
            node_name,
        )
        logging.info("deleting %s", dn)
        X.removeDirTree(dn)

    cfg_fn = cluster._getConfigFileName()
    r = call_arakoon("--inspect-cluster", "-config", "%s.cfg" % cfg_fn)

    dump_paths = []
    for node_name in node_names:
        dump_path = './%s/%s/store.db.dump' % (cluster_id, node_name)
        dump_paths.append(dump_path)

    size = len(node_names)
    for i in xrange(size - 1):
        for j in xrange(i + 1, size):
            fn0 = dump_paths[i]
            fn1 = dump_paths[j]
            compare_files(fn0, fn1)
            logging.info("%s == %s", fn0, fn1)
    logging.info("inspect_cluster(%s): ok", cluster_id)
Example #4
    def testAddLocalNode(self):

        cid = self._clusterId
        cluster = self._getCluster()
        for i in range(0, 3):
            cluster.addNode("%s_%s" % (cid, i))

        n0 = '%s_0' % cid
        n1 = '%s_1' % cid

        cluster.addLocalNode(n1)

        sn = self.__servernodes()

        cfgPath = '/'.join(
            [X.cfgDir, "qconfig", "arakoon", cid,
             "%s_local_nodes" % cid])
        config = X.getConfig(cfgPath)
        assert_equals(Compat.sectionAsDict(config, "global"), {'cluster': n1})

        cluster.addLocalNode(n0)

        config = X.getConfig(cfgPath)
        assert_equals(Compat.sectionAsDict(config, "global"),
                      {'cluster': '%s,%s' % (n1, n0)})
Example #5
    def testNameDiffersFromId(self):
        name = self._clusterId
        id_inside = "id_inside"
        cluster = C._getCluster(name)
        cfg_name = cluster._getConfigFileName()
        logging.debug("cfg_name = %s", cfg_name)
        #/opt/qbase3/cfg//qconfig/arakoon/cluster_name
        ok = cfg_name.endswith(name)
        assert_true(ok)
        cluster.addNode('node_0')

        cfg = X.getConfig(cfg_name)
        logging.debug("cfg = %s", X.cfg2str(cfg))
        id0 = cfg.get('global', 'cluster_id')
        assert_equals(id0, name)
        # now set it to id
        cfg.set('global', 'cluster_id', id_inside)
        X.writeConfig(cfg, cfg_name)
        logging.debug('cfg_after = %s', X.cfg2str(cfg))

        cluster = C._getCluster(name)
        #ccfg = cluster.getClientConfig()
        #print ccfg

        client = cluster.getClient()
        ccfg2 = client._config
        logging.debug("ccfg2=%s", ccfg2)
        ccfg_id = ccfg2.getClusterId()

        assert_equals(ccfg_id, id_inside)
Example #6
def test_sabotage():
    pass
    """
    scenario countering a sysadmin removing files (s)he shouldn't (eta : 16s)
    """
    clu = Common._getCluster()
    tlog_size = 1000
    num_tlogs = 2
    test_size = num_tlogs * tlog_size
    Common.iterate_n_times(test_size, Common.simple_set)
    time.sleep(10)
    clu.stop()
    node_id = Common.node_names[0]
    node_cfg = clu.getNodeConfig(node_id)
    node_home_dir = node_cfg ['home']
    node_tlf_dir = node_cfg ['tlf_dir']
    logging.debug("node_tlf_dir=%s", node_tlf_dir)
    files = map(lambda x : "%s/%s" % (node_home_dir, x),
                [ "002.tlog",
                  "%s.db" % (node_id,),
                  #"%s.db.wal" % (node_id,), # should not exist after a `proper` close
                  ])
    for f in files:
        print f
        X.removeFile(f)

    cmd = clu._cmd('sturdy_0')
    returncode = X.subprocess.call(cmd)
    assert_equals(returncode, 50)
Example #7
    def shift_logs ( ) :
        log_to_remove = old_log_fmt % (max_logs_to_keep - 1)
        if X.fileExists ( log_to_remove ) :
            fs.unlink(log_to_remove)

        for i in range( 1, max_logs_to_keep - 1) :
            j = max_logs_to_keep - 1 - i
            log_to_move = old_log_fmt % j
            new_log_name = old_log_fmt % (j + 1)
            if X.fileExists( log_to_move ) :
                os.rename ( log_to_move, new_log_name )
Example #8
 def testRemoveDirs(self):
     cid = self._clusterId
     n0 = '%s_%i' % (cid, 0)
     cluster = self._getCluster()
     cluster.addNode(n0)
     cluster.createDirs(n0)
     cluster.removeDirs(n0)
     p0 = '/'.join([X.logDir, cid, n0])
     p1 = '/'.join([X.varDir, "db", cid, n0])
     assert_false(X.fileExists(p0))
     assert_false(X.fileExists(p1))
Example #9
    def setUp(self):
        logging.debug("setUp")
        for node in self._nodes:
            home = '%s/%s' % (self._root, node)
            X.removeDirTree(home)
            X.createDir(home)

        self._server.start()
        cfg = self._config()
        cfg_s = X.cfg2str(cfg)
        logging.debug("cfg_s='%s'", cfg_s)
        self._etcdClient.set(self._cluster_id, cfg_s)
Example #10
def regenerateClientConfig( cluster_id ):
    h = '/'.join([X.cfgDir,'arakoonclients'])
    p = X.getConfig(h)

    if cluster_id in p.sections():
        clusterDir = p.get(cluster_id, "path")
        clientCfgFile = '/'.join([clusterDir, "%s_client.cfg" % cluster_id])
        if X.fileExists(clientCfgFile):
            X.removeFile(clientCfgFile)

    client = ArakoonClient.ArakoonClient()
    cliCfg = client.getClientConfig( cluster_id )
    cliCfg.generateFromServerConfig()
Example #11
def test_issue125():
    n = 31234
    Common.iterate_n_times(n, Common.simple_set)
    slave = Common.CONFIG.node_names[1]

    Common.collapse(slave, 10)
    Common.stopOne(slave)

    Common.iterate_n_times(n, Common.simple_set)  # others move forward

    dir_names = Common.build_node_dir_names(slave)
    db_dir = dir_names[0]
    log_dir = dir_names[1]
    tlx_dir = dir_names[2]

    X.removeFile('%s/035.tlog' % db_dir)
    X.removeFile('%s/034.tlx' % tlx_dir)
    X.removeFile('%s/%s.log' % (log_dir, slave))
    X.removeFile('%s/%s.db' % (db_dir, slave))
    # we have a cluster with head.db, no db and no tlog file.

    # can we launch ?

    Common.startOne(slave, ['-autofix'])
    time.sleep(20)
    Common.assert_running_nodes(3)
Example #12
def _check_tlog_dirs(node, n):
    (home_dir, _, tlf_dir, head_dir) = Common.build_node_dir_names(node)

    tlogs = X.listFilesInDir(home_dir, filter="*.tlog")
    tlxs = X.listFilesInDir(tlf_dir, filter="*.tlx")
    logging.info("tlogs:%s", tlogs)
    logging.info("tlxs:%s", tlxs)
    print tlxs

    assert_equals(len(tlogs) + len(tlxs),
                  n,
                  msg="(%s + %s) should have %i file(s)" % (tlogs, tlxs, n))
    assert_true(X.fileExists(head_dir + "/head.db"))
    logging.info("tlog_dirs are as expected")
Example #13
 def setup(c_id, tlog_max_entries):
     node_0 = "%s_0" % c_id
     c_nodes = [node_0]
     c_home = "/".join([Common.data_base_dir, c_id])
     node_dir = "/".join([Common.data_base_dir, node_0])
     X.removeDirTree(node_dir)
     c = Common.setup_n_nodes_base(
         c_id,
         c_nodes,
         False,
         c_home,
         Common.node_msg_base_port,
         Common.node_client_base_port,
         extra={'tlog_max_entries': str(tlog_max_entries)})
     return c
Example #14
def test_copy_db_to_head2():
    logging.info("test_copy_db_to_head")
    zero = C.node_names[0]
    one = C.node_names[1]
    n = 29876
    C.iterate_n_times(n, C.simple_set)
    logging.info("did %i sets, now copying db to head" % n)
    C.copyDbToHead(one,1)

    head_name = '%s/%s/head/head.db' %(C.data_base_dir, one)

    logging.info(head_name)
    assert_true(X.fileExists(head_name))
    C.stopOne(zero)
    C.wipe(zero)
    C.startOne(zero)
    cli = C.get_client()
    logging.info("cli class:%s", cli.__class__)
    assert_false(cli.expectProgressPossible())
    up2date = False
    counter = 0
    while not up2date and counter < 100:
        time.sleep(1.0)
        counter = counter + 1
        up2date = cli.expectProgressPossible()
    logging.info("catchup from 'collapsed' node finished")
Example #15
def destroy_ram_fs( node_index ) :
    (mount_target,log_dir,tlf_dir,head_dir) = build_node_dir_names( node_names[node_index] )
    if os.path.isdir(mount_target) and os.path.ismount(mount_target):
        cmd = ["sudo", "/bin/umount", mount_target]
        (rc,out,err) = X.run(cmd)
        if rc:
            raise Exception("cmd:%s failed (%s,%s,%s)" % (str(cmd), rc,out,err))
Example #16
    def _assert_n_running(self, n):
        cluster = self._getCluster()
        status = cluster.getStatus()
        logging.debug('status=%s', status)
        c = 0
        for key in status.keys():
            if status[key] == X.AppStatusType.RUNNING:
                c = c + 1
        if c != n:
            for node in status.keys():
                if status[node] == X.AppStatusType.HALTED:
                    cfg = cluster._getConfigFile()
                    logDir = cfg.get(node, "log_dir")
                    fn = "%s/%s.log" % (logDir, node)
                    logging.info("fn=%s", fn)

                    def cat_log(fn):
                        with open(fn, 'r') as f:
                            lines = f.readlines()
                            logging.info("fn:%s for %s", fn, node)
                            for l in lines:
                                ls = l.strip()
                                logging.info(ls)

                    #crash log ?
                    cat_log(fn)
                    filter = '%s.debug.*' % node
                    crash = X.listFilesInDir(logDir, filter=filter)
                    if len(crash):
                        crash_fn = crash[0]
                        cat_log(crash_fn)

        msg = "expected %i running nodes, but got %i" % (n, c)
        assert_equals(c, n, msg=msg)
Example #17
def test_delete_non_existing_with_catchup ():
    pass

    """
    catchup after deleting a non existing value (eta: 6s)
    """
    Common.stopOne( Common.node_names[1] )
    key='key'
    value='value'
    cli = Common.get_client()
    try:
        cli.delete( key )
    except:
        pass
    cli.set(key,value)
    cli.set(key,value)
    cli.set(key,value)

    slave = Common.node_names[1]
    Common.startOne( slave )
    time.sleep(2.0)
    cluster = Common._getCluster()
    log_dir = cluster.getNodeConfig(slave ) ['log_dir']
    log_file = "%s/%s.log" % (log_dir, slave)
    log = X.getFileContents( log_file )
    assert_equals( log.find( "don't fit" ), -1, "Store counter out of sync" )
Example #18
def test_local_collapse():
    logging.info("starting test_local_collapse")

    zero = Common.node_names[0]
    one = Common.node_names[1]
    n = 29876
    Common.iterate_n_times(n, Common.simple_set)
    logging.info("did %i sets, now going into collapse scenario" % n)
    rc = Common.local_collapse(zero, 1)
    assert_equal(rc, 0)
    head_name = '%s/%s/head/head.db' % (Common.data_base_dir, zero)
    logging.info(head_name)
    assert_true(X.fileExists(head_name))
    #
    logging.info("collapsing done")
    Common.stopOne(one)
    Common.wipe(one)
    Common.startOne(one)
    cli = Common.get_client()
    logging.info("cli class:%s", cli.__class__)
    assert_false(cli.expectProgressPossible())
    up2date = False
    counter = 0
    while not up2date and counter < 100:
        time.sleep(1.0)
        counter = counter + 1
        up2date = cli.expectProgressPossible()
    logging.info("catchup from collapsed node finished")
Example #19
def test_learner():
    op_count = 54321
    Common.iterate_n_times(op_count, Common.simple_set)
    cluster = Common._getCluster(Common.cluster_id)
    logging.info("adding learner")
    name = Common.node_names[2]
    (db_dir, log_dir, tlf_dir, head_dir) = Common.build_node_dir_names(name)
    cluster.addNode(name,
                    Common.node_ips[2],
                    clientPort=Common.node_client_base_port + 2,
                    messagingPort=Common.node_msg_base_port + 2,
                    logDir=log_dir,
                    tlfDir=tlf_dir,
                    headDir=head_dir,
                    logLevel='debug',
                    home=db_dir,
                    isLearner=True,
                    targets=[Common.node_names[0]])
    cfg = cluster._getConfigFile()
    logging.info("cfg=%s", X.cfg2str(cfg))
    cluster.disableFsync([name])
    cluster.addLocalNode(name)
    cluster.createDirs(name)
    cluster.startOne(name)

    time.sleep(1.0)

    Common.assert_running_nodes(3)
    time.sleep(op_count / 1000 + 1)  # 1000/s in catchup should be no problem
    #use a client ??"
    Common.stop_all()
    i2 = int(Common.get_last_i_tlog(name))
    assert_true(i2 >= op_count - 1)
Example #20
def test_max_value_size_tinkering():
    cluster = C._getCluster()
    C.assert_running_nodes(1)
    key = "key_not_so_big_v"
    value = "xxxxxxxxxx" * 2000
    client = C.get_client()
    client.set(key, value)
    cluster.stop()
    logging.debug("set succeeded")
    cfg = cluster._getConfigFile()
    cfg.set("global", "__tainted_max_value_size", "1024")
    X.writeConfig(cfg, cluster._getConfigFileName())
    cluster.start()
    time.sleep(1.0)
    C.assert_running_nodes(1)
    client = C.get_client()
    assert_raises(X.arakoon_client.ArakoonException, client.set, key, value)
Example #21
    def getClientConfig(self, clusterName, configName=None):
        """
        Adds an Arakoon client to the configuration.
        @type clusterName: string
        @param clusterName: the name of the cluster for which you want to add a client
        @type configName: optional string
        @param configName: the name of the client configuration for this cluster
        """
        fn = '/'.join([X.cfgDir, 'arakoonclients'])
        p = X.getConfig(fn)
        if not p.has_section(clusterName):
            p.add_section(clusterName)
            cfgDir = '/'.join([X.cfgDir, "qconfig", "arakoon", clusterName])
            p.set(clusterName, "path", cfgDir)
            X.writeConfig(p, fn)

        cfgFile = self._getConfig(clusterName, configName)
        return ArakoonClientExtConfig(clusterName, cfgFile)
Example #22
    def getClientConfig (self, clusterName, configName = None):
        """
        Adds an Arakoon client to the configuration.
        @type clusterName: string
        @param clusterName: the name of the cluster for which you want to add a client
        @type configName: optional string
        @param configName: the name of the client configuration for this cluster
        """
        fn = '/'.join([X.cfgDir, 'arakoonclients'])
        p = X.getConfig(fn)
        if not p.has_section(clusterName):
            p.add_section(clusterName)
            cfgDir = '/'.join([X.cfgDir, "qconfig", "arakoon", clusterName])
            p.set(clusterName, "path", cfgDir)
            X.writeConfig(p, fn)

        cfgFile = self._getConfig(clusterName, configName)
        return ArakoonClientExtConfig(clusterName, cfgFile)
Example #23
def test_sabotage():
    pass
    """
    scenario countering a sysadmin removing files (s)he shouldn't (eta : 16s)
    """
    clu = Common._getCluster()
    tlog_size = 1000
    num_tlogs = 2
    test_size = num_tlogs * tlog_size
    Common.iterate_n_times(test_size, Common.simple_set)
    time.sleep(10)
    print "stopping"
    clu.stop()
    
    node_id = Common.node_names[0]
    node_cfg = clu.getNodeConfig(node_id)
    node_home_dir = node_cfg ['home']
    node_tlf_dir = node_cfg ['tlf_dir']
    logging.debug("node_tlf_dir=%s", node_tlf_dir)
    files = map(lambda x : "%s/%s" % (node_home_dir, x),
                [ "002.tlog",
                  "%s.db" % (node_id,),
                  #"%s.db.wal" % (node_id,), # should not exist after a `proper` close
                  ])
    for f in files:
        print "removing", f
        X.removeFile(f)
    print "starting"
    cmd = clu._cmd('sturdy_0')
    print cmd
    
    def start_node():
        returncode = X.subprocess.call(cmd)
        assert_equals(returncode, 50)
        print "startup failure + correct returncode"
    try:
        
        t = threading.Thread(target = start_node)
        t.start()
        t.join(10.0)
        
    except Exception,e:
        print e
        assert_true(False)
Example #24
def test_mixed_tlog_formats():
    cluster = C._getCluster()
    cluster.disableFsync(C.node_names[:2])
    s0 = 10500
    logging.info("going to do %i sets",s0)
    C.iterate_n_times(s0,C.simple_set)
    C.stop_all()
    cluster.enableTlogCompression(compressor = 'bz2')

    C.start_all()
    logging.info("another %i sets", s0)
    C.iterate_n_times(s0,C.simple_set)
    C.stop_all()

    # do we have both .tlf and .tlx files?
    n0 = C.node_names[0]
    n1 = C.node_names[1]
    config = C.getConfig(n0)

    tlx_dir = config.get('tlf_dir')
    if not tlx_dir:
        tlx_dir = config.get('home')
    files = os.listdir(tlx_dir)

    tls = filter(lambda x:x.endswith(".tlx"), files)
    tlf = filter(lambda x:x.endswith(".tlf"), files)
    assert_true(len(tls) > 5, "we should have .tlx files" )
    assert_true(len(tlf) > 5, "we should have .tlf files" )

    # does catchup still work?

    C.wipe(n0)
    C.startOne(n1)

    #wait for n1 to respond to client requests...
    time.sleep(5)


    rc = cluster.catchupOnly(n0)
    logging.info("catchup had rc=%i", rc)

    C.flush_store(n1)
    C.stop_all()


    C.compare_stores(n0,n1)

    C.start_all()
    rc = C.collapse(name=n1,n = 2)
    logging.info("collapse had rc=%i", rc)
    assert_true(rc == 0, "this should not have failed")
    head_dir = C.build_node_dir_names(n1)[3]

    db_file = head_dir + "/head.db"
    time.sleep(1.0) # give it time to move
    assert_true(X.fileExists(db_file), "%s should exist" % db_file)
Example #25
    def start(self):
        logging.debug("ETCD:start on %s", self._home)
        X.removeDirTree(self._home)
        X.createDir(self._home)
        address = "http://%s:%i" % (self._host, self._port)
        real_cmd = [
            'nohup', 'etcd',
            '-advertise-client-urls=%s' % address,
            '-listen-client-urls=%s' % address, '-data-dir', self._home,
            '>> %s/stdout' % self._home, '2>&1 &'
        ]
        real_cmd_s = ' '.join(real_cmd)
        fn = '%s/start.sh' % self._home
        with open(fn, 'w') as f:
            print >> f, real_cmd_s
        os.chmod(fn, 0755)
        os.system(fn)

        time.sleep(5)
Example #26
    def _getConfig(clusterName, configName):
        fn = '/'.join([X.cfgDir, 'arakoonclients'])
        p = X.getConfig(fn)
        clusterDir = p.get(clusterName, "path")
        last = None
        if configName is None:
            last = "%s_client" % clusterName
        else:
            last = "%s_client_%s" % (clusterName, configName)

        return '/'.join([clusterDir, last])
Example #27
    def _getConfig(clusterName, configName):
        fn = '/'.join([X.cfgDir, 'arakoonclients'])
        p = X.getConfig(fn)
        clusterDir = p.get(clusterName, "path")
        last = None
        if configName is None:
            last = "%s_client" % clusterName
        else:
            last = "%s_client_%s" % (clusterName, configName)

        return '/'.join([clusterDir, last])
Example #28
def setup_3_nodes_ram_fs(home_dir):
    cluster = Common._getCluster(Common.cluster_id)
    cluster.remove()

    cluster = Common._getCluster(Common.cluster_id)

    logging.info("Creating data base dir %s" % home_dir)

    X.createDir(home_dir)

    try:
        for i in range(len(Common.node_names)):
            mount_ram_fs(i)
            nodeName = Common.node_names[i]
            (db_dir, log_dir, tlf_dir,
             head_dir) = Common.build_node_dir_names(Common.node_names[i])
            cluster.addNode(nodeName,
                            Common.node_ips[i],
                            clientPort=Common.node_client_base_port + i,
                            messagingPort=Common.node_msg_base_port + i,
                            logDir=log_dir,
                            tlfDir=tlf_dir,
                            home=db_dir,
                            headDir=head_dir)
            cluster.addLocalNode(nodeName)
            cluster.createDirs(nodeName)

    except Exception as ex:
        teardown_ram_fs(True)
        (a, b, c) = sys.exc_info()
        raise a, b, c

    logging.info("Changing log level to debug for all nodes")
    cluster.setLogLevel("debug")
    cluster.setMasterLease(int(CONFIG.lease_duration))

    logging.info("Creating client config")
    Common.regenerateClientConfig(Common.cluster_id)

    Common.start_all()
Example #29
 def _getClientConfig(clusterName, configName=None):
     """
     Gets an Arakoon client object for an existing cluster
     @type clusterName: string
     @param clusterName: the name of the cluster for which you want to get a client
     @return arakoon client object
     """
     clientConfig = X.getConfig('/'.join([X.cfgDir, "arakoonclients"]))
     if not clientConfig.has_section(clusterName):
         X.raiseError("No such client configured for cluster [%s]" %
                      clusterName)
     else:
         node_dict = {}
         clientCfg = ArakoonClient._getConfig(clusterName, configName)
         cfgFile = X.getConfig(clientCfg)
         if not cfgFile.has_section("global"):
             if configName is not None:
                 msg = "Named client '%s' for cluster '%s' does not exist" % (
                     configName, clusterName)
             else:
                 msg = "No client available for cluster '%s'" % clusterName
             X.raiseError(msg)
         clusterParam = cfgFile.get("global", "cluster")
         for node in clusterParam.split(","):
             node = node.strip()
             ips = cfgFile.get(node, "ip")
             ip_list = map(lambda x: x.strip(), ips.split(','))
             port = cfgFile.get(node, "client_port")
             ip_port = (ip_list, port)
             node_dict.update({node: ip_port})
         clusterId = cfgFile.get('global', 'cluster_id')
         config = X.arakoon_client.ArakoonClientConfig(clusterId, node_dict)
         return config
Example #30
 def _getClientConfig(clusterName, configName = None):
     """
     Gets an Arakoon client object for an existing cluster
     @type clusterName: string
     @param clusterName: the name of the cluster for which you want to get a client
     @return arakoon client object
     """
     clientConfig = X.getConfig('/'.join ([X.cfgDir,"arakoonclients"]))
     if not clientConfig.has_section(clusterName):
         X.raiseError("No such client configured for cluster [%s]" % clusterName)
     else:
         node_dict = {}
         clientCfg = ArakoonClient._getConfig(clusterName, configName)
         cfgFile = X.getConfig( clientCfg )
         if not cfgFile.has_section("global") :
             if configName is not None:
                 msg = "Named client '%s' for cluster '%s' does not exist" % (configName, clusterName)
             else :
                 msg = "No client available for cluster '%s'" % clusterName
             X.raiseError(msg )
         clusterParam = cfgFile.get("global", "cluster")
         for node in clusterParam.split(",") :
             node = node.strip()
             ips = cfgFile.get(node, "ip")
             ip_list = map(lambda x: x.strip(), ips.split(','))
             port = cfgFile.get(node, "client_port")
             ip_port = (ip_list, port)
             node_dict.update({node: ip_port})
         clusterId = cfgFile.get('global', 'cluster_id')
         config = X.arakoon_client.ArakoonClientConfig(clusterId, node_dict)
         return config
Example #31
def test_copy_db_to_head():
    # fill cluster until they have at least 10 tlogs
    C.iterate_n_times(5000, C.set_get_and_delete)

    slave = C.node_names[1]
    # n < 1 fails
    assert_raises( Exception, lambda: C.copyDbToHead(slave, 0))
    # fails on master
    assert_raises( Exception, lambda: C.copyDbToHead(C.node_names[0], 2))

    C.copyDbToHead(slave, 1)

    C.stop_all()

    (home_dir, _, tlf_dir, head_dir) = C.build_node_dir_names(slave)
    tlogs_count = len(X.listFilesInDir( home_dir, filter="*.tlog" ))
    tlf_count = len(X.listFilesInDir( tlf_dir, filter="*.tlf" ))
    assert(tlogs_count + tlf_count < 5)
    assert(X.fileExists(head_dir + "/head.db"))
    a = C.get_i(slave, True)
    logging.info("slave_head_i='%s'", a)
    assert(a >= 5000)
Example #32
def test_start_stop_wrapper():
    cluster = C._getCluster()
    fn = "%s/my_wrapper.sh" % X.tmpDir
    X.createDir(X.tmpDir)
    try:
        with open(fn, 'w') as f:
            f.write('#!/bin/bash -xue\n')
            f.write("logger wrapper called with '$@'\n")
            f.write('$@\n')
        subprocess.call(['chmod', '+x', fn])
        nn = "wrapper"
        cluster.addNode(nn, "127.0.0.1", 8000, wrapper=fn)
        cluster.addLocalNode(nn)
        cluster.createDirs(nn)
        C.assert_running_nodes(0)
        cluster.start()
        time.sleep(1)

        C.assert_running_nodes(1)
        cluster.stop()
        C.assert_running_nodes(0)
    finally:
        cluster.remove()
Example #33
    def addNode(self, name, ip, clientPort):
        """
        Add a node to the client configuration

        @param name: the name of the node
        @param ip: the ip  the node
        @param clientPort: the port of the node
        """
        self.__validateName(name)

        if isinstance(ip, basestring):
            ip = [ip]

        clusterId = self._clusterId
        inifile_path = self._configPath

        config = X.getConfig(inifile_path)

        if not config.has_section("global"):
            config.add_section("global")
            config.set("global", "cluster_id", clusterId)
            config.set("global", "cluster", "")

        nodes = self.__getNodes(config)

        if name in nodes:
            raise Exception("There is already a node with name %s configured" %
                            name)

        nodes.append(name)
        config.add_section(name)
        config.set(name, "ip", ', '.join(ip))
        config.set(name, "client_port", clientPort)

        config.set("global", "cluster", ",".join(nodes))

        X.writeConfig(config, inifile_path)
Example #34
    def addNode(self, name, ip, clientPort):
        """
        Add a node to the client configuration

        @param name: the name of the node
        @param ip: the ip  the node
        @param clientPort: the port of the node
        """
        self.__validateName(name)

        if isinstance(ip, basestring):
            ip = [ip]

        clusterId = self._clusterId
        inifile_path = self._configPath

        config = X.getConfig(inifile_path)

        if not config.has_section("global"):
            config.add_section("global")
            config.set("global", "cluster_id", clusterId)
            config.set("global","cluster", "")

        nodes = self.__getNodes(config)

        if name in nodes:
            raise Exception("There is already a node with name %s configured" % name)

        nodes.append(name)
        config.add_section(name)
        config.set(name, "ip", ', '.join(ip))
        config.set(name, "client_port", clientPort)

        config.set("global","cluster", ",".join(nodes))

        X.writeConfig(config,inifile_path)
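Not part of the scraped examples: a minimal, self-contained sketch of the ini layout that the addNode variant above ends up writing, i.e. a [global] section carrying cluster_id and the comma-separated cluster node list, plus one section per node holding its ip and client_port. The cluster name, node name, address and output path below are hypothetical.

# Illustrative only: emulates the sections/keys written by addNode above.
try:
    from configparser import ConfigParser   # Python 3
except ImportError:
    from ConfigParser import ConfigParser   # Python 2

config = ConfigParser()

# [global] section: cluster id plus the comma-separated node list
config.add_section("global")
config.set("global", "cluster_id", "demo")        # hypothetical cluster id
config.set("global", "cluster", "demo_0")

# one section per node, holding its ip(s) and client port
config.add_section("demo_0")
config.set("demo_0", "ip", "127.0.0.1")
config.set("demo_0", "client_port", "7080")

with open("/tmp/demo_client.cfg", "w") as f:      # hypothetical output path
    config.write(f)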
Example #35
        def decorate(*args,**kwargs):

            global data_base_dir
            data_base_dir = '/'.join([X.tmpDir,'arakoon_system_tests' , func.func_name])
            global test_failed
            test_failed = False
            fatal_ex = None
            home_dir = data_base_dir
            if X.fileExists( data_base_dir):
                remove_dirs ()
            self.__setup( home_dir )
            try:
                func(*args,**kwargs)
            except Exception, outer :
                tb = traceback.format_exc()
                logging.fatal( tb )
                fatal_ex = outer
Example #36
    def getNodes(self):
        """
        Get an object that contains all node information
        @return dict the dict can be used as param for the ArakoonConfig object
        """

        config = X.getConfig(self._configPath)

        clientconfig = {}

        if config.has_section("global"):
            nodes = self.__getNodes(config)

            for name in nodes:
                ips = config.get(name, 'ip')
                ip_list = map(lambda x: x.strip(), ips.split(','))
                clientconfig[name] = (ip_list,
                                      config.get(name, "client_port"))

        return clientconfig
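Not part of the scraped examples: the shape of the mapping getNodes builds above, as implied by the loop (each node name maps to a tuple of its ip list and its client_port, both read from the ini file as strings). The node names and values are hypothetical.

# Illustrative only: the dict returned by getNodes above.
clientconfig = {
    "demo_0": (["127.0.0.1"], "7080"),
    "demo_1": (["192.168.0.2", "10.0.0.2"], "7081"),
}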
Example #37
 def listClients(self):
     """
     Returns a list with the existing clients.
     """
     config = X.getConfig("arakoonclients")
     return config.sections()