Esempio n. 1
0
    def testNameDiffersFromId(self):
        """The cluster_id stored inside the config file wins over the name:
        a client created for the cluster must report the inner id."""
        cluster_name = self._clusterId
        inner_id = "id_inside"

        cluster = C._getCluster(cluster_name)
        cfg_name = cluster._getConfigFileName()
        logging.debug("cfg_name = %s", cfg_name)
        # e.g. /opt/qbase3/cfg//qconfig/arakoon/cluster_name
        assert_true(cfg_name.endswith(cluster_name))
        cluster.addNode('node_0')

        cfg = X.getConfig(cfg_name)
        logging.debug("cfg = %s", X.cfg2str(cfg))
        assert_equals(cfg.get('global', 'cluster_id'), cluster_name)

        # now overwrite the id stored inside the file
        cfg.set('global', 'cluster_id', inner_id)
        X.writeConfig(cfg, cfg_name)
        logging.debug('cfg_after = %s', X.cfg2str(cfg))

        cluster = C._getCluster(cluster_name)
        client = cluster.getClient()
        client_cfg = client._config
        logging.debug("ccfg2=%s", client_cfg)
        assert_equals(client_cfg.getClusterId(), inner_id)
Esempio n. 2
0
def test_max_value_size_tinkering():
    """After lowering __tainted_max_value_size below the payload size,
    setting the same value again must raise an ArakoonException."""
    cluster = C._getCluster()
    C.assert_running_nodes(1)

    key = "key_not_so_big_v"
    value = "xxxxxxxxxx" * 2000  # 20 000 byte payload

    client = C.get_client()
    client.set(key, value)
    cluster.stop()
    logging.debug("set succeeded")

    # shrink the allowed value size below the payload, then restart
    cfg = cluster._getConfigFile()
    cfg.set("global", "__tainted_max_value_size", "1024")
    X.writeConfig(cfg, cluster._getConfigFileName())
    cluster.start()
    time.sleep(1.0)
    C.assert_running_nodes(1)

    client = C.get_client()
    assert_raises(X.arakoon_client.ArakoonException, client.set, key, value)
Esempio n. 3
0
    def getClientConfig(self, clusterName, configName=None):
        """
        Register an Arakoon client for a cluster (if not yet known) and
        return its configuration object.

        @type clusterName: string
        @param clusterName: the name of the cluster for which you want to add a client
        @type configName: optional string
        @param configName: the name of the client configuration for this cluster
        """
        clients_file = '/'.join([X.cfgDir, 'arakoonclients'])
        clients = X.getConfig(clients_file)
        if not clients.has_section(clusterName):
            # first client for this cluster: record where its config lives
            clients.add_section(clusterName)
            clients.set(clusterName, "path",
                        '/'.join([X.cfgDir, "qconfig", "arakoon", clusterName]))
            X.writeConfig(clients, clients_file)

        return ArakoonClientExtConfig(clusterName,
                                      self._getConfig(clusterName, configName))
Esempio n. 4
0
    def getClientConfig(self, clusterName, configName=None):
        """
        Ensure the cluster is listed in the arakoonclients registry, then
        build and return a client configuration for it.

        @type clusterName: string
        @param clusterName: the name of the cluster for which you want to add a client
        @type configName: optional string
        @param configName: the name of the client configuration for this cluster
        """
        registry_path = '/'.join([X.cfgDir, 'arakoonclients'])
        registry = X.getConfig(registry_path)
        if not registry.has_section(clusterName):
            registry.add_section(clusterName)
            config_dir = '/'.join([X.cfgDir, "qconfig", "arakoon", clusterName])
            registry.set(clusterName, "path", config_dir)
            X.writeConfig(registry, registry_path)

        cfg_file = self._getConfig(clusterName, configName)
        client_config = ArakoonClientExtConfig(clusterName, cfg_file)
        return client_config
Esempio n. 5
0
    def addNode(self, name, ip, clientPort):
        """
        Add a node to the client configuration

        @param name: the name of the node
        @param ip: the ip of the node (a single string or a list of strings)
        @param clientPort: the port of the node
        """
        self.__validateName(name)

        # accept a single ip as well as a list of ips
        ips = [ip] if isinstance(ip, basestring) else ip

        config = X.getConfig(self._configPath)

        # bootstrap the file on first use
        if not config.has_section("global"):
            config.add_section("global")
            config.set("global", "cluster_id", self._clusterId)
            config.set("global", "cluster", "")

        nodes = self.__getNodes(config)

        if name in nodes:
            raise Exception("There is already a node with name %s configured" %
                            name)

        nodes.append(name)
        config.add_section(name)
        config.set(name, "ip", ', '.join(ips))
        config.set(name, "client_port", clientPort)
        config.set("global", "cluster", ",".join(nodes))

        X.writeConfig(config, self._configPath)
Esempio n. 6
0
    def addNode(self, name, ip, clientPort):
        """
        Register a node in the client configuration

        @param name: the name of the node
        @param ip: the ip of the node (string or list of strings)
        @param clientPort: the port of the node
        """
        self.__validateName(name)

        if isinstance(ip, basestring):
            ip = [ip]

        path = self._configPath
        cfg = X.getConfig(path)

        # create the [global] section the first time the file is touched
        if not cfg.has_section("global"):
            cfg.add_section("global")
            cfg.set("global", "cluster_id", self._clusterId)
            cfg.set("global", "cluster", "")

        known = self.__getNodes(cfg)
        if name in known:
            raise Exception("There is already a node with name %s configured" % name)

        # record the node and refresh the cluster member list
        known.append(name)
        cfg.add_section(name)
        cfg.set(name, "ip", ', '.join(ip))
        cfg.set(name, "client_port", clientPort)
        cfg.set("global", "cluster", ",".join(known))

        X.writeConfig(cfg, path)
def setup_n_nodes_base(c_id, node_names, force_master,
                       base_dir, base_msg_port, base_client_port,
                       extra = None, witness_nodes = False, useIPV6=False,
                       slowCollapser = False):
    """
    Build and configure an Arakoon cluster `c_id` with the given nodes,
    starting from a clean slate.

    @param c_id: cluster id
    @param node_names: list of node names; ports are assigned sequentially
        from base_client_port / base_msg_port
    @param force_master: if truthy, force node_names[0] as master;
        otherwise use master election
    @param base_dir: directory under which per-node dirs are created
    @param extra: optional dict of extra settings written to the [global]
        config section
    @param witness_nodes: if True, every odd-indexed node becomes a witness
    @param useIPV6: bind nodes to ::1 instead of 127.0.0.1
    @param slowCollapser: if True, odd-indexed nodes get collapseSlowdown=3
    """
    # flush firewall rules -- presumably left behind by fault-injection
    # tests; NOTE(review): requires passwordless sudo
    X.subprocess.check_call("sudo /sbin/iptables -F".split(' ') )

    # tear down any previous incarnation of the cluster, then recreate it
    cluster = _getCluster( c_id )
    cluster.tearDown()

    cluster = _getCluster(c_id)
    logging.info( "Creating data base dir %s" % base_dir )
    X.createDir ( base_dir )

    n = len(node_names)
    ip = "127.0.0.1"
    if useIPV6:
        ip = "::1"

    for i in range (n) :
        # odd-indexed nodes are witnesses when witness_nodes is set
        is_witness = witness_nodes & (i % 2 != 0)
        nodeName = node_names[ i ]
        (db_dir,log_dir,tlf_dir,head_dir) = build_node_dir_names( nodeName )
        if slowCollapser and (i % 2 == 1):
            collapseSlowdown = 3
        else:
            collapseSlowdown = None
        cluster.addNode(name=nodeName,
                        ip = ip,
                        clientPort = base_client_port+i,
                        messagingPort = base_msg_port+i,
                        logDir = log_dir,
                        home = db_dir,
                        tlfDir = tlf_dir,
                        headDir = head_dir,
                        isWitness = is_witness,
                        collapseSlowdown = collapseSlowdown)

        cluster.addLocalNode(nodeName)
        cluster.createDirs(nodeName)

    cluster.disableFsync()

    if force_master:
        logging.info( "Forcing master to %s", node_names[0] )
        cluster.forceMaster(node_names[0] )
    else :
        logging.info( "Using master election" )
        cluster.forceMaster(None )

    # per-node tweak: don't fsync the tlog dir (test speed)
    config = cluster._getConfigFile()
    for i in range (n):
        nodeName = node_names[ i ]
        config.set(nodeName, '__tainted_fsync_tlog_dir', 'false')

    # apply any caller-supplied extra [global] settings
    if extra :
        logging.info("EXTRA!")
        for k,v in extra.items():
            logging.info("%s -> %s", k, v)
            config.set("global", k, v)

    fn = cluster._getConfigFileName()
    X.writeConfig(config, fn)


    logging.info( "Creating client config" )
    regenerateClientConfig( c_id )

    logging.info( "Changing log level to debug for all nodes" )
    cluster.setLogLevel("debug")

    lease = int(lease_duration)
    logging.info( "Setting lease expiration to %d" % lease)
    cluster.setMasterLease( lease )
Esempio n. 8
0
 def setUp(self):  #pylint: disable-msg=C0103
     """Create a minimal 'tls_test' cluster config containing only the
     [global] section with its cluster_id."""
     self.cluster = ArakoonManagement.ArakoonCluster('tls_test')
     config = self.cluster._getConfigFile()  #pylint: disable-msg=W0212
     config.add_section('global')
     config.set('global', 'cluster_id', 'tls_test')
     X.writeConfig(config, self.cluster._getConfigFileName())