Example #1
import unittest

# The project targets Python 2, so the standalone mock package is used;
# on Python 3 these names live in unittest.mock.
from mock import patch, MagicMock
from paramiko import SSHException

from clustermgr.core.remote import RemoteClient, ClientNotSetupException


class RemoteClientTestCase(unittest.TestCase):
    def setUp(self):
        with patch("clustermgr.core.remote.SSHClient") as mock_client:
            self.sshclient = mock_client.return_value
            # self.sshclient.open_sftp.return_value = MagicMock(name="sftp")
            self.rc = RemoteClient('server')
            self.rc.startup()

    @patch('clustermgr.core.remote.SSHClient')
    def test_startup_falls_back_to_ip(self, mock_client):
        instance = mock_client.return_value
        instance.connect = MagicMock(side_effect=[SSHException, None])
        RemoteClient('server', ip='0.0.0.0').startup()
        instance.connect.assert_called_with('0.0.0.0',
                                            port=22,
                                            username='******')

    def test_download_calls_sftpclient_get(self):
        rv = self.rc.download('remote', 'local')
        self.rc.sftpclient.get.assert_called_with('remote', 'local')
        assert "successful" in rv

    def test_download_returns_errors_when_get_throws_errors(self):
        self.rc.sftpclient.get.side_effect = OSError
        rv = self.rc.download('remote', 'local_1')
        self.assertIn('Error: Local', rv)

        self.rc.sftpclient.get.side_effect = IOError
        rv = self.rc.download('remote', 'local_2')
        self.assertIn('Error: Remote', rv)

    def test_upload_calls_sftpclient_put(self):
        rv = self.rc.upload('local', 'remote')
        self.rc.sftpclient.put.assert_called_with('local', 'remote')
        assert "successful" in rv

    def test_upload_returns_errors_when_put_throws_errors(self):
        self.rc.sftpclient.put.side_effect = IOError
        rv = self.rc.upload('local', 'remote')
        self.assertIn('Error: Remote', rv)

        self.rc.sftpclient.put.side_effect = OSError
        rv = self.rc.upload('local_2', 'remote_2')
        self.assertIn('Error: Local', rv)

    def test_upload_and_download_throws_error_if_sftpclient_is_none(self):
        self.rc.sftpclient = None
        with self.assertRaises(ClientNotSetupException):
            self.rc.upload('s', 't')
        with self.assertRaises(ClientNotSetupException):
            self.rc.download('s', 't')

    def test_exists_and_run_throws_error_if_client_is_none(self):
        self.rc.client = None
        with self.assertRaises(ClientNotSetupException):
            self.rc.exists('s')
        with self.assertRaises(ClientNotSetupException):
            self.rc.run('s')
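The tests above exercise the core RemoteClient workflow: connect by hostname, fall back to the IP address on SSHException, then transfer files over SFTP. A minimal usage sketch of that workflow (the hostname, IP and file paths are placeholders, not values from the project):

from clustermgr.core.remote import RemoteClient, ClientNotSetupException

rc = RemoteClient('server.example.org', ip='10.0.0.5')
try:
    rc.startup()          # tries the hostname first, falls back to the IP
except Exception as e:
    print "Could not connect: %s" % e
else:
    try:
        # upload/download return status strings ("... successful" or "Error: ...")
        print rc.upload('/tmp/local.conf', '/etc/remote.conf')
        print rc.download('/var/log/remote.log', '/tmp/remote.log')
    except ClientNotSetupException:
        print "startup() did not establish a connection"
    finally:
        rc.close()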
Example #2
File: cache.py Project: quanah/cluster-mgr
def setup_redis_cluster(tid):
    servers = Server.query.filter(Server.redis.is_(True)).filter(
        Server.stunnel.is_(True)).all()
    appconf = AppConfiguration.query.first()

    master_conf = [
        "port 7000", "cluster-enabled yes",
        "cluster-config-file nodes_7000.conf", "cluster-node-timeout 5000",
        "appendonly yes"
    ]
    slave_conf = [
        "port 7001", "cluster-enabled yes",
        "cluster-config-file nodes_7001.conf", "cluster-node-timeout 5000",
        "appendonly yes"
    ]
    for server in servers:
        rc = RemoteClient(server.hostname, ip=server.ip)
        try:
            rc.startup()
            wlogger.log(tid,
                        "Connecting to server ...",
                        "success",
                        server_id=server.id)
        except Exception as e:
            wlogger.log(tid,
                        "Could not connect to the server over SSH. Error:"
                        "{0}\nRedis configuration failed.".format(e),
                        "error",
                        server_id=server.id)
            continue

        chdir = '/'
        if server.gluu_server:
            chdir = "/opt/gluu-server-{0}".format(appconf.gluu_version)
        # upload the conf files
        wlogger.log(tid,
                    "Uploading redis conf files...",
                    "debug",
                    server_id=server.id)
        rc.put_file(os.path.join(chdir, "etc/redis/redis_7000.conf"),
                    "\n".join(master_conf))
        rc.put_file(os.path.join(chdir, "etc/redis/redis_7001.conf"),
                    "\n".join(slave_conf))
        # upload the modified init.d file
        rc.upload(
            os.path.join(app.root_path, "templates", "redis", "redis-server"),
            os.path.join(chdir, "etc/init.d/redis-server"))
        wlogger.log(tid,
                    "Configuration upload complete.",
                    "success",
                    server_id=server.id)

    return True
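For reference, the master configuration that put_file writes to etc/redis/redis_7000.conf in the loop above is simply the joined master_conf list:

port 7000
cluster-enabled yes
cluster-config-file nodes_7000.conf
cluster-node-timeout 5000
appendonly yes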
Example #3
def install_cache_components(self, method):
    """Celery task that installs the redis, stunnel and twemproxy applications
    in the required servers.

    Redis and stunnel are installed in all the servers in the cluster.
    Twemproxy is installed in the load-balancer/proxy server

    :param self: the celery task
    :param method: either STANDALONE, SHARDED

    :return: the number of servers where both stunnel and redis were installed
        successfully
    """
    tid = self.request.id
    installed = 0
    servers = Server.query.all()
    for server in servers:
        wlogger.log(tid, "Installing Redis in server {0}".format(
            server.hostname), "info", server_id=server.id)
        ri = RedisInstaller(server, tid)
        redis_installed = ri.install()
        if redis_installed:
            server.redis = True
            wlogger.log(tid, "Redis install successful", "success",
                        server_id=server.id)
        else:
            server.redis = False
            wlogger.log(tid, "Redis install failed", "fail",
                        server_id=server.id)

        wlogger.log(tid, "Installing Stunnel", "info", server_id=server.id)
        si = StunnelInstaller(server, tid)
        stunnel_installed = si.install()
        if stunnel_installed:
            server.stunnel = True
            wlogger.log(tid, "Stunnel install successful", "success",
                        server_id=server.id)
        else:
            server.stunnel = False
            wlogger.log(tid, "Stunnel install failed", "fail",
                        server_id=server.id)
        # Save the redis and stunnel install situation to the db
        db.session.commit()

        if redis_installed and stunnel_installed:
            installed += 1

    if method != 'STANDALONE':
        # No need to install twemproxy for "SHARDED" configuration
        return True

    # Install twemproxy in the Nginx load balancing proxy server
    app_conf = AppConfiguration.query.first()
    host = app_conf.nginx_host
    rc = RemoteClient(host)
    try:
        rc.startup()
    except Exception as e:
        wlogger.log(tid, "Could not connect to {0}".format(e), "error")
        return False

    server_os = get_os_type(rc)

    mock_server = Server()
    mock_server.hostname = host
    wlogger.log(tid, "Installing Stunnel in proxy server")
    si = StunnelInstaller(mock_server, tid)
    stunnel_installed = si.install()
    if stunnel_installed:
        wlogger.log(tid, "Stunnel install successful", "success")
    else:
        wlogger.log(tid, "Stunnel install failed", "fail")

    wlogger.log(tid, "Cluster manager will now try to build Twemproxy")
    # 1. Setup the development tools for installation
    if server_os in ["Ubuntu 16", "Ubuntu 14"]:
        run_and_log(rc, "apt-get update", tid)
        run_and_log(rc, "apt-get install -y build-essential autoconf libtool",
                    tid)
    elif server_os in ["CentOS 6", "CentOS 7", "RHEL 7"]:
        run_and_log(rc, "yum install -y wget", tid)
        run_and_log(rc, "yum groupinstall -y 'Development tools'", tid)

    if server_os == "CentOS 6":
        run_and_log(rc, "wget http://ftp.gnu.org/gnu/autoconf/autoconf-2.69.tar.gz",
                    tid)
        run_and_log(rc, "tar xvfvz autoconf-2.69.tar.gz", tid)
        run_and_log(rc, "cd autoconf-2.69 && ./configure", tid)
        run_and_log(rc, "cd autoconf-2.69 && make", tid)
        run_and_log(rc, "cd autoconf-2.69 && make install", tid)

    # 2. Get the source, build & install the nutcracker binaries
    run_and_log(rc, "wget https://github.com/twitter/twemproxy/archive/v0.4.1.tar.gz",
                tid)
    run_and_log(rc, "tar -xf v0.4.1.tar.gz", tid)
    run_and_log(rc, "cd twemproxy-0.4.1", tid)
    run_and_log(rc, "cd twemproxy-0.4.1 && autoreconf -fvi", tid)
    run_and_log(rc, "cd twemproxy-0.4.1 && ./configure --prefix=/usr", tid)
    run_and_log(rc, "cd twemproxy-0.4.1 && make", tid)
    run_and_log(rc, "cd twemproxy-0.4.1 && make install", tid)

    # 3. Post installation - setup user and logging
    run_and_log(rc, "useradd nutcracker", tid)
    run_and_log(rc, "mkdir /var/log/nutcracker", tid)
    run_and_log(rc, "touch /var/log/nutcracker/nutcracker.log", tid)
    run_and_log(rc, "chown -R nutcracker:nutcracker /var/log/nutcracker",
                tid)
    logrotate_conf = ["/var/log/nutcracker/nutcracker*.log {", "\tweekly",
                      "\tmissingok", "\trotate 12", "\tcompress",
                      "\tnotifempty", "}"]
    rc.put_file("/etc/logrotate.d/nutcracker", "\n".join(logrotate_conf))

    # 4. Add init/service scripts to run nutcracker as a service
    if server_os in ["Ubuntu 16", "CentOS 7", "RHEL 7"]:
        local = os.path.join(app.root_path, "templates", "twemproxy",
                             "twemproxy.service")
        remote = "/lib/systemd/system/nutcracker.service"
        rc.upload(local, remote)
        run_and_log(rc, "systemctl enable nutcracker", tid, None)
    elif server_os == "Ubuntu 14":
        local = os.path.join(app.root_path, "templates", "twemproxy",
                             "nutcracker.init")
        remote = "/etc/init.d/nutcracker"
        rc.upload(local, remote)
        run_and_log(rc, 'chmod +x /etc/init.d/nutcracker', tid)
        run_and_log(rc, "update-rc.d nutcracker defaults", tid)
    elif server_os == "CentOS 6":
        local = os.path.join(app.root_path, "templates", "twemproxy",
                             "nutcracker.centos.init")
        remote = "/etc/rc.d/init.d/nutcracker"
        rc.upload(local, remote)
        run_and_log(rc, "chmod +x /etc/init.d/nutcracker", tid)
        run_and_log(rc, "chkconfig --add nutcracker", tid)
        run_and_log(rc, "chkconfig nutcracker on", tid)

    # 5. Create the default configuration file referenced in the init scripts
    run_and_log(rc, "mkdir -p /etc/nutcracker", tid)
    run_and_log(rc, "touch /etc/nutcracker/nutcracker.yml", tid)

    rc.close()
    return installed
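The run_and_log helper used throughout this task is not part of the snippet. A minimal sketch of what such a helper could look like, assuming it simply wraps rc.run and reports stdout/stderr through wlogger, with the optional fourth argument treated as a server_id (the real helper in cluster-mgr may differ):

def run_and_log(rc, cmd, tid, server_id=None):
    """Run `cmd` over the RemoteClient and log its output (sketch)."""
    wlogger.log(tid, "Running: {0}".format(cmd), "debug", server_id=server_id)
    # rc.run appears to return a (stdin, stdout, stderr) tuple of strings
    cin, cout, cerr = rc.run(cmd)
    if cout.strip():
        wlogger.log(tid, cout, "debug", server_id=server_id)
    if cerr.strip():
        wlogger.log(tid, cerr, "error", server_id=server_id)
    return cout, cerr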
Example #4
def _rotate_keys(kr, javalibs_dir, jks_path):
    pub_keys = []
    openid_jks_pass = random_chars()

    if kr.type == "oxeleven":
        token = decrypt_text(kr.oxeleven_token, kr.oxeleven_token_key,
                             kr.oxeleven_token_iv)

        try:
            # delete old keys first
            print "deleting old keys"
            for key_id in OxelevenKeyID.query:
                status_code, out = delete_key(kr.oxeleven_url, key_id.kid,
                                              token)
                if status_code == 200 and out["deleted"]:
                    db.session.delete(key_id)
                    db.session.commit()
                elif status_code == 401:
                    print "insufficient access to call oxEleven API"

            # obtain new keys
            print "obtaining new keys"
            for algo in ["RS256", "RS384", "RS512", "ES256", "ES384", "ES512"]:
                status_code, out = generate_key(kr.oxeleven_url,
                                                algo,
                                                token=token)
                if status_code == 200:
                    key_id = OxelevenKeyID()
                    key_id.kid = out["kid"]
                    db.session.add(key_id)
                    db.session.commit()
                    pub_keys.append(out)
                elif status_code == 401:
                    print "insufficient access to call oxEleven API"
                else:
                    print "unable to obtain the keys from oxEleven; " \
                        "status code={}".format(status_code)
        except requests.exceptions.ConnectionError:
            print "unable to establish connection to oxEleven; skipping task"
    else:
        out, err, retcode = generate_jks(
            openid_jks_pass,
            javalibs_dir,
            jks_path,
        )
        if retcode == 0:
            json_out = json.loads(out)
            pub_keys = json_out["keys"]
        else:
            print err

    # update LDAP entry
    if pub_keys and modify_oxauth_config(kr, pub_keys, openid_jks_pass):
        print "pub keys has been updated"
        kr.rotated_at = datetime.utcnow()
        db.session.add(kr)
        db.session.commit()

        if kr.type == "jks":
            from clustermgr.core.remote import RemoteClient
            for server in Server.query:
                c = RemoteClient(server.hostname)
                try:
                    c.startup()
                except Exception:
                    print "Couldn't connect to server %s. Can't copy JKS" % server.hostname
                    continue
                c.upload(jks_path, server.jks_path)
                c.close()
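A hedged illustration of how _rotate_keys might be invoked; the KeyRotation query, the javalibs directory and the JKS path below are placeholders/assumptions rather than values taken from the snippet:

kr = KeyRotation.query.first()               # assumed model exposing kr.type, kr.rotated_at, ...
javalibs_dir = "/opt/gluu/javalibs"          # placeholder path
jks_path = "/etc/certs/oxauth-keys.jks"      # placeholder path
if kr:
    _rotate_keys(kr, javalibs_dir, jks_path)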
Example #5
File: cache.py Project: quanah/cluster-mgr
def setup_sharded(tid):
    servers = Server.query.filter(Server.redis.is_(True)).filter(
        Server.stunnel.is_(True)).all()
    appconf = AppConfiguration.query.first()
    # Store the redis server info in the LDAP
    ports_count = len(servers) - 1
    for server in servers:
        wlogger.log(tid,
                    "Updating oxCacheConfiguration ...",
                    "debug",
                    server_id=server.id)

        server_string = ",".join(
            ['localhost:6379'] +
            ["localhost:700{0}".format(i) for i in xrange(ports_count)])

        try:
            dbm = DBManager(
                server.hostname,
                1636,
                server.ldap_password,
                ssl=True,
                ip=server.ip,
            )
        except Exception as e:
            wlogger.log(tid,
                        "Couldn't connect to LDAP. Error: {0}".format(e),
                        "error",
                        server_id=server.id)
            wlogger.log(tid, "Make sure your LDAP server is listening to "
                        "connections from outside",
                        "debug",
                        server_id=server.id)
            continue
        entry = dbm.get_appliance_attributes('oxCacheConfiguration')
        cache_conf = json.loads(entry.oxCacheConfiguration.value)
        cache_conf['cacheProviderType'] = 'REDIS'
        cache_conf['redisConfiguration']['redisProviderType'] = 'SHARDED'
        cache_conf['redisConfiguration']['servers'] = server_string

        result = dbm.set_applicance_attribute('oxCacheConfiguration',
                                              [json.dumps(cache_conf)])
        if not result:
            wlogger.log(tid,
                        "oxCacheConfigutaion update failed",
                        "fail",
                        server_id=server.id)
            continue

        # generate stunnel configuration and upload it to the server
        wlogger.log(tid, "Setting up stunnel", "info", server_id=server.id)
        chdir = '/'
        if server.gluu_server:
            chdir = "/opt/gluu-server-{0}".format(appconf.gluu_version)

        rc = RemoteClient(server.hostname, ip=server.ip)
        try:
            rc.startup()
        except:
            wlogger.log(tid, "Could not connect to the server over SSH. "
                        "Stunnel setup failed.",
                        "error",
                        server_id=server.id)
            continue

        wlogger.log(tid,
                    "Enable stunnel start on system boot",
                    "debug",
                    server_id=server.id)
        # replace the /etc/default/stunnel4 to enable start on system startup
        local = os.path.join(app.root_path, 'templates', 'stunnel',
                             'stunnel4.default')
        remote = os.path.join(chdir, 'etc/default/stunnel4')
        rc.upload(local, remote)

        # setup the certificate file
        wlogger.log(tid,
                    "Generating certificate for stunnel ...",
                    "debug",
                    server_id=server.id)
        prop_buffer = StringIO()
        propsfile = os.path.join(chdir, "install", "community-edition-setup",
                                 "setup.properties.last")
        rc.sftpclient.getfo(propsfile, prop_buffer)
        prop_buffer.seek(0)
        props = dict()
        prop_in = lambda line: line.split("=")[1].strip()
        for line in prop_buffer:
            if re.match('^countryCode', line):
                props['country'] = prop_in(line)
            if re.match('^state', line):
                props['state'] = prop_in(line)
            if re.match('^city', line):
                props['city'] = prop_in(line)
            if re.match('^orgName', line):
                props['org'] = prop_in(line)
            if re.match('^hostname', line):
                props['cn'] = prop_in(line)
            if re.match('^admin_email', line):
                props['email'] = prop_in(line)

        subject = "'/C={country}/ST={state}/L={city}/O={org}/CN={cn}" \
                  "/emailAddress={email}'".format(**props)
        cert_path = os.path.join(chdir, "etc", "stunnel", "server.crt")
        key_path = os.path.join(chdir, "etc", "stunnel", "server.key")
        pem_path = os.path.join(chdir, "etc", "stunnel", "cert.pem")
        cmd = [
            "/usr/bin/openssl", "req", "-subj", subject, "-new", "-newkey",
            "rsa:2048", "-sha256", "-days", "365", "-nodes", "-x509",
            "-keyout", key_path, "-out", cert_path
        ]
        cin, cout, cerr = rc.run(" ".join(cmd))
        rc.run("cat {cert} {key} > {pem}".format(cert=cert_path,
                                                 key=key_path,
                                                 pem=pem_path))
        # verify certificate
        cin, cout, cerr = rc.run("/usr/bin/openssl verify " + pem_path)
        if props['cn'] in cout and props['org'] in cout:
            wlogger.log(tid,
                        "Certificate generated successfully",
                        "success",
                        server_id=server.id)
        else:
            wlogger.log(tid, "Certificate generation failed. Add a SSL "
                        "certificate at /etc/stunnel/cert.pem",
                        "error",
                        server_id=server.id)

        # Generate stunnel config
        wlogger.log(tid,
                    "Setup stunnel listening and forwarding",
                    "debug",
                    server_id=server.id)
        sconf = [
            "cert = /etc/stunnel/cert.pem", "pid = /var/run/stunnel.pid",
            "[redis-server]", "client = no",
            "accept = {0}:7777".format(server.ip), "connect = 127.0.0.1:6379"
        ]
        listen_port = 7000
        for s in servers:
            if s.id != server.id:
                sconf.append("[client{0}]".format(s.id))
                sconf.append("client = yes")
                sconf.append("accept = 127.0.0.1:{0}".format(listen_port))
                sconf.append("connect = {0}:7777".format(s.ip))
                listen_port += 1

        rc.put_file(os.path.join(chdir, "etc/stunnel/redis-gluu.conf"),
                    "\n".join(sconf))
    return True
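To make the stunnel layout concrete, the redis-gluu.conf written above for a two-node cluster would look roughly like this (10.0.0.1 is the current server, 10.0.0.2 a peer whose database id is assumed to be 2; both IPs are placeholders):

cert = /etc/stunnel/cert.pem
pid = /var/run/stunnel.pid
[redis-server]
client = no
accept = 10.0.0.1:7777
connect = 127.0.0.1:6379
[client2]
client = yes
accept = 127.0.0.1:7000
connect = 10.0.0.2:7777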
Example #6
def install_monitoring(self):
    """Celery task that installs monitoring components to remote server.

    :param self: the celery task

    :return: wether monitoring were installed successfully
    """

    tid = self.request.id
    installed = 0
    servers = Server.query.all()
    app_config = AppConfiguration.query.first()

    for server in servers:
        # 1. Make SSH Connection to the remote server
        wlogger.log(tid,
                    "Making SSH connection to the server {0}".format(
                        server.hostname),
                    "info",
                    server_id=server.id)

        c = RemoteClient(server.hostname, ip=server.ip)
        try:
            c.startup()
        except Exception as e:
            wlogger.log(tid,
                        "Cannot establish SSH connection {0}".format(e),
                        "warning",
                        server_id=server.id)
            wlogger.log(tid,
                        "Ending server setup process.",
                        "error",
                        server_id=server.id)
            return False

        # 2. create monitoring directory
        result = c.run('mkdir -p /var/monitoring/scripts')

        ctext = "\n".join(result)
        if ctext.strip():
            wlogger.log(tid, ctext, "debug", server_id=server.id)

        wlogger.log(tid, "Directory /var/monitoring/scripts directory "
                    "was created",
                    "success",
                    server_id=server.id)

        # 3. Upload scripts

        scripts = ('cron_data_sqtile.py', 'get_data.py',
                   'sqlite_monitoring_tables.py')

        for scr in scripts:

            local_file = os.path.join(app.root_path, 'monitoring_scripts', scr)

            remote_file = '/var/monitoring/scripts/' + scr

            result = c.upload(local_file, remote_file)

            if result.startswith("Upload successful"):
                wlogger.log(tid,
                            "File {} was uploaded".format(scr),
                            "success",
                            server_id=server.id)
            else:
                wlogger.log(tid,
                            "File {} could not "
                            "be uploaded: {}".format(scr, result),
                            "error",
                            server_id=server.id)
                return False

        # 4. Upload gluu version, no need to determine gluu version each time

        result = c.put_file('/var/monitoring/scripts/gluu_version.txt',
                            app_config.gluu_version)

        # 5. Upload a crontab entry to collect data every 5 minutes
        crontab_entry = ('*/5 * * * *    root    python '
                         '/var/monitoring/scripts/cron_data_sqtile.py\n')

        result = c.put_file('/etc/cron.d/monitoring', crontab_entry)

        if not result[0]:
            wlogger.log(tid,
                        "An errorr occurred while uploading crontab entry"
                        ": {}".format(result[1]),
                        "error",
                        server_id=server.id)
        else:
            wlogger.log(tid,
                        "Crontab entry was uploaded",
                        "success",
                        server_id=server.id)

        # 6. Installing packages.
        # 6a. First determine commands for each OS type
        if ('CentOS' in server.os) or ('RHEL' in server.os):
            package_cmd = [
                'yum install -y epel-release', 'yum repolist',
                'yum install -y gcc', 'yum install -y python-devel',
                'yum install -y python-pip', 'service crond restart'
            ]

        else:
            package_cmd = [
                'DEBIAN_FRONTEND=noninteractive apt-get install -y gcc',
                'DEBIAN_FRONTEND=noninteractive apt-get install -y python-dev',
                'DEBIAN_FRONTEND=noninteractive apt-get install -y python-pip',
                'service cron restart',
            ]
        # 6b. These commands are common for all OS types
        package_cmd += [
            'pip install ldap3', 'pip install psutil', 'pip install pyDes',
            'python /var/monitoring/scripts/'
            'sqlite_monitoring_tables.py'
        ]
        # 6c. Executing commands
        wlogger.log(tid,
                    "Installing Packages and Running Commands",
                    "info",
                    server_id=server.id)

        for cmd in package_cmd:

            result = c.run(cmd)

            wlogger.log(tid, "\n".join(result), "debug", server_id=server.id)

            err = False

            if result[2].strip():
                print "Writing error", cmd
                if not ("pip install --upgrade pip" in result[2]
                        or 'Redirecting to /bin/systemctl' in result[2]):
                    wlogger.log(tid,
                                "An error occurrued while executing "
                                "{}: {}".format(cmd, result[2]),
                                "error",
                                server_id=server.id)
                    err = True

            if not err:
                wlogger.log(tid,
                            "Command was run successfully: {}".format(cmd),
                            "success",
                            server_id=server.id)
        server.monitoring = True

    db.session.commit()
    return True
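Since install_monitoring reads self.request.id, it is meant to run as a bound Celery task. A minimal dispatch sketch, assuming the function is registered with @celery.task(bind=True) somewhere in cluster-mgr (the decorator and its location are assumptions):

# assumed registration:
#   @celery.task(bind=True)
#   def install_monitoring(self): ...
result = install_monitoring.delay()   # queue the task asynchronously
task_id = result.id                   # the same id the task uses as `tid` for wlogger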