Example #1
    def configure_general_environment(self, cluster_context, instances=None):
        LOG.debug('Executing post configure hooks')

        if not instances:
            instances = cluster_context.get_instances()

        def set_user_password(instance):
            LOG.debug('Setting password for user "mapr"')
            if self.mapr_user_exists(instance):
                with instance.remote() as r:
                    r.execute_command(
                        'echo "%s:%s"|chpasswd' % ('mapr', 'mapr'),
                        run_as_root=True)
            else:
                LOG.warning(_LW('User "mapr" does not exist'))

        def create_home_mapr(instance):
            target_path = '/home/mapr'
            LOG.debug("Creating home directory for user 'mapr'")
            args = {'path': target_path}
            cmd = 'mkdir -p %(path)s && chown mapr:mapr %(path)s' % args
            if self.mapr_user_exists(instance):
                with instance.remote() as r:
                    r.execute_command(cmd, run_as_root=True)
            else:
                LOG.warning(_LW('User "mapr" does not exist'))

        util.execute_on_instances(instances, set_user_password)
        util.execute_on_instances(instances, create_home_mapr)
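
All of the examples on this page pass per-instance callables to util.execute_on_instances. The plugin's actual helper is not shown here; the following is only a minimal, hypothetical sketch of such a helper, assuming it simply invokes the callable once per instance (possibly in parallel) and collects the results:

    from concurrent.futures import ThreadPoolExecutor


    def execute_on_instances(instances, function, *args, **kwargs):
        # Illustrative stand-in only: call `function(instance, ...)` for every
        # instance, running the calls concurrently and returning the results.
        if not instances:
            return []
        with ThreadPoolExecutor(max_workers=len(instances)) as executor:
            futures = [executor.submit(function, instance, *args, **kwargs)
                       for instance in instances]
            return [future.result() for future in futures]
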
Example #2
    def configure_general_environment(self, cluster_context, instances=None):
        LOG.debug('Executing post configure hooks')
        mapr_user_pass = pu.get_mapr_password(cluster_context.cluster)

        if not instances:
            instances = cluster_context.get_instances()

        def set_user_password(instance):
            LOG.debug('Setting password for user "%s"' % pu.MAPR_USER_NAME)
            if self.mapr_user_exists(instance):
                with instance.remote() as r:
                    r.execute_command(
                        'echo "%s:%s"|chpasswd' %
                        (pu.MAPR_USER_NAME, mapr_user_pass),
                        run_as_root=True)
            else:
                LOG.warning(_LW('User "mapr" does not exist'))

        def create_home_mapr(instance):
            target_path = '/home/%s' % pu.MAPR_USER_NAME
            LOG.debug("Creating home directory for user '%s'" %
                      pu.MAPR_USER_NAME)
            args = {'path': target_path,
                    'user': pu.MAPR_USER_NAME,
                    'group': _MAPR_GROUP_NAME}
            cmd = ('mkdir -p %(path)s && chown %(user)s:%(group)s %(path)s'
                   % args)
            if self.mapr_user_exists(instance):
                with instance.remote() as r:
                    r.execute_command(cmd, run_as_root=True)
            else:
                LOG.warning(_LW('User "mapr" does not exist'))

        util.execute_on_instances(instances, set_user_password)
        util.execute_on_instances(instances, create_home_mapr)
Example #3
    def configure_general_environment(self, cluster_context, instances=None):
        LOG.debug('Executing post configure hooks')
        mapr_user_pass = pu.get_mapr_password(cluster_context.cluster)

        if not instances:
            instances = cluster_context.get_instances()

        def set_user_password(instance):
            LOG.debug('Setting password for user "mapr"')
            if self.mapr_user_exists(instance):
                with instance.remote() as r:
                    r.execute_command('echo "%s:%s"|chpasswd' %
                                      ('mapr', mapr_user_pass),
                                      run_as_root=True)
            else:
                LOG.warning(_LW('User "mapr" does not exist'))

        def create_home_mapr(instance):
            target_path = '/home/mapr'
            LOG.debug("Creating home directory for user 'mapr'")
            args = {'path': target_path, 'user': 'mapr', 'group': 'mapr'}
            cmd = ('mkdir -p %(path)s && chown %(user)s:%(group)s %(path)s' %
                   args)
            if self.mapr_user_exists(instance):
                with instance.remote() as r:
                    r.execute_command(cmd, run_as_root=True)
            else:
                LOG.warning(_LW('User "mapr" does not exist'))

        util.execute_on_instances(instances, set_user_password)
        util.execute_on_instances(instances, create_home_mapr)
Example #4
    def _install_security_repos(self, cluster_context, instances):
        LOG.debug("Installing security repos")

        @el.provision_event()
        def install_security_repos(instance):
            return util.run_script(instance, ADD_SECURITY_REPO_SCRIPT, "root")

        util.execute_on_instances(instances, install_security_repos)
Example #5
    def _install_security_repos(self, cluster_context, instances):
        LOG.debug("Installing security repos")

        @el.provision_event()
        def install_security_repos(instance):
            return util.run_script(instance, ADD_SECURITY_REPO_SCRIPT, "root")

        util.execute_on_instances(instances, install_security_repos)
Example #6
 def _install_ssh_keys(self, cluster_context, instances):
     slaves = cluster_context.filter_instances(instances, SPARK_SLAVE)
     masters = cluster_context.filter_instances(instances, SPARK_MASTER)
     instances = g.unique_list(masters + slaves)
     private_key = cluster_context.cluster.management_private_key
     public_key = cluster_context.cluster.management_public_key
     g.execute_on_instances(
         instances, g.install_ssh_key, 'mapr', private_key, public_key)
     g.execute_on_instances(instances, g.authorize_key, 'mapr', public_key)
Example #7
 def _install_ssh_keys(self, cluster_context, instances):
     slaves = cluster_context.filter_instances(instances, SPARK_SLAVE)
     masters = cluster_context.filter_instances(instances, SPARK_MASTER)
     instances = g.unique_list(masters + slaves)
     private_key = cluster_context.cluster.management_private_key
     public_key = cluster_context.cluster.management_public_key
     g.execute_on_instances(
         instances, g.install_ssh_key, 'mapr', private_key, public_key)
     g.execute_on_instances(instances, g.authorize_key, 'mapr', public_key)
Example #8
    def _install_mapr_repo(self, cluster_context, instances):
        distro_name = cluster_context.distro.name

        @el.provision_event()
        def install_mapr_repos(instance):
            return util.run_script(instance, ADD_MAPR_REPO_SCRIPT, "root",
                                   distro_name, **cluster_context.mapr_repos)

        util.execute_on_instances(instances, install_mapr_repos)
Example #9
    def _configure_ssh_connection(self, cluster_context, instances):
        def keep_alive_connection(instance):
            echo_param = 'echo "KeepAlive yes" >> ~/.ssh/config'
            echo_timeout = 'echo "ServerAliveInterval 60" >> ~/.ssh/config'
            with instance.remote() as r:
                r.execute_command(echo_param)
                r.execute_command(echo_timeout)

        util.execute_on_instances(instances, keep_alive_connection)
Example #10
    def _configure_ssh_connection(self, cluster_context, instances):
        def keep_alive_connection(instance):
            echo_param = 'echo "KeepAlive yes" >> ~/.ssh/config'
            echo_timeout = 'echo "ServerAliveInterval 60" >> ~/.ssh/config'
            with instance.remote() as r:
                r.execute_command(echo_param)
                r.execute_command(echo_timeout)

        util.execute_on_instances(instances, keep_alive_connection)
Example #11
    def _install_mapr_repo(self, cluster_context, instances):
        distro_name = cluster_context.distro.name

        @el.provision_event()
        def install_mapr_repos(instance):
            return util.run_script(instance, ADD_MAPR_REPO_SCRIPT, "root",
                                   distro_name, **cluster_context.mapr_repos)

        util.execute_on_instances(instances, install_mapr_repos)
Example #12
    def _install_java(self, cluster_context, instances):
        LOG.debug("Installing Java")
        distro_name = cluster_context.distro.name

        @el.provision_event()
        def install_java(instance):
            return util.run_script(instance, INSTALL_JAVA_SCRIPT, "root",
                                   distro_name)

        util.execute_on_instances(instances, install_java)
Example #13
    def _install_mysql_client(self, cluster_context, instances):
        LOG.debug("Installing MySQL client")
        distro_name = cluster_context.distro.name

        @el.provision_event()
        def install_mysql_client(instance):
            return util.run_script(instance, INSTALL_MYSQL_CLIENT, "root",
                                   distro_name)

        util.execute_on_instances(instances, install_mysql_client)
Example #14
    def _install_mysql_client(self, cluster_context, instances):
        LOG.debug("Installing MySQL client")
        distro_name = cluster_context.distro.name

        @el.provision_event()
        def install_mysql_client(instance):
            return util.run_script(instance, INSTALL_MYSQL_CLIENT,
                                   "root", distro_name)

        util.execute_on_instances(instances, install_mysql_client)
Example #15
    def _install_java(self, cluster_context, instances):
        LOG.debug("Installing Java")
        distro_name = cluster_context.distro.name

        @el.provision_event()
        def install_java(instance):
            return util.run_script(instance, INSTALL_JAVA_SCRIPT,
                                   "root", distro_name)

        util.execute_on_instances(instances, install_java)
Example #16
    def _set_cluster_mode(self, cluster_context, instances):
        cluster_mode = cluster_context.cluster_mode
        if not cluster_mode:
            return

        command = "maprcli cluster mapreduce set -mode %s" % cluster_mode

        @el.provision_event()
        def set_cluster_mode(instance):
            return util.execute_command([instance], command, run_as="mapr")

        util.execute_on_instances(instances, set_cluster_mode)
Example #17
    def _set_cluster_mode(self, cluster_context, instances):
        cluster_mode = cluster_context.cluster_mode
        if not cluster_mode:
            return

        command = "maprcli cluster mapreduce set -mode %s" % cluster_mode

        @el.provision_event()
        def set_cluster_mode(instance):
            return util.execute_command([instance], command, run_as="mapr")

        util.execute_on_instances(instances, set_cluster_mode)
Example #18
    def _wait_for_status(self, instances, status, sleep=3, timeout=60):
        def poll_status(instance):
            operation_name = _('Wait for {node_process} on {instance}'
                               ' to change status to "{status}"')
            args = {
                'node_process': self.ui_name,
                'instance': instance.instance_name,
                'status': status.name,
            }
            return polls.poll(
                get_status=lambda: self.status(instance) == status,
                operation_name=operation_name.format(**args),
                timeout=timeout,
                sleep=sleep,
            )

        util.execute_on_instances(instances, poll_status)
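
The polls.poll helper used above is not defined in this snippet. A simplified, hypothetical stand-in, assuming it just re-evaluates get_status until it returns True or the timeout elapses, could look like this:

    import time


    def poll(get_status, operation_name, timeout=60, sleep=3):
        # Simplified stand-in only: re-check `get_status` every `sleep` seconds
        # and fail once `timeout` seconds have passed without success.
        deadline = time.time() + timeout
        while time.time() < deadline:
            if get_status():
                return
            time.sleep(sleep)
        raise RuntimeError('Timed out: %s' % operation_name)
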
Example #19
    def _write_config_files(self, cluster_context, instances):
        LOG.debug('Writing config files')

        @el.provision_event()
        def write_config_files(instance, config_files):
            for file in config_files:
                util.write_file(instance, file.path, file.data, mode=file.mode,
                                owner="mapr")

        node_groups = util.unique_list(instances, lambda i: i.node_group)
        for node_group in node_groups:
            config_files = cluster_context.get_config_files(node_group)
            ng_instances = [i for i in node_group.instances if i in instances]
            util.execute_on_instances(ng_instances, write_config_files,
                                      config_files=config_files)

        LOG.debug("Config files are successfully written")
Example #20
    def _write_config_files(self, cluster_context, instances):
        LOG.debug('Writing config files')

        @el.provision_event()
        def write_config_files(instance, config_files):
            for path, data in six.iteritems(config_files):
                util.mkdir(instance, os.path.dirname(path), owner="root")
                util.write_file(instance, path, data, owner="root")

        node_groups = util.unique_list(instances, lambda i: i.node_group)
        for node_group in node_groups:
            config_files = cluster_context.get_config_files(node_group)
            ng_instances = [i for i in node_group.instances if i in instances]
            util.execute_on_instances(ng_instances, write_config_files,
                                      config_files=config_files)

        LOG.debug("Config files are successfully written")
Example #21
    def _wait_for_status(self, instances, status, sleep=3, timeout=60):
        def poll_status(instance):
            operation_name = _('Wait for {node_process} on {instance}'
                               ' to change status to "{status}"')
            args = {
                'node_process': self.ui_name,
                'instance': instance.instance_name,
                'status': status.name,
            }
            return polls.poll(
                get_status=lambda: self.status(instance) == status,
                operation_name=operation_name.format(**args),
                timeout=timeout,
                sleep=sleep,
            )

        util.execute_on_instances(instances, poll_status)
Example #22
    def _configure_topology(self, context, instances):
        def write_file(instance, path, data):
            with instance.remote() as r:
                r.write_file_to(path, data, run_as_root=True)

        LOG.debug("Configuring cluster topology")
        is_node_aware = context.is_node_aware
        if is_node_aware:
            topo = th.generate_topology_map(context.cluster, is_node_aware)
            topo = "\n".join(["%s %s" % i for i in six.iteritems(topo)]) + "\n"
            data_path = "%s/topology.data" % context.mapr_home
            script = files.get_file_text(_TOPO_SCRIPT)
            script_path = "%s/topology.sh" % context.mapr_home
            util.execute_on_instances(instances, write_file, data_path, topo)
            util.execute_on_instances(
                instances, util.write_file, script_path, script, "+x", "root")
        else:
            LOG.debug("Data locality is disabled.")
        LOG.info(_LI("Cluster topology successfully configured"))
Example #23
    def _configure_topology(self, context, instances):
        def write_file(instance, path, data):
            with instance.remote() as r:
                r.write_file_to(path, data, run_as_root=True)

        LOG.debug('Configuring cluster topology')
        is_node_aware = context.is_node_aware
        if is_node_aware:
            topo = th.generate_topology_map(context.cluster, is_node_aware)
            topo = '\n'.join(['%s %s' % i for i in six.iteritems(topo)]) + '\n'
            data_path = '%s/topology.data' % context.mapr_home
            script = files.get_file_text(_TOPO_SCRIPT)
            script_path = '%s/topology.sh' % context.mapr_home
            util.execute_on_instances(instances, write_file, data_path, topo)
            util.execute_on_instances(
                instances, util.write_file, script_path, script, '+x', 'root')
        else:
            LOG.debug('Data locality is disabled.')
        LOG.info(_LI('Cluster topology successfully configured'))
Example #24
    def _configure_topology(self, context, instances):
        def write_file(instance, path, data):
            with instance.remote() as r:
                r.write_file_to(path, data, run_as_root=True)

        LOG.debug('Configuring cluster topology')
        is_node_aware = context.is_node_aware
        if is_node_aware:
            topo = th.generate_topology_map(context.cluster, is_node_aware)
            topo = '\n'.join(['%s %s' % i for i in six.iteritems(topo)]) + '\n'
            data_path = '%s/topology.data' % context.mapr_home
            script = files.get_file_text(_TOPO_SCRIPT)
            script_path = '%s/topology.sh' % context.mapr_home
            util.execute_on_instances(instances, write_file, data_path, topo)
            util.execute_on_instances(
                instances, util.write_file, script_path, script, '+x', 'root')
        else:
            LOG.debug('Data locality is disabled.')
        LOG.info(_LI('Cluster topology successfully configured'))
Example #25
    def _configure_topology(self, context, instances):
        LOG.debug("Configuring cluster topology")

        topology_map = context.topology_map
        topology_map = ("%s %s" % item for item in six.iteritems(topology_map))
        topology_map = "\n".join(topology_map) + "\n"

        data_path = "%s/topology.data" % context.mapr_home
        script = files.get_file_text(_TOPO_SCRIPT)
        script_path = '%s/topology.sh' % context.mapr_home

        @el.provision_event()
        def write_topology_data(instance):
            util.write_file(instance, data_path, topology_map, owner="root")
            util.write_file(instance, script_path, script,
                            mode="+x", owner="root")

        util.execute_on_instances(instances, write_topology_data)

        LOG.info(_LI('Cluster topology successfully configured'))
Example #26
    def _configure_topology(self, context, instances):
        LOG.debug("Configuring cluster topology")

        topology_map = context.topology_map
        topology_map = ("%s %s" % item for item in six.iteritems(topology_map))
        topology_map = "\n".join(topology_map) + "\n"

        data_path = "%s/topology.data" % context.mapr_home
        script = files.get_file_text(_TOPO_SCRIPT)
        script_path = '%s/topology.sh' % context.mapr_home

        @el.provision_event()
        def write_topology_data(instance):
            util.write_file(instance, data_path, topology_map, owner="root")
            util.write_file(instance, script_path, script,
                            mode="+x", owner="root")

        util.execute_on_instances(instances, write_topology_data)

        LOG.info(_LI('Cluster topology successfully configured'))
Example #27
    def _write_config_files(self, cluster_context, instances):
        LOG.debug('Writing config files')

        @el.provision_event()
        def write_config_files(instance, config_files):
            for file in config_files:
                util.write_file(instance,
                                file.path,
                                file.data,
                                mode=file.mode,
                                owner="mapr")

        node_groups = util.unique_list(instances, lambda i: i.node_group)
        for node_group in node_groups:
            config_files = cluster_context.get_config_files(node_group)
            ng_instances = [i for i in node_group.instances if i in instances]
            util.execute_on_instances(ng_instances,
                                      write_config_files,
                                      config_files=config_files)

        LOG.debug("Config files are successfully written")
Example #28
    def configure_general_environment(self, cluster_context, instances=None):
        LOG.debug('Executing post configure hooks')

        if not instances:
            instances = cluster_context.get_instances()

        def create_user(instance):
            return util.run_script(instance, ADD_MAPR_USER, "root")

        def set_user_password(instance):
            LOG.debug('Setting password for user "mapr"')
            if self.mapr_user_exists(instance):
                with instance.remote() as r:
                    r.execute_command('echo "%s:%s"|chpasswd' %
                                      ('mapr', 'mapr'),
                                      run_as_root=True)
            else:
                LOG.warning(_LW('User "mapr" does not exist'))

        def create_home_mapr(instance):
            target_path = '/home/mapr'
            LOG.debug("Creating home directory for user 'mapr'")
            args = {'path': target_path}
            cmd = 'mkdir -p %(path)s && chown mapr:mapr %(path)s' % args
            if self.mapr_user_exists(instance):
                with instance.remote() as r:
                    r.execute_command(cmd, run_as_root=True)
            else:
                LOG.warning(_LW('User "mapr" does not exist'))

        util.execute_on_instances(instances, create_user)
        util.execute_on_instances(instances, set_user_password)
        util.execute_on_instances(instances, create_home_mapr)
Example #29
    def _prepare_bare_image(self, cluster_context, instances):
        LOG.debug("Preparing bare image")

        if d.UBUNTU == cluster_context.distro:
            LOG.debug("Installing security repos")
            util.execute_on_instances(
                instances, util.run_script, ADD_SECURITY_REPO_SCRIPT, "root")

        d_name = cluster_context.distro.name

        LOG.debug("Installing Java")
        util.execute_on_instances(
            instances, util.run_script, INSTALL_JAVA_SCRIPT, "root", d_name)
        LOG.debug("Installing Scala")
        util.execute_on_instances(
            instances, util.run_script, INSTALL_SCALA_SCRIPT, "root", d_name)
        LOG.debug("Installing MySQL client")
        util.execute_on_instances(
            instances, util.run_script, INSTALL_MYSQL_CLIENT, "root", d_name)
        LOG.debug("Bare images successfully prepared")
Example #30
    def _prepare_bare_image(self, cluster_context, instances):
        LOG.debug('Preparing bare image')
        d_name = cluster_context.distro.name

        LOG.debug('Installing Java')
        util.execute_on_instances(
            instances, util.run_script, INSTALL_JAVA_SCRIPT, 'root', d_name)
        LOG.debug('Installing Scala')
        util.execute_on_instances(
            instances, util.run_script, INSTALL_SCALA_SCRIPT, 'root', d_name)
        LOG.debug('Installing MySQL client')
        util.execute_on_instances(
            instances, util.run_script, INSTALL_MYSQL_CLIENT, 'root', d_name)
        LOG.debug('Bare images successfully prepared')
Example #31
    def _prepare_bare_image(self, cluster_context, instances):
        LOG.debug('Preparing bare image')
        d_name = cluster_context.distro.name

        LOG.debug('Installing Java')
        util.execute_on_instances(
            instances, util.run_script, INSTALL_JAVA_SCRIPT, 'root', d_name)
        LOG.debug('Installing Scala')
        util.execute_on_instances(
            instances, util.run_script, INSTALL_SCALA_SCRIPT, 'root', d_name)
        LOG.debug('Installing MySQL client')
        util.execute_on_instances(
            instances, util.run_script, INSTALL_MYSQL_CLIENT, 'root', d_name)
        LOG.debug('Bare images successfully prepared')
Example #32
 def _install(_context, _instances):
     g.execute_on_instances(_instances,
                            self._install_packages_on_instance,
                            _context)
Example #33
 def _install_ui(self, cluster_context, instances):
     OOZIE.stop(filter(OOZIE.is_started, instances))
     g.execute_on_instances(
         instances, self._rebuild_oozie_war, cluster_context)
     OOZIE.start(instances)
Example #34
 def _rebuild(self, cluster_context, instances):
     OOZIE.stop(filter(OOZIE.is_started, instances))
     g.execute_on_instances(
         instances, self._rebuild_oozie_war, cluster_context)
     OOZIE.start(instances)
     con.sleep(OOZIE_START_DELAY)
Example #35
 def _install(_context, _instances):
     g.execute_on_instances(_instances,
                            self._install_packages_on_instance,
                            _context)
Example #36
 def install(self, cluster_context, instances):
     g.execute_on_instances(instances, self._install_packages_on_instance,
                            cluster_context)
Example #37
 def _rebuild(self, cluster_context, instances):
     OOZIE.stop(filter(OOZIE.is_started, instances))
     g.execute_on_instances(instances, self._rebuild_oozie_war,
                            cluster_context)
     OOZIE.start(instances)
     context.sleep(OOZIE_START_DELAY)
Example #38
 def _install_mapr_repo(self, cluster_context, instances):
     d_name = cluster_context.distro.name
     util.execute_on_instances(
         instances, util.run_script, ADD_MAPR_REPO_SCRIPT, 'root', d_name,
         **cluster_context.mapr_repos)
Example #39
 def _install_ui(self, cluster_context, instances):
     OOZIE.stop(filter(OOZIE.is_started, instances))
     g.execute_on_instances(
         instances, self._rebuild_oozie_war, cluster_context)
     OOZIE.start(instances)
Example #40
 def _install_mapr_repo(self, cluster_context, instances):
     d_name = cluster_context.distro.name
     util.execute_on_instances(
         instances, util.run_script, ADD_MAPR_REPO_SCRIPT, 'root', d_name,
         **cluster_context.mapr_repos)