Esempio n. 1
0
    def move_mysqldir_to(self, storage_path):
        """Relocate MySQL binlog and data directories under *storage_path*.

        For each my.cnf directive (mysqld/log_bin, mysqld/datadir) the
        destination directory is created if missing, the old contents are
        rsync'ed over (with an SELinux boolean tweak so rsync can touch
        mysqld files), the directive is rewritten to the new location,
        ownership is set to mysql:mysql and AppArmor rules are added on
        Debian-based distros.
        """
        LOG.info('Moving mysql dir to %s' % storage_path)
        for directive, dirname in (
                        ('mysqld/log_bin', os.path.join(storage_path,STORAGE_BINLOG)),
                        ('mysqld/datadir', os.path.join(storage_path,STORAGE_DATA_DIR) + '/')
                        ):

            dest = os.path.dirname(dirname)
            if os.path.isdir(dest):
                LOG.info('No need to move %s to %s: already in place.' % (directive, dest))
            else:
                os.makedirs(dest)

                raw_value = self.my_cnf.get(directive)
                LOG.debug('directive %s:%s' % (directive, raw_value))
                if raw_value:
                    # Normalize the configured value to a directory path with
                    # a trailing slash (rsync "copy contents" semantics).
                    src_dir = os.path.dirname(raw_value + "/") + "/"
                    LOG.debug('source path: %s' % src_dir)
                    if os.path.isdir(src_dir) and src_dir != dest:
                        # system2(...)[2] is the exit code, so `not ...[2]`
                        # means the command succeeded (SELinux enabled).
                        selinuxenabled = software.which('selinuxenabled')
                        if selinuxenabled:
                            if not system2((selinuxenabled, ), raise_exc=False)[2]:
                                if not system2((software.which('getsebool'), 'mysqld_disable_trans'), raise_exc=False)[2]:
                                    LOG.debug('Make SELinux rule for rsync')
                                    system2((software.which('setsebool'), '-P', 'mysqld_disable_trans', '1'))

                        LOG.info('Copying mysql directory \'%s\' to \'%s\'', src_dir, dest)
                        rsync(src_dir, dest, archive=True, exclude=['ib_logfile*', '*.sock'])

            self.my_cnf.set(directive, dirname)
            chown_r(dest, "mysql", "mysql")
            # Adding rules to apparmor config
            if disttool.is_debian_based():
                _add_apparmor_rules(dest)
Esempio n. 2
0
    def on_host_init_response(self, message):
        """Handle the HostInitResponse message.

        Caches server-supplied global variables and, when the 'chef' section
        is present, resolves chef binaries, fills in node name / json
        attributes / run list, and on Windows switches the chef-client
        service to manual startup and stops it.
        """
        # Store global variables as utf-8 byte strings ('' when no value).
        global_variables = message.body.get('global_variables') or []
        for kv in global_variables:
            self._global_variables[kv['name']] = kv['value'].encode(
                'utf-8') if kv['value'] else ''

        if 'chef' in message.body and message.body['chef']:
            if linux.os.windows_family:
                # Opscode installer places chef under C:\opscode on Windows.
                self._chef_client_bin = r'C:\opscode\chef\bin\chef-client.bat'
                self._chef_solo_bin = r'C:\opscode\chef\bin\chef-solo.bat'
            else:
                # Workaround for 'chef' behavior enabled, but chef not installed
                self._chef_client_bin = which('chef-client')
                self._chef_solo_bin = which('chef-solo')

            self._chef_data = message.chef.copy()
            if not self._chef_data.get('node_name'):
                self._chef_data['node_name'] = self.get_node_name()

            # json_attributes arrives as a JSON string; decode when non-empty.
            self._with_json_attributes = self._chef_data.get(
                'json_attributes', {}) or {}
            if self._with_json_attributes:
                self._with_json_attributes = json.loads(
                    self._with_json_attributes)

            # An explicit run_list takes priority over a single role name.
            self._run_list = self._chef_data.get('run_list')
            if self._run_list:
                self._with_json_attributes['run_list'] = json.loads(
                    self._run_list)
            elif self._chef_data.get('role'):
                self._with_json_attributes['run_list'] = [
                    "role[%s]" % self._chef_data['role']
                ]

            if linux.os.windows_family:
                # TODO: why not doing the same on linux?
                try:
                    # Set startup type to 'manual' for chef-client service
                    hscm = win32service.OpenSCManager(
                        None, None, win32service.SC_MANAGER_ALL_ACCESS)
                    try:
                        hs = win32serviceutil.SmartOpenService(
                            hscm, WIN_SERVICE_NAME,
                            win32service.SERVICE_ALL_ACCESS)
                        try:
                            snc = win32service.SERVICE_NO_CHANGE
                            # change only startup type
                            win32service.ChangeServiceConfig(
                                hs, snc, win32service.SERVICE_DEMAND_START,
                                snc, None, None, 0, None, None, None, None)
                        finally:
                            win32service.CloseServiceHandle(hs)
                    finally:
                        win32service.CloseServiceHandle(hscm)

                    win32serviceutil.StopService(WIN_SERVICE_NAME)

                except:
                    # Best effort: a failure to stop the service is only logged.
                    e = sys.exc_info()[1]
                    self._logger.warning('Could not stop chef service: %s' % e)
Esempio n. 3
0
def get_semanage():
    """Return the path to the `semanage` binary on RedHat-family systems.

    Installs policycoreutils-python on demand when the binary is missing.
    Returns None on non-RedHat families or when it still cannot be found.
    """
    if linux.os['family'] != 'RedHat':
        return None
    binary = software.which('semanage')
    if binary:
        return binary
    # Not installed yet: pull in the package that ships it and retry once.
    pkgmgr.package_mgr().install('policycoreutils-python')
    return software.which('semanage')
Esempio n. 4
0
def get_semanage():
    """Locate the `semanage` binary, installing it on demand (RedHat only)."""
    if linux.os['family'] != 'RedHat':
        # Outside the RedHat family there is nothing to resolve.
        return None
    found = software.which('semanage')
    if not found:
        # semanage ships in policycoreutils-python; install and look again.
        manager = pkgmgr.package_mgr()
        manager.install('policycoreutils-python')
        found = software.which('semanage')
    return found
Esempio n. 5
0
    def on_host_init_response(self, message):
        """Process HostInitResponse: cache global variables and set up chef.

        When the message carries a 'chef' section this resolves the chef
        binaries, node name, json attributes and run list; on Windows the
        chef-client service is switched to manual start and stopped.
        """
        # Global variables are stored utf-8 encoded ("" when value is empty).
        global_variables = message.body.get("global_variables") or []
        for kv in global_variables:
            self._global_variables[kv["name"]] = kv["value"].encode("utf-8") if kv["value"] else ""

        if "chef" in message.body and message.body["chef"]:
            if linux.os.windows_family:
                # Fixed Opscode install paths on Windows.
                self._chef_client_bin = r"C:\opscode\chef\bin\chef-client.bat"
                self._chef_solo_bin = r"C:\opscode\chef\bin\chef-solo.bat"
            else:
                # Workaround for 'chef' behavior enabled, but chef not installed
                self._chef_client_bin = which("chef-client")
                self._chef_solo_bin = which("chef-solo")

            self._chef_data = message.chef.copy()
            if not self._chef_data.get("node_name"):
                self._chef_data["node_name"] = self.get_node_name()

            # json_attributes is delivered as a JSON string; decode if set.
            self._with_json_attributes = self._chef_data.get("json_attributes", {}) or {}
            if self._with_json_attributes:
                self._with_json_attributes = json.loads(self._with_json_attributes)

            # Explicit run_list wins; otherwise derive one from the role.
            self._run_list = self._chef_data.get("run_list")
            if self._run_list:
                self._with_json_attributes["run_list"] = json.loads(self._run_list)
            elif self._chef_data.get("role"):
                self._with_json_attributes["run_list"] = ["role[%s]" % self._chef_data["role"]]

            if linux.os.windows_family:
                # TODO: why not doing the same on linux?
                try:
                    # Set startup type to 'manual' for chef-client service
                    hscm = win32service.OpenSCManager(None, None, win32service.SC_MANAGER_ALL_ACCESS)
                    try:
                        hs = win32serviceutil.SmartOpenService(hscm, WIN_SERVICE_NAME, win32service.SERVICE_ALL_ACCESS)
                        try:
                            snc = win32service.SERVICE_NO_CHANGE
                            # change only startup type
                            win32service.ChangeServiceConfig(
                                hs, snc, win32service.SERVICE_DEMAND_START, snc, None, None, 0, None, None, None, None
                            )
                        finally:
                            win32service.CloseServiceHandle(hs)
                    finally:
                        win32service.CloseServiceHandle(hscm)

                    win32serviceutil.StopService(WIN_SERVICE_NAME)

                except:
                    # Best effort: log and continue if the service won't stop.
                    e = sys.exc_info()[1]
                    self._logger.warning("Could not stop chef service: %s" % e)
Esempio n. 6
0
    def restore(self, queue, volume, download_finished):
        """Stream downloaded snapshot chunks through a decompressor onto
        the raw volume device.

        :param queue: queue of downloaded chunk descriptors consumed by
            ``concat_chunks``
        :param volume: storage volume whose ``device`` receives the data
        :param download_finished: event/flag signalling the download is done
        :raises StorageError: if the decompressor exits with a non-zero code
        """
        # Prefer the parallel pigz decompressor when available; call
        # which() once instead of twice as before.
        pigz = which('pigz')
        compress_cmd = [pigz, '-d'] if pigz else ['gzip', '-d']
        device_fp = open(volume.device, 'w')
        try:
            compressor = subprocess.Popen(compress_cmd,
                                          stdin=subprocess.PIPE,
                                          stdout=device_fp,
                                          stderr=subprocess.PIPE,
                                          close_fds=True)
            # Feed chunks into the decompressor; closing stdin signals EOF.
            self.concat_chunks(queue, download_finished, compressor.stdin)
            compressor.stdin.close()
            ret_code = compressor.wait()
        finally:
            # Fix: the parent's copy of the device file used to be leaked.
            device_fp.close()
        if ret_code:
            raise StorageError('Snapshot decompression failed.')
Esempio n. 7
0
    def move_mysqldir_to(self, storage_path):
        """Relocate MySQL binlog and data directories under *storage_path*.

        Like the basic move, but skips copying old data on the openstack
        platform, and when the mysqld_disable_trans SELinux boolean is not
        usable it falls back to labelling /usr/bin/rsync as bin_t via
        semanage/restorecon so the copy is permitted.
        """
        LOG.info('Moving mysql dir to %s' % storage_path)
        for directive, dirname in (
            ('mysqld/log_bin', os.path.join(storage_path, STORAGE_BINLOG)),
            ('mysqld/datadir',
             os.path.join(storage_path, STORAGE_DATA_DIR) + '/')):

            dest = os.path.dirname(dirname)
            if os.path.isdir(dest):
                LOG.info('No need to move %s to %s: already in place.' %
                         (directive, dest))
            else:
                os.makedirs(dest)

                raw_value = self.my_cnf.get(directive)
                LOG.debug('directive %s:%s' % (directive, raw_value))
                # On openstack the old data is not copied over.
                if raw_value and node.__node__['platform'] != 'openstack':
                    # Trailing slash => rsync copies directory contents.
                    src_dir = os.path.dirname(raw_value + "/") + "/"
                    LOG.debug('source path: %s' % src_dir)
                    if os.path.isdir(src_dir) and src_dir != dest:
                        # system2(...)[2] is the exit code; `not ...[2]`
                        # means the command succeeded.
                        selinuxenabled = software.which('selinuxenabled')
                        if selinuxenabled:
                            if not system2(
                                (selinuxenabled, ), raise_exc=False)[2]:
                                if not system2((software.which('getsebool'),
                                                'mysqld_disable_trans'),
                                               raise_exc=False)[2]:
                                    LOG.debug('Make SELinux rule for rsync')
                                    system2((software.which('setsebool'), '-P',
                                             'mysqld_disable_trans', '1'))
                                else:
                                    # Boolean unavailable: relabel rsync
                                    # itself so SELinux allows the copy.
                                    semanage = get_semanage()
                                    system2((semanage, 'fcontext', '-a', '-t',
                                             'bin_t', '/usr/bin/rsync'))
                                    system2((software.which('restorecon'),
                                             '-v', '/usr/bin/rsync'))

                        LOG.info('Copying mysql directory \'%s\' to \'%s\'',
                                 src_dir, dest)
                        rsync(src_dir,
                              dest,
                              archive=True,
                              exclude=['ib_logfile*', '*.sock'])

            self.my_cnf.set(directive, dirname)
            chown_r(dest, "mysql", "mysql")
            # Adding rules to apparmor config
            if disttool.is_debian_based():
                _add_apparmor_rules(dest)
Esempio n. 8
0
    def _start_stop_reload(self, action):
        """Start the daemonized chef-client.

        Only the "start" action is handled in this fragment; the default
        distro init script is stopped first so it does not compete with the
        daemonized client.

        :raises initdv2.InitdError: when chef-client fails to start.
        """
        chef_client_bin = which('chef-client')
        if action == "start":
            if not self.running:
                # Stop default chef-client init script
                if os.path.exists(self._default_init_script):
                    system2((self._default_init_script, "stop"),
                            close_fds=True,
                            preexec_fn=os.setsid,
                            raise_exc=False)

                # Daemonize chef-client with our own logfile and pidfile.
                cmd = (chef_client_bin, '--daemonize', '--logfile',
                       '/var/log/chef-client.log', '--pid', PID_FILE)
                try:
                    out, err, rcode = system2(cmd,
                                              close_fds=True,
                                              preexec_fn=os.setsid,
                                              env=self._env)
                except PopenError, e:
                    raise initdv2.InitdError('Failed to start chef: %s' % e)

                if rcode:
                    msg = ('Chef failed to start daemonized. '
                           'Return code: %s\nOut:%s\nErr:%s')
                    raise initdv2.InitdError(msg % (rcode, out, err))
Esempio n. 9
0
    def restore(self, queue, volume, download_finished):
        """Restore snapshot chunks onto *volume*.

        Mounts the volume on a temp dir, then pipes the downloaded chunks
        through gunzip (pigz when available) into `tar x`. Raises when
        either pipeline stage exits non-zero.
        """
        tmp_mpoint = mkdtemp()
        volume.mount(tmp_mpoint)
        try:
            # Prefer parallel pigz; this which() raises LookupError when
            # the binary is missing, so fall back to plain gzip.
            try:
                cmd1 = (which('pigz'), '-d')
            except LookupError:
                cmd1 = ('gzip', '-d')
            cmd2 = ('tar', 'px', '-C', tmp_mpoint)

            compressor = subprocess.Popen(cmd1,
                                          stdin=subprocess.PIPE,
                                          stdout=subprocess.PIPE,
                                          stderr=subprocess.PIPE,
                                          close_fds=True)
            # tar reads directly from the decompressor's stdout pipe.
            tar = subprocess.Popen(cmd2,
                                   stdin=compressor.stdout,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   close_fds=True)
            self.concat_chunks(queue, download_finished, compressor.stdin)

            compressor.stdin.close()
            r_code = compressor.wait()
            if r_code:
                raise Exception('Archiver finished with return code %s' %
                                r_code)

            r_code = tar.wait()
            if r_code:
                raise Exception('Tar finished with return code %s' % r_code)
        finally:
            # NOTE(review): the mountpoint is unmounted but the temp dir is
            # left on disk -- confirm whether that is intentional.
            mount.umount(tmp_mpoint)
Esempio n. 10
0
    def start_nowait(self, *params, **keys):
        """Spawn the configured executable without waiting for completion.

        Resolves the executable through PATH when needed, forces a parseable
        locale (en_US or C) into the child's environment, and returns a
        ``Process`` wrapper around the started ``subprocess.Popen``.
        The stdio-related subprocess kwargs are always cleared afterwards so
        they are not accidentally reused by the next invocation.
        """
        try:
            if not self._checked:
                self.check()
            # Callers may wrap keyword arguments in a single 'kwargs' key.
            if len(keys) == 1 and 'kwargs' in keys:
                keys = keys['kwargs']
            # Ensure the child has an environment with a deterministic locale.
            if 'env' not in self.subprocess_kwds:
                self.subprocess_kwds['env'] = os.environ
            # Set en_US locale or C
            if not self.subprocess_kwds['env'].get('LANG'):
                default_locale = locale.getdefaultlocale()
                if default_locale == ('en_US', 'UTF-8'):
                    self.subprocess_kwds['env']['LANG'] = 'en_US'
                else:
                    self.subprocess_kwds['env']['LANG'] = 'C'

            cmd_args = self.prepare_args(*params, **keys)

            # Resolve bare names through PATH unless a shell will do it.
            if not self.subprocess_kwds.get('shell') and not self.executable.startswith('/'):
                self.executable = software.which(self.executable)

            final_args = (self.executable,) + tuple(cmd_args)
            self._check_streams()
            if self.to_log:
                self.logger.debug('Starting subprocess. Args: %s' % ' '.join(final_args))

            popen = subprocess.Popen(final_args, **self.subprocess_kwds)
            process = Process(self.executable, popen)
            return process
        finally:
            # BUG FIX: the original iterated the characters of the single
            # string 'stderr, stdout, stdin', so these keys were never
            # actually removed between calls. Iterate the three key names.
            for stream in ('stderr', 'stdout', 'stdin'):
                self.subprocess_kwds.pop(stream, None)
Esempio n. 11
0
 def selinux_enabled():
     """Report whether SELinux is enabled on this host.

     When the `selinuxenabled` utility is not installed we conservatively
     assume SELinux is active.
     """
     binary = software.which('selinuxenabled')
     if not binary:
         # No way to check -- consider it enabled by default.
         return True
     ret_code = system2((binary,), raise_exc=False)[2]
     return ret_code == 0
Esempio n. 12
0
 def __init__(self):
     """Locate mysql-proxy, detect its version and prepare the init socket."""
     try:
         self.bin_path = software.which('mysql-proxy')
     except LookupError:
         raise initdv2.InitdError("Mysql-proxy binary not found. Check your installation")
     # First line of `mysql-proxy -V` looks like "mysql-proxy X.Y.Z ...";
     # take the second token and split it into an integer version tuple.
     first_line = system2((self.bin_path, '-V'))[0].splitlines()[0]
     version_token = first_line.split()[1]
     self.version = tuple(int(part) for part in version_token.split('.'))
     self.sock = initdv2.SockParam(4040)
Esempio n. 13
0
    def restore(self, queue, volume, download_finished):
        """Decompress downloaded snapshot chunks directly onto the volume
        device.

        :raises StorageError: when the decompressor exits non-zero.
        """
        # Resolve pigz once (the original called which('pigz') twice).
        pigz_bin = which('pigz')
        if pigz_bin:
            compress_cmd = [pigz_bin, '-d']
        else:
            compress_cmd = ['gzip', '-d']
        device_fp = open(volume.device, 'w')
        try:
            compressor = subprocess.Popen(compress_cmd,
                                          stdin=subprocess.PIPE,
                                          stdout=device_fp,
                                          stderr=subprocess.PIPE,
                                          close_fds=True)
            self.concat_chunks(queue, download_finished, compressor.stdin)

            compressor.stdin.close()

            ret_code = compressor.wait()
        finally:
            # Fix: close our copy of the device file (it used to leak).
            device_fp.close()
        if ret_code:
            raise StorageError('Snapshot decompression failed.')
Esempio n. 14
0
def get_mx_records(email):
    """Return the unique MX hostnames for the domain part of *email*.

    Security fix: the domain used to be interpolated into a shell command
    executed with shell=True, allowing command injection via a crafted
    address. The lookup now execs `host` directly with an argument tuple,
    matching how system2 is invoked elsewhere in this codebase.
    """
    domain = email.split('@')[-1]
    out = system2((which('host'), '-t', 'mx', domain))[0]
    # Lines ending with '.' are reduced to their last token minus the
    # trailing dot (the MX host); other lines are kept as-is.
    mxs = [mx.split()[-1][:-1] if mx.endswith('.') else mx for mx in out.split('\n')]
    if '' in mxs:
        mxs.remove('')
    # De-duplicate while preserving first-seen order (dict used as a set).
    seen = {}
    for record in mxs:
        seen[record] = None
    return list(seen.keys())
Esempio n. 15
0
    def __init__(self):
        """Verify rabbitmqctl availability and wire up handler state.

        :raises HandlerError: when the rabbitmqctl binary is missing.
        """
        try:
            software.which('rabbitmqctl')
        except LookupError:
            raise HandlerError("Rabbitmqctl binary was not found. Check your installation.")

        bus.on("init", self.on_init)

        self._logger = logging.getLogger(__name__)
        self.rabbitmq = rabbitmq_svc.rabbitmq
        self.service = initdv2.lookup(BuiltinBehaviours.RABBITMQ)
        self._service_name = BEHAVIOUR
        self.on_reload()

        # On EC2, disable using the public DNS name as the hostname.
        if 'ec2' == self.platform.name:
            self._logger.debug('Setting hostname_as_pubdns to 0')
            __ec2__ = __node__['ec2']
            __ec2__['hostname_as_pubdns'] = 0
Esempio n. 16
0
 def _change_selinux_ctx(self):
     """Relabel the new mysql storage dir with the mysqld_db_t SELinux
     context.

     Does nothing when `chcon` is unavailable or the distro is not
     RedHat-based; relabel failures are ignored (raise_exc=False).
     """
     try:
         chcon_bin = software.which('chcon')
     except LookupError:
         # No chcon on this system -- nothing we can do.
         return
     if not disttool.is_redhat_based():
         return
     LOG.debug('Changing SELinux file security context for new mysql datadir')
     target = os.path.dirname(__mysql__['storage_dir'])
     cmd = (chcon_bin, '-R', '-u', 'system_u', '-r', 'object_r',
            '-t', 'mysqld_db_t', target)
     system2(cmd, raise_exc=False)
Esempio n. 17
0
	def on_reload(self):
		"""Reset chef handler state and re-resolve paths on (re)configuration."""
		self._platform = bus.platform
		self._chef_client_bin = which('chef-client')
		self._chef_data = None
		self._with_json_attributes = False
		# Standard chef client file locations on the guest.
		self._client_conf_path = '/etc/chef/client.rb'
		self._client_key_path = '/etc/chef/client.pem'
		self._validator_key_path = '/etc/chef/validation.pem'
		self._json_attributes_path = '/etc/chef/first-run.json'
Esempio n. 18
0
    def __init__(self):
        """Initialize the RabbitMQ handler.

        Checks that rabbitmqctl exists, subscribes to the bus "init" event
        and loads runtime state via on_reload().

        :raises HandlerError: when the rabbitmqctl binary is missing.
        """
        try:
            software.which('rabbitmqctl')
        except LookupError:
            raise HandlerError("Rabbitmqctl binary was not found. Check your installation.")

        bus.on("init", self.on_init)

        self._logger = logging.getLogger(__name__)
        self.rabbitmq = rabbitmq_svc.rabbitmq
        self.service = initdv2.lookup(BuiltinBehaviours.RABBITMQ)
        self._service_name = BEHAVIOUR
        self.on_reload()

        # EC2-specific: stop advertising the public DNS name as hostname.
        if 'ec2' == self.platform.name:
            self._logger.debug('Setting hostname_as_pubdns to 0')
            __ec2__ = __node__['ec2']
            __ec2__['hostname_as_pubdns'] = 0
Esempio n. 19
0
 def __init__(self):
     """Locate the mysql-proxy binary, parse its version and set up the
     init socket parameter.

     :raises initdv2.InitdError: when mysql-proxy is not installed.
     """
     try:
         self.bin_path = software.which('mysql-proxy')
     except LookupError:
         raise initdv2.InitdError(
             "Mysql-proxy binary not found. Check your installation")
     # First line of `mysql-proxy -V` is "<name> <version> ..."; the second
     # token becomes an integer tuple, e.g. (0, 8, 1).
     version_str = system2((self.bin_path, '-V'))[0].splitlines()[0]
     self.version = tuple(map(int, version_str.split()[1].split('.')))
     self.sock = initdv2.SockParam(4040)
Esempio n. 20
0
 def on_reload(self):
     """Re-initialize chef handler state after a configuration reload."""
     self._platform = bus.platform
     self._chef_client_bin = which('chef-client')
     # Well-known chef file locations on the guest.
     self._client_conf_path = '/etc/chef/client.rb'
     self._client_key_path = '/etc/chef/client.pem'
     self._validator_key_path = '/etc/chef/validation.pem'
     self._json_attributes_path = '/etc/chef/first-run.json'
     self._chef_data = None
     self._with_json_attributes = False
     self._global_variables = {}
Esempio n. 21
0
    def snapshot(self, op, name):
        """Create a GCE machine image from this running instance.

        Bundles the root device with gcimagebundle, uploads the archive to a
        throwaway GCS bucket and registers it as image "<name>-<timestamp>".

        :returns: "<project>/images/<image_name>"
        :raises ImageAPIError: on bundling or upload failure.
        """
        rebundle_dir = tempfile.mkdtemp()
        archive_path = ''
        try:
            pl = __node__['platform']
            # NOTE(review): proj_id is assigned but never used below --
            # confirm whether this call has needed side effects.
            proj_id = pl.get_numeric_project_id()
            proj_name = pl.get_project_id()
            cloudstorage = pl.new_storage_client()

            # Resolve the whole-disk device backing the root partition.
            root_part_path = os.path.realpath('/dev/root')
            root_part_sysblock_path = glob.glob('/sys/block/*/%s' % os.path.basename(root_part_path))[0]
            root_device = '/dev/%s' % os.path.basename(os.path.dirname(root_part_sysblock_path))

            archive_name = '%s.tar.gz' % name.lower()
            archive_path = os.path.join(rebundle_dir, archive_name)

            self._prepare_software()

            gcimagebundle_bin = software.which('gcimagebundle')

            out, err, code = util.system2((gcimagebundle_bin,
                '-d', root_device,
                '-e', ','.join(self.exclude_dirs),
                '-o', rebundle_dir,
                '--output_file_name', archive_name), raise_exc=False)
            if code:
                raise ImageAPIError('Gcimagebundle util returned non-zero code %s. Stderr: %s' % (code, err))

            LOG.info('Uploading compressed image to cloud storage')
            # Random+timestamp bucket name to avoid collisions.
            tmp_bucket_name = 'scalr-images-%s-%s' % (random.randint(1, 1000000), int(time.time()))
            remote_path = 'gcs://%s/%s' % (tmp_bucket_name, archive_name)
            arch_size = os.stat(archive_path).st_size
            uploader = FileTransfer(src=archive_path, dst=remote_path)

            try:
                upload_result = uploader.run()
                if upload_result['failed']:
                    errors = [str(failed['exc_info'][1]) for failed in upload_result['failed']]
                    raise ImageAPIError('Image upload failed. Errors:\n%s' % '\n'.join(errors))
                # Sanity check: uploaded size must match the local archive.
                assert arch_size == upload_result['completed'][0]['size']
            except:
                # Clean up the partially-uploaded object, then re-raise.
                self._remove_bucket(tmp_bucket_name, archive_name, cloudstorage)
                raise

        finally:
            shutil.rmtree(rebundle_dir)
            if os.path.exists(archive_path):
                os.remove(archive_path)

        image_name = name.lower().replace('_', '-') + '-' + str(int(time.time()))
        self._register_image(image_name, tmp_bucket_name, archive_name, cloudstorage)

        return '%s/images/%s' % (proj_name, image_name)
Esempio n. 22
0
    def move_mysqldir_to(self, storage_path):
        """Relocate MySQL binlog and data directories under *storage_path*.

        Variant using filetool.Rsync and rchown. SELinux binary lookups may
        raise LookupError, which is deliberately swallowed (best effort).
        """
        LOG.info("Moving mysql dir to %s" % storage_path)
        for directive, dirname in (
            ("mysqld/log_bin", os.path.join(storage_path, STORAGE_BINLOG)),
            ("mysqld/datadir", os.path.join(storage_path, STORAGE_DATA_DIR) + "/"),
        ):

            dest = os.path.dirname(dirname)
            if os.path.isdir(dest):
                LOG.info("No need to move %s to %s: already in place." % (directive, dest))
            else:
                os.makedirs(dest)

                raw_value = self.my_cnf.get(directive)
                LOG.debug("directive %s:%s" % (directive, raw_value))
                if raw_value:
                    # Trailing slash => rsync copies directory contents.
                    src_dir = os.path.dirname(raw_value + "/") + "/"
                    LOG.debug("source path: %s" % src_dir)
                    if os.path.isdir(src_dir) and src_dir != dest:
                        # system2(...)[2] is the exit code; `not ...[2]`
                        # means the command succeeded.
                        try:
                            if not system2((software.which("selinuxenabled"),), raise_exc=False)[2]:
                                if not system2((software.which("getsebool"), "mysqld_disable_trans"), raise_exc=False)[
                                    2
                                ]:
                                    LOG.debug("Make SELinux rule for rsync")
                                    system2((software.which("setsebool"), "-P", "mysqld_disable_trans", "1"))
                        except LookupError:
                            # SELinux tooling not installed -- nothing to tweak.
                            pass

                        LOG.info("Copying mysql directory '%s' to '%s'", src_dir, dest)
                        rsync = filetool.Rsync().archive()
                        rsync.source(src_dir).dest(dest).exclude(["ib_logfile*"])
                        system2(str(rsync), shell=True)
            self.my_cnf.set(directive, dirname)

            rchown("mysql", dest)
            # Adding rules to apparmor config
            if disttool.is_debian_based():
                _add_apparmor_rules(dest)
Esempio n. 23
0
def setup():
    """Start the background SQLite server thread and expose its connection.

    Populates module globals: SQLITE3 (path to the sqlite3 cli binary) and
    CONN (the server thread's connection).
    """
    global CONN, SQLITE3, SERVER_THREAD

    SQLITE3 = which('sqlite3')

    def make_connection():
        # Each server thread opens its own connection to DATABASE.
        return sqlite3.Connection(database=DATABASE)

    server_thread = sqlite_server.SQLiteServerThread(make_connection)
    server_thread.setDaemon(True)
    server_thread.start()
    # Block until the thread has a usable connection.
    sqlite_server.wait_for_server_thread(server_thread)
    CONN = server_thread.connection
Esempio n. 24
0
def get_mx_records(email):
    """Look up MX hosts for the domain of *email*, deduplicated in order.

    Security fix: the previous implementation formatted the domain into a
    shell command string (shell=True) -- a command-injection hole for a
    crafted address. The command is now passed as an argument tuple and
    executed without a shell, as system2 is used elsewhere in this file.
    """
    domain = email.split('@')[-1]
    out = system2((which('host'), '-t', 'mx', domain))[0]
    # Lines ending with '.' are reduced to their last token minus the
    # trailing dot (the MX host); other lines are kept unchanged.
    mxs = [
        mx.split()[-1][:-1] if mx.endswith('.') else mx
        for mx in out.split('\n')
    ]
    if '' in mxs:
        mxs.remove('')
    # Ordered de-duplication using a dict as a set.
    unique = {}
    for record in mxs:
        unique[record] = None
    return list(unique.keys())
Esempio n. 25
0
    def __init__(self):
        """Resolve RabbitMQ binaries, software version and plugin directory.

        Raises when RabbitMQ is not installed or the versioned plugin
        directory cannot be located under /usr/lib/rabbitmq/lib/.
        """
        __rabbitmq__['rabbitmqctl'] = software.which('rabbitmqctl')
        __rabbitmq__['rabbitmq-server'] = software.which('rabbitmq-server')
        # RabbitMQ from Ubuntu repo puts rabbitmq-plugins in non-obvious place
        __rabbitmq__['rabbitmq-plugins'] = software.which(
            'rabbitmq-plugins', '/usr/lib/rabbitmq/bin/')

        self._logger = logging.getLogger(__name__)
        try:
            self.version = software.rabbitmq_software_info().version
        except:
            self._logger.error("Can't install RabbitMQ")
            raise

        # Find the versioned server directory holding the plugins; the
        # for/else raises when no rabbitmq_server* directory exists.
        for dirname in os.listdir('/usr/lib/rabbitmq/lib/'):
            if dirname.startswith('rabbitmq_server'):
                self.plugin_dir = os.path.join('/usr/lib/rabbitmq/lib/',
                                               dirname, 'plugins')
                break
        else:
            raise Exception('RabbitMQ plugin directory not found')

        self.service = initdv2.lookup(SERVICE_NAME)
Esempio n. 26
0
def setup():
    """Spin up the SQLite server thread and expose its connection
    through the CONN / SQLITE3 module globals."""
    global CONN, SQLITE3, SERVER_THREAD

    SQLITE3 = which('sqlite3')

    thread = sqlite_server.SQLiteServerThread(
        lambda: sqlite3.Connection(database=DATABASE))
    thread.setDaemon(True)
    thread.start()
    sqlite_server.wait_for_server_thread(thread)

    CONN = thread.connection
Esempio n. 27
0
    def check(self):
        """Verify the configured executable exists and is runnable;
        install its package if declared, otherwise raise."""
        if self.executable.startswith('/'):
            exec_path = self.executable
        else:
            # Relative name: resolve via PATH; which() returns a list here.
            candidates = software.which(self.executable)
            exec_path = candidates[0] if candidates else None

        if exec_path and os.access(exec_path, os.X_OK):
            return

        if self.package:
            pkgmgr.installed(self.package)
        else:
            msg = 'Executable %s is not found, you should either ' \
                  'specify `package` attribute or install the software ' \
                  'manually' % self.executable
            raise linux.LinuxError(msg)
Esempio n. 28
0
    def on_host_init_response(self, message):
        """Handle HostInitResponse: cache incoming global variables and,
        when a chef section is present, prepare chef-client settings
        (node name, json attributes, run list) and switch the Windows
        chef-client service to manual start."""
        global_variables = message.body.get('global_variables') or []
        for kv in global_variables:
            # Store values as utf-8 byte strings; empty/None becomes ''.
            self._global_variables[kv['name']] = kv['value'].encode('utf-8') if kv['value'] else ''

        if 'chef' in message.body and message.body['chef']:
            if linux.os.windows_family:
                self._chef_client_bin = r'C:\opscode\chef\bin\chef-client.bat'
            else:
                self._chef_client_bin = which('chef-client')   # Workaround for 'chef' behavior enabled, but chef not installed

            self._chef_data = message.chef.copy()
            if not self._chef_data.get('node_name'):
                self._chef_data['node_name'] = self.get_node_name()
            self._daemonize = self._chef_data.get('daemonize')

            # json_attributes arrives as a JSON string (possibly empty).
            self._with_json_attributes = self._chef_data.get('json_attributes')
            self._with_json_attributes = json.loads(self._with_json_attributes) if self._with_json_attributes else {}

            # An explicit run_list wins over the single-role shortcut.
            self._run_list = self._chef_data.get('run_list')
            if self._run_list:
                self._with_json_attributes['run_list'] = self._run_list
            elif self._chef_data.get('role'):
                self._with_json_attributes['run_list'] = ["role[%s]" % self._chef_data['role']]

            if linux.os.windows_family:
                try:
                    # Set startup type to 'manual' for chef-client service
                    hscm = win32service.OpenSCManager(None, None, win32service.SC_MANAGER_ALL_ACCESS)
                    try:
                        hs = win32serviceutil.SmartOpenService(hscm, WIN_SERVICE_NAME, win32service.SERVICE_ALL_ACCESS)
                        try:
                            snc = win32service.SERVICE_NO_CHANGE
                            # change only startup type
                            win32service.ChangeServiceConfig(hs, snc, win32service.SERVICE_DEMAND_START,
                                                                snc, None, None, 0, None, None, None, None)
                        finally:
                            win32service.CloseServiceHandle(hs)
                    finally:
                        win32service.CloseServiceHandle(hscm)

                    win32serviceutil.StopService(WIN_SERVICE_NAME)

                except:
                    # Best effort: failure to stop the service is only logged.
                    e = sys.exc_info()[1]
                    self._logger.warning('Could not stop chef service: %s' % e)
Esempio n. 29
0
    def _start_stop_reload(self, action):
        """Start the chef-client daemon (only the "start" action is
        handled in this variant).

        Raises:
            initdv2.InitdError: when the daemon fails to launch.
        """
        chef_client_bin = which("chef-client")
        if action == "start":
            if not self.running:
                # Stop default chef-client init script
                if os.path.exists(self._default_init_script):
                    system2((self._default_init_script, "stop"), close_fds=True, preexec_fn=os.setsid, raise_exc=False)

                cmd = (chef_client_bin, "--daemonize", "--logfile", "/var/log/chef-client.log", "--pid", PID_FILE)
                try:
                    out, err, rcode = system2(cmd, close_fds=True, preexec_fn=os.setsid, env=self._env)
                except PopenError, e:
                    raise initdv2.InitdError("Failed to start chef: %s" % e)

                if rcode:
                    # Implicit string concatenation builds one message.
                    msg = "Chef failed to start daemonized. " "Return code: %s\nOut:%s\nErr:%s"
                    raise initdv2.InitdError(msg % (rcode, out, err))
Esempio n. 30
0
    def move_to(self, dst):
        """Relocate PostgreSQL config files to *dst*, fixing ownership and,
        on CentOS 7, symlinking them back into the data directory and
        updating the systemd unit's PGDATA.

        Raises:
            BaseException: when an expected config file is missing.
        """
        datadir = os.path.join(__postgresql__['storage_dir'], STORAGE_DATA_DIR)
        centos7 = "centos" in linux.os['name'].lower(
        ) and linux.os["release"].version[0] == 7

        if not os.path.exists(dst):
            LOG.debug("creating %s" % dst)
            os.makedirs(dst)

        for config in ['postgresql.conf', 'pg_ident.conf', 'pg_hba.conf']:
            old_config = os.path.join(self.path, config)
            new_config = os.path.join(dst, config)
            # Only move real files; a symlink means it was moved already.
            if os.path.exists(
                    old_config) and not os.path.islink(old_config):  #???
                LOG.debug('Moving %s' % config)
                shutil.move(old_config, new_config)
            elif os.path.exists(new_config):
                LOG.debug('%s is already in place. Skipping.' % config)
            else:
                raise BaseException('Postgresql config file not found: %s' %
                                    old_config)
            chown_r(new_config, DEFAULT_USER)
            if centos7:
                # CentOS 7 packaging expects configs inside the data dir;
                # keep symlinks there pointing at the relocated files.
                new_link = os.path.join(datadir, config)
                if os.path.exists(new_link) and os.path.isfile(new_link):
                    os.remove(new_link)
                    LOG.debug("Duplicate config %s removed." % new_link)
                os.symlink(new_config, new_link)
                chown_r(new_link, DEFAULT_USER)

        #the following block needs revision
        if centos7:
            self._systemd_change_pgdata(datadir)
            system2([software.which("systemctl"),
                     "daemon-reload"])  # [SCALARIZR-1627]
        else:
            self._patch_sysconfig(dst)

        self.path = dst

        LOG.debug("configuring pid")
        conf = PostgresqlConf.find(self)
        if not centos7:
            conf.pid_file = os.path.join(dst,
                                         'postmaster.pid')  # [SCALARIZR-1685]
Esempio n. 31
0
    def _innodb_recovery(self, storage_path=None):
        """Run mysqld_safe in bootstrap mode so InnoDB replays its logs.

        Args:
            storage_path: storage root; defaults to __mysql__['storage_dir'].
        """
        storage_path = storage_path or __mysql__['storage_dir']
        binlog_path = os.path.join(storage_path, mysql_svc.STORAGE_BINLOG)
        # BUGFIX: a stray trailing comma previously made data_dir a 1-tuple;
        # it only worked by accident through %-formatting tuple unpacking.
        data_dir = os.path.join(storage_path, mysql_svc.STORAGE_DATA_DIR)
        pid_file = os.path.join(storage_path, 'mysql.pid')
        socket_file = os.path.join(storage_path, 'mysql.sock')
        mysqld_safe_bin = software.which('mysqld_safe')

        LOG.info('Performing InnoDB recovery')
        mysqld_safe_cmd = (mysqld_safe_bin,
                '--socket=%s' % socket_file,
                '--pid-file=%s' % pid_file,
                '--datadir=%s' % data_dir,
                '--log-bin=%s' % binlog_path,
                '--skip-networking',
                '--skip-grant',
                '--bootstrap',
                '--skip-slave-start')
        # The bootstrap instance reads SQL from stdin; a trivial query is
        # enough to trigger recovery and exit.
        system2(mysqld_safe_cmd, stdin="select 1;")
Esempio n. 32
0
    def _start_stop_reload(self, action):
        """Start or stop the daemonized chef-client process."""
        daemon_bin = which('chef-client')

        if action == "start" and not self.running:
            # Stop default chef-client init script
            if os.path.exists(self._default_init_script):
                system2((self._default_init_script, "stop"),
                        close_fds=True,
                        preexec_fn=os.setsid,
                        raise_exc=False)

            command = (daemon_bin, '--daemonize', '--logfile',
                       '/var/log/chef-client.log', '--pid', PID_FILE)
            out, err, returncode = system2(command,
                                           close_fds=True,
                                           preexec_fn=os.setsid,
                                           env=self._env,
                                           stdout=open(os.devnull, 'w+'),
                                           stderr=open(os.devnull, 'w+'),
                                           raise_exc=False)
            if returncode == 255:
                LOG.debug('chef-client daemon already started')
            elif returncode:
                raise initdv2.InitdError(
                    ('Chef failed to start daemonized. '
                     'Return code: %s\nOut:%s\nErr:%s')
                    % (returncode, out, err))

        elif action == "stop" and self.running:
            with open(self.pid_file) as pidfile:
                daemon_pid = int(pidfile.read().strip())
            try:
                os.getpgid(daemon_pid)
            except OSError:
                # Process is gone; drop the stale pid file.
                os.remove(self.pid_file)
            else:
                os.kill(daemon_pid, signal.SIGTERM)
Esempio n. 33
0
    def _start_stop_reload(self, action):
        """Dispatch start/stop of the chef-client daemon."""
        chef_bin = which('chef-client')
        if action == "start":
            if self.running:
                return
            # Stop default chef-client init script
            if os.path.exists(self._default_init_script):
                system2((self._default_init_script, "stop"),
                        close_fds=True, preexec_fn=os.setsid,
                        raise_exc=False)

            launch = (chef_bin, '--daemonize', '--logfile',
                      '/var/log/chef-client.log', '--pid', PID_FILE)
            run_kwds = dict(close_fds=True,
                            preexec_fn=os.setsid,
                            env=self._env,
                            stdout=open(os.devnull, 'w+'),
                            stderr=open(os.devnull, 'w+'),
                            raise_exc=False)
            out, err, code = system2(launch, **run_kwds)
            if code == 255:
                LOG.debug('chef-client daemon already started')
            elif code:
                raise initdv2.InitdError(
                    ('Chef failed to start daemonized. '
                     'Return code: %s\nOut:%s\nErr:%s') % (code, out, err))

        elif action == "stop":
            if not self.running:
                return
            with open(self.pid_file) as fp:
                pid = int(fp.read().strip())
            try:
                os.getpgid(pid)
            except OSError:
                # Stale pid file: the process no longer exists.
                os.remove(self.pid_file)
            else:
                os.kill(pid, signal.SIGTERM)
Esempio n. 34
0
    def restore(self, queue, volume, download_finished):
        """Mount *volume* on a temp dir and stream downloaded chunks
        through a decompressor piped into tar, extracting in place.

        Raises:
            Exception: if the decompressor or tar exits non-zero.
        """
        tmp_mpoint = mkdtemp()
        volume.mount(tmp_mpoint)
        try:
            # Prefer parallel gzip; fall back to plain gzip when absent.
            # NOTE(review): assumes which() raises LookupError when the
            # binary is missing -- other call sites treat a falsy return
            # as "not found"; confirm which implementation is in scope.
            try:
                cmd1 = (which('pigz'), '-d')
            except LookupError:
                cmd1 = ('gzip', '-d')
            cmd2 = ('tar', 'px', '-C', tmp_mpoint)

            compressor = subprocess.Popen(cmd1, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
            tar      = subprocess.Popen(cmd2, stdin=compressor.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
            # Feed downloaded chunks into the decompressor's stdin;
            # tar reads the decompressed stream from the pipe.
            self.concat_chunks(queue, download_finished, compressor.stdin)

            compressor.stdin.close()
            r_code = compressor.wait()
            if r_code:
                raise Exception('Archiver finished with return code %s' % r_code)

            r_code = tar.wait()
            if r_code:
                raise Exception('Tar finished with return code %s' % r_code)
        finally:
            mount.umount(tmp_mpoint)
Esempio n. 35
0
log_level :{2}
'''

# Per-platform locations for chef-client configuration, keys,
# JSON attributes and the chef binaries.
if linux.os.windows_family:
    CLIENT_CONF_PATH = r'C:\chef\client.rb'
    VALIDATOR_KEY_PATH = r'C:\chef\validation.pem'
    CLIENT_KEY_PATH = r'C:\chef\client.pem'
    JSON_ATTRIBUTES_PATH = r'C:\chef\json_attributes.json'
    CHEF_CLIENT_BIN = r'C:\opscode\chef\bin\chef-client.bat'
    CHEF_SOLO_BIN = r'C:\opscode\chef\bin\chef-solo.bat'
else:
    CLIENT_CONF_PATH = '/etc/chef/client.rb'
    VALIDATOR_KEY_PATH = '/etc/chef/validation.pem'
    CLIENT_KEY_PATH = '/etc/chef/client.pem'
    JSON_ATTRIBUTES_PATH = '/etc/chef/json_attributes.json'
    # Resolved at import time. NOTE(review): value when chef is not
    # installed depends on which() semantics -- confirm.
    CHEF_CLIENT_BIN = which('chef-client')
    CHEF_SOLO_BIN = which('chef-solo')

# Pid file used by the daemonized chef-client (POSIX only).
PID_FILE = '/var/run/chef-client.pid'


def extract_json_attributes(chef_data):
    """
    Extract json attributes dictionary from scalr formatted structure.

    A missing or empty 'json_attributes' entry yields an empty dict.

    Raises:
        HandlerError: if the attribute string is not valid JSON.
    """
    try:
        json_attributes = json.loads(chef_data.get('json_attributes') or "{}")
    # `as` syntax works on Python 2.6+ and 3.x (the old comma form is 2-only).
    except ValueError as e:
        raise HandlerError(
            "Chef attributes is not a valid JSON: {0}".format(e))
    # BUGFIX: the parsed dict was never returned.
    return json_attributes
Esempio n. 36
0
    def on_init(self):
        """Wire bus event handlers, insert firewall rules, and perform
        state-dependent initialization: load a SELinux policy for
        ssh-keygen at bootstrap (RedHat family), and verify/recreate
        Scalr's PostgreSQL root user when already running."""
        #temporary fix for starting-after-rebundle issue
        if not os.path.exists(PG_SOCKET_DIR):
            os.makedirs(PG_SOCKET_DIR)
            chown_r(PG_SOCKET_DIR, 'postgres')

        bus.on("host_init_response", self.on_host_init_response)
        bus.on("before_host_up", self.on_before_host_up)
        bus.on("before_reboot_start", self.on_before_reboot_start)

        self._insert_iptables_rules()

        if __node__['state'] == ScalarizrState.BOOTSTRAPPING:

            if disttool.is_redhat_based():

                checkmodule_path = software.which('checkmodule')
                semodule_package_path = software.which('semodule_package')
                semodule_path = software.which('semodule')

                # Compile and load a SELinux module permitting ssh-keygen;
                # silently skipped if the SELinux toolchain is absent.
                if all((checkmodule_path, semodule_package_path, semodule_path)):

                    with open('/tmp/sshkeygen.te', 'w') as fp:
                        fp.write(SSH_KEYGEN_SELINUX_MODULE)

                    self._logger.debug('Compiling SELinux policy for ssh-keygen')
                    system2((checkmodule_path, '-M', '-m', '-o',
                             '/tmp/sshkeygen.mod', '/tmp/sshkeygen.te'), logger=self._logger)

                    self._logger.debug('Building SELinux package for ssh-keygen')
                    system2((semodule_package_path, '-o', '/tmp/sshkeygen.pp',
                             '-m', '/tmp/sshkeygen.mod'), logger=self._logger)

                    self._logger.debug('Loading ssh-keygen SELinux package')
                    system2((semodule_path, '-i', '/tmp/sshkeygen.pp'), logger=self._logger)


        if __node__['state'] == 'running':

            vol = storage2.volume(__postgresql__['volume'])
            vol.ensure(mount=True)

            self.postgresql.service.start()
            self.accept_all_clients()

            self._logger.debug("Checking presence of Scalr's PostgreSQL root user.")
            root_password = self.root_password

            if not self.postgresql.root_user.exists():
                self._logger.debug("Scalr's PostgreSQL root user does not exist. Recreating")
                self.postgresql.root_user = self.postgresql.create_linux_user(ROOT_USER, root_password)
            else:
                try:
                    self.postgresql.root_user.check_system_password(root_password)
                    self._logger.debug("Scalr's root PgSQL user is present. Password is correct.")
                except ValueError:
                    # System password drifted; reset it to the expected one.
                    self._logger.warning("Scalr's root PgSQL user was changed. Recreating.")
                    self.postgresql.root_user.change_system_password(root_password)

            if self.is_replication_master:
                #ALTER ROLE cannot be executed in a read-only transaction
                self._logger.debug("Checking password for pg_role scalr.")
                if not self.postgresql.root_user.check_role_password(root_password):
                    LOG.warning("Scalr's root PgSQL role was changed. Recreating.")
                    self.postgresql.root_user.change_role_password(root_password)
Esempio n. 37
0
    def on_init(self):
        """Wire bus handlers, insert firewall rules and perform
        state-dependent setup: load a SELinux policy for ssh-keygen at
        bootstrap (RedHat family, SELinux enabled), and verify/recreate
        Scalr's PostgreSQL root user when already running."""
        #temporary fix for starting-after-rebundle issue
        if not os.path.exists(PG_SOCKET_DIR):
            os.makedirs(PG_SOCKET_DIR)
            chown_r(PG_SOCKET_DIR, 'postgres')

        bus.on("host_init_response", self.on_host_init_response)
        bus.on("before_host_up", self.on_before_host_up)
        bus.on("before_reboot_start", self.on_before_reboot_start)

        self._insert_iptables_rules()

        if __node__['state'] == ScalarizrState.BOOTSTRAPPING:

            if linux.os.redhat_family:

                def selinux_enabled():
                    # True when `selinuxenabled` exits 0; a missing binary
                    # is treated as enabled (conservative default).
                    selinuxenabled = software.which('selinuxenabled')
                    if selinuxenabled:
                        _, _, ret_code = system2((selinuxenabled, ),
                                                 raise_exc=False)
                        return 0 == ret_code
                    # Consider it enabled by default
                    return True

                checkmodule_path = software.which('checkmodule')
                semodule_package_path = software.which('semodule_package')
                semodule_path = software.which('semodule')

                # Compile and load a SELinux module permitting ssh-keygen,
                # only when the full toolchain exists and SELinux is on.
                if all(
                    (checkmodule_path, semodule_package_path, semodule_path)):
                    if selinux_enabled():
                        with open('/tmp/sshkeygen.te', 'w') as fp:
                            fp.write(SSH_KEYGEN_SELINUX_MODULE)

                        self._logger.debug(
                            'Compiling SELinux policy for ssh-keygen')
                        system2((checkmodule_path, '-M', '-m', '-o',
                                 '/tmp/sshkeygen.mod', '/tmp/sshkeygen.te'),
                                logger=self._logger)

                        self._logger.debug(
                            'Building SELinux package for ssh-keygen')
                        system2(
                            (semodule_package_path, '-o', '/tmp/sshkeygen.pp',
                             '-m', '/tmp/sshkeygen.mod'),
                            logger=self._logger)

                        self._logger.debug(
                            'Loading ssh-keygen SELinux package')
                        system2((semodule_path, '-i', '/tmp/sshkeygen.pp'),
                                logger=self._logger)

        if __node__['state'] == 'running':

            vol = storage2.volume(__postgresql__['volume'])
            vol.ensure(mount=True)

            self.postgresql.service.start()
            self.accept_all_clients()

            self._logger.debug(
                "Checking presence of Scalr's PostgreSQL root user.")
            root_password = self.root_password

            if not self.postgresql.root_user.exists():
                self._logger.debug(
                    "Scalr's PostgreSQL root user does not exist. Recreating")
                self.postgresql.root_user = self.postgresql.create_linux_user(
                    ROOT_USER, root_password)
            else:
                try:
                    self.postgresql.root_user.check_system_password(
                        root_password)
                    self._logger.debug(
                        "Scalr's root PgSQL user is present. Password is correct."
                    )
                except ValueError:
                    # System password drifted; reset it to the expected one.
                    self._logger.warning(
                        "Scalr's root PgSQL user was changed. Recreating.")
                    self.postgresql.root_user.change_system_password(
                        root_password)

            if self.is_replication_master:
                #ALTER ROLE cannot be executed in a read-only transaction
                self._logger.debug("Checking password for pg_role scalr.")
                if not self.postgresql.root_user.check_role_password(
                        root_password):
                    LOG.warning(
                        "Scalr's root PgSQL role was changed. Recreating.")
                    self.postgresql.root_user.change_role_password(
                        root_password)
Esempio n. 38
0
    def rebundle(self):
        """Bundle the root filesystem with gcimagebundle, upload it to a
        temporary GCS bucket, and register it as a new GCE image.

        Returns:
            str: '<project>/images/<image-name>' of the registered image.
        """
        rebundle_dir = tempfile.mkdtemp()

        try:
            pl = bus.platform
            proj_id = pl.get_numeric_project_id()
            proj_name = pl.get_project_id()
            cloudstorage = pl.new_storage_client()

            # Resolve the real block device backing /dev/root via sysfs.
            root_part_path = os.path.realpath('/dev/root')
            root_part_sysblock_path = glob.glob(
                '/sys/block/*/%s' % os.path.basename(root_part_path))[0]
            root_device = '/dev/%s' % os.path.basename(
                os.path.dirname(root_part_sysblock_path))

            arch_name = '%s.tar.gz' % self._role_name.lower()
            arch_path = os.path.join(rebundle_dir, arch_name)

            # update gcimagebundle
            try:
                pkgmgr.latest(self.gcimagebundle_pkg_name)
            except:
                # Best effort: a failed update is logged, not fatal.
                e = sys.exc_info()[1]
                LOG.warn('Gcimagebundle update failed: %s' % e)

            if os_dist.redhat_family:
                # Let rsync run under SELinux; semanage ships in
                # policycoreutils-python on RedHat-family systems.
                semanage = software.which('semanage')
                if not semanage:
                    pkgmgr.installed('policycoreutils-python')
                    semanage = software.which('semanage')

                util.system2((semanage, 'permissive', '-a', 'rsync_t'))

            gc_img_bundle_bin = software.which('gcimagebundle')

            o, e, p = util.system2(
                (gc_img_bundle_bin, '-d', root_device, '-e', ','.join(
                    self.exclude_dirs), '-o', rebundle_dir,
                 '--output_file_name', arch_name),
                raise_exc=False)
            if p:
                raise HandlerError(
                    'Gcimagebundle util returned non-zero code %s. Stderr: %s'
                    % (p, e))

            try:
                LOG.info('Uploading compressed image to cloud storage')
                tmp_bucket_name = 'scalr-images-%s-%s' % (random.randint(
                    1, 1000000), int(time.time()))
                remote_path = 'gcs://%s/%s' % (tmp_bucket_name, arch_name)
                arch_size = os.stat(arch_path).st_size
                uploader = FileTransfer(src=arch_path, dst=remote_path)

                try:
                    upload_result = uploader.run()
                    if upload_result['failed']:
                        errors = [
                            str(failed['exc_info'][1])
                            for failed in upload_result['failed']
                        ]
                        raise HandlerError('Image upload failed. Errors:\n%s' %
                                           '\n'.join(errors))
                    assert arch_size == upload_result['completed'][0]['size']
                except:
                    # Upload failed: clean up the partial object and bucket.
                    with util.capture_exception(LOG):
                        objs = cloudstorage.objects()
                        objs.delete(bucket=tmp_bucket_name,
                                    object=arch_name).execute()
                    cloudstorage.buckets().delete(
                        bucket=tmp_bucket_name).execute()
            finally:
                os.unlink(arch_path)

        finally:
            shutil.rmtree(rebundle_dir)

        # NOTE(review): tmp_bucket_name/arch_name are assigned inside the
        # try above -- an early failure would surface below as NameError;
        # confirm this is acceptable.
        goog_image_name = self._role_name.lower().replace(
            '_', '-') + '-' + str(int(time.time()))
        try:
            LOG.info('Registering new image %s' % goog_image_name)
            compute = pl.new_compute_client()

            image_url = 'http://storage.googleapis.com/%s/%s' % (
                tmp_bucket_name, arch_name)

            req_body = dict(name=goog_image_name,
                            sourceType='RAW',
                            rawDisk=dict(source=image_url))

            req = compute.images().insert(project=proj_id, body=req_body)
            operation = req.execute()['name']

            LOG.info('Waiting for image to register')

            def image_is_ready():
                # Poll the global operation until DONE, raising on errors.
                req = compute.globalOperations().get(project=proj_id,
                                                     operation=operation)
                res = req.execute()
                if res['status'] == 'DONE':
                    if res.get('error'):
                        errors = []
                        for e in res['error']['errors']:
                            err_text = '%s: %s' % (e['code'], e['message'])
                            errors.append(err_text)
                        raise Exception('\n'.join(errors))
                    return True
                return False

            util.wait_until(image_is_ready, logger=LOG, timeout=600)

        finally:
            # Remove the temporary upload artifacts regardless of outcome.
            try:
                objs = cloudstorage.objects()
                objs.delete(bucket=tmp_bucket_name, object=arch_name).execute()
                cloudstorage.buckets().delete(bucket=tmp_bucket_name).execute()
            except:
                e = sys.exc_info()[1]
                LOG.error('Faled to remove image compressed source: %s' % e)

        return '%s/images/%s' % (proj_name, goog_image_name)
Esempio n. 39
0
import glob
import shutil
import logging
import functools
import resource


from scalarizr.config import BuiltinBehaviours
from scalarizr.services import BaseConfig, BaseService, lazy
from scalarizr.libs.metaconf import Configuration
from scalarizr.util import disttool, system2, PopenError, wait_until, initdv2, software, firstmatched
from scalarizr.linux.coreutils import chown_r
import pymongo


# Paths to the MongoDB binaries, resolved at import time.
# NOTE(review): whether which() returns None or raises when a binary is
# missing is not visible here -- confirm before relying on these values.
MONGOD = software.which("mongod")
MONGO_CLI = software.which("mongo")
MONGO_DUMP = software.which("mongodump")
MONGOS = software.which("mongos")

# Default MongoDB ports for each cluster role.
ROUTER_DEFAULT_PORT = 27017
ARBITER_DEFAULT_PORT = 27020
REPLICA_DEFAULT_PORT = 27018
CONFIG_SERVER_DEFAULT_PORT = 27019

SERVICE_NAME = BuiltinBehaviours.MONGODB
STORAGE_PATH = "/mnt/mongodb-storage"

# NOTE(review): glob(...)[0] raises IndexError when no /var/log/mongo*
# directory exists -- confirm this import-time failure is intended.
LOG_DIR = glob.glob("/var/log/mongo*")[0]
LOG_PATH_DEFAULT = os.path.join(LOG_DIR, "mongodb.shardsrv.log")
DEFAULT_UBUNTU_DB_PATH = "/var/lib/mongodb"
Esempio n. 40
0
import time
import shutil
import logging
import functools
import resource
import pymongo

from scalarizr.config import BuiltinBehaviours
from scalarizr.services import BaseConfig, BaseService, lazy
from scalarizr.util import system2, \
                                PopenError, wait_until, initdv2, software, \
                                firstmatched
from scalarizr.linux.coreutils import chown_r
from scalarizr import linux

# Paths to the MongoDB binaries, resolved at import time.
MONGOD = software.which('mongod')
MONGO_CLI = software.which('mongo')
MONGO_DUMP = software.which('mongodump')
MONGOS = software.which('mongos')

# Default MongoDB ports for each cluster role.
ROUTER_DEFAULT_PORT = 27017
ARBITER_DEFAULT_PORT = 27020
REPLICA_DEFAULT_PORT = 27018
CONFIG_SERVER_DEFAULT_PORT = 27019

SERVICE_NAME = BuiltinBehaviours.MONGODB
STORAGE_PATH = "/mnt/mongodb-storage"

# Service user differs between Debian- and RedHat-family packages.
# NOTE: the Debian-family value appears scrubbed ('******') in this source.
DEFAULT_USER = '******' if linux.os.debian_family else 'mongod'
LOG_DIR = '/var/log/mongodb'
Esempio n. 41
0
from scalarizr.util.software import which
from scalarizr.linux import coreutils, pkgmgr
from scalarizr.storage import StorageError

logger = logging.getLogger(__name__)


class Lvm2Error(PopenError):
    """Raised when an lvm2 command-line tool fails."""
    pass


# Ensure the lvm2 toolchain is installed before resolving binary paths.
if not os.path.exists('/sbin/pvs'):
    pkgmgr.installed('lvm2')

try:
    PVS = which('pvs')
    VGS = which('vgs')
    LVS = which('lvs')

    PVSCAN = which('pvscan')
    PVCREATE = which('pvcreate')
    VGCREATE = which('vgcreate')
    LVCREATE = which('lvcreate')

    LVCHANGE = which('lvchange')
    VGCHANGE = which('vgchange')
    VGEXTEND = which('vgextend')
    VGREDUCE = which('vgreduce')
    VGCFGRESTORE = which('vgcfgrestore')

    PVREMOVE = which('pvremove')
Esempio n. 42
0
__rabbitmq__ = __node__['rabbitmq']

# Static RabbitMQ configuration paths and naming templates.
SERVICE_NAME = CNF_SECTION = BuiltinBehaviours.RABBITMQ
RABBIT_CFG_PATH = '/etc/rabbitmq/rabbitmq.config'
COOKIE_PATH = '/var/lib/rabbitmq/.erlang.cookie'
RABBITMQ_ENV_CNF_PATH = '/etc/rabbitmq/rabbitmq-env.conf'
# NOTE: username value appears scrubbed ('******') in this source.
SCALR_USERNAME = '******'
NODE_HOSTNAME_TPL = 'rabbit@%s'


class NodeTypes:
    """Cluster membership types supported by RabbitMQ nodes."""
    RAM = 'ram'
    DISK = 'disk'


RABBITMQCTL = software.which('rabbitmqctl')
RABBITMQ_SERVER = software.which('rabbitmq-server')

# RabbitMQ from ubuntu repo puts rabbitmq-plugins
# binary in non-obvious place

try:
    RABBITMQ_PLUGINS = software.which('rabbitmq-plugins')
except LookupError:
    # Fall back to the Ubuntu-specific location before giving up.
    possible_path = '/usr/lib/rabbitmq/bin/rabbitmq-plugins'

    if os.path.exists(possible_path):
        RABBITMQ_PLUGINS = possible_path
    else:
        raise
Esempio n. 43
0

log = logging.getLogger(__name__)

__rabbitmq__ = __node__['rabbitmq']

SERVICE_NAME = BuiltinBehaviours.RABBITMQ
RABBIT_CFG_PATH = '/etc/rabbitmq/rabbitmq.config'
RABBIT_HOME = '/var/lib/rabbitmq/'
COOKIE_PATH = os.path.join(RABBIT_HOME, '.erlang.cookie')
RABBITMQ_ENV_CNF_PATH = '/etc/rabbitmq/rabbitmq-env.conf'
SCALR_USERNAME = '******'
NODE_HOSTNAME_TPL = 'rabbit@%s'
RABBIT_HOSTNAME_TPL = 'rabbit-%s'

RABBITMQCTL = software.which('rabbitmqctl')
RABBITMQ_SERVER = software.which('rabbitmq-server')

class NodeTypes:
    """RabbitMQ cluster node types."""
    RAM = 'ram'    # node keeps cluster metadata in RAM only
    DISK = 'disk'  # node persists cluster metadata to disk

# RabbitMQ from ubuntu repo puts rabbitmq-plugins
# binary in non-obvious place
RABBITMQ_PLUGINS = software.which('rabbitmq-plugins', '/usr/lib/rabbitmq/bin/')

# Best-effort version detection: fall back to an "unknown" sentinel so that
# importing this module never fails when RabbitMQ is not (yet) installed.
try:
    RABBITMQ_VERSION = software.rabbitmq_software_info().version
except Exception:
    # Was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; `Exception` keeps the best-effort behavior
    # without masking interpreter-exit signals.
    RABBITMQ_VERSION = (0, 0, 0)
Esempio n. 44
0
            util.wait_until(image_is_ready, logger=LOG, timeout=600)

        finally:
            try:
                self._remove_bucket(bucket_name, archive_name, cloudstorage)
            except (Exception, BaseException), e:
                LOG.error('Faled to remove image compressed source: %s' % e)

    def _prepare_software(self):
        """Ensure the tooling needed for image bundling is in place.

        Best-effort upgrade of the gcimagebundle package; on RedHat-family
        systems additionally makes the SELinux ``rsync_t`` domain permissive
        so rsync is not blocked during bundling.
        """
        try:
            pkgmgr.latest(self.gcimagebundle_pkg_name)
        except (Exception, BaseException), e:
            # Best effort: an update failure must not abort the snapshot.
            LOG.warn('Gcimagebundle update failed: %s' % e)

        if os_dist.redhat_family:
            semanage = software.which('semanage')
            if not semanage:
                # semanage ships in policycoreutils-python on RHEL/CentOS
                pkgmgr.installed('policycoreutils-python')
                semanage = software.which('semanage')

            util.system2((semanage, 'permissive', '-a', 'rsync_t'))

    def snapshot(self, op, name):
        rebundle_dir = tempfile.mkdtemp()
        archive_path = ''
        try:
            pl = __node__['platform']
            proj_id = pl.get_numeric_project_id()
            proj_name = pl.get_project_id()
            cloudstorage = pl.get_storage_conn()
Esempio n. 45
0
    def rebundle(self):
        """Bundle the root filesystem, upload it to a temporary GCS bucket
        and register it as a new GCE image.

        Returns the image identifier as ``'<project>/images/<name>'``.
        Raises HandlerError when bundling or uploading fails.
        """
        rebundle_dir = tempfile.mkdtemp()

        try:
            pl = bus.platform
            proj_id = pl.get_numeric_project_id()
            proj_name = pl.get_project_id()
            cloudstorage = pl.new_storage_client()

            # Resolve the whole-disk device backing the root partition
            # (e.g. /dev/sda for /dev/sda1) via the sysfs block hierarchy.
            root_part_path = os.path.realpath('/dev/root')
            root_part_sysblock_path = glob.glob('/sys/block/*/%s' % os.path.basename(root_part_path))[0]
            root_device = '/dev/%s' % os.path.basename(os.path.dirname(root_part_sysblock_path))

            arch_name = '%s.tar.gz' % self._role_name.lower()
            arch_path = os.path.join(rebundle_dir, arch_name)

            # update gcimagebundle (best effort; failure is only logged)
            try:
                pkgmgr.latest(self.gcimagebundle_pkg_name)
            except:
                e = sys.exc_info()[1]
                LOG.warn('Gcimagebundle update failed: %s' % e)

            if os_dist.redhat_family:
                semanage = software.which('semanage')
                if not semanage:
                    # semanage ships in policycoreutils-python on RHEL/CentOS
                    pkgmgr.installed('policycoreutils-python')
                    semanage = software.which('semanage')

                # Allow rsync through SELinux while bundling
                util.system2((semanage, 'permissive', '-a', 'rsync_t'))

            gc_img_bundle_bin = software.which('gcimagebundle')

            o, e, p = util.system2((gc_img_bundle_bin,
                        '-d', root_device,
                        '-e', ','.join(self.exclude_dirs),
                        '-o', rebundle_dir,
                        '--output_file_name', arch_name), raise_exc=False)
            if p:
                raise HandlerError('Gcimagebundle util returned non-zero code %s. Stderr: %s' % (p, e))


            try:
                LOG.info('Uploading compressed image to cloud storage')
                tmp_bucket_name = 'scalr-images-%s-%s' % (random.randint(1, 1000000), int(time.time()))
                remote_path = 'gcs://%s/%s' % (tmp_bucket_name, arch_name)
                arch_size = os.stat(arch_path).st_size
                uploader = FileTransfer(src=arch_path, dst=remote_path)

                try:
                    upload_result = uploader.run()
                    if upload_result['failed']:
                        errors =  [str(failed['exc_info'][1]) for failed in upload_result['failed']]
                        raise HandlerError('Image upload failed. Errors:\n%s' % '\n'.join(errors))
                    # size check confirms the upload was not truncated
                    assert arch_size == upload_result['completed'][0]['size']
                except:
                    # Best-effort cleanup of the partially-uploaded object.
                    # NOTE(review): whether the original exception propagates
                    # depends on util.capture_exception() semantics — confirm.
                    with util.capture_exception(LOG):
                        objs = cloudstorage.objects()
                        objs.delete(bucket=tmp_bucket_name, object=arch_name).execute()
                    cloudstorage.buckets().delete(bucket=tmp_bucket_name).execute()
            finally:
                os.unlink(arch_path)

        finally:
            shutil.rmtree(rebundle_dir)

        goog_image_name = self._role_name.lower().replace('_', '-') + '-' + str(int(time.time()))
        try:
            LOG.info('Registering new image %s' % goog_image_name)
            compute = pl.new_compute_client()

            image_url = 'http://storage.googleapis.com/%s/%s' % (tmp_bucket_name, arch_name)

            req_body = dict(
                    name=goog_image_name,
                    sourceType='RAW',
                    rawDisk=dict(
                            source=image_url
                    )
            )

            req = compute.images().insert(project=proj_id, body=req_body)
            operation = req.execute()['name']

            LOG.info('Waiting for image to register')
            def image_is_ready():
                # Poll the GCE operation until its status is DONE; a DONE
                # operation with an 'error' field means registration failed.
                req = compute.globalOperations().get(project=proj_id, operation=operation)
                res = req.execute()
                if res['status'] == 'DONE':
                    if res.get('error'):
                        errors = []
                        for e in res['error']['errors']:
                            err_text = '%s: %s' % (e['code'], e['message'])
                            errors.append(err_text)
                        raise Exception('\n'.join(errors))
                    return True
                return False
            util.wait_until(image_is_ready, logger=LOG, timeout=600)

        finally:
            # Remove the temporary upload bucket; failure here is only logged.
            try:
                objs = cloudstorage.objects()
                objs.delete(bucket=tmp_bucket_name, object=arch_name).execute()
                cloudstorage.buckets().delete(bucket=tmp_bucket_name).execute()
            except:
                e = sys.exc_info()[1]
                # fixed typo in log message: 'Faled' -> 'Failed'
                LOG.error('Failed to remove image compressed source: %s' % e)

        return '%s/images/%s' % (proj_name, goog_image_name)
Esempio n. 46
0
    def snapshot(self, op, name):
        """Create a GCE machine image from the running instance.

        Bundles the root device with gcimagebundle, uploads the archive to a
        temporary GCS bucket and registers it as image *name*.
        Returns ``'<project>/images/<image_name>'``.
        Raises ImageAPIError when the root volume cannot be found, bundling
        fails, or the upload fails.
        """
        rebundle_dir = tempfile.mkdtemp()
        archive_path = ''
        try:
            pl = __node__['platform']
            proj_id = pl.get_numeric_project_id()
            proj_name = pl.get_project_id()
            cloudstorage = pl.get_storage_conn()

            # Find the partition mounted at '/'
            root_part_path = None
            for d in coreutils.df():
                if d.mpoint == '/':
                    root_part_path = d.device
                    break
            else:
                raise ImageAPIError('Failed to find root volume')

            # Resolve the whole-disk device backing the root partition
            # (e.g. /dev/sda for /dev/sda1) via sysfs.
            root_part_sysblock_path = glob.glob(
                '/sys/block/*/%s' % os.path.basename(root_part_path))[0]
            root_device = '/dev/%s' % os.path.basename(
                os.path.dirname(root_part_sysblock_path))

            archive_name = '%s.tar.gz' % name.lower()
            archive_path = os.path.join(rebundle_dir, archive_name)

            self._prepare_software()

            gcimagebundle_bin = software.which('gcimagebundle')

            out, err, code = util.system2(
                (gcimagebundle_bin, '-d', root_device, '-e', ','.join(
                    self.exclude_dirs), '-o', rebundle_dir,
                 '--output_file_name', archive_name),
                raise_exc=False)
            if code:
                raise ImageAPIError(
                    'Gcimagebundle util returned non-zero code %s. Stderr: %s'
                    % (code, err))

            LOG.info('Uploading compressed image to cloud storage')
            tmp_bucket_name = 'scalr-images-%s-%s' % (random.randint(
                1, 1000000), int(time.time()))
            remote_dir = 'gcs://%s' % tmp_bucket_name

            # Hoist the size lookup out of the callback and scale to a
            # percentage: the original logged `progress / size` as "%",
            # which is a 0..1 fraction and, with Python 2 integer
            # division, always printed 0.
            archive_size = os.path.getsize(archive_path)

            def progress_cb(progress):
                LOG.debug('Uploading {perc}%'.format(
                    perc=100 * progress / archive_size))

            uploader = largetransfer.Upload(archive_path,
                                            remote_dir,
                                            simple=True,
                                            progress_cb=progress_cb)
            uploader.apply_async()
            try:
                uploader.join()
            except:
                # Prefer the uploader's own recorded error over whatever
                # join() raised; clean up the temp bucket before failing.
                if uploader.error:
                    error = uploader.error[1]
                else:
                    error = sys.exc_info()[1]
                msg = 'Image upload failed. Error:\n{error}'
                msg = msg.format(error=error)
                self._remove_bucket(tmp_bucket_name, archive_name,
                                    cloudstorage)
                raise ImageAPIError(msg)
        finally:
            shutil.rmtree(rebundle_dir)
            if os.path.exists(archive_path):
                os.remove(archive_path)

        image_name = name.lower().replace('_', '-') + '-' + str(
            int(time.time()))
        self._register_image(image_name, tmp_bucket_name, archive_name,
                             cloudstorage)

        return '%s/images/%s' % (proj_name, image_name)
Esempio n. 47
0
from scalarizr.linux import coreutils, pkgmgr
from scalarizr.storage import StorageError


logger = logging.getLogger(__name__)


class Lvm2Error(PopenError):
    """Raised when an LVM2 command-line tool exits with an error."""
    pass

if not os.path.exists('/sbin/pvs'):
    pkgmgr.installed('lvm2')


try:
    PVS = which('pvs')
    VGS = which('vgs')
    LVS = which('lvs')

    PVSCAN = which('pvscan')
    PVCREATE = which('pvcreate')
    VGCREATE = which('vgcreate')
    LVCREATE = which('lvcreate')

    LVCHANGE = which('lvchange')
    VGCHANGE = which('vgchange')
    VGEXTEND = which('vgextend')
    VGREDUCE = which('vgreduce')
    VGCFGRESTORE = which('vgcfgrestore')

    PVREMOVE = which('pvremove')
Esempio n. 48
0
    def _create(self, volume, snapshot, snap_lv, tranzit_path,  complete_cb):
        """Stream an LVM snapshot volume's data to the snapshot backend.

        Mounts *snap_lv*, pipes a tar of its contents through pigz/gzip,
        splits the compressed stream into chunks under *tranzit_path*
        and uploads them concurrently. Updates
        ``self._state_map[snapshot.id]`` to COMPLETED or FAILED; errors
        are logged rather than re-raised to the caller.
        """
        try:
            chunk_prefix = '%s.data' % snapshot.id
            snapshot.path = None
            snap_mpoint = mkdtemp()
            try:
                opts = []
                if volume.fstype == 'xfs':
                    # XFS refuses to mount a snapshot carrying a duplicate
                    # UUID unless 'nouuid' is given; mount read-only too.
                    opts += ['-o', 'nouuid,ro']
                mount.mount(snap_lv, snap_mpoint, *opts)
                tar_cmd = ['tar', 'cp', '-C', snap_mpoint, '.']

                # Prefer pigz (parallel gzip) when it is installed
                if which('pigz'):
                    compress_cmd = [which('pigz'), '-5']
                else:
                    compress_cmd = ['gzip', '-5']

                self._logger.debug("Creating and compressing snapshot data.")
                tar = subprocess.Popen(tar_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
                compress = subprocess.Popen(compress_cmd, stdin=tar.stdout, stdout=subprocess.PIPE, stderr=subprocess.PIPE, close_fds=True)
                tar.stdout.close() # Allow tar to receive a SIGPIPE if compress exits.
                # _split consumes the compressed stream and writes chunks
                split = threading.Thread(target=self._split, name='split',
                                  args=(compress.stdout, tranzit_path, chunk_prefix, snapshot))
                split.start()

                # Two uploader threads drain the chunks produced by _split
                uploaders = []
                for i in range(2):
                    uploader = threading.Thread(name="Uploader-%s" % i, target=self._uploader,
                                                                      args=(volume.snap_backend['path'], snapshot))
                    self._logger.debug("Starting uploader '%s'", uploader.getName())

                    uploader.start()
                    uploaders.append(uploader)
                self._logger.debug('uploaders started. waiting compress')

                compress.wait()
                self._logger.debug('compress completed (code: %s). waiting split', compress.returncode)
                if compress.returncode:
                    raise StorageError('Compress process terminated with exit code %s. <err>: %s' % (compress.returncode, compress.stderr.read()))

                split.join()
                self._logger.debug('split completed. waiting uploaders')

                for uploader in uploaders:
                    uploader.join()
                self._logger.debug('uploaders completed')

                # Re-raise any exception captured inside a worker thread,
                # preserving its original traceback (Python 2 syntax).
                if self._inner_exc_info:
                    t, e, s = self._inner_exc_info
                    raise t, e, s

            finally:
                # Always release resources: signal waiters, unmount and
                # drop the snapshot LV, clear captured thread exceptions.
                self._return_ev.set()
                mount.umount(snap_mpoint)
                os.rmdir(snap_mpoint)
                self._lvm.remove_lv(snap_lv)
                self._inner_exc_info = None
            self._state_map[snapshot.id] = Snapshot.COMPLETED
        except (Exception, BaseException), e:
            self._state_map[snapshot.id] = Snapshot.FAILED
            self._logger.exception('Snapshot creation failed. %s' % e)
Esempio n. 49
0
            util.wait_until(image_is_ready, logger=LOG, timeout=600)

        finally:
            try:
                self._remove_bucket(bucket_name, archive_name, cloudstorage)
            except (Exception, BaseException), e:
                LOG.error('Faled to remove image compressed source: %s' % e)

    def _prepare_software(self):
        """Ensure the tooling needed for image bundling is in place.

        Best-effort upgrade of the gcimagebundle package; on RedHat-family
        systems additionally makes the SELinux ``rsync_t`` domain permissive
        so rsync is not blocked during bundling.
        """
        try:
            pkgmgr.latest(self.gcimagebundle_pkg_name)
        except (Exception, BaseException), e:
            # Best effort: an update failure must not abort the snapshot.
            LOG.warn('Gcimagebundle update failed: %s' % e)

        if os_dist.redhat_family:
            semanage = software.which('semanage')
            if not semanage:
                # semanage ships in policycoreutils-python on RHEL/CentOS
                pkgmgr.installed('policycoreutils-python')
                semanage = software.which('semanage')

            util.system2((semanage, 'permissive', '-a', 'rsync_t'))

    def snapshot(self, op, name):
        rebundle_dir = tempfile.mkdtemp()
        archive_path = ''
        try:
            pl = __node__['platform']
            proj_id = pl.get_numeric_project_id()
            proj_name = pl.get_project_id()
            cloudstorage = pl.new_storage_client()
Esempio n. 50
0
from scalarizr.config import BuiltinBehaviours

log = logging.getLogger(__name__)

__rabbitmq__ = __node__['rabbitmq']

SERVICE_NAME = BuiltinBehaviours.RABBITMQ
RABBIT_CFG_PATH = '/etc/rabbitmq/rabbitmq.config'
RABBIT_HOME = '/var/lib/rabbitmq/'
COOKIE_PATH = os.path.join(RABBIT_HOME, '.erlang.cookie')
RABBITMQ_ENV_CNF_PATH = '/etc/rabbitmq/rabbitmq-env.conf'
SCALR_USERNAME = '******'
NODE_HOSTNAME_TPL = 'rabbit@%s'
RABBIT_HOSTNAME_TPL = 'rabbit-%s'

RABBITMQCTL = software.which('rabbitmqctl')
RABBITMQ_SERVER = software.which('rabbitmq-server')

# RabbitMQ from ubuntu repo puts rabbitmq-plugins
# binary in non-obvious place
RABBITMQ_PLUGINS = software.which('rabbitmq-plugins', '/usr/lib/rabbitmq/bin/')


class RabbitMQInitScript(initdv2.ParametrizedInitScript):
    @lazy
    def __new__(cls, *args, **kws):
        obj = super(RabbitMQInitScript, cls).__new__(cls, *args, **kws)
        cls.__init__(obj)
        return obj

    def __init__(self):
Esempio n. 51
0
                    'LIMIT 1', ('HostInitResponse', ))
                raw_msg, format = cur.fetchone()

                if 'xml' == format:
                    msg.fromxml(raw_msg)
                elif 'json' == format:
                    msg.fromjson(raw_msg)

                producer = msg_service.get_producer()
                producer.send(Queues.CONTROL, msg)

            finally:
                cur.close()

        if options.report:  #collecting
            hostname = system2((which('hostname'), ), shell=True)[0]
            tar_file = os.path.join(os.getcwd(), 'report-%s.tar.gz' % hostname)
            json_file = os.path.join(os.getcwd(), 'sysinfo-%s.json' % hostname)

            cnf = bus.cnf
            cnf.bootstrap()
            ini = cnf.rawini
            try:
                log_params = ini.get('handler_file', 'args')
                try:
                    log_file = log_params(0)
                except (IndexError, TypeError):
                    raise
            except Exception, BaseException:
                log_file = '/var/log/scalarizr.log'
Esempio n. 52
0
import shutil
import logging
import functools
import resource


from scalarizr.config import BuiltinBehaviours
from scalarizr.services import BaseConfig, BaseService, lazy
from scalarizr.util import disttool, system2, \
                                PopenError, wait_until, initdv2, software, \
                                firstmatched
from scalarizr.linux.coreutils import chown_r
import pymongo


MONGOD = software.which('mongod')
MONGO_CLI = software.which('mongo')
MONGO_DUMP = software.which('mongodump')
MONGOS = software.which('mongos')

ROUTER_DEFAULT_PORT = 27017
ARBITER_DEFAULT_PORT = 27020
REPLICA_DEFAULT_PORT = 27018
CONFIG_SERVER_DEFAULT_PORT = 27019

SERVICE_NAME = BuiltinBehaviours.MONGODB
STORAGE_PATH = "/mnt/mongodb-storage"

DEFAULT_USER = '******' if disttool.is_debian_based() else 'mongod'
LOG_DIR = '/var/log/mongodb'
if not os.path.isdir(LOG_DIR):
Esempio n. 53
0
log_level :{2}
'''

# Chef file locations differ between Windows (Opscode full-stack installer
# defaults) and POSIX installs (binaries resolved from PATH).
if linux.os.windows_family:
    CLIENT_CONF_PATH = r'C:\chef\client.rb'
    VALIDATOR_KEY_PATH = r'C:\chef\validation.pem'
    CLIENT_KEY_PATH = r'C:\chef\client.pem'
    JSON_ATTRIBUTES_PATH = r'C:\chef\json_attributes.json'
    CHEF_CLIENT_BIN = r'C:\opscode\chef\bin\chef-client.bat'
    CHEF_SOLO_BIN = r'C:\opscode\chef\bin\chef-solo.bat'
else:
    # NOTE(review): which() may raise at import time when chef is not
    # installed — confirm against this project's which() semantics.
    CLIENT_CONF_PATH = '/etc/chef/client.rb'
    VALIDATOR_KEY_PATH =  '/etc/chef/validation.pem'
    CLIENT_KEY_PATH = '/etc/chef/client.pem'
    JSON_ATTRIBUTES_PATH = '/etc/chef/json_attributes.json'
    CHEF_CLIENT_BIN = which('chef-client')
    CHEF_SOLO_BIN = which('chef-solo')


PID_FILE = '/var/run/chef-client.pid'

def extract_json_attributes(chef_data):
    """
    Extract json attributes dictionary from scalr formatted structure
    """
    try:
        json_attributes = json.loads(chef_data.get('json_attributes') or "{}")
    except ValueError, e:
        raise HandlerError("Chef attributes is not a valid JSON: {0}".format(e))

    if chef_data.get('run_list'):
Esempio n. 54
0
                raw_msg, format = cur.fetchone()

                if 'xml' == format:
                    msg.fromxml(raw_msg)
                elif 'json' == format:
                    msg.fromjson(raw_msg)

                producer = msg_service.get_producer()
                producer.send(Queues.CONTROL, msg)

            finally:
                cur.close()


        if options.report:                      #collecting
            hostname = system2((which('hostname'),), shell=True)[0]
            tar_file = os.path.join(os.getcwd(), 'report-%s.tar.gz' % hostname)
            json_file = os.path.join(os.getcwd(), 'sysinfo-%s.json' % hostname)

            cnf = bus.cnf
            cnf.bootstrap()
            ini = cnf.rawini
            try:
                log_params = ini.get('handler_file', 'args')
                try:
                    log_file = log_params(0)
                except (IndexError, TypeError):
                    raise
            except Exception, BaseException:
                log_file = '/var/log/scalarizr.log'
Esempio n. 55
0



SERVICE_NAME = CNF_SECTION = BuiltinBehaviours.RABBITMQ
RABBIT_CFG_PATH = '/etc/rabbitmq/rabbitmq.config'
COOKIE_PATH = '/var/lib/rabbitmq/.erlang.cookie'
RABBITMQ_ENV_CNF_PATH = '/etc/rabbitmq/rabbitmq-env.conf'
SCALR_USERNAME = '******'


class NodeTypes:
	"""RabbitMQ cluster node types."""
	RAM = 'ram'    # node keeps cluster metadata in RAM only
	DISK = 'disk'  # node persists cluster metadata to disk

RABBITMQCTL = software.which('rabbitmqctl')
RABBITMQ_SERVER = software.which('rabbitmq-server')

# RabbitMQ from ubuntu repo puts rabbitmq-plugins
# binary in non-obvious place
try:
	RABBITMQ_PLUGINS = software.which('rabbitmq-plugins')
except LookupError:
	# Fall back to the fixed install location used by the Ubuntu packages
	possible_path = '/usr/lib/rabbitmq/bin/rabbitmq-plugins'

	if os.path.exists(possible_path):
		RABBITMQ_PLUGINS = possible_path
	else:
		# Not found anywhere: propagate the original LookupError
		raise