Example #1
    def _create_dbuser(self, username):
        import os
        from stoqlib.lib.process import Process, PIPE
        for envname in ['PGUSER', 'PGHOST']:
            if envname in os.environ:
                del os.environ[envname]

        # See if we can connect to the database
        args = ['psql', 'postgres', username, '-c', 'SELECT 1;']
        proc = Process(args, stdout=PIPE)
        proc.communicate()
        if proc.returncode == 0:
            return 0

        from stoqlib.lib.kiwilibrary import library

        createdbuser = library.get_resource_filename('stoq', 'scripts',
                                                     'createdbuser.sh')

        args = ['pkexec', '-u', 'postgres', createdbuser, username]
        proc = Process(args)
        proc.communicate()

        if proc.returncode != 0:
            print("ERROR: Failed to run %r" % (args, ))
            return 30
        return 0
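
The probe above relies only on psql's exit status: if SELECT 1; succeeds for the given user, the role already works and nothing else needs to happen; otherwise the pkexec helper is invoked to create it. A minimal standalone sketch of the same connectivity check, using the standard library's subprocess module in place of stoqlib's Process wrapper (the function name and the DEVNULL redirection are assumptions for illustration):

import os
import subprocess

def can_connect(username, dbname='postgres'):
    """Return True if psql can run a trivial query as `username`."""
    env = os.environ.copy()
    # PGUSER/PGHOST would override the user and host we pass explicitly
    for envname in ('PGUSER', 'PGHOST'):
        env.pop(envname, None)
    proc = subprocess.run(['psql', dbname, username, '-c', 'SELECT 1;'],
                          env=env,
                          stdout=subprocess.DEVNULL,
                          stderr=subprocess.DEVNULL)
    return proc.returncode == 0
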
Example #2
    def _create_dbuser(self, username):
        import os
        from stoqlib.lib.process import Process, PIPE
        for envname in ['PGUSER', 'PGHOST']:
            if envname in os.environ:
                del os.environ[envname]

        # See if we can connect to the database
        args = ['psql', 'postgres', username, '-c', 'SELECT 1;']
        proc = Process(args, stdout=PIPE)
        proc.communicate()
        if proc.returncode == 0:
            return 0

        from stoqlib.lib.kiwilibrary import library

        createdbuser = library.get_resource_filename(
            'stoq', 'scripts', 'createdbuser.sh')

        args = ['pkexec',
                '-u', 'postgres',
                createdbuser,
                username]
        proc = Process(args)
        proc.communicate()

        if proc.returncode != 0:
            print("ERROR: Failed to run %r" % (args, ))
            return 30
        return 0
Example #3
    def check_version(self, store):
        """Verify that the database version is recent enough to be supported
        by stoq. Emits a warning if the version isn't recent enough, suitable
        for usage by an installer.

        :param store: a store
        """
        if self.rdbms == 'postgres':
            version = store.execute('SELECT VERSION();').get_one()[0]
            server_version = version.split(' ', 2)[1]
            assert server_version.count('.') == 2, version
            parts = server_version.split(".")[:2]
            try:
                svs = list(map(int, parts))
            except ValueError:
                log.info("Error getting server version: %s" %
                         (server_version, ))
                return

            # Client version
            kwargs = {}
            args = ['psql']
            if _system == 'Windows':
                # FIXME: figure out why this isn't working
                return
            else:
                args.append('--version')
            p = Process(args, stdout=PIPE, **kwargs)
            stdout = p.communicate()[0]
            line = stdout.split('\n', 1)[0]
            if line.endswith('\r'):
                line = line[:-1]

            parts = line.split(' ')
            # assert len(parts) == 3, parts
            if len(parts) != 3:
                log.info("Error getting psql version: %s" % (line, ))
                return

            client_version = parts[2]
            # assert client_version.count('.') == 2, line
            if client_version.count('.') != 2:
                log.info("Error getting pg version: %s" % (client_version, ))
                return

            cvs = list(map(int, client_version.split('.')))[:2]

            if svs != cvs:
                warning(
                    _(u"Problem with PostgreSQL version"),
                    _(u"The version of the PostgreSQL database server (%s) and the "
                      "postgres client tools (%s) differ. I will let you use "
                      "Stoq, but you will always see this warning when "
                      "starting Stoq until you resolve the version "
                      "incompatibility by upgrading the server or the client "
                      "tools.") % (server_version, client_version))
        else:
            raise NotImplementedError(self.rdbms)
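
The version check boils down to extracting the major.minor pair from the SELECT VERSION(); banner and from the third field of the psql --version output, then warning when the two pairs differ. A hedged sketch of just that parsing step (the helper name and the sample banner strings are invented for illustration):

def major_minor(version_string):
    """Return (major, minor) as integers from a dotted version string."""
    return tuple(int(part) for part in version_string.split('.')[:2])

# Sample banners, for illustration only
server_banner = 'PostgreSQL 9.1.24 on x86_64-pc-linux-gnu, compiled by gcc'
client_banner = 'psql (PostgreSQL) 9.2.24'

server = major_minor(server_banner.split(' ', 2)[1])    # (9, 1)
client = major_minor(client_banner.split(' ')[2])       # (9, 2)

if server != client:
    print('server %d.%d and client %d.%d differ' % (server + client))
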
Example #4
    def check_version(self, store):
        """Verify that the database version is recent enough to be supported
        by stoq. Emits a warning if the version isn't recent enough, suitable
        for usage by an installer.

        :param store: a store
        """
        if self.rdbms == 'postgres':
            version = store.execute('SELECT VERSION();').get_one()[0]
            server_version = version.split(' ', 2)[1]
            assert server_version.count('.') == 2, version
            parts = server_version.split(".")[:2]
            try:
                svs = list(map(int, parts))
            except ValueError:
                log.info("Error getting server version: %s" % (server_version, ))
                return

            # Client version
            kwargs = {}
            args = ['psql']
            if _system == 'Windows':
                # FIXME: figure out why this isn't working
                return
            else:
                args.append('--version')
            p = Process(args, stdout=PIPE, **kwargs)
            stdout = p.communicate()[0]
            line = stdout.split('\n', 1)[0]
            if line.endswith('\r'):
                line = line[:-1]

            parts = line.split(' ')
            # assert len(parts) == 3, parts
            if len(parts) != 3:
                log.info("Error getting psql version: %s" % (line, ))
                return

            client_version = parts[2]
            # assert client_version.count('.') == 2, line
            if client_version.count('.') != 2:
                log.info("Error getting pg version: %s" % (client_version, ))
                return

            cvs = list(map(int, client_version.split('.')))[:2]

            if svs != cvs:
                warning(_(u"Problem with PostgreSQL version"),
                        _(u"The version of the PostgreSQL database server (%s) and the "
                          "postgres client tools (%s) differ. I will let you use "
                          "Stoq, but you will always see this warning when "
                          "starting Stoq until you resolve the version "
                          "incompatibility by upgrading the server or the client "
                          "tools.") % (server_version, client_version))
        else:
            raise NotImplementedError(self.rdbms)
Example #5
    def _create_dbuser(self, username):
        import os
        from stoqlib.lib.process import Process, PIPE

        for envname in ["PGUSER", "PGHOST"]:
            if envname in os.environ:
                del os.environ[envname]

        # See if we can connect to the database
        args = ["psql", "postgres", username, "-c", "SELECT 1;"]
        proc = Process(args, stdout=PIPE)
        proc.communicate()
        if proc.returncode == 0:
            return 0

        args = ["pkexec", "-u", "postgres", "stoqcreatedbuser", username]
        proc = Process(args)
        proc.communicate()
        if proc.returncode != 0:
            print("ERROR: Failed to run %r" % (args,))
            return 30
        return 0
Example #6
    def _create_dbuser(self, username):
        import os
        from stoqlib.lib.process import Process, PIPE
        for envname in ['PGUSER', 'PGHOST']:
            if envname in os.environ:
                del os.environ[envname]

        # See if we can connect to the database
        args = ['psql', 'postgres', username, '-c', 'SELECT 1;']
        proc = Process(args, stdout=PIPE)
        proc.communicate()
        if proc.returncode == 0:
            return 0

        args = ['pkexec',
                '-u', 'postgres',
                'stoqcreatedbuser',
                username]
        proc = Process(args)
        proc.communicate()
        if proc.returncode != 0:
            print("ERROR: Failed to run %r" % (args, ))
            return 30
        return 0
Example #7
    def execute_sql(self, filename, lock_database=False):
        """Executes raw SQL commands read from a file against the database.

        :param filename: filename with SQL commands
        :param lock_database: If the existing tables in the database should be
          locked
        :returns: return code, ``0`` if succeeded, positive integer for failure
        """
        log.info("Executing SQL script %s database locked=%s" % (filename,
                                                                 lock_database))

        if self.rdbms == 'postgres':
            # Okay, this might look crazy, but it's actually the only way
            # to execute many SQL statements in PostgreSQL and
            # 1) Stop immediately when an error occurs
            # 2) Print the error message, the filename and the line number where
            #    the error occurred.
            # 3) Do not print anything on the output unless it's a warning or
            #    an error
            args = ['psql']
            # -U needs to go in first or psql on Windows gets confused
            args.extend(self.get_tool_args())
            args.extend(['-n', '-q'])

            kwargs = {}
            if _system == 'Windows':
                # Hide the console window
                # For some reason XP doesn't like interacting with
                # processes via pipes
                read_from_pipe = False
            else:
                read_from_pipe = True

            # We have two different execution modes,
            # 1) open stdin (-) and write the data via a pipe,
            #    this allows us to also disable notices and info messages,
            #    so that only warnings are printed, we also fail if a warning
            #    or error is printed
            # 2) Pass in the file normally to psql, no error reporting included
            if read_from_pipe:
                args.extend(['-f', '-'])
                args.extend(['--variable', 'ON_ERROR_STOP='])
            else:
                args.extend(['-f', filename])

            args.append(self.dbname)
            log.debug('executing %s' % (' '.join(args), ))
            proc = Process(args,
                           stdin=PIPE,
                           stdout=PIPE,
                           stderr=PIPE,
                           **kwargs)

            proc.stdin.write('BEGIN TRANSACTION;')
            if lock_database:
                store = self.create_store()
                lock_query = store.get_lock_database_query()
                proc.stdin.write(lock_query)
                store.close()

            if read_from_pipe:
                # We don't want to see notices on the output, skip them,
                # this will make all reported line numbers offset by 1
                proc.stdin.write("SET SESSION client_min_messages TO 'warning';")

                data = open(filename).read()
                # Rename serial into bigserial, for 64-bit id columns
                data = data.replace('id serial', 'id bigserial')
                data += '\nCOMMIT;'
            else:
                data = None
            stdout, stderr = proc.communicate(data)
            if read_from_pipe and stderr:
                raise SQLError(stderr[:-1])
            return proc.returncode
        else:
            raise NotImplementedError(self.rdbms)
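
In pipe mode the method effectively runs psql -n -q -f - --variable ON_ERROR_STOP= against the target database, writes the script wrapped in BEGIN/COMMIT to stdin, and treats anything printed on stderr as a failure. A simplified sketch of that flow using the standard library's subprocess module (the function name is an assumption; the real method additionally handles Windows, database locking, the client_min_messages setting and the serial-to-bigserial rewrite):

import subprocess

def run_sql_script(dbname, filename):
    """Feed a SQL script to psql via stdin, stopping at the first error."""
    args = ['psql', '-n', '-q', '-f', '-',
            '--variable', 'ON_ERROR_STOP=', dbname]
    with open(filename) as f:
        script = 'BEGIN TRANSACTION;\n' + f.read() + '\nCOMMIT;'
    proc = subprocess.run(args, input=script,
                          capture_output=True, text=True)
    if proc.stderr:
        raise RuntimeError(proc.stderr.rstrip('\n'))
    return proc.returncode
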
Example #8
def start_backup_scheduler(doing_backup):
    _setup_signal_termination()

    if not api.sysparam.get_bool('ONLINE_SERVICES'):
        logger.info("ONLINE_SERVICES not enabled. Not scheduling backups...")
        return

    logger.info("Starting backup scheduler")

    config = get_config()
    backup_schedule = config.get('Backup', 'schedule')
    if backup_schedule is None:
        # By default, we will do 2 backups: one at a random time between
        # 9-11 or 14-17 and another one 12 hours after that.
        # We are using 2, 3 and 4 because they will be summed with 12 below
        hour = random.choice([2, 3, 4, 9, 10])
        minute = random.randint(0, 59)
        backup_schedule = '%d:%d,%s:%d' % (hour, minute, hour + 12, minute)
        config.set('Backup', 'schedule', backup_schedule)
        config.flush()

    backup_hours = [
        list(map(int,
                 i.strip().split(':'))) for i in backup_schedule.split(',')
    ]
    now = datetime.datetime.now()
    backup_dates = collections.deque(
        sorted(
            now.replace(hour=bh[0], minute=bh[1], second=0, microsecond=0)
            for bh in backup_hours))

    while True:
        now = datetime.datetime.now()
        next_date = datetime.datetime.min
        while next_date < now:
            next_date = backup_dates.popleft()
            backup_dates.append(next_date + datetime.timedelta(1))

        time.sleep(max(1, (next_date - now).total_seconds()))

        for i in range(3):
            # FIXME: This is SO UGLY, we should be calling backup_database
            # task directly, but duplicity messes with multiprocessing in a
            # way that it will not work
            args = sys.argv[:]
            for j, arg in enumerate(args[:]):
                if arg == 'run':
                    args[j] = 'backup_database'
                    break

            doing_backup.value = 1
            try:
                p = Process(args)
                stdout, stderr = p.communicate()
            finally:
                doing_backup.value = 0

            if p.returncode == 0:
                break
            else:
                # When duplicity fails in unexpected situations (e.g. the
                # power is shut down suddenly) it can leave a lockfile behind,
                # and that can make any future backup attempts fail too.
                # Check if that was the reason for the failure and, if the
                # lockfile is older than 3h, remove it and try again.
                # Note that this only happens for duplicity (linux) and
                # not for duplicati (windows)
                match = re.search('/.*lockfile.lock', stderr)
                if match is not None:
                    lockfile = match.group(0)
                    now = datetime.datetime.now()
                    mdate = datetime.datetime.fromtimestamp(
                        os.path.getmtime(lockfile))
                    if (now - mdate) > _lock_remove_threshold:
                        os.unlink(lockfile)

                logger.warning(
                    "Failed to backup database:\nstdout: %s\nstderr: %s",
                    stdout, stderr)
                # Retry with an exponential backoff
                time.sleep((60 * 2)**(i + 1))
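
The scheduler first turns the 'HH:MM,HH:MM' configuration string into today's backup datetimes and keeps them in a deque, always re-appending a popped entry one day later so the schedule repeats indefinitely. A small sketch of just that date bookkeeping, detached from the backup process itself (the function names are illustrative, not part of Stoq):

import collections
import datetime

def build_schedule(backup_schedule):
    """Turn 'HH:MM,HH:MM' into a sorted deque of today's backup datetimes."""
    backup_hours = [list(map(int, part.strip().split(':')))
                    for part in backup_schedule.split(',')]
    now = datetime.datetime.now()
    return collections.deque(sorted(
        now.replace(hour=hour, minute=minute, second=0, microsecond=0)
        for hour, minute in backup_hours))

def next_backup(backup_dates, now):
    """Pop dates until one is in the future, pushing each back by one day."""
    next_date = datetime.datetime.min
    while next_date < now:
        next_date = backup_dates.popleft()
        backup_dates.append(next_date + datetime.timedelta(days=1))
    return next_date

dates = build_schedule('3:15,15:15')
print(next_backup(dates, datetime.datetime.now()))
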
Example #9
    def execute_sql(self, filename, lock_database=False):
        """Executes raw SQL commands read from a file against the database.

        :param filename: filename with SQL commands
        :param lock_database: If the existing tables in the database should be
          locked
        :returns: return code, ``0`` if succeeded, positive integer for failure
        """
        log.info("Executing SQL script %s database locked=%s" % (filename,
                                                                 lock_database))

        if self.rdbms == 'postgres':
            # Okay, this might look crazy, but it's actually the only way
            # to execute many SQL statements in PostgreSQL and
            # 1) Stop immediately when an error occurs
            # 2) Print the error message, the filename and the line number where
            #    the error occurred.
            # 3) Do not print anything on the output unless it's a warning or
            #    an error
            args = ['psql']
            # -U needs to go in first or psql on Windows gets confused
            args.extend(self.get_tool_args())
            args.extend(['-n', '-q'])

            kwargs = {}
            if _system == 'Windows':
                # Hide the console window
                # For some reason XP doesn't like interacting with
                # processes via pipes
                read_from_pipe = False
            else:
                read_from_pipe = True

            # We have two different execution modes,
            # 1) open stdin (-) and write the data via a pipe,
            #    this allows us to also disable notices and info messages,
            #    so that only warnings are printed, we also fail if a warning
            #    or error is printed
            # 2) Pass in the file normally to psql, no error reporting included
            if read_from_pipe:
                args.extend(['-f', '-'])
                args.extend(['--variable', 'ON_ERROR_STOP='])
            else:
                args.extend(['-f', filename])

            args.append(self.dbname)
            log.debug('executing %s' % (' '.join(args), ))
            proc = Process(args,
                           stdin=PIPE,
                           stdout=PIPE,
                           stderr=PIPE,
                           **kwargs)

            proc.stdin.write('BEGIN TRANSACTION;')
            if lock_database:
                store = self.create_store()
                lock_query = store.get_lock_database_query()
                proc.stdin.write(lock_query)
                store.close()

            if read_from_pipe:
                # We don't want to see notices on the output, skip them,
                # this will make all reported line numbers offset by 1
                proc.stdin.write("SET SESSION client_min_messages TO 'warning';")

                data = open(filename).read()
                # Rename serial into bigserial, for 64-bit id columns
                data = data.replace('id serial', 'id bigserial')
                data += '\nCOMMIT;'
            else:
                data = None
            stdout, stderr = proc.communicate(data)
            if read_from_pipe and stderr:
                raise SQLError(stderr[:-1])
            return proc.returncode
        else:
            raise NotImplementedError(self.rdbms)
Example #10
def start_backup_scheduler(doing_backup):
    _setup_signal_termination()

    if not api.sysparam.get_bool('ONLINE_SERVICES'):
        logger.info("ONLINE_SERVICES not enabled. Not scheduling backups...")
        return

    logger.info("Starting backup scheduler")

    config = get_config()
    backup_schedule = config.get('Backup', 'schedule')
    if backup_schedule is None:
        # By default, we will do 2 backups: one at a random time between
        # 9-11 or 14-17 and another one 12 hours after that.
        # We are using 2, 3 and 4 because they will be summed with 12 below
        hour = random.choice([2, 3, 4, 9, 10])
        minute = random.randint(0, 59)
        backup_schedule = '%d:%d,%s:%d' % (hour, minute, hour + 12, minute)
        config.set('Backup', 'schedule', backup_schedule)
        config.flush()

    backup_hours = [list(map(int, i.strip().split(':')))
                    for i in backup_schedule.split(',')]
    now = datetime.datetime.now()
    backup_dates = collections.deque(sorted(
        now.replace(hour=bh[0], minute=bh[1], second=0, microsecond=0)
        for bh in backup_hours))

    while True:
        now = datetime.datetime.now()
        next_date = datetime.datetime.min
        while next_date < now:
            next_date = backup_dates.popleft()
            backup_dates.append(next_date + datetime.timedelta(1))

        time.sleep(max(1, (next_date - now).total_seconds()))

        for i in range(3):
            # FIXME: This is SO UGLY, we should be calling backup_database
            # task directly, but duplicity messes with multiprocessing in a
            # way that it will not work
            args = sys.argv[:]
            for j, arg in enumerate(args[:]):
                if arg == 'run':
                    args[j] = 'backup_database'
                    break

            doing_backup.value = 1
            try:
                p = Process(args)
                stdout, stderr = p.communicate()
            finally:
                doing_backup.value = 0

            if p.returncode == 0:
                break
            else:
                # When duplicity fails in unexpected situations (e.g. the
                # power is shut down suddenly) it can leave a lockfile behind,
                # and that can make any future backup attempts fail too.
                # Check if that was the reason for the failure and, if the
                # lockfile is older than 3h, remove it and try again.
                # Note that this only happens for duplicity (linux) and
                # not for duplicati (windows)
                match = re.search('/.*lockfile.lock', stderr)
                if match is not None:
                    lockfile = match.group(0)
                    now = datetime.datetime.now()
                    mdate = datetime.datetime.fromtimestamp(os.path.getmtime(lockfile))
                    if (now - mdate) > _lock_remove_threshold:
                        os.unlink(lockfile)

                logger.warning(
                    "Failed to backup database:\nstdout: %s\nstderr: %s",
                    stdout, stderr)
                # Retry with an exponential backoff
                time.sleep((60 * 2) ** (i + 1))
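
The retry policy at the end is a bounded exponential backoff: up to three attempts, sleeping (60 * 2) ** (attempt + 1) seconds after each failure, i.e. two minutes, then four hours, then roughly twenty days. A generic sketch of the pattern (the helper name and the boolean-returning callable are placeholders, not Stoq APIs):

import time

def retry_with_backoff(attempt_once, attempts=3, base=60 * 2):
    """Call attempt_once() until it returns True, sleeping base ** (i + 1)
    seconds after each failed attempt."""
    for i in range(attempts):
        if attempt_once():
            return True
        # 2 minutes after the first failure, 4 hours after the second, ...
        time.sleep(base ** (i + 1))
    return False

# Hypothetical usage: retry_with_backoff(lambda: run_backup() == 0)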