Example #1
def dump(ctx, db_name, s3_file):
    config = ctx.obj['config']

    import sys
    import tempfile

    from odooku.backends import get_backend
    from odoo.api import Environment
    from odoo.service.db import dump_db

    s3_backend = get_backend('s3')

    with tempfile.TemporaryFile() as t:
        with Environment.manage():
            dump_db(db_name, t)

        t.seek(0)
        if s3_file:
            s3_backend.client.upload_fileobj(t, s3_backend.bucket, s3_file)
        else:
            # Pipe to stdout in CHUNK_SIZE chunks; write to the binary
            # buffer, since the dump data is bytes
            while True:
                chunk = t.read(CHUNK_SIZE)
                if not chunk:
                    break
                sys.stdout.buffer.write(chunk)
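The ctx / ctx.obj['config'] pattern above follows click's context convention. A minimal sketch of how this command might be registered, assuming click and a hypothetical cli group; the argument and option names are illustrative:

import click

@click.group()
@click.pass_context
def cli(ctx):
    ctx.obj = {'config': {}}  # hypothetical: load the real config here

@cli.command('dump')
@click.argument('db_name')
@click.option('--s3-file', default=None,
              help='Upload the dump to this S3 key instead of stdout.')
@click.pass_context
def dump(ctx, db_name, s3_file):
    ...  # body as in the example above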
Example #2
 def dump_tmp(self):
     """Dump to a temp file and return it's location to the caller. Later,
     copy the dump via scp, then clean_tmp.
     """
     t = tempfile.NamedTemporaryFile(suffix='.zip',
                                     prefix='odoo_',
                                     delete=False)
     dump_db(self.env.cr.dbname, t)
     return t.name
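The docstring above sketches the full workflow: dump to a temp file, copy it off-host via scp, then clean up. A minimal sketch of a hypothetical clean_tmp counterpart and caller; record and the scp destination are illustrative assumptions:

import os
import subprocess

def clean_tmp(self, path):
    """Remove the temporary dump once it has been copied away."""
    os.remove(path)

# Hypothetical caller: dump locally, copy via scp, then clean up.
path = record.dump_tmp()
subprocess.check_call(['scp', path, 'backup-host:/backups/'])
record.clean_tmp(path)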
Example #3
    def action_backup(self):
        """Run selected backups."""
        backup = None
        successful = self.browse()

        # Start with local storage
        for rec in self.filtered(lambda r: r.method == "local"):
            filename = self.filename(datetime.now(), ext=rec.backup_format)
            with rec.backup_log():
                # Directory must exist
                try:
                    os.makedirs(rec.folder)
                except OSError:
                    pass

                with open(os.path.join(rec.folder, filename), "wb") as destiny:
                    # Copy the cached backup
                    if backup:
                        with open(backup, "rb") as cached:
                            shutil.copyfileobj(cached, destiny)
                    # Generate new backup
                    else:
                        db.dump_db(self.env.cr.dbname,
                                   destiny,
                                   backup_format=rec.backup_format)
                        backup = backup or destiny.name
                successful |= rec

        # Ensure a local backup exists if we are going to write it remotely
        sftp = self.filtered(lambda r: r.method == "sftp")
        if sftp:
            for rec in sftp:
                filename = self.filename(datetime.now(), ext=rec.backup_format)
                with rec.backup_log():

                    cached = db.dump_db(self.env.cr.dbname,
                                        None,
                                        backup_format=rec.backup_format)

                    with cached:
                        with rec.sftp_connection() as remote:
                            # Directory must exist
                            try:
                                remote.makedirs(rec.folder)
                            except pysftp.ConnectionException:
                                pass

                            # Copy cached backup to remote server
                            with remote.open(
                                    os.path.join(rec.folder, filename),
                                    "wb") as destiny:
                                shutil.copyfileobj(cached, destiny)
                        successful |= rec

        # Remove old files for successful backups
        successful.cleanup()
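Methods like action_backup are normally fired from a scheduled action. A minimal sketch of a hypothetical trigger, where 'db.backup' stands in for the model name:

# Hypothetical cron body: run every configured backup record
env['db.backup'].search([]).action_backup()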
Example #4
    def action_backup(self):
        """Run selected backups."""
        backup = None
        filename = self.filename(datetime.now())
        successful = self.browse()

        # Start with local storage
        for rec in self.filtered(lambda r: r.method == "local"):
            with rec.backup_log():
                # Directory must exist
                try:
                    os.makedirs(rec.folder)
                except OSError:
                    pass

                with open(os.path.join(rec.folder, filename),
                          'wb') as destiny:
                    # Copy the cached backup
                    if backup:
                        with open(backup, 'rb') as cached:
                            shutil.copyfileobj(cached, destiny)
                    # Generate new backup
                    else:
                        db.dump_db(self.env.cr.dbname, destiny)
                        backup = backup or destiny.name
                successful |= rec

        # Ensure a local backup exists if we are going to write it remotely
        sftp = self.filtered(lambda r: r.method == "sftp")
        if sftp:
            if backup:
                cached = open(backup, 'rb')
            else:
                cached = db.dump_db(self.env.cr.dbname, None)

            with cached:
                for rec in sftp:
                    with rec.backup_log():
                        remote = rec.sftp_connection()
                        # Directory must exist
                        try:
                            remote.mkd(rec.folder)
                        except ftplib.Error:
                            pass
                        remote_path = os.path.join(rec.folder, filename)
                        res = remote.storbinary('STOR ' + remote_path, cached)
                        remote.close()
                        _logger.info("FTP STOR reply: %s", res)
                        successful |= rec

        # Remove old files for successful backups
        successful.cleanup()
Example #5
        def iterator():
            with tempfile.TemporaryDirectory() as tmp_dir:
                filename = tmp_dir + "/backup.zip"
                with open(filename, "wb") as f:
                    dump_db(db_name, f)

                with open(filename, "rb") as f:
                    f.seek(0)
                    while 1:
                        data = f.read(16 * 1024)
                        if not data:
                            break
                        yield data
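A generator like this is typically handed to a streaming HTTP response so the whole dump never sits in memory at once. A minimal sketch, assuming werkzeug (the same stack as Example #8):

import werkzeug.wrappers

response = werkzeug.wrappers.Response(
    iterator(),
    headers=[('Content-Type', 'application/octet-stream')],
    direct_passthrough=True,  # hand chunks straight to the WSGI server
)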
Example #6
    def backup(self):
        self.ensure_one()
        if not self.env.user._is_superuser() and not self.env.user.has_group('base.group_system'):
            raise AccessError(_("Only administrators can change the settings"))

        if not self.backup_name:
            return

        self.backup_name = self.backup_name.replace(' ', '_')

        # open()'s third positional argument is buffering, not a file
        # mode, so create the file with 0o600 permissions via os.open()
        fd = os.open("%s/%s.zip" % (backup_dir, self.backup_name),
                     os.O_WRONLY | os.O_CREAT, 0o600)
        with os.fdopen(fd, "wb") as file:
            dump_db(self._cr.dbname, file)
Example #7
    def backup(self):
        self.ensure_one()
        if not self.env.user._is_superuser() and not self.env.user.has_group(
                'base.group_system'):
            raise AccessError(_("Only administrators can change the settings"))

        if not self.backup_name:
            return

        self.backup_name = self.backup_name.replace(' ', '_')

        with open("{}/{}.zip".format(backup_dir, self.backup_name), "wb",
                  0o600) as file:
            dump_db(self._cr.dbname, file)
Example #8
 def client_db_backup(self, db=None, backup_format='zip', **params):
     filename = "%s.%s" % (db, backup_format)
     headers = [
         ('Content-Type', 'application/octet-stream; charset=binary'),
         ('Content-Disposition', http.content_disposition(filename)),
     ]
     try:
         stream = tempfile.TemporaryFile()
         service_db.dump_db(db, stream, backup_format)
     except exceptions.AccessDenied as e:
         raise werkzeug.exceptions.Forbidden(description=str(e))
     except Exception as e:
         _logger.error("Cannot backup db %s", db, exc_info=True)
         raise werkzeug.exceptions.InternalServerError(description=str(e))
     else:
         stream.seek(0)
         response = werkzeug.wrappers.Response(stream,
                                               headers=headers,
                                               direct_passthrough=True)
         return response
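The **params signature and the http.content_disposition helper suggest this handler lives on an Odoo HTTP controller. A minimal sketch of how it might be exposed, assuming a hypothetical route path and auth settings:

from odoo import http

class DatabaseController(http.Controller):

    # Hypothetical route; the real path and auth depend on the module
    @http.route('/client/db/backup', type='http', auth='none',
                methods=['POST'], csrf=False)
    def client_db_backup(self, db=None, backup_format='zip', **params):
        ...  # body as in the example above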
Example #9
    def action_backup(self):
        backup = None
        filename = self.filename(datetime.now())
        successful = self.browse()

        for rec in self:
            with rec.backup_log():
                # Directory must exist
                try:
                    if not os.path.isdir(rec.folder):
                        os.makedirs(rec.folder)
                except ValueError as err:
                    _LOGGER.exception('%s', err)
                    raise exceptions.ValidationError(
                        _('Backup directory must be set!'))
                except OSError as err:
                    _LOGGER.exception('%s', err)

                with open(os.path.join(rec.folder, filename), 'wb') as destiny:
                    # Copy the cached backup
                    if backup:
                        with open(backup, 'rb') as cached:
                            shutil.copyfileobj(cached, destiny)
                    # Generate new backup
                    else:
                        try:
                            db.dump_db(self.env.cr.dbname, destiny)
                            backup = backup or destiny.name
                        except IOError as err:
                            _LOGGER.exception('%s', err)
                successful |= rec

        # Remove old files for successful backups
        successful.cleanup_old_backups()

        return True
Example #10
 def test_exp_database_backup_restore(self):
     dispatch_rpc('db', 'create_database', [
         MASTER_PASSWORD, "muk_dms_file_create_db_test", False, "en",
         "admin", "admin"
     ])
     self.assertTrue('muk_dms_file_create_db_test' in db_list())
     dump_stream = dump_db("muk_dms_file_create_db_test", None, 'zip')
     with tempfile.NamedTemporaryFile(delete=False) as data_file:
         data_file.write(dump_stream.read())
     restore_db('muk_dms_file_restore_db_test', data_file.name, True)
     self.assertTrue('muk_dms_file_restore_db_test' in db_list())
     dispatch_rpc('db', 'drop',
                  [MASTER_PASSWORD, 'muk_dms_file_restore_db_test'])
     dispatch_rpc('db', 'drop',
                  [MASTER_PASSWORD, 'muk_dms_file_create_db_test'])
     self.assertTrue('muk_dms_file_create_db_test' not in db_list())
     self.assertTrue('muk_dms_file_restore_db_test' not in db_list())
Example #11
 def action_backup(self):
     db_name = self.env.cr.dbname
     successful = self.browse()
     for rec in self.filtered(
             lambda r: r.active and r.nextcall == date.today()):
         fname = "{:%Y_%m_%d_%H_%M_%S}-{}-backup.zip".format(
             datetime.now(), db_name)
         backup_file = db.dump_db(db_name, None)
         try:
             client = rec._get_client()
             client.upload_fileobj(backup_file, rec.bucket_name,
                                   '%s/%s' % (rec.folder, fname))
         except ClientError as e:
             logging.error(e)
         else:
             # Only mark this record successful if the upload worked
             successful |= rec
         finally:
             rec.nextcall += _intervalTypes[rec.periodicity](
                 INTERVAL_NUMBER)
             backup_file.close()
     successful.cleanup()
Example #12
 def dump_db(stream):
     return db.dump_db(database_obj.name, stream)
Example #13
 def dump_db(stream):
     return db.dump_db(database_obj.name, stream)
Example #14
 def dump_db(stream):
     return db.dump_db(self.env.cr.dbname, stream)
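Both wrappers bind the database name and leave only the output stream to the caller, matching the db.dump_db(name, stream) signature used throughout these examples. A minimal usage sketch:

import tempfile

with tempfile.TemporaryFile() as stream:
    dump_db(stream)          # writes the zip dump into the stream
    stream.seek(0)
    header = stream.read(4)  # b'PK\x03\x04' for a zip-format dump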
Example #15
    def database_backup(self,
                        bu_type='manual',
                        backup_format='zip',
                        backup_name=False,
                        keep_till_date=False):
        """Returns a dictionary where:
        * keys = database name
        * value = dictionary with:
            * key error and error as value
            * key database and database name as value
        """
        self.ensure_one()

        now = datetime.now()
        error = False

        # check if the database exists
        try:
            if not db_ws.exp_db_exist(self.name):
                error = "Database %s do not exist" % (self.name)
                _logger.warning(error)
        except Exception as e:
            error = ("Could not check if database %s exists. "
                     "This is what we get:\n"
                     "%s" % (self.name, e))
            _logger.warning(error)
        else:
            # create the backups path if it does not exist
            try:
                if not os.path.isdir(self.backups_path):
                    os.makedirs(self.backups_path)
            except Exception as e:
                error = ("Could not create folder %s for backups. "
                         "This is what we get:\n"
                         "%s" % (self.backups_path, e))
                _logger.warning(error)
            else:
                if not backup_name:
                    backup_name = '%s_%s_%s.%s' % (
                        self.name, bu_type, now.strftime('%Y%m%d_%H%M%S'),
                        backup_format)
                backup_path = os.path.join(self.backups_path, backup_name)
                if os.path.isfile(backup_path):
                    return {'error': "File %s already exists" % backup_path}
                backup = open(backup_path, 'wb')
                # backup
                try:
                    db_ws.dump_db(self.name,
                                  backup,
                                  backup_format=backup_format)
                except Exception as e:
                    error = ('Unable to dump the database. '
                             'If you are working in an instance with '
                             '"workers", you can try restarting the service.\n'
                             'This is what we get: %s' % e)
                    _logger.warning(error)
                    backup.close()
                else:
                    backup.close()
                    backup_vals = {
                        'database_id': self.id,
                        'name': backup_name,
                        'path': self.backups_path,
                        'date': now,
                        'type': bu_type,
                        'keep_till_date': keep_till_date,
                    }
                    self.backup_ids.create(backup_vals)
                    _logger.info('Backup %s Created' % backup_name)

                    if bu_type == 'automatic':
                        _logger.info('Reconfiguring next backup')
                        new_date = self.relative_delta(datetime.now(),
                                                       self.backup_interval,
                                                       self.backup_rule_type)
                        self.backup_next_date = new_date

                    # TODO check gdrive backup path
                    if self.syncked_backup_path:
                        # if the path does not exist, create it
                        try:
                            if not os.path.isdir(self.syncked_backup_path):
                                _logger.info('Creating syncked backup folder')
                                os.makedirs(self.syncked_backup_path)
                        except Exception as e:
                            error = ("Could not create folder %s for backups. "
                                     "This is what we get:\n"
                                     "%s" % (self.syncked_backup_path, e))
                            _logger.warning(error)

                        # now we copy the backup
                        _logger.info('Copying backup to the syncked folder')
                        try:
                            syncked_backup = os.path.join(
                                self.syncked_backup_path,
                                self.name + '.%s' % backup_format)
                            shutil.copy2(backup_path, syncked_backup)
                        except Exception as e:
                            error = ("Could not copy into syncked folder. "
                                     "This is what we get:\n"
                                     "%s" % (e))
                            _logger.warning(error)
        if error:
            return {'error': error}
        else:
            return {'backup_name': backup_name}
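Because failures are reported through the return value rather than raised, callers are expected to check for the 'error' key. A minimal sketch, where database is a hypothetical record of this model:

res = database.database_backup(bu_type='automatic')
if res.get('error'):
    _logger.error('Backup failed: %s', res['error'])
else:
    _logger.info('Backup created: %s', res['backup_name'])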