def dump_by_table_sh(sc, db, target_dir):
    db_format = dict(user=config.PSQL_USER, pw=config.PSQL_PASSWORD,
                     host=config.PSQL_HOST, port=config.PSQL_PORT, db=db)
    dbname = 'postgresql://{user}:{pw}@{host}:{port}/{db}'.format(**db_format)
    for itb in get_sql_tables(sc, db=db):
        tb = sc + '.' + itb
        fn = os.path.join(target_dir, itb + '.sql')
        print('Dumping table ', itb, ' to file ', fn)
        with open(fn, 'w+') as f:
            pg_dump('--dbname', dbname, '--table', tb, _out=f)


if __name__ == '__main__':
    sc = 'out_replace_basesmall'
    db = 'storage2'
    source_base = '/run/user/1000/gvfs/dav:host=drive.switch.ch,ssl=true,prefix=%2Fremote.php%2Fdav/files/[email protected]'
    source_dir = 'SQL_DUMPS/out_replace_basesmall/'
    target_dir = os.path.join(source_base, source_dir)
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)
    dump_by_table_sh(sc, db, target_dir)
def main():
    _logger.info('Starting process for databases backup')

    if len(DATABASES) == 0:
        try:
            # Raise exception if databases field is empty
            raise NameError('No databases')
        except NameError:
            _logger.exception('No databases to backup')
            raise

    for db in DATABASES:
        _logger.info('Starting backup of database: %s', db)
        uri = (
            '--dbname=postgres://%s@localhost/%s?passfile=%s.pgpass'
            % (PG_USER, db, WORKDIR)
        )  # Set URI with passfile as pg_dump doesn't accept hardcoded passwords
        pg_dump(
            uri,
            '--no-privileges',  # No privileges and no owner to avoid permissions conflicts
            '--no-owner',
            '-f', BACKUP_PATH + db + '.backup',  # Output file
        )

    _logger.info('Ending database backup process')
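The passfile referenced in the connection URI above follows PostgreSQL's standard .pgpass format (one hostname:port:database:username:password entry per line) and is ignored by libpq unless its permissions are 0600 or stricter. A minimal sketch of writing such a file before running a backup; the path and credential values are placeholders, not taken from the example above:

import os
import stat

def write_pgpass(path, host, port, db, user, password):
    # One entry per line: hostname:port:database:username:password
    with open(path, "w") as f:
        f.write("{}:{}:{}:{}:{}\n".format(host, port, db, user, password))
    # libpq ignores the file unless permissions are 0600 or stricter
    os.chmod(path, stat.S_IRUSR | stat.S_IWUSR)

# Hypothetical usage matching the URI pattern above
write_pgpass("/var/backups/.pgpass", "localhost", 5432, "mydb", "backup_user", "s3cret")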
def processarb(self):
    from sh import pg_dump
    os.putenv('PGPASSFILE', self.pgpass)
    h = '--host={0}'.format(self.maquina)
    p = '--port={0}'.format(self.porta)
    d = '--dbname={0}'.format(self.banco)
    u = '--username={0}'.format(self.usuario)
    with gzip.open(self.arquivo, 'wb') as f:
        pg_dump('--no-password', h, p, d, u, _out=f)
    os.unsetenv('PGPASSFILE')
def main():
    with open('backup.psql', 'wb') as f:
        pg_dump('--dbname', os.getenv('PG_CONNECTION'), _out=f)

    service_account_info = json.loads(os.getenv('GDRIVE_SERVICE_ACCOUNT'))
    google_credentials = service_account.Credentials.from_service_account_info(
        service_account_info, scopes=['https://www.googleapis.com/auth/drive'])
    authed_session = AuthorizedSession(google_credentials)

    folderId = createFolder(authed_session)
    upload('backup.psql', authed_session, folderId, 'application/zip')
def backup_reference():
    """ Dump the reference db to sql. """
    with open(LOCAL_REF_FILE_NAME, 'wb') as f:
        pg_dump('-h', ctx.ref_db_hostname, '-U', 'postgres',
                '-d', ctx.CONTEXT[ctx.DB_NAME], _out=f)
    return _handle_copy_up(LOCAL_REF_FILE_NAME)
def backup_airspace():
    """ Dump the airspace db to sql. """
    with open(LOCAL_AIRSPACE_FILE_NAME, 'wb') as f:
        pg_dump('-h', ctx.geo_db_hostname, '-U', 'postgres',
                '-d', ctx.CONTEXT[ctx.DB_NAME], _out=f)
    return _handle_copy_up(LOCAL_AIRSPACE_FILE_NAME)
def _dump_and_restore_dcc_ids(old_conn_str, new_conn_str, starttime):
    logger.info({
        'msg': 'dumping dcc_id tables from old database',
        'elapsed': secs_since(starttime)
    })
    dump_file_path = _temp_dump_file("dcc")
    pg_dump(_dcc_dump_args(old_conn_str, dump_file_path))
    pg_restore(_restore_args(new_conn_str, dump_file_path))
    os.remove(dump_file_path)
    logger.info({
        'msg': 'finished restoring dcc_id tables into new database',
        'elapsed': secs_since(starttime)
    })
def postgres_backup_database(id_task, task):
    date_backup = datetime.datetime.now()
    databases = task.source.split(",")
    status = "success"
    for db in databases:
        db = db.strip()
        current_path = task.dest + date_backup.strftime("%d-%m-%Y_%H-%M-%S-") + db + ".gz"
        try:
            with gzip.open(current_path, 'wb') as f:
                pg_dump('-U', task.password, db, _out=f)
        except Exception as e:
            status = "backup failed: " + str(e)
    if status == "success":
        sql.add_task_log(id_task, "Completed successfully: " + task.source, 1)
    else:
        sql.add_task_log(id_task, status.encode('utf8'), 0)
def backup(user, database, date_suffix, config_file, config_dir):
    """ Back up PG database, copy to S3 """
    now = str(datetime.datetime.now())

    # backup
    if date_suffix:
        s3_file_name = f"{database}-{now}.psql"
    else:
        s3_file_name = f"{database}.psql"
    db_dump = sh.pg_dump("-U", f"{user}", "-w", f"{database}")

    # upload to S3
    s3simple = S3Simple(region_name=aws_region,
                        profile=aws_profile,
                        bucket_name=backup_bucket)
    s3simple.put_to_s3(key=s3_file_name, body=str(db_dump))

    # config file or dir?
    if config_file:
        file_parts = config_file.split('/')
        s3_name = file_parts[-1]
        s3simple.send_file_to_s3(local_file=config_file, s3_file=s3_name)
    if config_dir:
        path_parts = config_dir.split('/')
        s3_name = 'backup' + '_'.join(path_parts) + '.zip'
        zip_name = tmp + s3_name
        result = sh.zip("-r", zip_name, config_dir)
        s3simple.send_file_to_s3(local_file=zip_name, s3_file=s3_name)
def migrate_table(self, db_settings, old_db, old_table, verbose):
    with ShVerbose(verbose=verbose):
        sh.psql(sh.pg_dump(old_db,
                           h=db_settings['HOST'],
                           p=db_settings['PORT'],
                           U=db_settings['USER'],
                           t=old_table,
                           _piped=True),
                db_settings['NAME'],
                h=db_settings['HOST'],
                p=db_settings['PORT'],
                U=db_settings['USER'])
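The example above relies on sh's _piped=True, which lets a pg_dump command be passed as the first argument to psql so its output streams straight into psql's stdin without touching disk. A standalone sketch of the same piping pattern, using placeholder connection values rather than anything from the snippet above:

import sh

# Hypothetical connection settings for illustration only
src_db, dst_db = "source_db", "target_db"
host, port, user = "localhost", "5432", "postgres"

# pg_dump output is piped directly into psql; nothing is written to disk
sh.psql(
    sh.pg_dump(src_db, t="some_table", h=host, p=port, U=user, _piped=True),
    dst_db, h=host, p=port, U=user,
)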
def dump_database(dest_folder, database_name):
    """ Dumps a database to a plain SQL file using pg_dump

    Args:
        dest_folder (str): Folder where the function will save the dump
        database_name (str): Database name that will be dumped

    Returns:
        The full dump path and name
    """
    logger.debug("Dumping database %s into %s folder", database_name, dest_folder)
    dump_name = os.path.join(dest_folder, "database_dump.sql")
    pg_dump(database_name, no_owner=True, file=dump_name)
    return dump_name
def dump_database():
    """Dumps the database via pg_dump."""
    from sh import pg_dump
    from dispatch.config import DATABASE_HOSTNAME, DATABASE_PORT, DATABASE_CREDENTIALS

    username, password = str(DATABASE_CREDENTIALS).split(":")
    pg_dump(
        "-f", "dispatch-backup.dump",
        "-h", DATABASE_HOSTNAME,
        "-p", DATABASE_PORT,
        "-U", username,
        "dispatch",
        _env={"PGPASSWORD": password},
    )
def generate_db_backup(host, port, user, password, database):
    """
    Generate a Postgres dump file from the database.
    """
    now = datetime.datetime.now().strftime("%Y-%m-%d")
    filename = "%s-zou-db-backup.sql.gz" % now
    with gzip.open(filename, "wb") as archive:
        pg_dump(
            "-h", host,
            "-p", port,
            "-U", user,
            database,
            _out=archive,
            _env={"PGPASSWORD": password},
        )
    return filename
def create_backup():
    """Create postgres db backup file and return file path. """
    try:
        storage_root = str(settings.BACKUPS_STORAGE_ROOT)
        datetime = timezone.localtime().strftime('%d%b%Y%H%M%S')
        filename = '{}/{}_{}'.format(storage_root, datetime, 'backup.gz')
        db = settings.DATABASES['default']
        host = db['HOST']
        name = db['NAME']
        user = db['USER']
        password = db['PASSWORD']
        with gzip.open(filename, 'wb') as f:
            pg_dump('-h', host, '-U', user, name, _out=f, _in=password)
        return filename
    except Exception as e:
        return None
def dump(url=None):
    ensure('tmp')
    ensure('log')
    url = get_db_url(url)
    host, port, dbname, username, password = parse_db_url(url)
    date = datetime.datetime.now().strftime('%Y-%m-%d')
    dump_file = '%s-%s.dump' % (dbname, date)
    local_path = 'tmp/%s' % dump_file
    args = [
        '--format=custom',
        '--file', local_path,
        '--host', host,
        '--port', port
    ]
    if username:
        args += ['--username', username]
    env = os.environ.copy()
    if password:
        env['PGPASSWORD'] = password
    args.append(dbname)
    print('Dumping DB to', local_path)
    sh.pg_dump(*args, _env=env, _err='log/pg_dump_error.txt')
    return local_path
def _dump_and_restore_id_maps(site, old_conn_str, new_conn_str, starttime):
    logger.info({
        'msg': 'dumping id_map tables from old database for ' + site + ' site.',
        'elapsed': secs_since(starttime)
    })
    dump_file_path = _temp_dump_file(site)
    pg_dump(_dump_args(site, old_conn_str, dump_file_path))
    logger.info({
        'msg': 'inserting id_map dumps into new database for ' + site + ' site.',
        'elapsed': secs_since(starttime)
    })
    pg_restore(_restore_args(new_conn_str, dump_file_path))
    os.remove(dump_file_path)
    logger.info({
        'msg': 'finished restoring id_map dumps into new database for ' + site + ' site.',
        'elapsed': secs_since(starttime)
    })
def backup_postgres():
    """Backs up the postgres database.

    -h host, -U user, -w no-password (taken from the environment).
    Streaming output to be zipped on the go, keeping memory use minimal.
    """
    output_path = '{}/postgres_backup_{}.gz'.format(settings.BACKUP_DIR, int(time.time()))
    with gzip.open(output_path, 'wb') as f:
        pg_dump('-h', os.environ['DATABASE_HOST'],
                '-U', os.environ['DATABASE_USER'],
                os.environ['DATABASE_NAME'],
                '-w',
                _env={'PGPASSWORD': os.environ['DATABASE_PASSWORD']},
                _out=f)
    return "Backup of postgres db: {} success.".format(os.environ['DATABASE_NAME'])
def dump_psql_copy_tables(fp, survey_name, *args, **kwargs):
    tables = ['mobile_users', 'mobile_coordinates', 'mobile_prompt_responses',
              'mobile_cancelled_prompt_responses']
    temp_tables = ['temp_{table}_{survey}'.format(table=t, survey=survey_name)
                   for t in tables]

    with gzip.open(fp, 'wb') as dump_f:
        def _preprocesser(line):
            for idx, table in enumerate(tables):
                line = line.replace(temp_tables[idx], table)
            dump_f.write(line.encode())

        pg_dump('-h', kwargs['host'], '-U', kwargs['user'], '-p', kwargs['port'],
                '-t', temp_tables[0], '-t', temp_tables[1],
                '-t', temp_tables[2], '-t', temp_tables[3],
                kwargs['dbname'], _out=_preprocesser)
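In the snippet above, _out is given a function rather than a file, so sh invokes the callback for each line of pg_dump output, letting the dump be rewritten on the fly before it is gzipped. A minimal sketch of that callback pattern, with a hypothetical rename rule and database name rather than the survey tables used above:

import gzip
from sh import pg_dump

# Hypothetical database name for illustration only
dbname = "example_db"

with gzip.open("rewritten_dump.sql.gz", "wb") as out_f:
    def rewrite_line(line):
        # sh calls this for every line pg_dump emits; edit it before writing
        out_f.write(line.replace("old_schema.", "new_schema.").encode())

    pg_dump(dbname, _out=rewrite_line)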
def main():
    schemas = os.getenv('DUMP_SCHEMAS')
    for schema in schemas.split(" "):
        psql(pg_dump('-h', prod_pg_opts['host'],
                     '-U', prod_pg_opts['username'],
                     '-d', prod_pg_opts['database'],
                     '--schema', schema,
                     '-O', '-v', '--clean',
                     _piped=True),
             '-h', qa_pg_opts['host'],
             '-U', qa_pg_opts['username'],
             '-d', qa_pg_opts['database'])
def db_backup(self):
    start = time.time()
    self.date_string = datetime.today().strftime('%y%m%d')
    self.output_filename = self.database_name + "_" + self.date_string
    if self.schema != "":
        self.output_filename += "_" + self.schema
    self.output_filename += ".backup"
    self.path = os.path.join(self.BACKUP_DIR, self.output_filename)
    print("Backing up database", self.database_name, "to file:\n" + self.path)
    print("\nThis may take a few minutes ...\n")
    if self.schema == "schema":
        pg_dump('-h', 'localhost', '-U', 'postgres', '-s',
                self.database_name, _out=self.path)
    elif self.schema != "":
        pg_dump('-h', 'localhost', '-U', 'postgres', '-F', 'c', '-Z', '9',
                '--schema', self.schema, self.database_name, _out=self.path)
    else:
        pg_dump('-h', 'localhost', '-U', 'postgres', '-F', 'c',
                self.database_name, _out=self.path)
    end = time.time()
    print("Backup complete in", "{0:.1f}".format(end - start), "seconds\n")
def generateBackup():
    with gzip.open('postgres.gz', 'wb') as zipFile:
        pg_dump('-h', 'localhost', '-U', 'postgres', 'postgres', _out=zipFile)
creation_time = os.path.getctime(i)
if (current_time - creation_time) // (24 * 3600) > finish_day:
    os.remove(i)

file_system_mount = os.statvfs(system_mount_path)
disk_size = int((file_system_mount.f_bsize * file_system_mount.f_bavail) / 1000000000)
if disk_size <= disk_need_size:
    message = "Not enough free space on " + db_host + " to create a dump"
    bot.send_message(chat_id, message)
    sys.exit()
try:
    for database in list_db:
        current_datetime = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M")
        pg_dump('-h', db_host, '-U', db_user, '-d', database,
                '--encoding=UTF8', '-j', cpu, '--format=directory',
                '-f', 'backup_' + db_host + '_' + database + '_' + current_datetime)
        t = Thread(target=gziping,
                   args=('backup_' + db_host + '_' + database + '_' + current_datetime, ))
        t.start()
except Exception as e:
    message = "Failed to create a dump on " + db_host
    bot.send_message(chat_id, message)

if args.state == 'restore':
    # tar = tarfile.open(args.files_dump)
    # tar.extractall()
    # tar.close()
    pg_restore('-h', db_host, '-U', db_user, '-d', args.database,
               '-j', cpu, args.files_dump)
def dump(table, host, port, user, password, db):
    """Dump the create table statement for this table."""
    out = pg_dump(db, '--schema-only', t=table, h=host, U=user, p=port,
                  _env={'PGPASSWORD': password})
    return extract_create(out, table)
def backup_database(dbname: str, file: Path) -> None:
    from sh import pg_dump

    with file.open("wb") as file:
        pg_dump(*_maint_conn_args(), "--format=custom", dbname, _out=file)
import gzip
from sh import pg_dump

with gzip.open('./postgresql/baks/backup.gz', 'wb') as f:
    pg_dump('-h', 'localhost', '-U', 'george', 'Student', _out=f)
def copy_schema(log, sql_path, from_config, from_schema, to_config, to_schema,
                structure_only=False):
    log.info(
        f'{from_config["name"]}.{from_schema} -> {to_config["name"]}.{to_schema}'
    )

    log.debug("Dump")
    os.environ["PGPASSWORD"] = from_config["password"]
    if structure_only:
        schema_dump = (
            sql_path / "schemas" /
            f'{from_config["name"]}_{from_schema}_structure_only.sql')
        print(
            sh.pg_dump(
                "-U", from_config["user"],
                "-n", from_schema,
                "-h", from_config["host"],
                "-p", from_config["port"],
                "-s",
                from_config["name"],
                _err_to_out=True,
                _out=str(schema_dump),
            ),
            end="",
        )
    else:
        schema_dump = sql_path / "schemas" / f'{from_config["name"]}_{from_schema}.sql'
        print(
            sh.pg_dump(
                "-U", from_config["user"],
                "-n", from_schema,
                "-h", from_config["host"],
                "-p", from_config["port"],
                from_config["name"],
                _err_to_out=True,
                _out=str(schema_dump),
            ),
            end="",
        )

    log.debug("Modify")
    with fileinput.FileInput(str(schema_dump), inplace=True) as file:
        for line in file:
            print(line.replace(from_schema, to_schema), end="")
    with fileinput.FileInput(str(schema_dump), inplace=True) as file:
        for line in file:
            print(
                line.replace(
                    "CREATE SCHEMA " + to_schema,
                    f"DROP SCHEMA IF EXISTS {to_schema} CASCADE; CREATE SCHEMA {to_schema}; "
                    f'set search_path to {to_schema},public; CREATE EXTENSION IF NOT EXISTS "uuid-ossp"',
                ),
                end="",
            )
    # change role name
    with fileinput.FileInput(str(schema_dump), inplace=True) as file:
        for line in file:
            print(
                line.replace(
                    f'TO {from_config["user"]}',
                    f'TO {to_config["user"]}',
                ),
                end="",
            )
    with fileinput.FileInput(str(schema_dump), inplace=True) as file:
        for line in file:
            print(
                line.replace(
                    f'Owner: {from_config["user"]}',
                    f'Owner: {to_config["user"]}',
                ),
                end="",
            )
    log.debug(f"Saved to file: {schema_dump}")

    log.debug("Import")
    os.environ["PGPASSWORD"] = to_config["password"]
    schemafile = open(schema_dump, "r")
    print(
        sh.psql(
            "-U", to_config["user"],
            "-h", to_config["host"],
            "-p", to_config["port"],
            "--quiet",
            "-o", "/dev/null",
            to_config["name"],
            _err_to_out=True,
            _in=schemafile,
        ),
        end="",
    )
endpoint += f"bot{getenv('TG_TOKEN_BACKUP')}/" timeout = int(getenv("TG_TIMEOUT", 10)) chat = getenv("CHAT_BACKUP") connect = 'postgresql://{username}:{password}@{host}:{port}/{db}'.format( username=getenv("POSTGRES_USER"), password=getenv("POSTGRES_PASSWORD"), host=getenv("POSTGRES_HOST"), db=getenv("POSTGRES_DB"), port='5432') date = dt.now().strftime("%Y_%m_%d_%H_%M") file_name = 'backup_{custom}_{date}.gz'.format(custom=getenv("NAME"), date=date) try: with gzip.open(file_name, 'wb') as f: print(pg_dump(connect, _out=f)) text = "#{custom}\n#d{date}\n{file}".format(custom=getenv("NAME"), date=date, file=file_name) split = (f"/usr/bin/zip -r -s {getenv('MAX_SIZE_BACKUP')} " + f"{file_name}.zip {file_name} --password" + f" {getenv('BACKUP_PASSWORD')}") p = Popen(split, shell=True) p.wait() files = [] content = [] for i, file in enumerate(sorted(glob(f"{file_name}.z*"), key=getmtime)): content.append({"type": "document", "media": f"attach://document{i}"}) files.append((f"document{i}", (file, open(file, "rb")))) if i % 10 == 0:
from sh import pg_dump
import gzip
import boto3
import os

# variables, change these to the names of things for your local project
local_file_name = 'backup.gz'
database_name = 'uploadtest'
s3_bucket_name = 'riley-mathews-database-backups'
s3_file_key = 'test/backup.gz'

with gzip.open(local_file_name, mode='wb') as file:
    # name of your database replaces 'uploadtest'
    pg_dump(database_name, _out=file)

# initialize s3
s3 = boto3.resource('s3')

# name of your s3 bucket goes here!
bucket = s3.Bucket(s3_bucket_name)

with gzip.open(local_file_name, mode='rb') as file:
    # key is the directory under the bucket you want your backup to live
    bucket.put_object(
        Key=s3_file_key,
        Body=file,
        ContentType='application/x-gzip',
        ContentEncoding='gzip',
    )

# os.remove('backup.gz')
def generate_backup(config):
    """ Generates the database dump. """
    print("Generating database dump.")

    # dumps folder creation
    dumps_folder = "{}/cron_db_dumps".format(expanduser('~'))
    try:
        mkpath(dumps_folder)
    except DistutilsFileError:
        print("Error: can't mkdir {}".format(dumps_folder))
        return False

    # dump name generation
    db_name = config.get('db', 'name')
    dump_name = strftime("%Y-%m-%d-%H:%M:%S", gmtime())
    dump_name = "{}/{}-{}.dump".format(dumps_folder, db_name, dump_name)

    # db engine
    db_engine = config.get('db', 'engine')
    if db_engine == 'postgresql':
        dump_format = '-Fc'  # custom format
        sh.pg_dump(dump_format, db_name, '-f', dump_name)
    elif db_engine == 'mysql':
        db_user = config.get('db', 'user')
        db_password = config.get('db', 'password')
        sh.mysqldump('--compress', '-u', db_user,
                     '--password={}'.format(db_password),
                     db_name, '--result-file={}'.format(dump_name))
    else:
        print("Error: DB engine not supported.")
        return False

    # gzip dump
    print("Compressing database dump.")
    with open(dump_name, 'rb') as dump_file:
        dump_gzipped = gzip.open("{}.gz".format(dump_name), 'wb')
        dump_gzipped.writelines(dump_file)
        dump_gzipped.close()

    # remove raw file
    remove(dump_name)

    return "{}.gz".format(dump_name)