Example #1
def do_mysql_backup(tar_output: tarfile.TarFile) -> int:
    result = 0
    list_of_dbs_str = config.get("mysql", "databases")

    # Turn the ugly ini string into a list of db names. str.strip() returns
    # a new string rather than modifying in place, so collect the results.
    list_of_dbs = [s.strip() for s in list_of_dbs_str.split(',')]

    # Attempt to dump the set of databases into some files. This relies on
    # my.cnf being configured in $HOME, and a mysqldump section existing in
    # there.
    for s in list_of_dbs:
        handle, filename = tempfile.mkstemp()
        os.close(handle)
        ret = os.system("mysqldump {0} > {1}".format(s, filename))
        if not os.WIFEXITED(ret) or os.WEXITSTATUS(ret) != 0:
            print("Couldn't dump database {0}".format(s), file=sys.stderr)
            result = 1
            os.unlink(filename)
            continue

        # And put that into the tarfile.
        tar_output.add(filename, arcname="mysql/{0}.db".format(s))
        os.unlink(filename)

    return result
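
A note on the pattern above: os.system interpolates the database name into a shell command line, so a name containing shell metacharacters would corrupt the command. A minimal sketch of the same dump step using subprocess instead (the helper name dump_database is an assumption, not part of the original): the argv list sidesteps the shell entirely, and mkstemp's descriptor replaces the shell redirection.

import os
import subprocess
import tempfile

def dump_database(db_name):
    # Hand the temp file's descriptor straight to mysqldump instead of
    # using a shell redirection; the argv list avoids shell injection.
    handle, filename = tempfile.mkstemp()
    with os.fdopen(handle, 'wb') as out:
        ret = subprocess.run(['mysqldump', db_name], stdout=out).returncode
    if ret != 0:
        os.unlink(filename)
        raise RuntimeError("Couldn't dump database {0}".format(db_name))
    return filename
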
Example #2
    def write(self, file_name):
        if not self.data or not os.path.isdir(self.data):
            raise Exception('Must set data before building')

        gzfile = GzipFile(file_name, 'w')
        tar = TarFile(fileobj=gzfile, mode='w')

        buff = BytesIO(json.dumps(self.control).encode())
        info = TarInfo(name='./CONTROL')
        info.size = buff.getbuffer().nbytes
        tar.addfile(tarinfo=info, fileobj=buff)

        if self.init is not None:
            buff = BytesIO(self.init.encode())
            info = TarInfo(name='./INIT')
            info.size = buff.getbuffer().nbytes
            tar.addfile(tarinfo=info, fileobj=buff)

        data = BytesIO()
        datatar = TarFile(fileobj=data, mode='w')
        datatar.add(self.data, '/')
        datatar.close()
        data.seek(0)

        info = TarInfo(name='./DATA')
        info.size = data.getbuffer().nbytes
        tar.addfile(tarinfo=info, fileobj=data)

        tar.close()
        gzfile.close()
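
A quick way to sanity-check the result is to read the package back. A short sketch, assuming write() was called with the hypothetical name 'pkg.tar.gz':

import json
import tarfile

with tarfile.open('pkg.tar.gz', 'r:gz') as tar:
    print(tar.getnames())  # expect ./CONTROL, optionally ./INIT, then ./DATA
    print(json.load(tar.extractfile('./CONTROL')))
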
Example #3
def do_gerrit_backup(tar_output: tarfile.TarFile) -> int:
    # Only backup all-projects, which counts as config. Everything else is in
    # mysql.
    os.chdir('/home/gerrit/srdata/git/')
    tar_output.add('All-Projects.git', recursive=True)

    return 0
Example #4
 def _createScriptExtensionTarArchive(self, sourceDirectory, scriptExtensionName):
     """ Creates a TAR archive for the given script extension. """
     
     tarFileName = scriptExtensionName + ".tar"
     tarFilePath = os.path.join(self.__buildConfiguration.distDirectory, tarFileName)
     tarFile = TarFile(tarFilePath, "w")
     
     for inputDirectory in ["lib", "src"]:
         baseDirectory = os.path.join(sourceDirectory, inputDirectory)
         if os.path.exists(baseDirectory):
             for packageDirName in os.listdir(baseDirectory):
                 pythonModulesToAddList = list()
                 packageDirectory = os.path.join(baseDirectory, packageDirName)
                 if os.path.exists(packageDirectory):
                     for walkTuple in os.walk(packageDirectory):
                         directoryPath = walkTuple[0]
                         fileNameList = walkTuple[2]
                         for fileName in fileNameList:
                             if fileName.endswith(".py") or fileName == "SCRIPTS":
                                 filePath = os.path.join(directoryPath, fileName)
                                 pythonModulesToAddList.append(filePath)
         
                 for pythonModule in pythonModulesToAddList:
                     startPosition = pythonModule.find(baseDirectory) + len(baseDirectory) + 1
                     archiveName = pythonModule[startPosition:]
                     tarFile.add(pythonModule, archiveName)
     tarFile.close()
     if self.verbose:
         print("Created tar archive '%s'." % tarFilePath)
Example #5
def pack_archives(archive_list, q):
    """Pack the files in the archive_list into tar files"""
    for archive_info in archive_list:
        # first element is tarfile path / archive location
        tar_file_path = archive_info[0]
        try:
            os.unlink(tar_file_path)
        except OSError:
            # the archive file may not exist yet
            pass
        # create the tar file
        tar_file = TarFile(tar_file_path, mode='w')
        logging.debug(
            ("Created TarFile archive file: {}").format(tar_file_path))

        # second element contains the MigrationFiles for this archive
        migration_paths = archive_info[1]
        # loop over the MigrationFiles in the MigrationArchive
        for mp in migration_paths:
            # don't add if it's a directory - files under the directory will
            # be added
            if not (os.path.isdir(mp[0])):
                tar_file.add(mp[0], arcname=mp[1])
                logging.debug(
                    ("    Adding file to TarFile archive: {}").format(mp[0]))
        tar_file.close()
        # calculate digest (element 2), digest format (element 3)
        # and size (element 4) and add to archive
        archive_info[2] = calculate_digest_adler32(tar_file_path)
        archive_info[3] = "ADLER32"
        archive_info[4] = os.stat(tar_file_path).st_size

    q.put(archive_list)
Example #6
    def _createScriptExtensionTarArchive(self, sourceDirectory,
                                         scriptExtensionName):
        """ Creates a TAR archive for the given script extension. """

        tarFileName = scriptExtensionName + ".tar"
        tarFilePath = os.path.join(self.__buildConfiguration.distDirectory,
                                   tarFileName)
        tarFile = TarFile(tarFilePath, "w")

        for inputDirectory in ["lib", "src"]:
            baseDirectory = os.path.join(sourceDirectory, inputDirectory)
            if os.path.exists(baseDirectory):
                for packageDirName in os.listdir(baseDirectory):
                    pythonModulesToAddList = list()
                    packageDirectory = os.path.join(baseDirectory,
                                                    packageDirName)
                    if os.path.exists(packageDirectory):
                        for walkTuple in os.walk(packageDirectory):
                            directoryPath = walkTuple[0]
                            fileNameList = walkTuple[2]
                            for fileName in fileNameList:
                                if fileName.endswith(
                                        ".py") or fileName == "SCRIPTS":
                                    filePath = os.path.join(
                                        directoryPath, fileName)
                                    pythonModulesToAddList.append(filePath)

                    for pythonModule in pythonModulesToAddList:
                        startPosition = pythonModule.find(baseDirectory) + len(
                            baseDirectory) + 1
                        archiveName = pythonModule[startPosition:]
                        tarFile.add(pythonModule, archiveName)
        tarFile.close()
        if self.verbose:
            print("Created tar archive '%s'." % tarFilePath)
Example #7
def pack_archive(request_staging_dir, archive, pr):
    """Create a tar file containing the files that are in the
       MigrationArchive object"""

    # tar_file_path is assigned in a portion of the original function omitted
    # from this excerpt. If the file exists then delete it!
    try:
        os.unlink(tar_file_path)
    except OSError:
        pass
    # create the tar file
    tar_file = TarFile(tar_file_path, mode='w')
    logging.debug(("Created TarFile archive file: {}").format(tar_file_path))

    # get the MigrationFiles belonging to this archive
    migration_files = archive.migrationfile_set.all()
    # loop over the MigrationFiles in the MigrationArchive; the (path, arcname)
    # pairs in migration_paths are built from migration_files in a step this
    # excerpt omits
    for mp in migration_paths:
        # don't add if it's a directory - files under the directory will
        # be added
        if not (os.path.isdir(mp[0])):
            tar_file.add(mp[0], arcname=mp[1])
            logging.debug(
                ("    Adding file to TarFile archive: {}").format(mp[0]))

    tar_file.close()

    ### end of parallelisation

    return tar_file_path
Example #8
 def create_archive(self):
     (handle, path) = mkstemp(dir=self.temp_dir)
     os.close(handle)
     archive = TarFile(path, mode='w')
     archive.add(os.path.join(_common.RSRC, 'full.mp3'), 'full.mp3')
     archive.close()
     return path
Example #9
 def create_archive(self):
     (handle, path) = mkstemp(dir=self.temp_dir)
     os.close(handle)
     archive = TarFile(path, mode="w")
     archive.add(os.path.join(_common.RSRC, "full.mp3"), "full.mp3")
     archive.close()
     return path
Example #10
def atomic_contents_add(
    tar_file: tarfile.TarFile,
    origin_path: Path,
    excludes: List[str],
    arcname: str = ".",
) -> None:
    """Append directories and/or files to the TarFile if excludes wont filter."""

    if _is_excluded_by_filter(origin_path, excludes):
        return None

    # Add directory only (recursive=False) to ensure we also archive empty directories
    tar_file.add(origin_path.as_posix(), arcname, recursive=False)

    for directory_item in origin_path.iterdir():
        if _is_excluded_by_filter(directory_item, excludes):
            continue

        arcpath = PurePath(arcname, directory_item.name).as_posix()
        if directory_item.is_dir() and not directory_item.is_symlink():
            atomic_contents_add(tar_file, directory_item, excludes, arcpath)
            continue

        tar_file.add(directory_item.as_posix(), arcname=arcpath)

    return None
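
_is_excluded_by_filter is not shown in this excerpt. A plausible sketch, assuming the excludes are glob patterns matched with PurePath.match (the original helper may well differ):

from pathlib import PurePath
from typing import List

def _is_excluded_by_filter(item: PurePath, excludes: List[str]) -> bool:
    """Return True if the path matches any exclude glob pattern."""
    return any(item.match(pattern) for pattern in excludes)
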
Example #11
    def download(self):
        """
        Ein Download wird ausgeführt
        """
        self.init2()  # Basisklasse einrichten

        simulation = self.request.POST.get("simulation", False)

        self._setup_path()
        if simulation:
            self.request.echo("<h1>Download Simulation!</h1><pre>")
            self.request.echo("request path: %s\n" % self.request_path)
            log_typ = "download simulation start"
        else:
            log_typ = "download start"

        self.db.log(log_typ, self.context['request_path'])

        artist = self.request.POST.get("artist", "")
        album = self.request.POST.get("album", "")

        files, _ = self._read_dir()

        args = {"prefix": "PyDown_%s_" % self.request.environ["REMOTE_USER"]}
        if self.request.cfg["temp"]:
            args["dir"] = self.request.cfg["temp"]
        temp = NamedTemporaryFile(**args)

        tar = TarFile(mode="w", fileobj=temp)

        if simulation:
            self.request.write("-" * 80)
            self.request.write("\n")

        for file_info in files:
            filename = file_info[0]
            abs_path = posixpath.join(self.request_path, filename)
            arcname = posixpath.join(artist, album, filename)

            if simulation:
                #~ self.request.write("absolute path..: %s\n" % abs_path)
                self.request.write("<strong>%s</strong>\n" % arcname)

            try:
                tar.add(abs_path, arcname)
            except IOError as e:
                self.request.write(
                    "<h1>Error</h1><h2>Can't create archive: %s</h2>" % e)
                try:
                    tar.close()
                except:
                    pass
                try:
                    temp.close()
                except:
                    pass
                return
Example #12
def add_entry(tar: tarfile.TarFile, name: str, file: str) -> None:
    def reset(tarinfo):
        tarinfo.uid = tarinfo.gid = 0
        tarinfo.uname = tarinfo.gname = "root"
        tarinfo.mtime = 0
        tarinfo.mode = 0o644
        return tarinfo

    tar.add(file, os.path.join(name, os.path.basename(file)), filter=reset)
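
Zeroing the owner, mtime and mode through the filter callback is the usual trick for reproducible archives: two runs over identical inputs should produce byte-identical tars. A small usage sketch with assumed file names:

import hashlib
import tarfile

with tarfile.open('bundle.tar', 'w') as tar:      # hypothetical output path
    add_entry(tar, 'etc', '/tmp/example.conf')    # hypothetical input file
with open('bundle.tar', 'rb') as fh:
    print(hashlib.sha256(fh.read()).hexdigest())  # stable across runs
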
Example #13
 def reader(self):
     """Package up filesystem contents as a tarball."""
     result = BytesIO()
     tarball = TarFile(fileobj=result, mode="w")
     for child in self.path.children():
         tarball.add(child.path, arcname=child.basename(), recursive=True)
     tarball.close()
     result.seek(0, 0)
     yield result
Example #14
 def reader(self):
     """Package up filesystem contents as a tarball."""
     result = BytesIO()
     tarball = TarFile(fileobj=result, mode="w")
     for child in self.path.children():
         tarball.add(child.path, arcname=child.basename(), recursive=True)
     tarball.close()
     result.seek(0, 0)
     yield result
Example #15
    def download(self):
        """
        Ein Download wird ausgeführt
        """
        self.init2() # Basisklasse einrichten

        simulation = self.request.POST.get("simulation", False)

        self._setup_path()
        if simulation:
            self.request.echo("<h1>Download Simulation!</h1><pre>")
            self.request.echo("request path: %s\n" % self.request_path)
            log_typ = "download simulation start"
        else:
            log_typ = "download start"

        self.db.log(log_typ, self.context['request_path'])

        artist = self.request.POST.get("artist", "")
        album = self.request.POST.get("album", "")

        files, _ = self._read_dir()

        args = {"prefix": "PyDown_%s_" % self.request.environ["REMOTE_USER"]}
        if self.request.cfg["temp"]:
            args["dir"] = self.request.cfg["temp"]
        temp = NamedTemporaryFile(**args)

        tar = TarFile(mode="w", fileobj=temp)

        if simulation:
            self.request.write("-"*80)
            self.request.write("\n")

        for file_info in files:
            filename = file_info[0]
            abs_path = posixpath.join(self.request_path, filename)
            arcname = posixpath.join(artist, album, filename)

            if simulation:
                #~ self.request.write("absolute path..: %s\n" % abs_path)
                self.request.write("<strong>%s</strong>\n" % arcname)

            try:
                tar.add(abs_path, arcname)
            except IOError as e:
                self.request.write("<h1>Error</h1><h2>Can't create archive: %s</h2>" % e)
                try:
                    tar.close()
                except:
                    pass
                try:
                    temp.close()
                except:
                    pass
                return
Example #16
def make_tarball(archive_name, *args):
    tf = TarFile(archive_name, mode='w')

    for filename in args:
        tf.add(filename)
        print(f'{filename} : added')

    tf.close()


# extract from the archive
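
The trailing comment announces extraction code that this excerpt cuts off. A minimal counterpart sketch (the function name is assumed), sticking to the same plain TarFile API:

def extract_tarball(archive_name, dest='.'):
    tf = TarFile(archive_name, mode='r')
    # On Python 3.12+, prefer tf.extractall(dest, filter='data'), which
    # rejects members with absolute or directory-escaping paths.
    tf.extractall(dest)
    tf.close()
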
Example #17
    def _add_file_to_archive(cls, archive: tarfile.TarFile, filepath: str) -> None:
        '''
        Add the file to the archive. When the archive is extracted, the file should exist in a
        directory called "export_data".

        Args:
            archive: The archive object to add the file to.
            filepath: The path to the file that will be added to the supplied archive.
        '''
        filename = os.path.basename(filepath)
        arcname = os.path.join(cls.DISK_EXPORT_DIR, filename)
        archive.add(filepath, arcname=arcname, recursive=False)
Example #18
def include_tests(archive: TarFile):
    def exclude(tarinfo):
        # tarfile.add()'s exclude= keyword was removed in Python 3.7; the
        # filter= callback receives a TarInfo and returns None to skip it.
        if tarinfo.name.endswith('pyc') or '__pycache__' in tarinfo.name:
            return None
        return tarinfo

    archive.add(join(__PROJECT_DIR__, 'test.py'), 'tests/test.py')
    print('Processing tests/test.py.', file=sys.stderr)
    for d in 'benchmark interpreter test tests __init__.py'.split():
        archive.add(join(__PROJECT_DIR__, 'ifj2017/{}'.format(d)),
                    'tests/ifj2017/{}'.format(d),
                    filter=exclude)
        print('Processing {}.'.format('tests/ifj2017/{}'.format(d)),
              file=sys.stderr)
Example #19
def collect_into_archive():
    session = connect.session(connect.cchdo())

    # let's go and get all the directories.
    ftypes = ['Bottle', 'Exchange Bottle', 'Exchange Bottle (Zipped)']
    bot_query = or_(*[Document.FileType == x for x in ftypes])
    doc_types = ['Documentation', 'PDF Documentation']
    bot_doc_query = or_(*[Document.FileType == x for x in ftypes + doc_types])
    expocodes_with_bottle = session.query(distinct(
        Document.ExpoCode)).filter(bot_query).all()
    expocodes_with_bottle = [x[0] for x in expocodes_with_bottle]
    expocodes_with_bottle.remove(None)
    expocodes_with_bottle.remove('NULL')

    tempdir = mkdtemp()
    log.debug(tempdir)

    # Get all required files for the cruises.
    for expocode in expocodes_with_bottle:
        docs = session.query(Document).filter(
            Document.ExpoCode == expocode).filter(bot_doc_query).all()
        cruise_dir = os.path.join(tempdir, _clean_for_filename(expocode))
        os.makedirs(cruise_dir)

        #log.debug(expocode)
        for doc in docs:
            datapath = doc.FileName
            tmppath = os.path.join(cruise_dir, os.path.basename(datapath))

            try:
                shutil.copy(datapath, tmppath)
            except IOError:
                log.warning(u'missing file: {}'.format(datapath))
    session.close()

    #for root, dirs, files in os.walk(path):
    #    for momo in dirs:
    #        os.chown(os.path.join(root, momo), 502, 20)
    #    for momo in files:
    #        os.chown(os.path.join(root, momo), 502, 20)

    cwd = os.getcwd()
    os.chdir(tempdir)

    # In Python 3 the tar bytes must go to stdout's binary buffer.
    tarball = TarFile(mode='w', fileobj=sys.stdout.buffer)
    tarball.add('.')
    tarball.close()

    os.chdir(cwd)

    shutil.rmtree(tempdir)
Example #20
def replace_or_append_file_to_layer(file_to_replace: str,
                                    content_or_path: bytes,
                                    img: tarfile.TarFile):
    # Is content or path?
    if not os.path.exists(content_or_path):

        # Is a content
        t = tarfile.TarInfo(file_to_replace)
        t.size = len(content_or_path)
        img.addfile(t, io.BytesIO(content_or_path))

    else:
        # Is a path
        img.add(content_or_path, file_to_replace)
Example #21
def replace_or_append_file_to_layer(file_to_replace: str,
                                    content_or_path: bytes,
                                    img: tarfile.TarFile):
    # Is content or path?
    if not os.path.exists(content_or_path):

        # Is a content
        t = tarfile.TarInfo(file_to_replace)
        t.size = len(content_or_path)
        img.addfile(t, io.BytesIO(content_or_path))

    else:
        # Is a path
        img.add(content_or_path, file_to_replace)
Example #22
def pull(hostname: str = None):
    home_dir = str(Path.home())
    io_stream = io.BytesIO()

    tar = TarFile(fileobj=io_stream, mode="w")
    for file_path in CONFIG.files + CONFIG.host_files.get(hostname, []):
        if isinstance(file_path, str):
            tar.add(name=f"{home_dir}/{file_path}", arcname=file_path)
        elif isinstance(file_path, dict):
            tar.add(name=f"{home_dir}/{file_path['src']}",
                    arcname=file_path['dst'])

    tar.close()
    io_stream.seek(0)
    return Response(io_stream.read1(), mimetype='application/x-tar')
Example #23
def do_svn_backup(tar_output: tarfile.TarFile) -> int:
    # Run svnadmin dump through gzip and use that for the backup.
    result = 0
    handle, filename = tempfile.mkstemp()
    admincall = subprocess.Popen(['svnadmin', 'dump', '/srv/svn/sr', '--deltas'],
                                 stdout=subprocess.PIPE,
                                 stderr=open('/dev/null', 'w'))
    gzipcall = subprocess.Popen(['gzip'], stdin=admincall.stdout, stdout=handle)
    admincall.wait()
    gzipcall.wait()
    if admincall.returncode != 0 or gzipcall.returncode != 0:
        print("SVN dump failed", file=sys.stderr)
        result = 1
    os.close(handle)
    tar_output.add(filename, arcname='svn/db.gz')
    os.unlink(filename)
    return result
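
One refinement suggested by the subprocess documentation: close the first process's stdout in the parent so svnadmin receives SIGPIPE if gzip exits early, rather than blocking on a full pipe. A sketch of the adjusted pipeline setup, reusing the names from the example above:

admincall = subprocess.Popen(['svnadmin', 'dump', '/srv/svn/sr', '--deltas'],
                             stdout=subprocess.PIPE,
                             stderr=subprocess.DEVNULL)
gzipcall = subprocess.Popen(['gzip'], stdin=admincall.stdout, stdout=handle)
admincall.stdout.close()  # let svnadmin get SIGPIPE if gzip dies first
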
Example #24
    def run(self, args, argv):
        # Create a temporary tarball with our whole build context and
        # dockerfile for the update
        tmp = tempfile.NamedTemporaryFile(suffix="dckr.tar.gz")
        tmp_tar = TarFile(fileobj=tmp, mode='w')

        # Add the executable to the tarball, using the current
        # configured binfmt_misc path. If we don't get a path then we
        # only need the support libraries copied
        ff, enabled = _check_binfmt_misc(args.executable)

        if not enabled:
            print("binfmt_misc not enabled, update disabled")
            return 1

        if ff:
            tmp_tar.add(args.executable, arcname=ff)

        # Add any associated libraries
        libs = _get_so_libs(args.executable)
        if libs:
            for l in libs:
                tmp_tar.add(os.path.realpath(l), arcname=l)

        # Create a Docker buildfile
        df = StringIO()
        df.write(u"FROM %s\n" % args.tag)
        df.write(u"ADD . /\n")

        df_bytes = BytesIO(bytes(df.getvalue(), "UTF-8"))

        df_tar = TarInfo(name="Dockerfile")
        df_tar.size = df_bytes.getbuffer().nbytes
        tmp_tar.addfile(df_tar, fileobj=df_bytes)

        tmp_tar.close()

        # reset the file pointers
        tmp.flush()
        tmp.seek(0)

        # Run the build with our tarball context
        dkr = Docker()
        dkr.update_image(args.tag, tmp, quiet=args.quiet)

        return 0
Example #25
    def run(self, args, argv):
        # Create a temporary tarball with our whole build context and
        # dockerfile for the update
        tmp = tempfile.NamedTemporaryFile(suffix="dckr.tar.gz")
        tmp_tar = TarFile(fileobj=tmp, mode='w')

        # Add the executable to the tarball, using the current
        # configured binfmt_misc path. If we don't get a path then we
        # only need the support libraries copied
        ff, enabled = _check_binfmt_misc(args.executable)

        if not enabled:
            print("binfmt_misc not enabled, update disabled")
            return 1

        if ff:
            tmp_tar.add(args.executable, arcname=ff)

        # Add any associated libraries
        libs = _get_so_libs(args.executable)
        if libs:
            for l in libs:
                tmp_tar.add(os.path.realpath(l), arcname=l)

        # Create a Docker buildfile
        df = StringIO()
        df.write("FROM %s\n" % args.tag)
        df.write("ADD . /\n")
        # StringIO has no .buf attribute; encode the Dockerfile to bytes so
        # tarfile gets a binary file object and an exact size.
        df_bytes = BytesIO(bytes(df.getvalue(), "UTF-8"))

        df_tar = TarInfo(name="Dockerfile")
        df_tar.size = df_bytes.getbuffer().nbytes
        tmp_tar.addfile(df_tar, fileobj=df_bytes)

        tmp_tar.close()

        # reset the file pointers
        tmp.flush()
        tmp.seek(0)

        # Run the build with our tarball context
        dkr = Docker()
        dkr.update_image(args.tag, tmp, quiet=args.quiet)

        return 0
Example #26
def upload(ssh_client, source_path, dest_path):
    remote_command = "mkdir -p {0}; cd {0}; tar xf - >/dev/null 2>&1"

    transport = ssh_client.get_transport()

    channel = transport.open_channel("session")
    #channel.exec_command("cat > test.tar")
    channel.exec_command(remote_command.format(dest_path))
    #channel.exec_command("cat | ./get_tar.py")

    stream = channel.makefile("wb")

    from tarfile import TarFile
    tar_stream = TarFile(fileobj=stream, mode="w")
    tar_stream.add(source_path)

    tar_stream.close()
    channel.close()
    ssh_client.close()
Example #27
def do_sqlite_backup(comp_name: str, dblocation: str, arcname: str, tar_output: tarfile.TarFile) -> int:
    # Backup contents of a sqlite database. Use sqlite backup command to
    # create a backup first. This essentially copies the db file, but performs
    # all the required lock dancing.
    result = 0
    handle, filename = tempfile.mkstemp()
    backupcall = subprocess.Popen(['sqlite3', dblocation, '.dump'],
                                  stdout=subprocess.PIPE,
                                  stderr=open('/dev/null', 'w'))
    gzipcall = subprocess.Popen(['gzip'], stdin=backupcall.stdout, stdout=handle)
    backupcall.wait()
    gzipcall.wait()
    if backupcall.returncode != 0 or gzipcall.returncode != 0:
        print("{0} DB dump failed".format(comp_name), file=sys.stderr)
        result = 1
    os.close(handle)
    tar_output.add(filename, arcname=arcname + 'sqlite3_dump.gz')
    os.unlink(filename)
    return result
Example #28
    def reader(self, remote_snapshots=None):
        """
        Package up filesystem contents as a tarball.
        """
        result = BytesIO()
        tarball = TarFile(fileobj=result, mode="w")
        for child in self.path.children():
            tarball.add(child.path, arcname=child.basename(), recursive=True)
        tarball.close()

        # You can append anything to the end of a tar stream without corrupting
        # it.  Smuggle some data about the snapshots through here.  This lets
        # tests verify that an incremental stream is really being produced
        # without forcing us to implement actual incremental streams on top of
        # dumb directories.
        if remote_snapshots:
            result.write(u"\nincremental stream based on\n{}".format(
                u"\n".join(snapshot.name
                           for snapshot in remote_snapshots)).encode("ascii"))
        result.seek(0, 0)
        yield result
Example #29
    def run(self, args, argv):
        # Create a temporary tarball with our whole build context and
        # dockerfile for the update
        tmp = tempfile.NamedTemporaryFile(suffix="dckr.tar.gz")
        tmp_tar = TarFile(fileobj=tmp, mode='w')

        # Add the executable to the tarball
        bn = os.path.basename(args.executable)
        ff = "/usr/bin/%s" % bn
        tmp_tar.add(args.executable, arcname=ff)

        # Add any associated libraries
        libs = _get_so_libs(args.executable)
        if libs:
            for l in libs:
                tmp_tar.add(os.path.realpath(l), arcname=l)

        # Create a Docker buildfile
        df = StringIO()
        df.write("FROM %s\n" % args.tag)
        df.write("ADD . /\n")
        # StringIO has no .buf attribute; encode the Dockerfile to bytes so
        # tarfile gets a binary file object and an exact size.
        df_bytes = BytesIO(bytes(df.getvalue(), "UTF-8"))

        df_tar = TarInfo(name="Dockerfile")
        df_tar.size = df_bytes.getbuffer().nbytes
        tmp_tar.addfile(df_tar, fileobj=df_bytes)

        tmp_tar.close()

        # reset the file pointers
        tmp.flush()
        tmp.seek(0)

        # Run the build with our tarball context
        dkr = Docker()
        dkr.update_image(args.tag, tmp, quiet=args.quiet)

        return 0
Example #30
    def reader(self, remote_snapshots=None):
        """
        Package up filesystem contents as a tarball.
        """
        result = BytesIO()
        tarball = TarFile(fileobj=result, mode="w")
        for child in self.path.children():
            tarball.add(child.path, arcname=child.basename(), recursive=True)
        tarball.close()

        # You can append anything to the end of a tar stream without corrupting
        # it.  Smuggle some data about the snapshots through here.  This lets
        # tests verify that an incremental stream is really being produced
        # without forcing us to implement actual incremental streams on top of
        # dumb directories.
        if remote_snapshots:
            result.write(
                u"\nincremental stream based on\n{}".format(
                    u"\n".join(snapshot.name for snapshot in remote_snapshots)
                ).encode("ascii")
            )
        result.seek(0, 0)
        yield result
Example #31
    def test_can_put_extracted_file_from_tar(self):
        tempdir = self.make_tempdir()
        tarname = os.path.join(tempdir, 'mytar.tar')
        filename = os.path.join(tempdir, 'foo')

        # Set up a file to add to the tarfile.
        with open(filename, 'w') as f:
            f.write('bar')

        # Set up the tar file by adding the file to it. Note there is no
        # context manager for TarFile in python 2.6; create the TarFile
        # before the try block so the finally clause can't hit a NameError.
        tar = TarFile(tarname, 'w')
        try:
            tar.add(filename, 'foo')
        finally:
            tar.close()

        # See if an extracted file can be uploaded to s3.
        tar = TarFile(tarname, 'r')
        try:
            with closing(tar.extractfile('foo')) as f:
                self.assert_can_put_object(body=f)
        finally:
            tar.close()
Example #32
    def test_can_put_extracted_file_from_tar(self):
        tempdir = self.make_tempdir()
        tarname = os.path.join(tempdir, "mytar.tar")
        filename = os.path.join(tempdir, "foo")

        # Set up a file to add to the tarfile.
        with open(filename, "w") as f:
            f.write("bar")

        # Set up the tar file by adding the file to it. Note there is no
        # context manager for TarFile in python 2.6; create the TarFile
        # before the try block so the finally clause can't hit a NameError.
        tar = TarFile(tarname, "w")
        try:
            tar.add(filename, "foo")
        finally:
            tar.close()

        # See if an extracted file can be uploaded to s3.
        tar = TarFile(tarname, "r")
        try:
            with closing(tar.extractfile("foo")) as f:
                self.assert_can_put_object(body=f)
        finally:
            tar.close()
Example #33
def upload(ssh_client, source_path, dest_path):
    sftp_client = ssh_client.open_sftp()
    sftp_client.mkdir(dest_path)
    with sftp_client.open(dest_path + "/tarextract.py", "w") as outfile:
        outfile.write(tar_extract_script)
    sftp_client.chmod(dest_path + "/tarextract.py", 0o755)

    remote_command = "cd {0}; ./tarextract.py"

    transport = ssh_client.get_transport()

    channel = transport.open_channel("session")

    channel.exec_command(remote_command.format(dest_path))

    stream = channel.makefile("wb")

    from tarfile import TarFile
    tar_stream = TarFile(fileobj=stream, mode="w")
    tar_stream.add(source_path)

    tar_stream.close()
    channel.close()
    ssh_client.close()
Example #34
def do_ide_backup(tar_output: tarfile.TarFile) -> int:
    # Back up user repos: we only want the _master_ copies of everything, not
    # the user checkouts of repos, which I understand are only used for staging
    # changes before being pushed back to master.
    ide_location = config.get('ide', 'location')
    os.chdir(ide_location)
    list_of_dirs = glob.glob('./repos/*/master')

    for dir in list_of_dirs:
        arcname = '/ide' + dir[1:]
        tar_output.add(dir, arcname=arcname, recursive=True)

    # Also back up user settings. This contains team-status data too.
    tar_output.add('settings', arcname='ide/settings', recursive=True)

    # Also the notifications directory: I've no idea what this really is, but
    # it's not large.

    tar_output.add('notifications', arcname='ide/notifications', recursive=True)
    return 0
Example #35
def build_tar(fileobj, mode="w"):
    # Use a name that doesn't shadow the tarfile module.
    tar = TarFile(mode=mode, fileobj=fileobj)
    tar.add(schema_for("data/address.json"))
    tar.add(schema_for("data/name.json"))
    tar.add(schema_for("data/record.json"))
    # close() writes the end-of-archive blocks; without it the archive is
    # truncated. It leaves the caller's fileobj open.
    tar.close()
Example #36
def do_forum_attachments_backup(tar_output: tarfile.TarFile) -> int:
    tsimg_location = config.get('forum_attachments', 'location')
    os.chdir(tsimg_location)
    tar_output.add('.', arcname='forum_attachments', recursive=True)
    return 0
Example #37
def do_ldap_backup(tar_output: tarfile.TarFile) -> int:
    # Produce an ldif of all users and groups. All other ldap objects, such as
    # the organizational units and the Manager entity, are managed by puppet in
    # the future.
    result = 0
    handle, tmpfilename1 = tempfile.mkstemp()
    os.close(handle)
    ret = os.system('ldapsearch -LLL -z 0 -D cn=Manager,o=sr -y /etc/ldap.secret -x -h localhost "(objectClass=posixAccount)" -b ou=users,o=sr > {0}'.format(tmpfilename1))
    if not os.WIFEXITED(ret) or os.WEXITSTATUS(ret) != 0:
        print("Couldn't backup ldap users", file=sys.stderr)
        result = 1

    ret = os.system('ldapsearch -LLL -z 0 -D cn=Manager,o=sr -y /etc/ldap.secret -x -h localhost "(objectClass=posixGroup)" -b ou=groups,o=sr >> {0}'.format(tmpfilename1))
    if not os.WIFEXITED(ret) or os.WEXITSTATUS(ret) != 0:
        print("Couldn't backup ldap groups", file=sys.stderr)
        result = 1

    # The code below is adapted from the ldif parser documentation. It is fed
    # an LDIF and reformats a couple of entries to be modifications rather
    # than additions, so that certain special groups that puppet creates and
    # configures can be backed up and restored. Without this, adding groups
    # like shell-users during a backup restore would be an error.

    make_modify = ["cn=shell-users,ou=groups,o=sr", "cn=mentors,ou=groups,o=sr",
                    "cn=srusers,ou=groups,o=sr", "cn=withdrawn,ou=groups,o=sr",
                    "cn=media-consent,ou=groups,o=sr"]
    remove = ["uid=ide,ou=users,o=sr", "uid=anon,ou=users,o=sr"]

    # This class hooks into processing an ldif
    class MyLDIF(LDIFParser):
        def __init__(self, input, output):
            LDIFParser.__init__(self, input)
            self.writer = LDIFWriter(output)

        # Encode special dn-specific backup logic here.
        def handle(self, dn, entry):
            if dn in make_modify:
                if 'memberUid' not in entry:
                    # No members in this group, discard
                    return

                members = entry['memberUid']
                self.writer.unparse(dn, [(ldap.MOD_REPLACE, 'memberUid', members)])
                return
            elif dn in remove:
                return
            elif dn is None:
                return
            else:
                self.writer.unparse(dn, entry)

    # Open the ldif generated before and dump it into another temp file with
    # the relevant modifications.
    handle, tmpfilename2 = tempfile.mkstemp()
    os.close(handle)
    infile = open(tmpfilename1, 'r')
    outfile = open(tmpfilename2, 'w')
    parser = MyLDIF(infile, outfile)
    parser.parse()
    infile.close()
    outfile.close()

    tar_output.add(tmpfilename2, arcname="ldap/ldap_backup")

    os.unlink(tmpfilename1)
    os.unlink(tmpfilename2)
    return result
Example #38
#!/usr/bin/env python

import logging
logging.basicConfig(filename="ssh_upload.log")
log = logging.getLogger("paramiko")
log.setLevel(logging.DEBUG)

import paramiko
client = paramiko.SSHClient()
client.load_system_host_keys()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())

client.connect('192.168.56.101', username='******', password="******")

transport = client.get_transport()
channel = transport.open_channel("session")
#channel.exec_command("cat > test.tar")
channel.exec_command("tar xf - 2>/dev/null")
#channel.exec_command("cat | ./get_tar.py")
stream = channel.makefile("wb")

from tarfile import TarFile
tar_stream = TarFile(fileobj=stream, mode="w")
tar_stream.add("test")

tar_stream.close()
channel.close()
client.close()
Example #39
    def run(self, args, argv):
        # Create a temporary tarball with our whole build context and
        # dockerfile for the update
        tmp = tempfile.NamedTemporaryFile(suffix="dckr.tar.gz")
        tmp_tar = TarFile(fileobj=tmp, mode='w')

        # Create a Docker buildfile
        df = StringIO()
        df.write(u"FROM %s\n" % args.tag)

        if args.executable:
            # Add the executable to the tarball, using the current
            # configured binfmt_misc path. If we don't get a path then we
            # only need the support libraries copied
            ff, enabled = _check_binfmt_misc(args.executable)

            if not enabled:
                print("binfmt_misc not enabled, update disabled")
                return 1

            if ff:
                tmp_tar.add(args.executable, arcname=ff)

            # Add any associated libraries
            libs = _get_so_libs(args.executable)
            if libs:
                for l in libs:
                    so_path = os.path.dirname(l)
                    name = os.path.basename(l)
                    real_l = os.path.realpath(l)
                    try:
                        tmp_tar.add(real_l, arcname="%s/%s" % (so_path, name))
                    except FileNotFoundError:
                        print("Couldn't add %s/%s to archive" %
                              (so_path, name))

            df.write(u"ADD . /\n")

        if args.user:
            uid = os.getuid()
            uname = getpwuid(uid).pw_name
            df.write("\n")
            df.write("RUN id %s 2>/dev/null || useradd -u %d -U %s" %
                     (uname, uid, uname))

        df_bytes = BytesIO(bytes(df.getvalue(), "UTF-8"))

        df_tar = TarInfo(name="Dockerfile")
        df_tar.size = df_bytes.getbuffer().nbytes
        tmp_tar.addfile(df_tar, fileobj=df_bytes)

        tmp_tar.close()

        # reset the file pointers
        tmp.flush()
        tmp.seek(0)

        # Run the build with our tarball context
        dkr = Docker()
        dkr.update_image(args.tag, tmp, quiet=args.quiet)

        return 0