Example #1
0
    def create_bundle(self, path_prefix):
        """Bundle self.args['image'] into encrypted parts on disk.

        Parts are written to files whose names begin with *path_prefix*.
        Returns a (digest, partinfo) tuple describing the finished bundle.
        Raises RuntimeError when the amount of data read does not match
        self.args['image_size'] or the pipeline dies early.
        """
        # Tarball metadata for the image entry inside the bundle.
        image_tarinfo = tarfile.TarInfo(self.args['prefix'])
        image_tarinfo.size = self.args['image_size']

        # The pipeline begins with self.args['image'] feeding a bundling pipe
        # segment through a progress meter, which has to happen on the main
        # thread, so we add that to the pipeline last.

        # meter --(bytes)--> bundler
        pipeline_in_r, pipeline_in_w = \
            euca2ools.bundle.util.open_pipe_fileobjs()
        writer_in_r, writer_in_w = euca2ools.bundle.util.open_pipe_fileobjs()
        digest_conn = create_bundle_pipeline(
            pipeline_in_r, writer_in_w, self.args['enc_key'],
            self.args['enc_iv'], image_tarinfo, debug=self.debug)
        # Close our copies of the ends handed to the subprocesses.
        pipeline_in_r.close()
        writer_in_w.close()

        # bundler --(bytes)-> part writer
        partinfo_conn = create_bundle_part_writer(
            writer_in_r, path_prefix, self.args['part_size'],
            debug=self.debug)
        writer_in_r.close()

        # part writer --(part info)-> part info aggregator
        # (needed for building the manifest)
        partinfo_aggr_conn = create_mpconn_aggregator(
            partinfo_conn, debug=self.debug)
        partinfo_conn.close()

        # disk --(bytes)-> bundler
        # (synchronous)
        meter = self.get_progressbar(
            label=self.args.get('progressbar_label') or 'Bundling image',
            maxval=self.args['image_size'])
        with self.args['image'] as image:
            try:
                bytes_read = copy_with_progressbar(image, pipeline_in_w,
                                                   progressbar=meter)
            except ValueError:
                self.log.debug('error from copy_with_progressbar',
                               exc_info=True)
                raise RuntimeError('corrupt bundle: input size was larger '
                                   'than expected image size of {0}'.format(
                                       self.args['image_size']))
        pipeline_in_w.close()
        if bytes_read != self.args['image_size']:
            raise RuntimeError('corrupt bundle: input size did not match '
                               'expected image size  (expected size: {0}, '
                               'read: {1})'.format(self.args['image_size'],
                                                   bytes_read))

        # All done; now grab info about the bundle we just created
        try:
            bundle_digest = digest_conn.recv()
            bundle_partinfo = partinfo_aggr_conn.recv()
        except EOFError:
            self.log.debug('EOFError from reading bundle info', exc_info=True)
            raise RuntimeError(
                'corrupt bundle: bundle process was interrupted')
        finally:
            digest_conn.close()
            partinfo_aggr_conn.close()
        self.log.info('%i bundle parts written to %s', len(bundle_partinfo),
                      os.path.dirname(path_prefix))
        self.log.debug('bundle digest: %s', bundle_digest)
        return bundle_digest, bundle_partinfo
Example #2
0
    def create_and_upload_bundle(self, path_prefix, key_prefix):
        """Bundle self.args['image'] and upload the parts as they appear.

        Parts are written under *path_prefix* on disk and uploaded under
        *key_prefix*; unless self.args['preserve_bundle'] is set they are
        deleted after upload.  Returns a (digest, partinfo) tuple.
        """
        # Cap how many finished parts may sit on disk awaiting upload.
        pending_sem = multiprocessing.Semaphore(
            max(1, self.args['max_pending_parts']))

        # Tarball metadata for the image entry inside the bundle.
        image_tarinfo = tarfile.TarInfo(self.args['prefix'])
        image_tarinfo.size = self.args['image_size']

        # disk --(bytes)-> bundler
        writer_in_r, writer_in_w = euca2ools.bundle.util.open_pipe_fileobjs()
        digest_conn = create_bundle_pipeline(
            self.args['image'], writer_in_w, self.args['enc_key'],
            self.args['enc_iv'], image_tarinfo, debug=self.debug)
        writer_in_w.close()

        # bundler --(bytes)-> part writer
        partinfo_conn = create_bundle_part_writer(
            writer_in_r, path_prefix, self.args['part_size'],
            part_write_sem=pending_sem, debug=self.debug)
        writer_in_r.close()

        # part writer --(part info)-> part uploader
        # This must be driven on the main thread since it has a progress bar,
        # so for now we'll just set up its output pipe so we can attach it to
        # the remainder of the pipeline.
        uploaded_r, uploaded_w = multiprocessing.Pipe(duplex=False)

        # part uploader --(part info)-> part deleter
        if self.args.get('preserve_bundle', False):
            # Bypass this stage
            deleted_r = uploaded_r
        else:
            deleted_r, deleted_w = multiprocessing.Pipe(duplex=False)
            create_bundle_part_deleter(uploaded_r, out_mpconn=deleted_w)
            uploaded_r.close()
            deleted_w.close()

        # part deleter --(part info)-> part info aggregator
        # (needed for building the manifest)
        partinfo_aggr_conn = create_mpconn_aggregator(
            deleted_r, debug=self.debug)
        deleted_r.close()

        # Now drive the pipeline by uploading parts.
        try:
            self.upload_bundle_parts(
                partinfo_conn, key_prefix,
                partinfo_out_mpconn=uploaded_w,
                part_write_sem=pending_sem,
                show_progress=self.args.get('show_progress'))
        finally:
            # Make sure the writer gets a chance to exit
            pending_sem.release()

        # All done; now grab info about the bundle we just created
        try:
            bundle_digest = digest_conn.recv()
            bundle_partinfo = partinfo_aggr_conn.recv()
        except EOFError:
            self.log.debug('EOFError from reading bundle info', exc_info=True)
            raise RuntimeError(
                'corrupt bundle: bundle process was interrupted')
        finally:
            digest_conn.close()
            partinfo_aggr_conn.close()
        self.log.info('%i bundle parts uploaded to %s', len(bundle_partinfo),
                      self.args['bucket'])
        self.log.debug('bundle digest: %s', bundle_digest)
        return bundle_digest, bundle_partinfo
Example #3
0
    def create_bundle(self, path_prefix):
        """Bundle self.args['image'] into encrypted part files on disk.

        Part files are written with names beginning with *path_prefix*.
        Returns a (digest, partinfo) tuple; raises RuntimeError if the
        number of bytes read differs from self.args['image_size'] or the
        pipeline processes exit before delivering their results.
        """
        # Fill out all the relevant info needed for a tarball
        tarinfo = tarfile.TarInfo(self.args['prefix'])
        tarinfo.size = self.args['image_size']

        # The pipeline begins with self.args['image'] feeding a bundling pipe
        # segment through a progress meter, which has to happen on the main
        # thread, so we add that to the pipeline last.

        # meter --(bytes)--> bundler
        bundle_in_r, bundle_in_w = euca2ools.bundle.util.open_pipe_fileobjs()
        partwriter_in_r, partwriter_in_w = \
            euca2ools.bundle.util.open_pipe_fileobjs()
        digest_result_mpconn = create_bundle_pipeline(
            bundle_in_r, partwriter_in_w, self.args['enc_key'],
            self.args['enc_iv'], tarinfo, debug=self.debug)
        # Close this process's copies of the ends now owned by the pipeline.
        bundle_in_r.close()
        partwriter_in_w.close()

        # bundler --(bytes)-> part writer
        bundle_partinfo_mpconn = create_bundle_part_writer(
            partwriter_in_r, path_prefix, self.args['part_size'],
            debug=self.debug)
        partwriter_in_r.close()

        # part writer --(part info)-> part info aggregator
        # (needed for building the manifest)
        bundle_partinfo_aggr_mpconn = create_mpconn_aggregator(
            bundle_partinfo_mpconn, debug=self.debug)
        bundle_partinfo_mpconn.close()

        # disk --(bytes)-> bundler
        # (synchronous)
        label = self.args.get('progressbar_label') or 'Bundling image'
        pbar = self.get_progressbar(label=label,
                                    maxval=self.args['image_size'])
        with self.args['image'] as image:
            try:
                read_size = copy_with_progressbar(image, bundle_in_w,
                                                  progressbar=pbar)
            except ValueError:
                # copy_with_progressbar signals oversized input this way.
                self.log.debug('error from copy_with_progressbar',
                               exc_info=True)
                raise RuntimeError('corrupt bundle: input size was larger '
                                   'than expected image size of {0}'
                                   .format(self.args['image_size']))
        # Closing the write end lets the downstream processes finish.
        bundle_in_w.close()
        if read_size != self.args['image_size']:
            raise RuntimeError('corrupt bundle: input size did not match '
                               'expected image size  (expected size: {0}, '
                               'read: {1})'
                               .format(self.args['image_size'], read_size))

        # All done; now grab info about the bundle we just created
        try:
            digest = digest_result_mpconn.recv()
            partinfo = bundle_partinfo_aggr_mpconn.recv()
        except EOFError:
            # The sending process died before delivering its result.
            self.log.debug('EOFError from reading bundle info', exc_info=True)
            raise RuntimeError(
                'corrupt bundle: bundle process was interrupted')
        finally:
            digest_result_mpconn.close()
            bundle_partinfo_aggr_mpconn.close()
        self.log.info('%i bundle parts written to %s', len(partinfo),
                      os.path.dirname(path_prefix))
        self.log.debug('bundle digest: %s', digest)
        return digest, partinfo
    def create_and_upload_bundle(self, path_prefix, key_prefix):
        """Bundle self.args['image'] and upload the parts as they appear.

        Parts are written to disk with names beginning with *path_prefix*
        and uploaded under *key_prefix*; unless self.args['preserve_bundle']
        is set, each part is deleted after upload.  Returns a
        (digest, partinfo) tuple; raises RuntimeError if the pipeline
        processes exit before delivering their results.
        """
        # Bound the number of written-but-not-yet-uploaded parts on disk.
        part_write_sem = multiprocessing.Semaphore(
            max(1, self.args['max_pending_parts']))

        # Fill out all the relevant info needed for a tarball
        tarinfo = tarfile.TarInfo(self.args['prefix'])
        tarinfo.size = self.args['image_size']

        # disk --(bytes)-> bundler
        partwriter_in_r, partwriter_in_w = \
            euca2ools.bundle.util.open_pipe_fileobjs()
        digest_result_mpconn = create_bundle_pipeline(
            self.args['image'], partwriter_in_w, self.args['enc_key'],
            self.args['enc_iv'], tarinfo, debug=self.debug)
        # Close this process's copy of the end now owned by the pipeline.
        partwriter_in_w.close()

        # bundler --(bytes)-> part writer
        bundle_partinfo_mpconn = create_bundle_part_writer(
            partwriter_in_r, path_prefix, self.args['part_size'],
            part_write_sem=part_write_sem, debug=self.debug)
        partwriter_in_r.close()

        # part writer --(part info)-> part uploader
        # This must be driven on the main thread since it has a progress bar,
        # so for now we'll just set up its output pipe so we can attach it to
        # the remainder of the pipeline.
        uploaded_partinfo_mpconn_r, uploaded_partinfo_mpconn_w = \
            multiprocessing.Pipe(duplex=False)

        # part uploader --(part info)-> part deleter
        if not self.args.get('preserve_bundle', False):
            deleted_partinfo_mpconn_r, deleted_partinfo_mpconn_w = \
                multiprocessing.Pipe(duplex=False)
            create_bundle_part_deleter(uploaded_partinfo_mpconn_r,
                                       out_mpconn=deleted_partinfo_mpconn_w)
            uploaded_partinfo_mpconn_r.close()
            deleted_partinfo_mpconn_w.close()
        else:
            # Bypass this stage
            deleted_partinfo_mpconn_r = uploaded_partinfo_mpconn_r

        # part deleter --(part info)-> part info aggregator
        # (needed for building the manifest)
        bundle_partinfo_aggregate_mpconn = create_mpconn_aggregator(
            deleted_partinfo_mpconn_r, debug=self.debug)
        deleted_partinfo_mpconn_r.close()

        # Now drive the pipeline by uploading parts.
        try:
            self.upload_bundle_parts(
                bundle_partinfo_mpconn, key_prefix,
                partinfo_out_mpconn=uploaded_partinfo_mpconn_w,
                part_write_sem=part_write_sem,
                show_progress=self.args.get('show_progress'))
        finally:
            # Make sure the writer gets a chance to exit
            part_write_sem.release()

        # All done; now grab info about the bundle we just created
        try:
            digest = digest_result_mpconn.recv()
            partinfo = bundle_partinfo_aggregate_mpconn.recv()
        except EOFError:
            # The sending process died before delivering its result.
            self.log.debug('EOFError from reading bundle info', exc_info=True)
            raise RuntimeError(
                'corrupt bundle: bundle process was interrupted')
        finally:
            digest_result_mpconn.close()
            bundle_partinfo_aggregate_mpconn.close()
        self.log.info('%i bundle parts uploaded to %s', len(partinfo),
                      self.args['bucket'])
        self.log.debug('bundle digest: %s', digest)
        return digest, partinfo