def unpack_and_install(archive, files, destination='/usr/local/bin'):
    '''Untar the archive and copy the services to the destination directory.'''
    files_directory = os.path.join(hookenv.charm_dir(), 'files')
    hookenv.log('Extracting {0} to {1}'.format(archive, files_directory))
    # Extract the archive to the files directory.
    extract(archive, files_directory)
    # Copy each of the files to the destination directory.
    if files and isinstance(files, list):
        for file_name in files:
            source = os.path.join(files_directory, file_name)
            hookenv.log('Copying {0} to {1}'.format(source, destination))
            shutil.copy2(source, destination)
def configure_examples(self):
    """
    Install sparkpi.sh and sample data to /home/ubuntu.

    The sparkpi.sh script demonstrates spark-submit with the SparkPi class
    included with Spark. This small script is packed into the spark charm
    source in the ./scripts subdirectory.

    The sample data is used for benchmarks (only PageRank for now). This
    may grow quite large in the future, so we utilize Juju Resources for
    getting this data onto the unit. Sample data originated as follows:

    - PageRank: https://snap.stanford.edu/data/web-Google.html
    """
    # Handle sparkpi.sh
    script_source = 'scripts/sparkpi.sh'
    script_path = Path(script_source)
    if script_path.exists():
        script_target = '/home/ubuntu/sparkpi.sh'
        new_hash = host.file_hash(script_source)
        old_hash = unitdata.kv().get('sparkpi.hash')
        if new_hash != old_hash:
            hookenv.log('Installing SparkPi script')
            script_path.copy(script_target)
            Path(script_target).chmod(0o755)
            Path(script_target).chown('ubuntu', 'hadoop')
            unitdata.kv().set('sparkpi.hash', new_hash)
            hookenv.log('SparkPi script was installed successfully')

    # Handle sample data
    sample_source = hookenv.resource_get('sample-data')
    sample_path = sample_source and Path(sample_source)
    if sample_path and sample_path.exists() and sample_path.stat().st_size:
        sample_target = '/home/ubuntu'
        new_hash = host.file_hash(sample_source)
        old_hash = unitdata.kv().get('sample-data.hash')
        if new_hash != old_hash:
            hookenv.log('Extracting Spark sample data')
            # Extract the sample data; since sample data does not impact
            # functionality, log any extraction error but don't fail.
            try:
                archive.extract(sample_path, destpath=sample_target)
            except Exception:
                hookenv.log('Unable to extract Spark sample data: {}'
                            .format(sample_path))
            else:
                unitdata.kv().set('sample-data.hash', new_hash)
                hookenv.log('Spark sample data was extracted successfully')
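# A minimal, self-contained sketch of the hash-guard pattern configure_examples
# relies on: redo the install step only when the file's content hash differs
# from the hash recorded in the unit's key/value store, so repeated hook runs
# stay idempotent. The helper name and kv key are illustrative, not part of
# the charm; hookenv, host, and unitdata are the charmhelpers.core modules
# already used above.
import shutil

from charmhelpers.core import hookenv, host, unitdata


def install_if_changed(source, target, kv_key):
    new_hash = host.file_hash(source)
    if new_hash == unitdata.kv().get(kv_key):
        # Content unchanged since the last hook run; nothing to do.
        return False
    hookenv.log('Installing {} to {}'.format(source, target))
    shutil.copy2(source, target)
    # Record the hash only after a successful install.
    unitdata.kv().set(kv_key, new_hash)
    return True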
def install(self, source, dest=None, checksum=None, hash_type='sha1'):
    """
    Download and install an archive file, with optional checksum validation.

    The checksum can also be given on the :param:`source` URL's fragment.
    For example::

        handler.install('http://example.com/file.tgz#sha1=deadbeef')

    :param str source: URL pointing to an archive file.
    :param str dest: Local destination path to install to. If not given,
        installs to `$CHARM_DIR/archives/archive_file_name`.
    :param str checksum: If given, validate the archive file after download.
    :param str hash_type: Algorithm used to generate :param:`checksum`.
        Can be any hash algorithm supported by :mod:`hashlib`,
        such as md5, sha1, sha256, sha512, etc.
    """
    url_parts = self.parse_url(source)
    dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
    if not os.path.exists(dest_dir):
        mkdir(dest_dir, perms=0755)
    dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
    try:
        self.download(source, dld_file)
    except urllib2.URLError as e:
        raise UnhandledSource(e.reason)
    except OSError as e:
        raise UnhandledSource(e.strerror)
    options = urlparse.parse_qs(url_parts.fragment)
    for key, value in options.items():
        if key in hashlib.algorithms:
            # parse_qs maps each key to a list of values; use the first.
            check_hash(dld_file, value[0], key)
    if checksum:
        check_hash(dld_file, checksum, hash_type)
    return extract(dld_file, dest)
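# A short standalone illustration of the checksum-in-fragment convention the
# install() docstrings describe: urlparse() exposes the URL fragment, and
# parse_qs() turns 'sha1=deadbeef' into {'sha1': ['deadbeef']}. The
# list-valued result is why the handlers must take value[0] (or check the
# list length, as the Python 3 variant at the end of this section does).
# Python 3 stdlib only; the URL is a placeholder.
from urllib.parse import parse_qs, urlparse

url_parts = urlparse('http://example.com/file.tgz#sha1=deadbeef')
options = parse_qs(url_parts.fragment)
print(options)             # {'sha1': ['deadbeef']}
print(options['sha1'][0])  # 'deadbeef'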
def test_extracts_default_dest(self, _defdest, _gethandler, _mkdir):
    expected_dest = "bar"
    archive_name = "foo"
    _defdest.return_value = expected_dest
    handler = MagicMock()
    handler.return_value = expected_dest
    _gethandler.return_value = handler

    dest = archive.extract(archive_name)

    self.assertEqual(expected_dest, dest)
    handler.assert_called_with(archive_name, expected_dest)
def test_extracts(self, _defdest, _gethandler, _mkdir):
    archive_name = "foo"
    archive_handler = MagicMock()
    _gethandler.return_value = archive_handler

    dest = archive.extract(archive_name, "bar")

    _gethandler.assert_called_with(archive_name)
    archive_handler.assert_called_with(archive_name, "bar")
    _defdest.assert_not_called()
    _mkdir.assert_called_with("bar")
    self.assertEqual(dest, "bar")
def install(self, source):
    url_parts = self.parse_url(source)
    dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
    if not os.path.exists(dest_dir):
        mkdir(dest_dir, perms=0755)
    dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
    try:
        self.download(source, dld_file)
    except urllib2.URLError as e:
        raise UnhandledSource(e.reason)
    except OSError as e:
        raise UnhandledSource(e.strerror)
    return extract(dld_file)
def install_from_archive_local(archive, dest):
    dlog("Trying to install from archive")
    try:
        ddir = extract(archive)
        if os.path.exists(dest):
            dlog("Removing existing directory at {}".format(dest))
            shutil.rmtree(dest)
        src = os.path.join(ddir, "src", "cinder", "volume", "drivers",
                           "datera")
        dlog("Copying tree. src [{}] dst [{}]".format(src, dest))
        shutil.copytree(src, dest)
    except Exception as e:
        raise DateraException(
            "Could not install from local archive: {}".format(e))
def install(self, source):
    url_parts = self.parse_url(source)
    dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
    dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
    try:
        self.download(source, dld_file)
        # Extract inside the try block so the finally clause below removes
        # the downloaded archive only after extraction has produced its
        # result (or a failure has been raised).
        return extract(dld_file)
    except urllib2.URLError as e:
        raise UnhandledSource(e.reason)
    except OSError as e:
        raise UnhandledSource(e.strerror)
    finally:
        # The archive itself is no longer needed once extracted; also clean
        # up any partial download left behind by a failure.
        if os.path.isfile(dld_file):
            os.unlink(dld_file)
def install(self, source, dest=None, checksum=None, hash_type='sha1'):
    """
    Download and install an archive file, with optional checksum validation.

    The checksum can also be given on the `source` URL's fragment.
    For example::

        handler.install('http://example.com/file.tgz#sha1=deadbeef')

    :param str source: URL pointing to an archive file.
    :param str dest: Local destination path to install to. If not given,
        installs to `$CHARM_DIR/archives/archive_file_name`.
    :param str checksum: If given, validate the archive file after download.
    :param str hash_type: Algorithm used to generate `checksum`.
        Can be any hash algorithm supported by :mod:`hashlib`,
        such as md5, sha1, sha256, sha512, etc.
    """
    url_parts = self.parse_url(source)
    dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
    if not os.path.exists(dest_dir):
        mkdir(dest_dir, perms=0o755)
    dld_file = os.path.join(dest_dir, os.path.basename(url_parts.path))
    try:
        self.download(source, dld_file)
    except URLError as e:
        raise UnhandledSource(e.reason)
    except OSError as e:
        raise UnhandledSource(e.strerror)
    options = parse_qs(url_parts.fragment)
    for key, value in options.items():
        if not six.PY3:
            algorithms = hashlib.algorithms
        else:
            algorithms = hashlib.algorithms_available
        if key in algorithms:
            if len(value) != 1:
                raise TypeError(
                    "Expected 1 hash value, not %d" % len(value))
            expected = value[0]
            check_hash(dld_file, expected, key)
    if checksum:
        check_hash(dld_file, checksum, hash_type)
    return extract(dld_file, dest)
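# A hedged usage sketch for the handler above. ArchiveUrlFetchHandler is the
# assumed name of the class these install() methods belong to (as in
# charmhelpers.fetch.archiveurl); the URL and digest are placeholders. Both
# calls request the same sha1 validation, once via the URL fragment and once
# via explicit arguments.
#
#     handler = ArchiveUrlFetchHandler()
#     handler.install('http://example.com/file.tgz#sha1=deadbeef')
#     handler.install('http://example.com/file.tgz',
#                     checksum='deadbeef', hash_type='sha1')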