def name_find(name_list):
    """Search the bundled Unicode database for characters whose long name
    contains each term in name_list and print every match; terms with no
    match are reported on stderr."""
    connection = None
    try:
        # unicode.db lives in the glyphlist directory one level above this module
        parent_directory = os.path.split(os.path.dirname(__file__))[0]
        db_filepath = os.path.join(parent_directory, "glyphlist", "unicode.db")
        connection = sqlite3.connect(db_filepath, isolation_level=None)
        cursor = connection.cursor()
        query = "SELECT unicode, unishortname, unilongname FROM Unicodes WHERE unilongname LIKE ?"
        for needle in name_list:
            # substring match against the Unicode long name
            matches = cursor.execute(query, ("%" + needle + "%",)).fetchall()
            if matches:
                for match in matches:
                    stdout("'" + needle + "' ==> " + match[0] + " '" + match[2] + "'")
            else:
                stderr("[X] " + needle)
    except Exception as e:
        stderr("[font-unicode] Error: " + str(e), exit=1)
    finally:
        if connection:
            connection.close()
def pull_archive_runner(archive_url_dict):
    """Pull one or more remote archive files described by archive_url_dict
    (local outfile path -> URL); multiple archives are pulled in parallel,
    one child process per archive."""
    local_paths = list(archive_url_dict)  # the local outfile names in a list
    if not local_paths:
        stderr("[!] doxx: Unable to find archive files to pull in the key file", exit=0)
    elif len(local_paths) == 1:
        # single archive: pull in the current process
        stdout("[*] doxx: Hang in there. Pulling the entire file archive. This may take a bit of time...")
        the_path = local_paths[0]
        _pull_archive(the_path, archive_url_dict[the_path])  # the_path is local path for write, dictionary value is the URL
    else:
        # multiple archives: one worker process each, sharing output and I/O locks
        stdout("[*] doxx: Hang in there. Pulling " + str(len(local_paths)) + " entire file archives. This may take a bit of time...")
        outputlock = Lock()  # stdout / stderr writes lock
        iolock = Lock()      # input/output lock
        workers = []
        for the_path in local_paths:
            worker = Process(target=_pull_archive_multiprocess, args=(the_path, archive_url_dict[the_path], outputlock, iolock))
            worker.start()
            workers.append(worker)
        for worker in workers:
            worker.join(timeout=120)
def _pull_archive_multiprocess(file_path, url, outputlock, iolock):
    """executes multi-process archive file pulls with I/O and stdout/stderr locks (private function)

    file_path -- local write path for the pulled archive
    url -- remote URL of the archive
    outputlock -- lock guarding stdout/stderr writes across worker processes
    iolock -- lock guarding filesystem directory creation"""
    # create OS dependent file path (if necessary)
    file_path = _make_os_dependent_path(file_path)
    # make directory structure if necessary for the file path
    # fix: was `is not ""` -- identity comparison with a str literal is not
    # equality and raises SyntaxWarning on modern Python
    if os.path.dirname(file_path) != "":
        iolock.acquire()
        _create_dirs(file_path)
        iolock.release()
    # pull the file and write to local filesystem
    try:
        pull_archive_file(url, file_path)
    except Exception as e:
        outputlock.acquire()
        # fix: report with exit=0 and release the lock BEFORE exiting;
        # stderr(..., exit=1) raised SystemExit while the lock was held,
        # which would block the sibling worker processes
        stderr("[!] doxx: Unable to pull the archive file from the URL '" + url + "'. Error: " + str(e), exit=0)
        outputlock.release()
        raise SystemExit(1)
    if file_exists(file_path):
        # unpack the archive, then discard the downloaded archive file itself
        root_dir = unpack_run(file_path)
        remove(file_path)
        outputlock.acquire()
        stdout("[+] doxx: '" + root_dir + "' ...check!")
        outputlock.release()
    else:
        outputlock.acquire()
        # same lock-safety fix as above: release before exiting the process
        stderr("[!] doxx: There was an error pulling the repository file. Error: Unable to locate local archive file.", exit=0)
        outputlock.release()
        raise SystemExit(1)
def unicode_find(needle_list):
    """Look up each Unicode code point identifier in needle_list in the
    bundled Unicode database and print its short and long names; identifiers
    that are not found are reported on stderr."""
    connection = None
    try:
        # unicode.db lives in the glyphlist directory one level above this module
        parent_directory = os.path.split(os.path.dirname(__file__))[0]
        db_filepath = os.path.join(parent_directory, "glyphlist", "unicode.db")
        connection = sqlite3.connect(db_filepath, isolation_level=None)
        cursor = connection.cursor()
        query = "SELECT unicode, unishortname, unilongname FROM Unicodes WHERE unicode LIKE ?"
        for needle in needle_list:
            cursor.execute(query, (needle,))
            row = cursor.fetchone()
            if row is None:
                stderr(needle + "\t" + "NOT FOUND")
            elif len(row[1]) > 0:
                # short name available: include it before the quoted long name
                stdout(row[0] + "\t" + row[1] + " '" + row[2] + "'")
            else:
                # no short name: print only the quoted long name
                stdout(row[0] + "\t'" + row[2] + "'")
    except Exception as e:
        stderr("[font-unicode]: Error: " + str(e), exit=1)
    finally:
        if connection:
            connection.close()
def make_template(self, outpath):
    """Write the template stub file to outpath and confirm the write to the
    user; exits with status 1 on a write error."""
    try:
        stub_writer = FileWriter(outpath)
        stub_writer.write(template_stub)
        # confirm the write landed on disk before reporting success
        if file_exists(outpath):
            confirmation = "[+] doxx: The template stub '" + outpath + "' is now available in the current directory."
            stdout(confirmation)
    except Exception as e:
        stderr("[!] doxx: Unable to write the template stub to disk. Error: " + str(e), exit=1)
def run_whatis(package_name):
    """Print the short description of a Package Repository package, using a
    locally cached descriptions file that is refreshed when stale."""
    cache = DoxxCache()
    max_cache_seconds = 86400  # 24 hour file cache (same as search module)
    stdout("[*] doxx: Looking up the package description...")
    if not cache.cached_file_exists('packages.json'):
        # no cached copy yet: pull and attempt to cache it
        master_descriptions = _pull_official_repository_descriptions()
        cache.cache_packagerepo_json(master_descriptions)
    elif cache.does_cache_file_require_update(cache.package_repo_json_file, max_cache_seconds):
        # cached copy is stale: pull a fresh copy and re-cache it
        master_descriptions = _pull_official_repository_descriptions()
        cache.cache_packagerepo_json(master_descriptions)
    else:
        # cached copy is fresh: use it directly
        master_descriptions = cache.get_cached_packagerepo_json()
    if len(master_descriptions) == 0:
        stderr("[!] doxx: Unable to read the descriptions file. It appears to be empty...", exit=1)
    else:
        # JSON maps package name keys to description strings
        descriptions_dict = json.loads(master_descriptions)
        test_package_name = package_name.lower().strip()
        if test_package_name in descriptions_dict:
            stdout(" ")
            stdout(" Package: " + test_package_name)
            stdout("Description: " + descriptions_dict[test_package_name])
        else:
            stderr("[!] doxx: Unable to locate the package '" + test_package_name + "'", exit=1)
def _pull_binaryfile(file_path, url):
    """executes single process binary file pulls (private function)

    file_path -- local write path for the pulled file
    url -- remote URL of the binary file"""
    # create OS dependent file path (if necessary)
    file_path = _make_os_dependent_path(file_path)
    # make directory structure if necessary for the file path
    # fix: was `is not ""` -- identity comparison with a str literal is not
    # equality and raises SyntaxWarning on modern Python
    if os.path.dirname(file_path) != "":
        _create_dirs(file_path)
    # pull the file and write to local filesystem
    try:
        pull_binary_file(url, file_path)
    except Exception as e:
        # exit=0: report the pull failure but let the existence check below
        # produce the fatal error path
        stderr("[!] doxx: Unable to pull '" + file_path + "' from '" + url + "'. Error: " + str(e), exit=0)
    if file_exists(file_path):
        stdout("[+] doxx: '" + file_path + "' ...check!")
    else:
        stderr("[!] doxx: There was an error pulling '" + file_path + "'. Error: Unable to locate local file.", exit=1)
def encrypt_file(self, inpath, force_nocompress=False, force_compress=False, armored=False, checksum=False):
    """public method for single file encryption with optional compression, ASCII armored formatting, and file hash digest generation

    inpath -- path of the plaintext file to encrypt
    force_nocompress -- when True, never compress before encryption
    force_compress -- when True, use the maximum-compression command
    armored -- when True, produce ASCII armored (text) output
    checksum -- when True, also print a SHA256 hash digest of the encrypted file

    Exits the process with status 1 when gpg execution fails."""
    # select the gpg command stub: armored vs. binary output first, then the
    # compression mode -- forced max, forced none, or decided per file type
    # by self._is_compress_filetype(inpath)
    if armored:
        if force_compress:
            command_stub = self.command_maxcompress_armored
        elif force_nocompress:
            command_stub = self.command_nocompress_armored
        else:
            if self._is_compress_filetype(inpath):
                command_stub = self.command_default_armored
            else:
                command_stub = self.command_nocompress_armored
    else:
        if force_compress:
            command_stub = self.command_maxcompress
        elif force_nocompress:
            command_stub = self.command_nocompress
        else:
            if self._is_compress_filetype(inpath):
                command_stub = self.command_default
            else:
                command_stub = self.command_nocompress
    encrypted_outpath = self._create_outfilepath(inpath)
    # passphrase and input path are shell-quoted before interpolation into the command
    system_command = command_stub + encrypted_outpath + " --passphrase " + quote(self.passphrase) + " --symmetric " + quote(inpath)
    try:
        response = muterun(system_command)
        # check returned status code
        if response.exitcode == 0:
            stdout(encrypted_outpath + " was generated from " + inpath)
            if checksum:  # add a SHA256 hash digest of the encrypted file - requested by user --hash flag in command
                from crypto.library import hash
                encrypted_file_hash = hash.generate_hash(encrypted_outpath)
                # a SHA256 hex digest is exactly 64 characters; any other
                # length indicates the digest generation failed
                if len(encrypted_file_hash) == 64:
                    stdout("SHA256 hash digest for " + encrypted_outpath + " :")
                    stdout(encrypted_file_hash)
                else:
                    stdout("Unable to generate a SHA256 hash digest for the file " + encrypted_outpath)
        else:
            # non-zero gpg exit: surface gpg's stderr, then fail
            stderr(response.stderr, 0)
            stderr("Encryption failed")
            sys.exit(1)
    except Exception as e:
        stderr("There was a problem with the execution of gpg. Encryption failed. Error: [" + str(e) + "]")
        sys.exit(1)
def browse_docs(query):
    """browse doxx documentation and associated websites by query term in default web browser

    query -- user supplied documentation topic; matched case-insensitively
    against docs_dict, falling back to sounds_like() aliasing before
    listing the available queries on stderr."""
    normalized_query = query.lower()
    # idiom fix: membership tests use the dict directly instead of .keys()
    if normalized_query in docs_dict:
        webbrowser.open(docs_dict[normalized_query])
        if normalized_query in docs_message_dict:
            stdout("[*] doxx: Opening the " + docs_message_dict[normalized_query])
    else:
        new_query = sounds_like(normalized_query)  # attempt to match using other common terms
        if new_query in docs_dict:
            webbrowser.open(docs_dict[new_query])  # open the new query term that resulted from the sounds_like function
            if new_query in docs_message_dict:
                stdout("[*] doxx: Opening the " + docs_message_dict[new_query])
        else:
            # no match at all: list every supported query term
            stderr("[!] doxx: Unable to find a page for your query. The available queries are:", exit=0)
            stderr(" ", exit=0)
            for available_query in sorted(docs_dict.keys()):
                stderr(available_query, exit=0)
def run_pullkey(package_name):
    """Pull the remote key.yaml file for a Package Repository package and
    write it to the current working directory."""
    normalized_package_name = package_name.lower().strip()
    package = OfficialPackage()
    key_file_url = package.get_package_key_url(normalized_package_name)
    try:
        stdout("[*] doxx: Pulling the remote key file...")
        http = HTTP(key_file_url)
        if not http.get_status_ok():
            # distinguish a missing package (404) from other HTTP failures
            if http.res.status_code == 404:
                stderr("[!] doxx: Unable to pull the key file because the requested package could not be found. (HTTP status code: 404)", exit=1)
            else:
                stderr("[!] doxx: Unable to pull the key file. (HTTP status code: " + str(http.res.status_code) + ")", exit=1)
        else:
            key_file_text = http.res.text
            key_writer = FileWriter('key.yaml')
            try:
                key_writer.write(key_file_text)
            except Exception as e:
                stderr("[!] doxx: Unable to write the 'key.yaml' file to disk. Error: " + str(e), exit=1)
            stdout("[*] doxx: Key file pull complete")
    except Exception as e:
        stderr("[!] doxx: Unable to pull the key file. Error: " + str(e), exit=1)
def _pull_binaryfile_multiprocess(file_path, url, outputlock, iolock):
    """executes multi-process binary file pulls with I/O and stdout/stderr locks (private function)

    file_path -- local write path for the pulled file
    url -- remote URL of the binary file
    outputlock -- lock guarding stdout/stderr writes across worker processes
    iolock -- lock guarding filesystem directory creation"""
    # create OS dependent file path (if necessary)
    file_path = _make_os_dependent_path(file_path)
    # make directory structure if necessary for the file path
    # fix: was `is not ""` -- identity comparison with a str literal is not
    # equality and raises SyntaxWarning on modern Python
    if os.path.dirname(file_path) != "":
        iolock.acquire()
        _create_dirs(file_path)
        iolock.release()
    # pull the file and write to local filesystem
    try:
        pull_binary_file(url, file_path)
    except Exception as e:
        outputlock.acquire()
        stderr("[!] doxx: Unable to pull '" + file_path + "' from '" + url + "'. Error: " + str(e), exit=0)
        outputlock.release()
    if file_exists(file_path):
        outputlock.acquire()
        stdout("[+] doxx: '" + file_path + "' ...check!")
        outputlock.release()
    else:
        outputlock.acquire()
        stderr("[!] doxx: There was an error pulling '" + file_path + "'. Error: Unable to locate local file", exit=0)
        outputlock.release()
def name_find(name_list):
    """Search the bundled Unicode SQLite database for characters whose long
    name contains each term in name_list and print every match; terms with
    no match are reported on stderr. Exits with status 1 on a database
    error."""
    con = None
    try:
        # unicode.db ships in the glyphlist directory one level above this module
        parent_directory = os.path.split(os.path.dirname(__file__))[0]
        db_filepath = os.path.join(parent_directory, "glyphlist", "unicode.db")
        con = sqlite3.connect(db_filepath, isolation_level=None)  # isolation_level=None: autocommit
        cur = con.cursor()
        for needle in name_list:
            # substring match against the Unicode long name
            modified_needle = "%" + needle + "%"
            cur.execute("SELECT unicode, unishortname, unilongname FROM Unicodes WHERE unilongname LIKE ?", (modified_needle,))
            result_list = cur.fetchall()
            if len(result_list) > 0:
                for result in result_list:
                    # result columns: (unicode, unishortname, unilongname)
                    stdout("'" + needle + "' ==> " + result[0] + " '" + result[2] + "'")
            else:
                stderr("[X] " + needle)  # no match for this search term
    except Exception as e:
        stderr("[font-unicode] Error: " + str(e), exit=1)
    finally:
        # always close the connection, even on the error/exit path
        if con:
            con.close()
def encrypt_file(self, inpath, force_nocompress=False, force_compress=False, armored=False, checksum=False):
    """public method for single file encryption with optional compression, ASCII armored formatting, and file hash digest generation"""
    # resolve the gpg command stub from the compression mode first, picking
    # the armored or binary variant in each branch
    if force_compress:
        command_stub = self.command_maxcompress_armored if armored else self.command_maxcompress
    elif force_nocompress:
        command_stub = self.command_nocompress_armored if armored else self.command_nocompress
    elif self._is_compress_filetype(inpath):
        command_stub = self.command_default_armored if armored else self.command_default
    else:
        command_stub = self.command_nocompress_armored if armored else self.command_nocompress
    encrypted_outpath = self._create_outfilepath(inpath)
    # passphrase and input path are shell-quoted before interpolation
    system_command = command_stub + encrypted_outpath + " --passphrase " + quote(self.passphrase) + " --symmetric " + quote(inpath)
    try:
        response = muterun(system_command)
        if response.exitcode != 0:
            # non-zero gpg exit: surface gpg's stderr, then fail
            stderr(response.stderr, 0)
            stderr("Encryption failed")
            sys.exit(1)
        stdout(encrypted_outpath + " was generated from " + inpath)
        if checksum:
            # user requested a SHA256 hash digest via the --hash command flag
            from crypto.library import hash
            encrypted_file_hash = hash.generate_hash(encrypted_outpath)
            if len(encrypted_file_hash) == 64:  # a SHA256 hex digest is 64 chars
                stdout("SHA256 hash digest for " + encrypted_outpath + " :")
                stdout(encrypted_file_hash)
            else:
                stdout("Unable to generate a SHA256 hash digest for the file " + encrypted_outpath)
    except SystemExit:
        raise
    except Exception as e:
        stderr("There was a problem with the execution of gpg. Encryption failed. Error: [" + str(e) + "]")
        sys.exit(1)
def unicode_find(needle_list):
    """Look up each Unicode code point identifier in needle_list in the
    bundled Unicode SQLite database and print its short and long names;
    identifiers that are not found are reported on stderr. Exits with
    status 1 on a database error."""
    con = None
    try:
        # unicode.db ships in the glyphlist directory one level above this module
        parent_directory = os.path.split(os.path.dirname(__file__))[0]
        db_filepath = os.path.join(parent_directory, "glyphlist", "unicode.db")
        con = sqlite3.connect(db_filepath, isolation_level=None)  # isolation_level=None: autocommit
        cur = con.cursor()
        for needle in needle_list:
            cur.execute("SELECT unicode, unishortname, unilongname FROM Unicodes WHERE unicode LIKE ?", (needle,))
            result = cur.fetchone()
            if result is not None:
                # result columns: (unicode, unishortname, unilongname)
                if len(result[1]) > 0:
                    # short name available: include it before the quoted long name
                    result_string = result[0] + "\t" + result[1] + " '" + result[2] + "'"
                else:
                    # no short name: print only the quoted long name
                    result_string = result[0] + "\t'" + result[2] + "'"
                stdout(result_string)
            else:
                stderr(needle + "\t" + "NOT FOUND")
    except Exception as e:
        stderr("[font-unicode]: Error: " + str(e), exit=1)
    finally:
        # always close the connection, even on the error/exit path
        if con:
            con.close()
def _remove_doxt():
    """Remove doxx template (.doxt) files from the working directory and the
    templates subdirectory, removing the templates directory itself when
    nothing else remains in it (private function)."""
    # check main directory
    cwd_doxt_list = list_filter_files_cwd(".doxt")
    # check for a templates directory
    if dir_exists('templates'):
        templates_dir_doxt_list = list_filter_files(".doxt", "templates")
    else:
        templates_dir_doxt_list = []
    # remove template files from the working directory
    if len(cwd_doxt_list) > 0:
        for doxt_file in cwd_doxt_list:
            try:
                remove(doxt_file)
                stdout("[-] doxx: -- " + doxt_file + " ... removed")
            except Exception as e:
                # fix: added the missing opening quote around the file name
                stderr("[!] doxx: Unable to remove the file '" + doxt_file + "'. Error: " + str(e), exit=0)
    # remove any template files from the templates directory
    if len(templates_dir_doxt_list) > 0:
        for doxt_file in templates_dir_doxt_list:
            new_doxt_path = make_path('templates', doxt_file)
            # fix: wrap in try/except like the working-directory loop above so
            # one failed removal does not abort the remaining cleanup
            try:
                remove(new_doxt_path)
                stdout("[-] doxx: -- " + new_doxt_path + " ... removed")
            except Exception as e:
                stderr("[!] doxx: Unable to remove the file '" + new_doxt_path + "'. Error: " + str(e), exit=0)
    # if there are files still remaining in the templates directory, leave it
    # otherwise, remove the templates directory as well
    if dir_exists('templates'):
        remaining_template_file_list = list_all_files('templates')  # get the remaining non-.doxt files in the directory
        if len(remaining_template_file_list) > 0:
            pass  # other files are present: keep the 'templates' directory
        else:
            try:
                rmtree('templates')
                stdout("[-] doxx: -- templates (directory) ... removed")
            except Exception as e:
                stderr("[!] doxx: Unable to remove the 'templates' directory. Error: " + str(e), exit=1)
def run_repoupdate():
    """Refresh the locally cached Package Repository list and description
    files, enforcing a 10 second lockout between successive updates."""
    cache = DoxxCache()
    # confirm that user is not attempting updates too rapidly (10 sec lockout)
    lockout_time = 10
    lockout_message = "[!] doxx: There is a " + str(lockout_time) + " second lockout on repository file updates. Take a break, then try again."
    # enforce the lockout against both cached repository files (list, then json)
    for repo_file in (cache.package_repo_list_file, cache.package_repo_json_file):
        if cache.cached_file_exists(repo_file):
            if not cache.does_cache_file_require_update(repo_file, lockout_time):
                stderr(lockout_message, exit=1)
    stdout("[*] doxx: Pulling the list of packages in the Package Repository")
    # pull + cache the master package list (failure is reported but not fatal)
    master_list = _pull_official_repository_list()
    if len(master_list) > 0:
        cache.cache_packagerepo_list(master_list)
    else:
        stderr("[!] doxx: Unable to update the " + cache.package_repo_list_file + " file.")
    if not cache.cached_file_exists(cache.package_repo_list_file):
        stderr("[!] doxx: Unable to update the " + cache.package_repo_list_file + " file.")
    stdout("[*] doxx: Pulling the descriptions of packages in the Package Repository")
    # pull + cache the package description JSON (failure here is fatal)
    master_description_json = _pull_official_repository_json()
    if len(master_description_json) > 0:
        cache.cache_packagerepo_json(master_description_json)
    else:
        stderr("[!] doxx: Unable to update the " + cache.package_repo_json_file + " file.", exit=1)
    if not cache.cached_file_exists(cache.package_repo_json_file):
        stderr("[!] doxx: Unable to update the " + cache.package_repo_json_file + " file.", exit=1)
    stdout("[*] doxx: repoupdate complete")
def make_project(self):
    """Write the doxx project stub files (project.yaml, pkey.yaml, and a
    stub template under templates/) and confirm each write for the user;
    exits with status 1 on a write error."""
    try:
        # project.yaml key file write
        FileWriter('project.yaml').write(project_yaml_stub)
        # pkey.yaml key file write
        FileWriter('pkey.yaml').write(key_stub)
        # templates directory write (create only when absent)
        if not dir_exists('templates'):
            os.mkdir('templates')
        # stub.doxt template file in the templates directory
        template_stub_path = os.path.join('templates', 'stub.doxt')
        FileWriter(template_stub_path).write(template_stub)
        # confirm each file write for the user
        if file_exists('project.yaml'):
            stdout("[+] doxx: 'project.yaml' ... check")
        else:
            stderr("[!] doxx: There was an error writing the 'project.yaml' key file to your project directory")
        if file_exists('pkey.yaml'):
            stdout("[+] doxx: 'pkey.yaml' ... check")
        else:
            stderr("[!] doxx: There was an error writing the 'pkey.yaml' key file to your project directory")
        if file_exists(template_stub_path):
            stdout("[+] doxx: '" + template_stub_path + "' ... check")
        else:
            stderr("[!] doxx: There was an error writing the '" + template_stub_path + "' template file to your project directory")
    except Exception as e:
        stderr("[!] doxx: Unable to write project files to disk. Error: " + str(e), exit=1)
def _remove_key():
    """Remove the doxx key and project settings files (key.yaml, pkey.yaml,
    project.yaml) from the working directory when present; removal errors
    are reported without halting."""
    # the three settings files share identical removal/report handling
    for settings_file in ('key.yaml', 'pkey.yaml', 'project.yaml'):
        if file_exists(settings_file):
            try:
                remove(settings_file)
                stdout("[-] doxx: -- " + settings_file + " ... removed")
            except Exception as e:
                stderr("[!] doxx: Unable to remove '" + settings_file + "'. Error: " + str(e), exit=0)
def main():
    """Command line entry point: unpack each archive named as an argument.
    Supports .zip, .tar.gz/.tgz/.tar.gzip, and .tar.bz2/.tar.bzip2; reports
    per-archive success/failure (with ANSI color markers on non-Windows)."""
    import sys
    from Naked.commandline import Command

    user_platform = platform.system()  # Windows output omits the ANSI escape markers

    # ------------------------------------------------------------------------------------------
    # [ Instantiate Naked framework command line object ]
    #   used for all subsequent conditional logic in the CLI application
    # ------------------------------------------------------------------------------------------
    c = Command(sys.argv[0], sys.argv[1:])

    if not c.command_suite_validates():
        from jampack.settings import usage as jampack_usage
        print(jampack_usage)
        sys.exit(1)
    if c.help():  # User requested jampack help information
        from jampack.settings import help as jampack_help
        print(jampack_help)
        sys.exit(0)
    elif c.usage():  # User requested jampack usage information
        from jampack.settings import usage as jampack_usage
        print(jampack_usage)
        sys.exit(0)
    elif c.version():  # User requested jampack version information
        from jampack.settings import app_name, major_version, minor_version, patch_version
        version_display_string = app_name + ' ' + major_version + '.' + minor_version + '.' + patch_version
        print(version_display_string)
        sys.exit(0)

    if c.argc > 0:  # if there is an argument to the executable
        try:
            for archive_name in c.argv:
                lowercase_archive_name = archive_name.lower()  # extension matching is case-insensitive
                if lowercase_archive_name.endswith('.zip'):
                    if zipfile.is_zipfile(archive_name):
                        zipper = zipfile.ZipFile(archive_name, mode="r")
                        zipper.extractall()
                        if user_platform == "Windows":
                            stdout("'" + archive_name + "' was unpacked.")
                        else:
                            stdout("[\033[32m✓\033[0m] '" + archive_name + "' was unpacked.")
                    else:
                        if user_platform == "Windows":
                            stderr("'" + archive_name + "' does not appear to be a zip file")
                        else:
                            stderr("[\033[31m!\033[0m] '" + archive_name + "' does not appear to be a zip file")
                elif lowercase_archive_name.endswith('.tar.gz') or lowercase_archive_name.endswith('.tgz') or lowercase_archive_name.endswith('.tar.gzip'):
                    if tarfile.is_tarfile(archive_name):
                        tarball = tarfile.open(archive_name, mode="r:gz")
                        tarball.extractall()
                        if user_platform == "Windows":
                            stdout("'" + archive_name + "' was unpacked.")
                        else:
                            stdout("[\033[32m✓\033[0m] '" + archive_name + "' was unpacked.")
                    else:
                        if user_platform == "Windows":
                            stderr("'" + archive_name + "' does not appear to be a tar archive")
                        else:
                            stderr("[\033[31m!\033[0m] '" + archive_name + "' does not appear to be a tar archive")
                elif lowercase_archive_name.endswith('.tar.bz2') or lowercase_archive_name.endswith('.tar.bzip2'):
                    if tarfile.is_tarfile(archive_name):
                        bzball = tarfile.open(archive_name, mode="r:bz2")
                        bzball.extractall()
                        if user_platform == "Windows":
                            stdout("'" + archive_name + "' was unpacked.")
                        else:
                            stdout("[\033[32m✓\033[0m] '" + archive_name + "' was unpacked.")
                    else:
                        if user_platform == "Windows":
                            stderr("'" + archive_name + "' does not appear to be a tar archive")
                        else:
                            stderr("[\033[31m!\033[0m] '" + archive_name + "' does not appear to be a tar archive")
                else:
                    # unrecognized extension: report and skip this archive
                    if user_platform == "Windows":
                        stderr("Unable to identify the archive type for '" + archive_name + "'. This archive was not unpacked. Please check the file extension and try again.")
                    else:
                        stderr("[\033[31m!\033[0m] Unable to identify the archive type for '" + archive_name + "'. This archive was not unpacked. Please check the file extension and try again.")
        except Exception as e:
            # NOTE(review): archive_name refers to the archive being processed
            # when the exception was raised; the loop does not continue after it
            if user_platform == "Windows":
                stderr("Unable to unpack the archive '" + archive_name + "'. Error: " + str(e))
            else:
                stderr("[\033[31m!\033[0m] Unable to unpack the archive '" + archive_name + "'. Error: " + str(e))
    # ------------------------------------------------------------------------------------------
    # [ DEFAULT MESSAGE FOR MATCH FAILURE ]
    #   Message to provide to the user when all above conditional logic fails to meet a true condition
    # ------------------------------------------------------------------------------------------
    else:
        if user_platform == "Windows":
            print("Could not complete the command that you entered. Please try again.")
        else:
            print("[\033[31mX\033[0m] Could not complete the command that you entered. Please try again.")
        sys.exit(1)  # exit
def single_template_run(self, template_path):
    """Render replacements using a single template file as defined in a doxx key file (public method)

    template_path -- local path, or http(s) URL, of the template file.
    Pipeline: load -> split -> error-parse -> text-parse -> render (or
    verbatim write) -> write outfile relative to the key file directory.
    Any failure reports to stderr and exits with status 1."""
    #----------------------------------------------------------------------------
    # NOTE : changes in this method require the same changes to multi_process_run
    #----------------------------------------------------------------------------
    ## Load the data
    # remote templates
    if len(template_path) > 6 and (template_path[0:7] == "http://" or template_path[0:8] == "https://"):
        template = RemoteDoxxTemplate(template_path)
        try:
            result = template.load_data()
            if result[0] == False:  # if the method responds False, then HTTP data load did not work
                stderr(result[1], exit=1)  # write out the returned error message in result[1] position of the tuple
                # halt execution if unsuccessful
        except Exception as e:
            stderr("[!] doxx: Unable to load the remote template file '" + template_path + "'. Error message: " + str(e), exit=1)
    # local templates
    elif file_exists(template_path):
        template = DoxxTemplate(template_path)
        try:
            template.load_data()
        except Exception as e:
            stderr("[!] doxx: Unable to read the local template file '" + template_path + "'. Error message: " + str(e), exit=1)
    else:
        stderr("[!] doxx: Unable to find the requested template file " + template_path, exit=1)  # print error message and halt execution of application
    ## Split the data
    try:
        template.split_data()
    except Exception as e:
        stderr("[!] doxx: Unable to parse the template data. Please verify the template syntax and try again. Error message: " + str(e), exit=1)
    ## Parse data for errors
    error_parse_result = template.parse_template_for_errors()
    if error_parse_result[0] == True:  # if there was a parsing error
        stderr(error_parse_result[1], exit=1)  # print the returned error message to stderr and exit application
    ## Then parse the template text and load instance attributes for the text replacement with Ink below
    try:
        template.parse_template_text()
    except Exception as e:
        stderr("[!] doxx: An error occurred while parsing the template file. Error message: " + str(e), exit=1)
    # determine whether this is a verbatim template file (no replacements) or the key file did not include replacement keys
    if template.verbatim is True or self.no_key_replacements is True:
        # write template.text out verbatim
        try:
            # if the requested destination directory path does not exist, make it
            # (outfile paths are resolved relative to the key file's directory)
            outfile_dir_path = make_path(os.path.dirname(self.key_path), os.path.dirname(template.outfile))
            if not outfile_dir_path == '' and not dir_exists(outfile_dir_path):
                make_dirs(outfile_dir_path)
            # write the file
            outfile_path = make_path(os.path.dirname(self.key_path), template.outfile)
            fw = FileWriter(outfile_path)
            fw.write(template.text)
            stdout("[+] doxx: '" + outfile_path + "' build... check")
        except Exception as e:
            stderr("[!] doxx: There was a file write error. Error message: " + str(e), exit=1)
    else:
        # template meta data is in template.meta_data
        # template text is in template.text
        # perform the text replacements:
        try:
            ink_template = InkTemplate(template.text)
            ink_renderer = InkRenderer(ink_template, self.key_data)
            rendered_text = ink_renderer.render()
        except Exception as e:
            stderr("[!] doxx: An error occurred during the text replacement attempt. Error message: " + str(e), exit=1)
        # if the requested destination directory path does not exist, make it
        outfile_dir_path = make_path(os.path.dirname(self.key_path), os.path.dirname(template.outfile))
        if not outfile_dir_path == '' and not dir_exists(outfile_dir_path):
            make_dirs(outfile_dir_path)
        # write rendered file to disk
        try:
            outfile_path = make_path(os.path.dirname(self.key_path), template.outfile)
            fw = FileWriter(outfile_path)
            fw.write(rendered_text)
            stdout("[+] doxx: -- " + outfile_path + " ... check")
        except Exception as e:
            stderr("[!] doxx: There was an error with the rendered file write. Error message: " + str(e), exit=1)
def run_clean():
    """Clean the working directory: remove doxx key/settings files, then
    doxx template files, and confirm completion."""
    _remove_key()   # key.yaml / pkey.yaml / project.yaml
    _remove_doxt()  # .doxt template files (and empty templates directory)
    stdout("[*] doxx: Clean complete.")
def main():
    """Command line entry point: package the current working directory (no
    arguments) or named directories into archives -- .tar.gz by default,
    .zip via the `zip` subcommand, .tar.bz2 via the `bz2` subcommand --
    and report each archive's size relative to the original directory."""
    from Naked.commandline import Command
    global PROGRESS_INDICATOR
    user_platform = platform.system()  # Windows output omits the ANSI escape markers
    # ------------------------------------------------------------------------------------------
    # [ Instantiate Naked framework command line object ]
    #   used for all subsequent conditional logic in the CLI application
    # ------------------------------------------------------------------------------------------
    c = Command(sys.argv[0], sys.argv[1:])
    if c.help():  # User requested jampack help information
        from jampack.settings import help as jampack_help
        print(jampack_help)
        sys.exit(0)
    elif c.usage():  # User requested jampack usage information
        from jampack.settings import usage as jampack_usage
        print(jampack_usage)
        sys.exit(0)
    elif c.version():  # User requested jampack version information
        from jampack.settings import app_name, major_version, minor_version, patch_version
        version_display_string = app_name + ' ' + major_version + '.' + minor_version + '.' + patch_version
        print(version_display_string)
        sys.exit(0)
    # ------------------------------------------------------------------------------------------
    # [ PRIMARY COMMAND LOGIC ]
    # ------------------------------------------------------------------------------------------
    if c.argc == 0:
        # tar.gz pack the current working directory
        directory_name = os.path.basename(os.getcwd())
        directory_size = get_directory_size(".")
        package_targz(directory_name, ".")
        archive_name = directory_name + ".tar.gz"
        # archive size as a percentage of the original directory size
        percent_filesize = (100 * (get_file_size(archive_name) / float(directory_size)))
        display_percent = str(int(percent_filesize))
        stdout(" 100%")
        if user_platform == "Windows":
            stdout(archive_name + " created " + "[~" + display_percent + "% original]")
        else:
            stdout("[\033[32m✓\033[0m] " + archive_name + " created " + "[~" + display_percent + "% original]")
        sys.exit(0)
    elif c.argc > 0:
        if c.arg0 == "zip":
            if c.argc == 1:
                # zip pack the current working directory
                directory_name = os.path.basename(os.getcwd())
                directory_size = get_directory_size(".")
                package_zip(directory_name, ".")
                archive_name = directory_name + ".zip"
                percent_filesize = (100 * (get_file_size(archive_name) / float(directory_size)))
                display_percent = str(int(percent_filesize))
                stdout(" 100%")  # end of the progress indicator
                if user_platform == "Windows":
                    stdout(archive_name + " created " + "[~" + display_percent + "% original]")
                else:
                    stdout("[\033[32m✓\033[0m] " + archive_name + " created " + "[~" + display_percent + "% original]")
                sys.exit(0)
            else:
                # zip pack one or more explicitly set directories
                directory_list = c.argv[1:]
                for a_directory in directory_list:
                    if os.path.isdir(a_directory):
                        PROGRESS_INDICATOR = 1  # reset the progress indicator on each new archive that is processed
                        directory_name = os.path.basename(a_directory)
                        directory_size = get_directory_size(a_directory)
                        package_zip(directory_name, a_directory)
                        archive_name = directory_name + ".zip"
                        percent_filesize = (100 * (get_file_size(archive_name) / float(directory_size)))
                        display_percent = str(int(percent_filesize))
                        stdout(" 100%")  # end of the progress indicator
                        if user_platform == "Windows":
                            stdout(archive_name + " created " + "[~" + display_percent + "% original]")
                        else:
                            stdout("[\033[32m✓\033[0m] " + archive_name + " created " + "[~" + display_percent + "% original]")
                    else:
                        if user_platform == "Windows":
                            stderr(a_directory + " is not a directory path")
                        else:
                            stderr("[\033[31mX\033[0m] " + a_directory + " is not a directory path")
                sys.exit(0)
        elif c.arg0 == "bz2":
            if c.argc == 1:
                # bz2 pack the current working directory
                directory_name = os.path.basename(os.getcwd())
                directory_size = get_directory_size(".")
                package_bzip2(directory_name, ".")
                archive_name = directory_name + ".tar.bz2"
                percent_filesize = (100 * (get_file_size(archive_name) / float(directory_size)))
                display_percent = str(int(percent_filesize))
                stdout(" 100%")  # end of the progress indicator
                if user_platform == "Windows":
                    stdout(archive_name + " created " + "[~" + display_percent + "% original]")
                else:
                    stdout("[\033[32m✓\033[0m] " + archive_name + " created " + "[~" + display_percent + "% original]")
                sys.exit(0)
            else:
                # bz2 pack one or more explicitly set directory
                directory_list = c.argv[1:]
                for a_directory in directory_list:
                    if os.path.isdir(a_directory):
                        PROGRESS_INDICATOR = 1  # reset the progress indicator on each new archive that is processed
                        directory_name = os.path.basename(a_directory)
                        directory_size = get_directory_size(a_directory)
                        package_bzip2(directory_name, a_directory)
                        archive_name = directory_name + ".tar.bz2"
                        percent_filesize = (100 * (get_file_size(archive_name) / float(directory_size)))
                        display_percent = str(int(percent_filesize))
                        stdout(" 100%")  # end of the progress indicator
                        if user_platform == "Windows":
                            stdout(archive_name + " created " + "[~" + display_percent + "% original]")
                        else:
                            stdout("[\033[32m✓\033[0m] " + archive_name + " created " + "[~" + display_percent + "% original]")
                    else:
                        if user_platform == "Windows":
                            stderr(a_directory + " is not a directory path")
                        else:
                            stderr("[\033[31mX\033[0m] " + a_directory + " is not a directory path")
                sys.exit(0)
        else:
            # tar.gz one or more explicitly defined directories
            for a_directory in c.argv:
                if os.path.isdir(a_directory):
                    PROGRESS_INDICATOR = 1  # reset the progress indicator on each new archive that is processed
                    directory_name = os.path.basename(a_directory)
                    directory_size = get_directory_size(a_directory)
                    package_targz(directory_name, a_directory)
                    archive_name = directory_name + ".tar.gz"
                    percent_filesize = (100 * (get_file_size(archive_name) / float(directory_size)))
                    display_percent = str(int(percent_filesize))
                    stdout(" 100%")  # end of the progress indicator
                    if user_platform == "Windows":
                        stdout(archive_name + " created " + "[~" + display_percent + "% original]")
                    else:
                        stdout("[\033[32m✓\033[0m] " + archive_name + " created " + "[~" + display_percent + "% original]")
                else:
                    if user_platform == "Windows":
                        stderr(a_directory + " is not a directory path")
                    else:
                        stderr("[\033[31mX\033[0m] " + a_directory + " is not a directory path")
            sys.exit(0)
    # ------------------------------------------------------------------------------------------
    # [ DEFAULT MESSAGE FOR MATCH FAILURE ]
    #   Message to provide to the user when all above conditional logic fails to meet a true condition
    # ------------------------------------------------------------------------------------------
    else:
        if user_platform == "Windows":
            stdout("Could not complete the command that you entered. Please try again.")
        else:
            stdout("[\033[31mX\033[0m] Could not complete the command that you entered. Please try again.")
        sys.exit(1)  # exit
def multi_process_run(self, template_path, iolock, outputlock):
    """Render replacements over multiple template files as defined in doxx key file using multiple processes (public method).

    Runs inside a worker process. `iolock` serializes filesystem reads/writes
    across workers; `outputlock` serializes stdout/stderr writes. Both locks
    must be released before this process exits, otherwise sibling workers can
    deadlock waiting on a lock held by a dead process.
    """
    #-------------------------------------------------------------------------------
    # NOTE : changes in this method require the same changes to single_template_run
    #-------------------------------------------------------------------------------
    ## Load the data
    if len(template_path) > 6 and (template_path[0:7] == "http://" or template_path[0:8] == "https://"):
        # remote template file requested over HTTP or HTTPS
        template = RemoteDoxxTemplate(template_path)
        try:
            result = template.load_data()  # load remote template file through HTTP or HTTPS protocol
            if result[0] == False:  # if the method responds False, then HTTP data load was not successful
                outputlock.acquire()
                stderr(result[1], exit=0)  # write out the returned error message in result[1] position of the tuple
                outputlock.release()
                sys.exit(1)  # release the lock before raising SystemExit
        except Exception as e:
            outputlock.acquire()
            stderr("[!] doxx: Unable to load the remote template file '" + template_path + "'. Error message: " + str(e), exit=0)
            outputlock.release()
            sys.exit(1)  # release the lock before raising SystemExit
    elif file_exists(template_path):
        # local template file
        template = DoxxTemplate(template_path)
        iolock.acquire()  # acquire the IO lock for template file read
        try:
            template.load_data()  # load local data
        except Exception as e:
            outputlock.acquire()
            stderr("[!] doxx: Unable to read the local template file '" + template_path + "'. Error message: " + str(e), exit=0)
            outputlock.release()
            iolock.release()  # FIX: release the held IO lock before exiting, else sibling processes deadlock on it
            sys.exit(1)  # release the locks before raising SystemExit
        iolock.release()  # release the IO lock for template file read
    else:
        outputlock.acquire()  # acquire the stderr lock
        stdout("[!] doxx: Unable to find the requested template file " + template_path)  # print error message in standard output, multi-file run so do not end execution of application
        outputlock.release()  # release the stderr lock
        # FIX: stop here — `template` was never bound, so falling through would raise NameError.
        # Returning ends this worker gracefully without aborting the other template processes.
        return

    ## Split the data
    try:
        template.split_data()  # split the data sections
    except Exception as e:
        outputlock.acquire()
        stderr("[!] doxx: Unable to parse the template data. Please verify the template syntax and try again. Error message: " + str(e), exit=0)
        outputlock.release()
        sys.exit(1)  # release the lock before raising SystemExit

    ## Parse the data for errors
    error_parse_result = template.parse_template_for_errors()
    if error_parse_result[0] == True:  # if there was a parsing error
        outputlock.acquire()
        stderr(error_parse_result[1], exit=0)  # print the returned error message to stderr and exit application
        outputlock.release()
        sys.exit(1)  # release the lock before raising SystemExit

    ## Parse the template text
    try:
        template.parse_template_text()
    except Exception as e:
        outputlock.acquire()
        stderr("[!] doxx: An error occurred during the attempt to parse the template file. Error message: " + str(e), exit=0)
        outputlock.release()
        sys.exit(1)  # release the lock before raising SystemExit

    # determine whether this is a verbatim template file (no replacements) or the key file did not include replacement keys
    if template.verbatim is True or self.no_key_replacements is True:
        # write template.text out verbatim
        try:
            # if the requested destination directory path does not exist, make it
            outfile_dir_path = make_path(os.path.dirname(self.key_path), os.path.dirname(template.outfile))
            try:
                iolock.acquire()
                if not outfile_dir_path == '' and not dir_exists(outfile_dir_path):
                    make_dirs(outfile_dir_path)
                iolock.release()
            except Exception as e:
                iolock.release()  # release the lock then re-raise the exception
                raise e
            # write the file
            outfile_path = make_path(os.path.dirname(self.key_path), template.outfile)
            # then write the file out verbatim
            try:
                iolock.acquire()
                fw = FileWriter(outfile_path)
                fw.write(template.text)
                iolock.release()
            except Exception as e:
                iolock.release()  # catch and release the iolock before re-raising the exception
                raise e
            outputlock.acquire()
            stdout("[+] doxx: -- " + outfile_path + " ... check")
            outputlock.release()
        except Exception as e:
            outputlock.acquire()
            stderr("[!] doxx: There was a file write error with '" + template_path + "'. Error message: " + str(e), exit=0)
            outputlock.release()
            sys.exit(1)  # release the lock before raising SystemExit
    else:
        # template meta data is in template.meta_data
        # template text is in template.text
        # perform the text replacements:
        try:
            ink_template = InkTemplate(template.text)
            ink_renderer = InkRenderer(ink_template, self.key_data)
            rendered_text = ink_renderer.render()
        except Exception as e:
            outputlock.acquire()
            stderr("[!] doxx: An error occurred during the text replacement attempt. Error message: " + str(e), exit=0)
            outputlock.release()
            sys.exit(1)  # release the lock before raising SystemExit

        # if the requested destination directory path does not exist, make it
        outfile_dir_path = make_path(os.path.dirname(self.key_path), os.path.dirname(template.outfile))
        try:
            iolock.acquire()
            if not outfile_dir_path == '' and not dir_exists(outfile_dir_path):
                make_dirs(outfile_dir_path)
            iolock.release()
        except Exception as e:
            outputlock.acquire()
            stderr("[!] doxx: Unable to create directory path '" + outfile_dir_path + "' for your file write. Error: " + str(e), exit=0)
            outputlock.release()
            iolock.release()
            sys.exit(1)  # release the iolock before raising SystemExit

        outfile_path = make_path(os.path.dirname(self.key_path), template.outfile)
        try:
            iolock.acquire()
            fw = FileWriter(outfile_path)
            fw.write(rendered_text)
            iolock.release()
        except Exception as e:
            outputlock.acquire()
            stderr("[!] doxx: Unable to write the file '" + outfile_path + "'. Error: " + str(e), exit=0)
            outputlock.release()
            iolock.release()
            sys.exit(1)  # release the iolock before raising SystemExit

        outputlock.acquire()
        stdout("[+] doxx: -- " + outfile_path + " ... check")
        outputlock.release()
def run_pull(url):
    """Pull a remote resource requested on the doxx command line.

    `url` is either a literal URL (project archive, gzip file, or plain text
    file) or a short code: a Github `user/repository[:branch][+cherrypick]`
    string, a `cdnjs:` short code (not yet implemented), or an official doxx
    repository package name.

    Returns the unpacked root directory path for archive/package pulls that
    produce one, otherwise None. Fatal errors are reported via stderr with
    exit=1.
    """
    # URL pulls for project archive packages, gzip files, text files
    if is_url(url):
        file_name = get_file_name(url)
        if len(file_name) == 0:
            file_name = "pullfile"  # fallback local name when the URL has no file name component
        # begin file pull
        stdout("[*] doxx: Pulling file...")
        if is_tar_gz_archive(file_name):
            root_dir = None
            try:
                pull_binary_file(url, file_name)  # pull remote file
            except Exception as e:
                stderr("[!] doxx: Unable to pull the tar.gz project. Error: " + str(e), exit=1)
            stdout("[*] doxx: Unpacking...")
            try:
                root_dir = unpack_archive(file_name)  # unpack archive and define the root directory
            except Exception as e:
                stderr("[!] doxx: Unable to unpack the compressed project file. Error: " + str(e), exit=1)
            if file_exists(file_name):
                remove_file(file_name)  # remove the archive file
            if file_exists('pkey.yaml'):
                if not file_exists('key.yaml'):
                    rename('pkey.yaml', 'key.yaml')  # change name of pkey.yaml to key.yaml if there is not already a key.yaml file
            return root_dir  # return the root directory path for calling code that needs it
        elif is_zip_archive(file_name):
            root_dir = None
            try:
                pull_binary_file(url, file_name)  # pull remote file
            except Exception as e:
                stderr("[!] doxx: Unable to pull the .zip project. Error: " + str(e), exit=1)
            stdout("[*] doxx: Unpacking...")
            try:
                root_dir = unpack_archive(file_name)  # unpack archive and define the root directory
            except Exception as e:
                stderr("[!] doxx: Unable to unpack the compressed project file. Error: " + str(e), exit=1)
            if file_exists(file_name):
                remove_file(file_name)  # remove the archive file
            if file_exists('pkey.yaml'):
                if not file_exists('key.yaml'):
                    rename('pkey.yaml', 'key.yaml')  # change name of pkey.yaml to key.yaml if there is not already a key.yaml file
            return root_dir  # return the root directory path for calling code that needs it
        elif is_gzip_file(file_name):
            try:
                pull_binary_file(url, file_name)  # pull the remote gzip file
            except Exception as e:
                stderr("[!] doxx: Unable to pull the compressed file. Error: " + str(e), exit=1)
            # FIX: informational messages use the "[*]" prefix elsewhere; this one used "[!]"
            stdout("[*] doxx: Decompressing...")
            try:
                decompress_gzip(file_name)  # decompress the file text
            except Exception as e:
                stderr("[!] doxx: Unable to decompress the gzip file. Error: " + str(e), exit=1)
            if file_exists(file_name):
                remove_file(file_name)  # remove the gzip compressed file and leave the decompressed text file
        else:
            try:
                pull_text_file(url, file_name)  # it is assumed to be a plain text template or key file, pull the text
            except Exception as e:
                stderr("[!] doxx: Unable to pull the requested file. Error: " + str(e), exit=1)
    else:
        # SHORT CODE PULL REQUESTS for Github repository, CDNJS, etc
        if "/" in url:
            short_code = url
            if short_code.startswith('cdnjs:'):
                pass  # add code for cdnjs pulls (syntax: 'cdnjs:/project')
            else:
                # default to Github repository shortcode
                keep_a_file_or_dir = False  # indicator for user request to maintain single file or dir from repository
                # cherry-pick file or directory request
                if "+" in short_code:
                    # user requested single directory or file from the repository
                    short_code_keep = short_code.split("+")
                    short_code = short_code_keep[0]  # split on the + char and eliminate it from the request argument at this point
                    keep_path = short_code_keep[1]  # the file or dir path to cherry pick
                    keep_a_file_or_dir = True  # switch the indicator
                short_code_parts = short_code.split('/')
                if len(short_code_parts) == 2:
                    if ":" in short_code_parts[1]:
                        # non-master branch request (syntax: `user/repo:branch`)
                        user = short_code_parts[0]
                        if ":" in user or "+" in user:
                            stderr("[!] doxx: your short code for a Github repository does not have the proper format")
                            stderr("[!] doxx: the syntax is `user/repository[:branch][+cherrypick_path]`", exit=1)
                        repo_parts = short_code_parts[1].split(':')
                        repo = repo_parts[0]
                        branch = repo_parts[1]
                        targz_filename = repo + "-" + branch + ".tar.gz"
                        url = "https://github.com/{{user}}/{{repository}}/archive/{{branch}}.tar.gz"
                        url = url.replace("{{user}}", user)
                        url = url.replace("{{repository}}", repo)
                        url = url.replace("{{branch}}", branch)
                        user_message = "[*] doxx: Pulling branch '" + branch + "' of Github repository '" + user + "/" + repo + "'..."
                    else:
                        # master branch request (default- syntax: `user/repo`)
                        user = short_code_parts[0]
                        if ":" in user or "+" in user:
                            stderr("[!] doxx: the short code for Github repositories does not have the proper format")
                            stderr("[!] doxx: the syntax is `user/repository[:branch][+cherrypick_path]`", exit=1)
                        repo = short_code_parts[1]
                        targz_filename = repo + "-master.tar.gz"
                        url = "https://github.com/{{user}}/{{repository}}/archive/master.tar.gz"
                        url = url.replace("{{user}}", user)
                        url = url.replace("{{repository}}", repo)
                        user_message = "[*] doxx: Pulling master branch of Github repository '" + user + "/" + repo + "'..."
                    # notify user of the pull
                    stdout(user_message)
                    try:
                        pull_binary_file(url, targz_filename)  # pull the archive file
                    except Exception as e:
                        stderr("[!] doxx: Unable to pull the Github repository. Error: " + str(e), exit=1)
                    if file_exists(targz_filename):
                        try:
                            # Unpack and remove the archive file
                            targz_basename = unpack_archive(targz_filename)  # unpack the archive locally
                            remove(targz_filename)  # remove the archive file
                        except Exception as e:
                            stderr("[!] doxx: Unable to unpack the pulled Github repository. Error: " + str(e), exit=1)
                        try:
                            # Did user request keep of a specific file or directory path?
                            if keep_a_file_or_dir is True:
                                # is this a multilevel path request?
                                # if so, make OS dependent file path from the user argument (keep path argument syntax uses POSIX path style on all platforms)
                                if "/" in keep_path:
                                    keep_path_parts = keep_path.split('/')
                                    keep_path_depth = len(keep_path_parts)
                                    if keep_path_depth > 3:
                                        stderr("[!] doxx: doxx supports up to 3 levels of depth in the cherry pick shortcode path. Your request exceeded that level and the requested file or directory was not cherry picked from the repository.", exit=1)
                                    # make the OS dependent paths
                                    if keep_path_depth == 2:
                                        path_part_one = keep_path_parts[0]
                                        path_part_two = keep_path_parts[1]
                                        keep_path = join(path_part_one, path_part_two)
                                    elif keep_path_depth == 3:
                                        path_part_one = keep_path_parts[0]
                                        path_part_two = keep_path_parts[1]
                                        path_part_three = keep_path_parts[2]
                                        keep_path = join(path_part_one, path_part_two, path_part_three)
                                else:
                                    keep_path_depth = 1  # need to have a definition of depth of file/dir keep for mkdirs code below
                                joined_keep_path = join(targz_basename, keep_path)  # the path to the local version of the file or directory following pull
                                if dir_exists(joined_keep_path):
                                    stdout("[*] doxx: Cherry picking the directory '" + keep_path + "'")
                                    if dir_exists(keep_path):
                                        # local dir of same name exists: write to `<dir>-new` instead of clobbering it
                                        new_dir_path = keep_path + "-new"
                                        if dir_exists(new_dir_path):
                                            shutil.rmtree(new_dir_path)
                                        stdout("[*] doxx: The requested directory already exists locally. Writing to '" + new_dir_path + "' instead.")
                                        shutil.copytree(joined_keep_path, new_dir_path)  # write to `dir-new` instead of existing `dir`
                                        shutil.rmtree(targz_basename)  # remove the pulled repository file
                                    else:
                                        shutil.copytree(joined_keep_path, keep_path)  # write to the requested dir path
                                        shutil.rmtree(targz_basename)  # remove the pulled repository file
                                elif file_exists(joined_keep_path):
                                    stdout("[*] doxx: Cherry picking the file '" + keep_path + "'")
                                    local_filepath = basename(keep_path)  # outfile write path (filename to root directory where user pulled)
                                    # handle file path if the file already exists to avoid overwrite
                                    if file_exists(local_filepath):
                                        # the file already exists in the local directory
                                        if '.' in local_filepath:
                                            file_name_parts = local_filepath.split('.')
                                            file_name_parts[0] = file_name_parts[0] + "-new"  # add '-new' to the basename of the file, not extension
                                            local_filepath = '.'.join(file_name_parts)
                                        else:
                                            local_filepath = local_filepath + "-new"  # add '-new' to the filename (that does not have an extension)
                                        stdout("[*] doxx: The requested file already exists in the working directory. Writing the new file to '" + local_filepath + "' instead.")
                                    # write the file
                                    shutil.copy2(joined_keep_path, local_filepath)
                                    # remove the tar.gz archive file
                                    shutil.rmtree(targz_basename)
                                else:
                                    # could not find the file or dir in the pulled repo
                                    stderr("[!] doxx: '" + joined_keep_path + "' does not appear to be a file or directory in the pulled repository. The entire pulled repository was left in the working directory for review.", exit=1)
                        except Exception as e:
                            # FIX: error message was missing the ': ' separator before the exception text
                            stderr("[!] doxx: Unable to process the requested keep file or directory path. Error: " + str(e), exit=1)
                    else:
                        # archive file not found locally
                        # NOTE(review): this error path does not exit with a non-zero status like its
                        # siblings do — confirm whether callers rely on the fall-through return of None
                        stderr("[!] doxx: The Github repository pull did not complete successfully. Please try again.")
                else:
                    # length of short_code_parts > 2
                    stderr("[!] doxx: Short code syntax for Github repository pulls:", exit=0)
                    stderr(" $ doxx pull user/repository")
                    stderr("[!] doxx: with an optional branch or release:")
                    stderr(" $ doxx pull user/repository[:branch]")
                    stderr("[!] doxx: with an optional branch or release AND optional cherry pick file or directory path:")
                    stderr(" $ doxx pull user/repository[:branch][+cherrypick_path]", exit=1)
        # PROJECT PACKAGES - official repository package pulls
        else:
            from doxx.datatypes.package import OfficialPackage
            package_name = url
            package = OfficialPackage()
            package_url = package.get_package_targz_url(package_name)
            package_archive_name = package_name + ".tar.gz"
            root_dir = None
            # pull the package archive file
            stdout("[*] doxx: Pulling package '" + package_name + "'...")
            try:
                pull_binary_file(package_url, package_archive_name)
            except Exception as e:
                stderr("[!] doxx: Unable to pull the doxx repository package. Error: " + str(e), exit=1)
            # unpack the archive file
            stdout("[*] doxx: Unpacking...")
            try:
                root_dir = unpack_archive(package_archive_name)  # unpack archive and define the root directory
            except Exception as e:
                stderr("[!] doxx: Unable to unpack the project package. Error: " + str(e), exit=1)
            # remove the archive file
            if file_exists(package_archive_name):
                remove_file(package_archive_name)
            if file_exists('pkey.yaml'):
                if not file_exists('key.yaml'):
                    rename('pkey.yaml', 'key.yaml')  # change name of pkey.yaml to key.yaml if there is not already a key.yaml file
            return root_dir  # return the root directory path for calling code that needs it
def run_search(search_string):
    """Fuzzy-search the remote doxx package repository list for `search_string`
    and print strong matches (ratio > 0.8) or, failing that, possible matches
    (0.6 < ratio <= 0.8) to standard output."""
    # check for local cached list of the repositories, if not present pull the remote repository list from Amazon S3 store
    stdout("[*] doxx: Searching remote doxx repositories...")
    master_text = _get_master_text()
    master_list = _get_master_list(master_text)
    search_word_count = len(search_string.split(" "))

    # fuzzy search engine for the user's search string
    fuzzy = FuzzySearcher(search_string)

    # maxheaps (negated ratios) that collect strong and possible matches
    best_results = []
    possible_results = []

    # monotonically increasing tie-breakers so equal-priority pushes keep FIFO order
    best_index = 0
    possible_index = 0

    for repository in master_list:
        best_ratio = 0
        continue_match_attempts = True

        # 1) full string match attempt
        match_ratio_fullstring = fuzzy.full_string_ratio(repository)
        if match_ratio_fullstring > best_ratio:
            best_ratio = match_ratio_fullstring
        if match_ratio_fullstring == 1.0:
            # perfect match: push with the highest priority level (1.0) and stop matching
            continue_match_attempts = False
            heapq.heappush(best_results, (-1.0, best_index, repository))
            best_index += 1

        # 2) perfect match on the first '-'-delimited token of the repository name
        if continue_match_attempts is True:
            match_ratio_firsttoken = fuzzy.partial_firstindexitem_dashsplit_ratio(repository)
            if match_ratio_firsttoken > best_ratio:
                best_ratio = match_ratio_firsttoken
            if match_ratio_firsttoken == 1.0:
                continue_match_attempts = False
                heapq.heappush(best_results, (-0.98, best_index, repository))
                best_index += 1

        # 3) slice the repository name to the search string's length and compare
        if continue_match_attempts is True:
            match_ratio_startslice = fuzzy.partial_startslice_ratio(repository)
            if match_ratio_startslice > best_ratio:
                best_ratio = match_ratio_startslice
            if match_ratio_startslice == 1.0:
                continue_match_attempts = False
                heapq.heappush(best_results, (-0.99, best_index, repository))
                best_index += 1

        # 4) single-word searches only: compare against each '-'-split repository token
        if continue_match_attempts is True and search_word_count == 1:
            match_ratio_dashtokens = fuzzy.partial_dashsplit_tokens_ratio(repository)
            if match_ratio_dashtokens > best_ratio:
                best_ratio = match_ratio_dashtokens
            if match_ratio_dashtokens == 1.0:
                continue_match_attempts = False
                heapq.heappush(best_results, (-0.95, best_index, repository))
                best_index += 1

        # 5) multi-word searches only: sequential token groups of the same word count
        if continue_match_attempts is True and search_word_count > 1:
            match_ratio_nword = fuzzy.partial_nword_ratio(repository)
            if match_ratio_nword > best_ratio:
                best_ratio = match_ratio_nword
            if match_ratio_nword == 1.0:
                continue_match_attempts = False
                heapq.heappush(best_results, (-0.99, best_index, repository))
                best_index += 1

        # 6) multi-word searches only: set intersection + remainder matching
        if continue_match_attempts is True and search_word_count > 1:
            match_ratio_set = fuzzy.partial_set_ratio(repository)
            if match_ratio_set > best_ratio:
                best_ratio = match_ratio_set
            if match_ratio_set == 1.0:
                continue_match_attempts = False
                heapq.heappush(best_results, (-0.97, best_index, repository))
                best_index += 1

        # no perfect match found: classify by the best ratio observed across all attempts
        if continue_match_attempts is True:
            if best_ratio > 0.8:
                heapq.heappush(best_results, (-best_ratio, best_index, repository))
                best_index += 1
            elif best_ratio > 0.6 and best_ratio <= 0.8:
                heapq.heappush(possible_results, (-best_ratio, possible_index, repository))
                possible_index += 1

    # report results of the fuzzy search for the user's search term
    final_best_results = _get_maxheap_results_list(best_results)
    final_possible_results = _get_maxheap_results_list(possible_results)

    if len(final_best_results) > 0:
        stdout(" ")
        for result in final_best_results:
            stdout(result)
    elif len(final_possible_results) > 0:
        stdout("[*] doxx: There were no good matches for your search term.")
        stdout("[*] doxx: Do any of these work? :")
        stdout(" ")
        for result in final_possible_results:
            stdout(result)
    else:
        stdout("[*] doxx: No matches found in the Package Repository.")
        stdout("[*] doxx: Get in touch so that we can build it...")
        ## TODO: add mechanism for user package submit
def main():
    """Command line entry point for the decrypto tool: symmetrically decrypt
    gpg-encrypted files (.crypt/.gpg/.asc/.pgp), optionally unpack decrypted
    .tar archives, supporting single files, directories, and multi-path runs."""
    import os
    import sys
    from time import sleep
    import getpass
    import tarfile
    from Naked.commandline import Command
    from Naked.toolshed.shell import execute, muterun
    from Naked.toolshed.system import dir_exists, file_exists, list_all_files, make_path, stdout, stderr, is_dir

    #------------------------------------------------------------------------------------------
    # [ Instantiate command line object ]
    #   used for all subsequent conditional logic in the CLI application
    #------------------------------------------------------------------------------------------
    c = Command(sys.argv[0], sys.argv[1:])

    #------------------------------------------------------------------------------------------
    # [ VALIDATION LOGIC ] - early validation of appropriate command syntax
    # Test that user entered at least one argument to the executable, print usage if not
    #------------------------------------------------------------------------------------------
    if not c.command_suite_validates():
        from crypto.settings import usage as crypto_usage
        print(crypto_usage)
        sys.exit(1)

    #------------------------------------------------------------------------------------------
    # [ HELP, VERSION, USAGE LOGIC ]
    # Naked framework provides default help, usage, and version commands for all applications
    #   --> settings for user messages are assigned in the lib/crypto/settings.py file
    #------------------------------------------------------------------------------------------
    if c.help():  # User requested crypto help information
        from crypto.settings import help as crypto_help
        print(crypto_help)
        sys.exit(0)
    elif c.usage():  # User requested crypto usage information
        from crypto.settings import usage as crypto_usage
        print(crypto_usage)
        sys.exit(0)
    elif c.version():  # User requested crypto version information
        from crypto.settings import app_name, major_version, minor_version, patch_version
        version_display_string = app_name + ' ' + major_version + '.' + minor_version + '.' + patch_version
        print(version_display_string)
        sys.exit(0)

    #------------------------------------------------------------------------------------------
    # [ APPLICATION LOGIC ]
    #------------------------------------------------------------------------------------------
    elif c.argc > 1:
        # code for multi-file processing and commands that include options
        use_standard_output = False  # print to stdout flag
        use_file_overwrite = False   # overwrite existing file
        untar_archives = True        # untar decrypted tar archives, true by default

        # set user option flags
        if c.option('--stdout') or c.option('-s'):
            use_standard_output = True
        if c.option('--overwrite') or c.option('-o'):
            use_file_overwrite = True
        if c.option('--nountar'):
            untar_archives = False

        directory_list = []  # directory paths included in the user entered paths from the command line
        file_list = []       # file paths included in the user entered paths from the command line (and inside directories entered)

        for argument in c.argv:
            if file_exists(argument):
                # user included a file, add it to the file_list for decryption
                if argument.endswith('.crypt'):
                    file_list.append(argument)  # add .crypt files to the list of files for decryption
                elif argument.endswith('.gpg'):
                    file_list.append(argument)
                elif argument.endswith('.asc'):
                    file_list.append(argument)
                elif argument.endswith('.pgp'):
                    file_list.append(argument)
                else:
                    # cannot identify as an encrypted file, give it a shot anyways but warn user
                    file_list.append(argument)
                    stdout("Could not confirm that '" + argument + "' is encrypted based upon the file type. Attempting decryption. Keep your fingers crossed...")
            elif dir_exists(argument):
                # user included a directory, add it to the directory_list
                directory_list.append(argument)
            else:
                if argument[0] == "-":
                    pass  # if it is an option, do nothing
                else:
                    stderr("'" + argument + "' does not appear to be an existing file or directory. Aborting decryption attempt for this request.")

        # unroll the contained directory files into the file_list IF they are encrypted file types
        if len(directory_list) > 0:
            for directory in directory_list:
                directory_file_list = list_all_files(directory)
                for contained_file in directory_file_list:
                    if contained_file.endswith('.crypt'):
                        file_list.append(make_path(directory, contained_file))  # include the file with a filepath 'directory path/contained_file path'
                    elif contained_file.endswith('.gpg'):
                        file_list.append(make_path(directory, contained_file))
                    elif contained_file.endswith('.asc'):  # FIX: was endswith('asc') — matched any name merely ending in the letters 'asc'
                        file_list.append(make_path(directory, contained_file))
                    elif contained_file.endswith('.pgp'):
                        file_list.append(make_path(directory, contained_file))

        # confirm that there are files for decryption, if not abort
        if len(file_list) == 0:
            stderr("Could not identify files for decryption")
            sys.exit(1)

        # get passphrase used to symmetrically decrypt the file
        passphrase = getpass.getpass("Please enter your passphrase: ")
        if len(passphrase) == 0:  # confirm that user entered a passphrase
            stderr("You did not enter a passphrase. Please repeat your command and try again.")
            sys.exit(1)
        passphrase_confirm = getpass.getpass("Please enter your passphrase again: ")

        if passphrase == passphrase_confirm:
            # begin decryption of each requested file. the directory path was already added to the file path above
            for encrypted_file in file_list:
                # create the decrypted file name
                decrypted_filename = ""
                if encrypted_file.endswith('.crypt'):
                    decrypted_filename = encrypted_file[0:-6]
                elif encrypted_file.endswith('.gpg') or encrypted_file.endswith('.asc') or encrypted_file.endswith('.pgp'):
                    decrypted_filename = encrypted_file[0:-4]
                else:
                    decrypted_filename = encrypted_file + '.decrypt'  # if it was a file without a known encrypted file type, add the .decrypt suffix

                # determine whether file overwrite will take place with the decrypted file
                skip_file = False  # flag that indicates this file should not be encrypted
                created_tmp_files = False
                if not use_standard_output:  # if not writing a file, no need to check for overwrite
                    if file_exists(decrypted_filename):
                        if use_file_overwrite:
                            # rename the existing file to temp file which will be erased or replaced (on decryption failures) below
                            tmp_filename = decrypted_filename + '.tmp'
                            os.rename(decrypted_filename, tmp_filename)
                            created_tmp_files = True
                        else:
                            stdout("The file path '" + decrypted_filename + "' already exists. This file was not decrypted.")
                            skip_file = True

                # begin decryption
                if not skip_file:
                    # NOTE(review): the passphrase and file paths are interpolated directly into a
                    # shell command string; a passphrase containing a single quote (or a hostile
                    # file name) breaks the command and is a potential injection vector —
                    # consider shlex.quote, but confirm Windows shell quoting behavior first.
                    if use_standard_output:
                        # using --quiet flag to suppress stdout messages from gpg, just want the file data in stdout stream
                        system_command = "gpg --batch --quiet --passphrase '" + passphrase + "' -d " + encrypted_file
                        successful_execution = execute(system_command)  # use naked execute function to directly push to stdout, rather than return stdout
                        if not successful_execution:
                            stderr("Unable to decrypt file '" + encrypted_file + "'", 0)
                            if created_tmp_files:
                                # restore the moved tmp file to original if decrypt failed
                                tmp_filename = decrypted_filename + '.tmp'
                                if file_exists(tmp_filename):
                                    os.rename(tmp_filename, decrypted_filename)
                        else:
                            # decryption successful but we are in stdout flag so do not include any other output from decrypto
                            pass
                    else:
                        system_command = "gpg --batch -o " + decrypted_filename + " --passphrase '" + passphrase + "' -d " + encrypted_file
                        response = muterun(system_command)
                        if response.exitcode == 0:
                            stdout("'" + encrypted_file + "' decrypted to '" + decrypted_filename + "'")
                        else:
                            # failed decryption
                            if created_tmp_files:
                                # restore the moved tmp file to original if decrypt failed
                                tmp_filename = decrypted_filename + '.tmp'
                                if file_exists(tmp_filename):
                                    os.rename(tmp_filename, decrypted_filename)
                            # report the error
                            stderr(response.stderr)
                            stderr("Decryption failed for " + encrypted_file)

                    # cleanup: remove the tmp file
                    if created_tmp_files:
                        tmp_filename = decrypted_filename + '.tmp'
                        if file_exists(tmp_filename):
                            os.remove(tmp_filename)

                    # untar/extract any detected archive file(s)
                    # NOTE(review): in --stdout mode no decrypted file is written, so
                    # tarfile.is_tarfile on a *.tar name could raise for a missing file — confirm
                    if untar_archives is True:
                        if decrypted_filename.endswith('.tar') and tarfile.is_tarfile(decrypted_filename):
                            untar_path_tuple = os.path.split(decrypted_filename)
                            untar_path = untar_path_tuple[0]
                            if use_file_overwrite:
                                with tarfile.open(decrypted_filename) as tar:
                                    if len(untar_path) > 0:
                                        tar.extractall(path=untar_path)  # use dir path from the decrypted_filename if not CWD
                                        stdout("'" + decrypted_filename + "' unpacked in the directory path '" + untar_path + "'")
                                    else:
                                        tar.extractall()  # else use CWD
                                        stdout("'" + decrypted_filename + "' unpacked in the current working directory")
                            else:
                                # no overwrite permission: extract only members that do not already exist
                                with tarfile.TarFile(decrypted_filename, 'r', errorlevel=1) as tar:
                                    for tarinfo in tar:
                                        t_file = tarinfo.name
                                        if len(untar_path) > 0:
                                            t_file_path = os.path.join(untar_path, t_file)
                                        else:
                                            t_file_path = t_file
                                        if not os.path.exists(t_file_path):
                                            try:
                                                if len(untar_path) > 0:
                                                    tar.extract(t_file, path=untar_path)  # write to the appropriate dir
                                                else:
                                                    tar.extract(t_file)  # write to CWD
                                            except IOError as e:
                                                stderr("Failed to unpack the file '" + t_file_path + "' [" + str(e) + "]")
                                        elif is_dir(t_file_path):
                                            pass  # do nothing if it exists and is a directory, no need to warn
                                        else:
                                            # it is a file and it already exists, provide user error message
                                            stderr("Failed to unpack the file '" + t_file_path + "'. File already exists. Use the --overwrite flag to replace existing files.")
                            # remove the decrypted tar archive file
                            os.remove(decrypted_filename)

            # overwrite the entered passphrases after file decryption is complete for all files
            passphrase = ""
            passphrase_confirm = ""
            # add a short pause to hinder brute force pexpect style password attacks with decrypto
            sleep(0.2)  # 200ms pause
        else:
            # passphrases did not match
            passphrase = ""
            passphrase_confirm = ""
            stderr("The passphrases did not match. Please enter your command again.")
            sys.exit(1)

    elif c.argc == 1:
        # simple single file or directory processing with default settings
        path = c.arg0
        if file_exists(path):
            # SINGLE FILE
            check_existing_file = False  # check for a file with the name of new decrypted filename in the directory
            if path.endswith('.crypt'):
                decrypted_filename = path[0:-6]  # remove the .crypt suffix
                check_existing_file = True
            elif path.endswith('.gpg') or path.endswith('.pgp') or path.endswith('.asc'):
                decrypted_filename = path[0:-4]
                check_existing_file = True
            else:
                decrypted_filename = path + ".decrypt"  # if there is not a standard file type, then add a .decrypt suffix to the decrypted file name
                stdout("Could not confirm that the requested file is encrypted based upon the file type. Attempting decryption. Keep your fingers crossed...")

            # confirm that the decrypted path does not already exist, if so abort with warning message to user
            if check_existing_file is True:
                if file_exists(decrypted_filename):
                    stderr("Your file will be decrypted to '" + decrypted_filename + "' and this file path already exists. Please move the file or use the --overwrite option with your command if you intend to replace the current file.")
                    sys.exit(1)

            # get passphrase used to symmetrically decrypt the file
            passphrase = getpass.getpass("Please enter your passphrase: ")
            if len(passphrase) == 0:  # confirm that user entered a passphrase
                stderr("You did not enter a passphrase. Please repeat your command and try again.")
                sys.exit(1)
            passphrase_confirm = getpass.getpass("Please enter your passphrase again: ")

            # confirm that the passphrases match
            if passphrase == passphrase_confirm:
                system_command = "gpg --batch -o " + decrypted_filename + " --passphrase '" + passphrase + "' -d " + path
                response = muterun(system_command)
                if response.exitcode == 0:
                    # unpack tar archive generated from the decryption, if present
                    if decrypted_filename.endswith('.tar') and tarfile.is_tarfile(decrypted_filename):
                        untar_path_tuple = os.path.split(decrypted_filename)
                        untar_path = untar_path_tuple[0]
                        with tarfile.TarFile(decrypted_filename, 'r', errorlevel=1) as tar:
                            for tarinfo in tar:
                                t_file = tarinfo.name
                                if len(untar_path) > 0:
                                    t_file_path = os.path.join(untar_path, t_file)
                                else:
                                    t_file_path = t_file
                                if not os.path.exists(t_file_path):
                                    try:
                                        if len(untar_path) > 0:
                                            tar.extract(t_file, path=untar_path)  # write to the appropriate dir
                                        else:
                                            tar.extract(t_file)  # write to CWD
                                    except IOError as e:
                                        stderr("Failed to unpack the file '" + t_file_path + "' [" + str(e) + "]")
                                elif is_dir(t_file_path):
                                    pass  # do nothing if it exists and is a directory, no need to warn
                                else:
                                    # it is a file and it already exists, provide user error message
                                    stderr("Failed to unpack the file '" + t_file_path + "'. File already exists. Use the --overwrite flag to replace existing files.")
                        # remove the decrypted tar archive
                        os.remove(decrypted_filename)
                    stdout("Decryption complete")
                    # overwrite user entered passphrases
                    passphrase = ""
                    passphrase_confirm = ""
                    sys.exit(0)
                else:
                    stderr(response.stderr)
                    stderr("Decryption failed")
                    # overwrite user entered passphrases
                    passphrase = ""
                    passphrase_confirm = ""
                    # add a short pause to hinder brute force pexpect style password attacks with decrypto
                    sleep(0.2)  # 200ms pause
                    sys.exit(1)
            else:
                stderr("The passphrases did not match. Please enter your command again.")
                sys.exit(1)

        elif dir_exists(path):
            # SINGLE DIRECTORY
            dirty_directory_file_list = list_all_files(path)
            directory_file_list = [x for x in dirty_directory_file_list if (x.endswith('.crypt') or x.endswith('.gpg') or x.endswith('.pgp') or x.endswith('.asc'))]

            # if there are no encrypted files found, warn and abort
            if len(directory_file_list) == 0:
                stderr("There are no encrypted files in the directory")
                sys.exit(1)

            # prompt for the passphrase
            passphrase = getpass.getpass("Please enter your passphrase: ")
            if len(passphrase) == 0:  # confirm that user entered a passphrase
                stderr("You did not enter a passphrase. Please repeat your command and try again.")
                sys.exit(1)
            passphrase_confirm = getpass.getpass("Please enter your passphrase again: ")

            if passphrase == passphrase_confirm:
                # decrypt all of the encypted files in the directory
                for filepath in directory_file_list:
                    absolute_filepath = make_path(path, filepath)  # combine the directory path and file name into absolute path
                    # remove file suffix from the decrypted file path that writes to disk
                    if absolute_filepath.endswith('.crypt'):
                        decrypted_filepath = absolute_filepath[0:-6]  # remove the .crypt suffix
                    elif absolute_filepath.endswith('.gpg') or absolute_filepath.endswith('.pgp') or absolute_filepath.endswith('.asc'):
                        decrypted_filepath = absolute_filepath[0:-4]
                    # confirm that the file does not already exist
                    if file_exists(decrypted_filepath):
                        stdout("The file path '" + decrypted_filepath + "' already exists. This file was not decrypted.")
                    else:
                        system_command = "gpg --batch -o " + decrypted_filepath + " --passphrase '" + passphrase + "' -d " + absolute_filepath
                        response = muterun(system_command)
                        if response.exitcode == 0:
                            stdout("'" + absolute_filepath + "' decrypted to '" + decrypted_filepath + "'")
                        else:
                            stderr(response.stderr)
                            stderr("Decryption failed for " + absolute_filepath)
                # overwrite user entered passphrases
                passphrase = ""
                passphrase_confirm = ""
                # add a short pause to hinder brute force pexpect style password attacks with decrypto
                sleep(0.2)  # 200ms pause
            else:
                # overwrite user entered passphrases
                passphrase = ""
                passphrase_confirm = ""
                stderr("The passphrases did not match. Please enter your command again.")
                sys.exit(1)
        else:
            # error message, not a file or directory. user entry error
            stderr("The path that you entered does not appear to be an existing file or directory. Please try again.")
            sys.exit(1)

    # ------------------------------------------------------------------------------------------
    # [ DEFAULT MESSAGE FOR MATCH FAILURE ]
    #  Message to provide to the user when all above conditional logic fails to meet a true condition
    # ------------------------------------------------------------------------------------------
    else:
        print("Could not complete your request. Please try again.")
        sys.exit(1)
def main():
    """Command line entry point for the decryption executable.

    Parses command line arguments, prompts (twice) for a symmetric GPG
    passphrase, and shells out to ``gpg`` to decrypt the requested file(s).
    Supports three request shapes:

    * ``argc > 1`` -- multiple file/directory paths with options
      (``--stdout/-s`` write decrypted data to stdout, ``--overwrite/-o``
      replace existing files, ``--nountar`` skip tar unpacking)
    * ``argc == 1`` -- a single file or a single directory with defaults
    * otherwise   -- usage error

    Exits with status 0 on success and 1 on any error.  Passphrase
    variables are overwritten after use and a 200ms pause follows failed
    decryptions to hinder scripted brute-force attempts.
    """
    import os
    import sys
    from time import sleep
    import getpass
    import tarfile
    from Naked.commandline import Command
    from Naked.toolshed.shell import execute, muterun
    from Naked.toolshed.system import dir_exists, file_exists, list_all_files, make_path, stdout, stderr, is_dir
    from shellescape import quote

    # ------------------------------------------------------------------------------------------
    # [ Instantiate command line object ]
    #   used for all subsequent conditional logic in the CLI application
    # ------------------------------------------------------------------------------------------
    c = Command(sys.argv[0], sys.argv[1:])

    # ------------------------------------------------------------------------------------------
    # [ VALIDATION LOGIC ] - early validation of appropriate command syntax
    #   Test that user entered at least one argument to the executable, print usage if not
    # ------------------------------------------------------------------------------------------
    if not c.command_suite_validates():
        from Crypto.settings import usage as crypto_usage
        print(crypto_usage)
        sys.exit(1)

    # ------------------------------------------------------------------------------------------
    # [ HELP, VERSION, USAGE LOGIC ]
    #   Naked framework provides default help, usage, and version commands
    # ------------------------------------------------------------------------------------------
    if c.help():  # User requested Crypto help information
        from Crypto.settings import help as crypto_help
        print(crypto_help)
        sys.exit(0)
    elif c.usage():  # User requested Crypto usage information
        from Crypto.settings import usage as crypto_usage
        print(crypto_usage)
        sys.exit(0)
    elif c.version():  # User requested Crypto version information
        from Crypto.settings import app_name, major_version, minor_version, patch_version
        # fix: reconstructed as a single statement (the source had a dangling
        # "+ patch_version" continuation that was a SyntaxError as written)
        version_display_string = app_name + ' ' + major_version + '.' + minor_version + '.' + patch_version
        print(version_display_string)
        sys.exit(0)

    # ------------------------------------------------------------------------------------------
    # [ APPLICATION LOGIC ]
    # ------------------------------------------------------------------------------------------
    elif c.argc > 1:
        # code for multi-file processing and commands that include options
        use_standard_output = False  # print decrypted data to stdout instead of writing a file
        use_file_overwrite = False   # overwrite existing files on name collision
        untar_archives = True        # untar decrypted tar archives, true by default

        # set user option flags
        if c.option('--stdout') or c.option('-s'):
            use_standard_output = True
        if c.option('--overwrite') or c.option('-o'):
            use_file_overwrite = True
        if c.option('--nountar'):
            untar_archives = False

        directory_list = []  # directory paths included in the user entered paths
        file_list = []       # file paths included in the user entered paths (and inside directories entered)

        for argument in c.argv:
            if file_exists(argument):
                # user included a file; accept known encrypted suffixes directly
                if argument.endswith('.crypt'):
                    file_list.append(argument)
                elif argument.endswith('.gpg'):
                    file_list.append(argument)
                elif argument.endswith('.asc'):
                    file_list.append(argument)
                elif argument.endswith('.pgp'):
                    file_list.append(argument)
                else:
                    # cannot identify as an encrypted file, give it a shot anyways but warn user
                    file_list.append(argument)
                    stdout("Could not confirm that '" + argument + "' is encrypted based upon the file type. Attempting decryption. Keep your fingers crossed...")
            elif dir_exists(argument):
                # user included a directory, add it to the directory_list
                directory_list.append(argument)
            else:
                if argument[0] == "-":
                    pass  # option string, not a path -- ignore
                else:
                    stderr("'" + argument + "' does not appear to be an existing file or directory. Aborting decryption attempt for this request.")

        # unroll the contained directory files into the file_list IF they are encrypted file types
        if len(directory_list) > 0:
            for directory in directory_list:
                directory_file_list = list_all_files(directory)
                for contained_file in directory_file_list:
                    if contained_file.endswith('.crypt'):
                        file_list.append(make_path(directory, contained_file))
                    elif contained_file.endswith('.gpg'):
                        file_list.append(make_path(directory, contained_file))
                    elif contained_file.endswith('.asc'):  # fix: was endswith('asc'), which also matched plain names ending in "asc"
                        file_list.append(make_path(directory, contained_file))
                    elif contained_file.endswith('.pgp'):
                        file_list.append(make_path(directory, contained_file))

        # confirm that there are files for decryption, if not abort
        if len(file_list) == 0:
            stderr("Could not identify files for decryption")
            sys.exit(1)

        # get passphrase used to symmetrically decrypt the file
        passphrase = getpass.getpass("Please enter your passphrase: ")
        if len(passphrase) == 0:  # confirm that user entered a passphrase
            stderr("You did not enter a passphrase. Please repeat your command and try again.")
            sys.exit(1)
        passphrase_confirm = getpass.getpass("Please enter your passphrase again: ")

        if passphrase == passphrase_confirm:
            # begin decryption of each requested file; directory paths were already joined above
            for encrypted_file in file_list:
                # create the decrypted file name by stripping the known suffix
                decrypted_filename = ""
                if encrypted_file.endswith('.crypt'):
                    decrypted_filename = encrypted_file[0:-6]
                elif encrypted_file.endswith('.gpg') or encrypted_file.endswith('.asc') or encrypted_file.endswith('.pgp'):
                    decrypted_filename = encrypted_file[0:-4]
                else:
                    decrypted_filename = encrypted_file + '.decrypt'  # unknown type: add .decrypt suffix

                # determine whether file overwrite will take place with the decrypted file
                skip_file = False          # flag that indicates this file should not be decrypted
                created_tmp_files = False  # a .tmp backup of the existing file was created
                if not use_standard_output:  # if not writing a file, no need to check for overwrite
                    if file_exists(decrypted_filename):
                        if use_file_overwrite:
                            # rename the existing file to a temp file which is erased
                            # (or restored on decryption failure) below
                            tmp_filename = decrypted_filename + '.tmp'
                            os.rename(decrypted_filename, tmp_filename)
                            created_tmp_files = True
                        else:
                            stdout("The file path '" + decrypted_filename + "' already exists. This file was not decrypted.")
                            skip_file = True

                # begin decryption
                if not skip_file:
                    if use_standard_output:
                        # --quiet suppresses gpg's own messages; only file data goes to the stdout stream
                        system_command = "gpg --batch --quiet --passphrase " + quote(passphrase) + " -d " + quote(encrypted_file)
                        successful_execution = execute(system_command)  # execute() pushes directly to stdout

                        if not successful_execution:
                            stderr("Unable to decrypt file '" + encrypted_file + "'", 0)
                            if created_tmp_files:  # restore the moved tmp file to original on failure
                                tmp_filename = decrypted_filename + '.tmp'
                                if file_exists(tmp_filename):
                                    os.rename(tmp_filename, decrypted_filename)
                        else:
                            pass  # success: stay silent so only decrypted data reaches stdout
                    else:
                        system_command = "gpg --batch -o " + quote(decrypted_filename) + " --passphrase " + quote(passphrase) + " -d " + quote(encrypted_file)
                        response = muterun(system_command)

                        if response.exitcode == 0:
                            stdout("'" + encrypted_file + "' decrypted to '" + decrypted_filename + "'")
                        else:  # failed decryption
                            if created_tmp_files:  # restore the moved tmp file to original
                                tmp_filename = decrypted_filename + '.tmp'
                                if file_exists(tmp_filename):
                                    os.rename(tmp_filename, decrypted_filename)
                            # report the error
                            stderr(response.stderr)
                            stderr("Decryption failed for " + encrypted_file)

                    # cleanup: remove the tmp file
                    if created_tmp_files:
                        tmp_filename = decrypted_filename + '.tmp'
                        if file_exists(tmp_filename):
                            os.remove(tmp_filename)

                    # untar/extract any detected archive file(s)
                    # NOTE(review): with --stdout no local file is written, so if the computed
                    # name ends in '.tar', tarfile.is_tarfile will raise on the missing path --
                    # confirm intended behavior before hardening.
                    if untar_archives is True:
                        if decrypted_filename.endswith('.tar') and tarfile.is_tarfile(decrypted_filename):
                            untar_path_tuple = os.path.split(decrypted_filename)
                            untar_path = untar_path_tuple[0]

                            if use_file_overwrite:
                                # NOTE(review): extractall on an untrusted archive permits path
                                # traversal (CVE-2007-4559); consider tarfile extraction filters.
                                with tarfile.open(decrypted_filename) as tar:
                                    if len(untar_path) > 0:
                                        tar.extractall(path=untar_path)  # use dir path from decrypted_filename if not CWD
                                        stdout("'" + decrypted_filename + "' unpacked in the directory path '" + untar_path + "'")
                                    else:
                                        tar.extractall()  # else use CWD
                                        stdout("'" + decrypted_filename + "' unpacked in the current working directory")
                            else:
                                # member-by-member extraction so existing files are never clobbered
                                with tarfile.TarFile(decrypted_filename, 'r', errorlevel=1) as tar:
                                    for tarinfo in tar:
                                        t_file = tarinfo.name
                                        if len(untar_path) > 0:
                                            t_file_path = os.path.join(untar_path, t_file)
                                        else:
                                            t_file_path = t_file
                                        if not os.path.exists(t_file_path):
                                            try:
                                                if len(untar_path) > 0:
                                                    tar.extract(t_file, path=untar_path)  # write to the appropriate dir
                                                else:
                                                    tar.extract(t_file)  # write to CWD
                                            except IOError as e:
                                                stderr("Failed to unpack the file '" + t_file_path + "' [" + str(e) + "]")
                                        elif is_dir(t_file_path):
                                            pass  # existing directory: no need to warn
                                        else:  # it is a file and it already exists, provide user error message
                                            stderr("Failed to unpack the file '" + t_file_path + "'. File already exists. Use the --overwrite flag to replace existing files.")

                            # remove the decrypted tar archive file
                            os.remove(decrypted_filename)

            # overwrite the entered passphrases after file decryption is complete for all files
            passphrase = ""
            passphrase_confirm = ""
            # add a short pause to hinder brute force pexpect style password attacks
            sleep(0.2)  # 200ms pause
        else:  # passphrases did not match
            passphrase = ""
            passphrase_confirm = ""
            stderr("The passphrases did not match. Please enter your command again.")
            sys.exit(1)

    elif c.argc == 1:
        # simple single file or directory processing with default settings
        path = c.arg0
        if file_exists(path):  # SINGLE FILE
            check_existing_file = False  # check for a file with the new decrypted filename in the directory
            if path.endswith('.crypt'):
                decrypted_filename = path[0:-6]  # remove the .crypt suffix
                check_existing_file = True
            elif path.endswith('.gpg') or path.endswith('.pgp') or path.endswith('.asc'):
                decrypted_filename = path[0:-4]
                check_existing_file = True
            else:
                # no standard encrypted file type: add a .decrypt suffix to the decrypted file name
                decrypted_filename = path + ".decrypt"
                stdout("Could not confirm that the requested file is encrypted based upon the file type. Attempting decryption. Keep your fingers crossed...")

            # confirm that the decrypted path does not already exist; if so abort with warning
            if check_existing_file is True:
                if file_exists(decrypted_filename):
                    stderr("Your file will be decrypted to '" + decrypted_filename + "' and this file path already exists. Please move the file or use the --overwrite option with your command if you intend to replace the current file.")
                    sys.exit(1)

            # get passphrase used to symmetrically decrypt the file
            passphrase = getpass.getpass("Please enter your passphrase: ")
            if len(passphrase) == 0:  # confirm that user entered a passphrase
                stderr("You did not enter a passphrase. Please repeat your command and try again.")
                sys.exit(1)
            passphrase_confirm = getpass.getpass("Please enter your passphrase again: ")

            # confirm that the passphrases match
            if passphrase == passphrase_confirm:
                system_command = "gpg --batch -o " + quote(decrypted_filename) + " --passphrase " + quote(passphrase) + " -d " + quote(path)
                response = muterun(system_command)

                if response.exitcode == 0:
                    # unpack tar archive generated from the decryption, if present
                    if decrypted_filename.endswith('.tar') and tarfile.is_tarfile(decrypted_filename):
                        untar_path_tuple = os.path.split(decrypted_filename)
                        untar_path = untar_path_tuple[0]

                        with tarfile.TarFile(decrypted_filename, 'r', errorlevel=1) as tar:
                            for tarinfo in tar:
                                t_file = tarinfo.name
                                if len(untar_path) > 0:
                                    t_file_path = os.path.join(untar_path, t_file)
                                else:
                                    t_file_path = t_file
                                if not os.path.exists(t_file_path):
                                    try:
                                        if len(untar_path) > 0:
                                            tar.extract(t_file, path=untar_path)  # write to the appropriate dir
                                        else:
                                            tar.extract(t_file)  # write to CWD
                                    except IOError as e:
                                        stderr("Failed to unpack the file '" + t_file_path + "' [" + str(e) + "]")
                                elif is_dir(t_file_path):
                                    pass  # existing directory: no need to warn
                                else:  # it is a file and it already exists, provide user error message
                                    stderr("Failed to unpack the file '" + t_file_path + "'. File already exists. Use the --overwrite flag to replace existing files.")

                        # remove the decrypted tar archive
                        os.remove(decrypted_filename)

                    stdout("Decryption complete")
                    # overwrite user entered passphrases
                    passphrase = ""
                    passphrase_confirm = ""
                    sys.exit(0)
                else:
                    stderr(response.stderr)
                    stderr("Decryption failed")
                    # overwrite user entered passphrases
                    passphrase = ""
                    passphrase_confirm = ""
                    # add a short pause to hinder brute force pexpect style password attacks
                    sleep(0.2)  # 200ms pause
                    sys.exit(1)
            else:
                stderr("The passphrases did not match. Please enter your command again.")
                sys.exit(1)

        elif dir_exists(path):  # SINGLE DIRECTORY
            dirty_directory_file_list = list_all_files(path)
            # keep only paths with a recognized encrypted file suffix
            directory_file_list = [
                x for x in dirty_directory_file_list
                if (x.endswith('.crypt') or x.endswith('.gpg') or x.endswith('.pgp') or x.endswith('.asc'))
            ]

            # if there are no encrypted files found, warn and abort
            if len(directory_file_list) == 0:
                stderr("There are no encrypted files in the directory")
                sys.exit(1)

            # prompt for the passphrase
            passphrase = getpass.getpass("Please enter your passphrase: ")
            if len(passphrase) == 0:  # confirm that user entered a passphrase
                stderr("You did not enter a passphrase. Please repeat your command and try again.")
                sys.exit(1)
            passphrase_confirm = getpass.getpass("Please enter your passphrase again: ")

            if passphrase == passphrase_confirm:
                # decrypt all of the encrypted files in the directory
                for filepath in directory_file_list:
                    # combine the directory path and file name into absolute path
                    absolute_filepath = make_path(path, filepath)
                    # remove file suffix from the decrypted file path that writes to disk
                    if absolute_filepath.endswith('.crypt'):
                        decrypted_filepath = absolute_filepath[0:-6]  # remove the .crypt suffix
                    elif absolute_filepath.endswith('.gpg') or absolute_filepath.endswith('.pgp') or absolute_filepath.endswith('.asc'):
                        decrypted_filepath = absolute_filepath[0:-4]
                    # confirm that the file does not already exist
                    if file_exists(decrypted_filepath):
                        stdout("The file path '" + decrypted_filepath + "' already exists. This file was not decrypted.")
                    else:
                        system_command = "gpg --batch -o " + quote(decrypted_filepath) + " --passphrase " + quote(passphrase) + " -d " + quote(absolute_filepath)
                        response = muterun(system_command)

                        if response.exitcode == 0:
                            stdout("'" + absolute_filepath + "' decrypted to '" + decrypted_filepath + "'")
                        else:
                            stderr(response.stderr)
                            stderr("Decryption failed for " + absolute_filepath)

                # overwrite user entered passphrases
                passphrase = ""
                passphrase_confirm = ""
                # add a short pause to hinder brute force pexpect style password attacks
                sleep(0.2)  # 200ms pause
            else:
                # overwrite user entered passphrases
                passphrase = ""
                passphrase_confirm = ""
                stderr("The passphrases did not match. Please enter your command again.")
                sys.exit(1)
        else:
            # error message, not a file or directory. user entry error
            stderr("The path that you entered does not appear to be an existing file or directory. Please try again.")
            sys.exit(1)

    # ------------------------------------------------------------------------------------------
    # [ DEFAULT MESSAGE FOR MATCH FAILURE ]
    #   Message to provide when all above conditional logic fails to meet a true condition
    # ------------------------------------------------------------------------------------------
    else:
        print("Could not complete your request. Please try again.")
        sys.exit(1)
def main():
    """Command line entry point for the archive-unpacking executable.

    For each path argument, detects the archive type by file extension
    (``.zip``, ``.tar.gz``/``.tgz``/``.tar.gzip``, ``.tar.bz2``/``.tar.bzip2``)
    and extracts it into the current working directory, reporting success or
    failure per archive.  Messages use ANSI color markers except on Windows.

    Exits with status 1 when no arguments were matched; otherwise returns
    after processing all arguments.
    """
    import sys
    from Naked.commandline import Command

    # NOTE(review): platform, zipfile, tarfile, stdout, stderr are not imported
    # in this function -- presumably module-level imports elsewhere in this
    # file; confirm before relocating this function.
    user_platform = platform.system()

    # ------------------------------------------------------------------------------------------
    # [ Instantiate Naked framework command line object ]
    #   used for all subsequent conditional logic in the CLI application
    # ------------------------------------------------------------------------------------------
    c = Command(sys.argv[0], sys.argv[1:])

    if not c.command_suite_validates():
        from jampack.settings import usage as jampack_usage
        print(jampack_usage)
        sys.exit(1)

    if c.help():  # User requested jampack help information
        from jampack.settings import help as jampack_help
        print(jampack_help)
        sys.exit(0)
    elif c.usage():  # User requested jampack usage information
        from jampack.settings import usage as jampack_usage
        print(jampack_usage)
        sys.exit(0)
    elif c.version():  # User requested jampack version information
        from jampack.settings import app_name, major_version, minor_version, patch_version
        # fix: reconstructed as a single statement (the source had a dangling
        # "+ patch_version" continuation that was a SyntaxError as written)
        version_display_string = app_name + ' ' + major_version + '.' + minor_version + '.' + patch_version
        print(version_display_string)
        sys.exit(0)

    if c.argc > 0:  # if there is an argument to the executable
        try:
            for archive_name in c.argv:
                lowercase_archive_name = archive_name.lower()

                if lowercase_archive_name.endswith('.zip'):
                    if zipfile.is_zipfile(archive_name):
                        # fix: context manager closes the archive handle
                        # (original leaked an open ZipFile per archive).
                        # NOTE(review): extractall on untrusted archives permits
                        # path traversal -- consider validating member names.
                        with zipfile.ZipFile(archive_name, mode="r") as zipper:
                            zipper.extractall()
                        if user_platform == "Windows":
                            stdout("'" + archive_name + "' was unpacked.")
                        else:
                            stdout("[\033[32m✓\033[0m] '" + archive_name + "' was unpacked.")
                    else:
                        if user_platform == "Windows":
                            stderr("'" + archive_name + "' does not appear to be a zip file")
                        else:
                            stderr("[\033[31m!\033[0m] '" + archive_name + "' does not appear to be a zip file")

                elif lowercase_archive_name.endswith('.tar.gz') or lowercase_archive_name.endswith('.tgz') or lowercase_archive_name.endswith('.tar.gzip'):
                    if tarfile.is_tarfile(archive_name):
                        # fix: context manager closes the tarball handle
                        with tarfile.open(archive_name, mode="r:gz") as tarball:
                            tarball.extractall()
                        if user_platform == "Windows":
                            stdout("'" + archive_name + "' was unpacked.")
                        else:
                            stdout("[\033[32m✓\033[0m] '" + archive_name + "' was unpacked.")
                    else:
                        if user_platform == "Windows":
                            stderr("'" + archive_name + "' does not appear to be a tar archive")
                        else:
                            stderr("[\033[31m!\033[0m] '" + archive_name + "' does not appear to be a tar archive")

                elif lowercase_archive_name.endswith('.tar.bz2') or lowercase_archive_name.endswith('.tar.bzip2'):
                    if tarfile.is_tarfile(archive_name):
                        # fix: context manager closes the tarball handle
                        with tarfile.open(archive_name, mode="r:bz2") as bzball:
                            bzball.extractall()
                        if user_platform == "Windows":
                            stdout("'" + archive_name + "' was unpacked.")
                        else:
                            stdout("[\033[32m✓\033[0m] '" + archive_name + "' was unpacked.")
                    else:
                        if user_platform == "Windows":
                            stderr("'" + archive_name + "' does not appear to be a tar archive")
                        else:
                            stderr("[\033[31m!\033[0m] '" + archive_name + "' does not appear to be a tar archive")

                else:
                    # unrecognized extension: report, do not attempt extraction
                    if user_platform == "Windows":
                        stderr("Unable to identify the archive type for '" + archive_name + "'. This archive was not unpacked. Please check the file extension and try again.")
                    else:
                        stderr("[\033[31m!\033[0m] Unable to identify the archive type for '" + archive_name + "'. This archive was not unpacked. Please check the file extension and try again.")
        except Exception as e:
            # best-effort batch: report the failing archive and fall through
            if user_platform == "Windows":
                stderr("Unable to unpack the archive '" + archive_name + "'. Error: " + str(e))
            else:
                stderr("[\033[31m!\033[0m] Unable to unpack the archive '" + archive_name + "'. Error: " + str(e))

    # ------------------------------------------------------------------------------------------
    # [ DEFAULT MESSAGE FOR MATCH FAILURE ]
    #   Message to provide when all above conditional logic fails to meet a true condition
    # ------------------------------------------------------------------------------------------
    else:
        if user_platform == "Windows":
            print("Could not complete the command that you entered. Please try again.")
        else:
            print("[\033[31mX\033[0m] Could not complete the command that you entered. Please try again.")
        sys.exit(1)  # exit