def is_time_valid(time):
    """Argparse type: validate a 24-hour ``HH:MM:SS`` time string.

    Returns the validated string unchanged.

    Raises:
        ArgumentTypeError: if `time` is not exactly HH:MM:SS.
    """
    try:
        # `$` anchors the end of the string: the original pattern accepted
        # trailing garbage such as "23:59:5999" because re.match only
        # anchors the start.
        return match(r"([0-1][0-9]|[2][0-3]):[0-5][0-9]:[0-5][0-9]$", time).group(0)
    except (AttributeError, TypeError):
        # AttributeError: no match (None has no .group);
        # TypeError: non-string input. A bare `except:` would also have
        # swallowed KeyboardInterrupt/SystemExit.
        raise ArgumentTypeError("Invalid format for time provided")
def is_positive_integer(supposed_positive):
    """Argparse type: parse a strictly positive integer (> 0).

    Raises:
        ArgumentTypeError: if the value is not an integer or is <= 0.
    """
    try:
        value = int(supposed_positive)
    except (TypeError, ValueError):
        # Original let int() raise a raw ValueError; wrap it so argparse
        # (and callers) see a consistent error type and message.
        raise ArgumentTypeError(
            '{0} is not a positive integer'.format(supposed_positive))
    if value <= 0:
        # Also fixes the "an positive" typo in the original message.
        raise ArgumentTypeError('{0} is not a positive integer'.format(value))
    return value
def __call__(self, string):
    """Parse `string` and return `self.key_value_class()` instance.

    The best of `self.separators` is determined (first found, longest).
    Back slash escaped characters aren't considered as separators (or
    parts thereof). Literal back slash characters have to be escaped as
    well (r'\\').
    """
    class Escaped(str):
        """Represents an escaped character."""

    def tokenize(string):
        """Tokenize `string`.

        There are only two token types - strings and escaped characters:

            tokenize(r'foo\=bar\\baz')
            => ['foo', Escaped('='), 'bar', Escaped('\\'), 'baz']
        """
        tokens = ['']
        characters = iter(string)
        for char in characters:
            if char == '\\':
                # Consume the character after the backslash.  Escapes of
                # non-special characters keep the backslash literally;
                # special characters become standalone Escaped tokens.
                char = next(characters, '')
                if char not in self.special_characters:
                    tokens[-1] += '\\' + char
                else:
                    tokens.extend([Escaped(char), ''])
            else:
                tokens[-1] += char
        return tokens

    tokens = tokenize(string)

    # Sorting by length ensures that the longest one will be
    # chosen as it will overwrite any shorter ones starting
    # at the same position in the `found` dictionary.
    separators = sorted(self.separators, key=len)

    for i, token in enumerate(tokens):
        if isinstance(token, Escaped):
            # Escaped characters can never act as separators.
            continue
        # Map start-position -> separator for every separator present in
        # this token; later (longer) separators overwrite shorter ones at
        # the same position (see sort above).
        found = {}
        for sep in separators:
            pos = token.find(sep)
            if pos != -1:
                found[pos] = sep
        if found:
            # Starting first, longest separator found.
            sep = found[min(found.keys())]
            key, value = token.split(sep, 1)
            # Any preceding tokens are part of the key.
            key = ''.join(tokens[:i]) + key
            # Any following tokens are part of the value.
            value += ''.join(tokens[i + 1:])
            break
    else:
        # No separator found anywhere in the input.
        raise ArgumentTypeError(
            u'"%s" is not a valid value' % string)
    return self.key_value_class(
        key=key, value=value, sep=sep, orig=string)
def check_is_directory(value):
    """Argparse type: require `value` to name an existing directory."""
    is_dir = value is not None and os.path.isdir(value)
    if not is_dir:
        raise ArgumentTypeError("%s is not a directory!" % value)
    return value
def date_arg(s):
    """Parse an ISO ``YYYY-MM-DD`` date string into a ``datetime``."""
    try:
        parsed = datetime.strptime(s, "%Y-%m-%d")
    except ValueError as e:
        raise ArgumentTypeError(e) from e
    return parsed
def print_html_report(covdata: CovData, output_file, options):
    """Write the HTML coverage report for `covdata`.

    Renders the "root_page.html" template to `output_file` ("-" means
    STDOUT).  When ``options.html_details`` is set, additionally renders
    one "source_page.html" per source file plus a "functions_page.html",
    and returns True if any source file could not be read (False
    otherwise); without details it returns None.
    """
    css_data = CssRenderer.render(options)
    medium_threshold = options.html_medium_threshold
    high_threshold = options.html_high_threshold
    show_decision = options.show_decision
    # `data` is the template context shared by every rendered page.
    data = {}
    root_info = RootInfo(options)
    data["info"] = root_info
    data["SHOW_DECISION"] = show_decision
    data["COVERAGE_MED"] = medium_threshold
    data["COVERAGE_HIGH"] = high_threshold
    # Default self-containment: summary reports embed their CSS,
    # detailed reports write a sibling .css file.
    self_contained = options.html_self_contained
    if self_contained is None:
        self_contained = not options.html_details
    if output_file == "-":
        if not self_contained:
            raise ArgumentTypeError(
                "Only self contained reports can be printed to STDOUT"
            )
        elif options.html_details:
            raise ArgumentTypeError("Detailed reports can not be printed to STDOUT")
    # A trailing path separator means "directory": pick a default name.
    if output_file.endswith(os.sep):
        output_file += (
            "coverage_details.html" if options.html_details else "coverage.html"
        )

    formatter = get_formatter(options)
    css_data += formatter.get_css()

    if self_contained:
        # CSS is embedded in the HTML page itself.
        data["css"] = css_data
    else:
        # CSS goes to a sibling file referenced from the page.
        css_output = os.path.splitext(output_file)[0] + ".css"
        with open_text_for_writing(css_output) as f:
            f.write(css_data)

        if options.relative_anchors:
            css_link = os.path.basename(css_output)
        else:
            css_link = css_output
        data["css_link"] = css_link

    root_info.set_coverage(covdata)

    # Generate the coverage output (on a per-package basis)
    # source_dirs = set()
    files = []
    dirs = []
    filtered_fname = ""
    keys = sort_coverage(
        covdata,
        show_branch=False,
        by_num_uncovered=options.sort_uncovered,
        by_percent_uncovered=options.sort_percent,
    )
    # Per coverage key: the root-filtered display name, and (details mode
    # only) the per-source output HTML file name.
    cdata_fname = {}
    cdata_sourcefile = {}
    for f in keys:
        filtered_fname = options.root_filter.sub("", f)
        files.append(filtered_fname)
        dirs.append(os.path.dirname(filtered_fname) + os.sep)
        cdata_fname[f] = filtered_fname
        if options.html_details:
            cdata_sourcefile[f] = _make_short_sourcename(output_file, filtered_fname)
        else:
            cdata_sourcefile[f] = None

    # Define the common root directory, which may differ from options.root
    # when source files share a common prefix.
    root_directory = ""
    if len(files) > 1:
        commondir = commonpath(files)
        if commondir != "":
            root_directory = commondir
    else:
        # Single file: use its directory (filtered_fname still holds the
        # last — only — processed name).
        dir_, _file = os.path.split(filtered_fname)
        if dir_ != "":
            root_directory = dir_ + os.sep

    root_info.set_directory(root_directory)

    for f in keys:
        root_info.add_file(covdata[f], cdata_sourcefile[f], cdata_fname[f])

    if options.html_details:
        # The functions overview page lives next to the root page.
        (output_prefix, output_suffix) = os.path.splitext(os.path.abspath(output_file))
        if output_suffix == "":
            output_suffix = ".html"
        functions_fname = f"{output_prefix}.functions{output_suffix}"
        data["FUNCTIONS_FNAME"] = os.path.basename(functions_fname)

    html_string = templates().get_template("root_page.html").render(**data)
    with open_text_for_writing(
        output_file, encoding=options.html_encoding, errors="xmlcharrefreplace"
    ) as fh:
        fh.write(html_string + "\n")

    # Return, if no details are requested
    if not options.html_details:
        return

    #
    # Generate an HTML file for every source file
    #
    error_occurred = False
    all_functions = dict()
    for f in keys:
        cdata = covdata[f]

        data["filename"] = cdata_fname[f]

        # Only use demangled names (containing a brace)
        data["function_list"] = []
        for name in sorted(cdata.functions.keys()):
            fcdata = cdata.functions[name]
            fdata = dict()
            fdata["name"] = name
            fdata["filename"] = cdata_fname[f]
            fdata["html_filename"] = os.path.basename(cdata_sourcefile[f])
            fdata["line"] = fcdata.lineno
            fdata["count"] = fcdata.count
            data["function_list"].append(fdata)
            # Also collected globally for the functions overview page.
            all_functions[(fdata["name"], fdata["filename"])] = fdata

        def coverage_class(percent: Optional[float]) -> str:
            # Closure over the thresholds captured above.
            return coverage_to_class(percent, medium_threshold, high_threshold)

        data["functions"] = dict_from_stat(cdata.function_coverage(), coverage_class)
        data["branches"] = dict_from_stat(cdata.branch_coverage(), coverage_class)
        data["decisions"] = dict_from_stat(cdata.decision_coverage(), coverage_class)
        data["lines"] = dict_from_stat(cdata.line_coverage(), coverage_class)
        data["source_lines"] = []

        # Source files are opened relative to the project root.
        currdir = os.getcwd()
        os.chdir(options.root_dir)
        max_line_from_cdata = max(cdata.lines.keys(), default=0)
        try:
            with io.open(
                data["filename"],
                "r",
                encoding=options.source_encoding,
                errors="replace",
            ) as source_file:
                lines = formatter.highlighter_for_file(data["filename"])(
                    source_file.read()
                )
                for ctr, line in enumerate(lines, 1):
                    data["source_lines"].append(
                        source_row(ctr, line, cdata.lines.get(ctr))
                    )
                if ctr < max_line_from_cdata:
                    # NOTE(review): `ctr` is the last loop value; this
                    # raises NameError for an empty file — confirm callers
                    # guarantee at least one line.
                    logger.warning(
                        f"File {data['filename']} has {ctr} line(s) but coverage data has {max_line_from_cdata} line(s)."
                    )
        except IOError as e:
            # Source unavailable: emit placeholder rows so the page still
            # renders, and remember the failure for the return value.
            logger.warning(f'File {data["filename"]} not found: {repr(e)}')
            # NOTE(review): range(1, N) stops at N-1, so the last covered
            # line gets no placeholder row — possibly off by one; confirm.
            for ctr in range(1, max_line_from_cdata):
                data["source_lines"].append(
                    source_row(
                        ctr,
                        "!!! File not found !!!" if ctr == 1 else "",
                        cdata.lines.get(ctr),
                    )
                )
            error_occurred = True
        os.chdir(currdir)

        html_string = templates().get_template("source_page.html").render(**data)
        with open_text_for_writing(
            cdata_sourcefile[f],
            encoding=options.html_encoding,
            errors="xmlcharrefreplace",
        ) as fh:
            fh.write(html_string + "\n")

    data["all_functions"] = [all_functions[k] for k in sorted(all_functions)]
    html_string = templates().get_template("functions_page.html").render(**data)
    # `functions_fname` was assigned above; this point is only reached when
    # options.html_details is true.
    with open_text_for_writing(
        functions_fname, encoding=options.html_encoding, errors="xmlcharrefreplace"
    ) as fh:
        fh.write(html_string + "\n")

    return error_occurred
def is_valid_key(text_key):
    """Check if it's a valid familiar key (exactly 44 characters)."""
    if len(text_key) == 44:
        return text_key
    raise ArgumentTypeError("Invalid familiar key length. It must be 44.")
def restricted_float(x):
    """Argparse type: a float threshold restricted to [0.0, 1.0]."""
    value = float(x)
    if 0.0 <= value <= 1.0:
        return value
    # Reject nonsense values for the threshold.
    raise ArgumentTypeError("%r not in range [0.0, 1.0]" % (value, ))
def hash_type(string: str) -> str:
    """Argparse type: a transaction hash accepted by ``is_valid_hash``
    (which checks length, prefix, and lowercase)."""
    if is_valid_hash(string):
        return string
    raise ArgumentTypeError(f"Invalid transaction hash '{string}'")
def statut_type(s):
    """Argparse type: map a known status name to its value via STATUS_MAPPING.

    Raises:
        ArgumentTypeError: if `s` is not a key of STATUS_MAPPING.
    """
    if s not in STATUS_MAPPING:
        # Renamed the loop variable: the original genexp reused `s`,
        # shadowing the parameter inside the comprehension scope.
        tous_statuts = ", ".join(f"'{statut}'" for statut in STATUS_MAPPING)
        # Original message was missing the closing parenthesis.
        raise ArgumentTypeError(f"statut '{s}' inconnu (doit être un de {tous_statuts})")
    return STATUS_MAPPING[s]
def positive_int(x):
    """Argparse type: a non-negative integer (0 is accepted).

    Guards against nonsense negative parameter values.
    """
    value = int(x)
    if value >= 0:
        return value
    raise ArgumentTypeError("%r is not a positive int" % (value, ))
def check_positive_number_or_equal_to_negative_one(value):
    """Accept integers > 0 or exactly -1.

    Falsy input (None, "", 0) passes through unchanged; everything else
    is converted with int() and validated.
    """
    if not value:
        return value
    value = int(value)
    if value == 0 or value < -1:
        raise ArgumentTypeError("{} is not valid.".format(value))
    return value
def check_positive_number(value):
    """Argparse type: a strictly positive integer; falsy input passes through.

    Raises:
        ArgumentTypeError: if int(value) <= 0.
    """
    if value:
        value = int(value)
        if value <= 0:
            # Original raised the literal "{}" placeholder — .format was
            # never applied to the message.
            raise ArgumentTypeError("{} should be a positive number.".format(value))
    return value
def check_no_spaces(value):
    """Checks for spaces in a string; returns it unchanged when clean.

    Note: only the plain space character is checked (tabs/newlines pass),
    matching the original behavior.

    Raises:
        ArgumentTypeError: if `value` contains a space.
    """
    if ' ' in value:
        # Original raised the literal "{}" placeholder — .format was
        # never applied to the message.
        raise ArgumentTypeError("{} should not have whitespace(s).".format(value))
    return value
def _validate_format(self, value):
    """Return `value` when it is one of the supported export formats
    in ``self._formats``; raise ArgumentTypeError otherwise."""
    if value in self._formats:
        return value
    raise ArgumentTypeError(f'Incorrect format of export {value}')
def check_subnet(value):
    """Argparse type: require an ``x.x.x.x/21`` subnet string (the last
    three characters must be exactly '/21')."""
    suffix = value[-3:]
    if suffix == '/21':
        return value
    raise ArgumentTypeError(value + ' is not a valid subnet mask. Use x.x.x.x/21')
def directory(path):
    """Argparse type: ensure `path` is an existing directory."""
    if os.path.isdir(path):
        return path
    raise ArgumentTypeError("'{}' does not exist.".format(path))
def percentage(n):
    """Argparse type: an integer percentage strictly between 0 and 100.

    Raises:
        ArgumentTypeError: if the value is outside 1-99.
    """
    p = int(n)
    # The original `not p > 0 and p < 100` parsed as
    # `(not (p > 0)) and (p < 100)`, so it only rejected p <= 0 and let
    # values >= 100 through.  Use a chained comparison instead.
    if not 0 < p < 100:
        raise ArgumentTypeError("Percent param must be 1-99")
    return p
def valid_quarter(raw):
    """Parse a quarter number and ensure it falls within 1..4."""
    quarter = int(raw)
    if quarter < 1 or quarter > 4:
        raise ArgumentTypeError('Quarter should be 1-4')
    return quarter
def boolean(x):
    """Pass `x` through when it is a boolean; reject anything else.

    NOTE(review): the membership test uses ``==``, so 1/0 (and 1.0/0.0)
    compare equal to True/False and pass through unchanged — this quirk of
    the original is preserved on purpose.
    """
    if x not in (True, False):
        raise ArgumentTypeError("Argument must be a boolean")
    return x
def regexp_type(regexp):
    """Argparse type: validate that `regexp` compiles as a regular expression.

    Returns the original pattern string.

    Raises:
        ArgumentTypeError: if the pattern does not compile (chained from
        the underlying ``re.error`` so the cause is preserved).
    """
    try:
        re.compile(regexp)
    except re.error as e:
        raise ArgumentTypeError(e) from e
    # Keep the try body minimal: return outside the guarded region.
    return regexp
def main(args):
    """Convert a tiff folder or an hdf5 file into a chunked hdf5 file.

    Determines the input type from `args.input_fname`, builds the output
    file name, copies the data chunk-by-chunk along axis 0 via
    ``data_io.DataFile`` with a tqdm progress bar, and optionally deletes
    the input afterwards.  Interactive: prompts before writing/deleting.
    """
    data_io.show_header()

    # Understand input data format
    if os.path.isdir(args.input_fname):
        # A directory is treated as a tiff folder.
        tiff_input = True
        if args.dataset_name == "":
            args.dataset_name = "data"
    elif args.input_fname.split('.')[-1] in ("hdf5", "h5"):
        tiff_input = False
        if args.dataset_name == "":
            raise ArgumentTypeError("dataset-name required for hdf5")
    else:
        raise ArgumentTypeError("input file type not recognized. must be tiff folder or hdf5 file")

    input_fname = args.input_fname

    # set up output file name / path / chunks parameter
    if args.output_fpath == "":
        # Default: write next to the input.
        args.output_fpath = os.path.split(args.input_fname)[0]
    args.output_fname = args.output_fname.split('.')[0] + ".hdf5"
    output_fname = os.path.join(args.output_fpath, args.output_fname)

    # chunk_param is either a numeric size or an explicit shape tuple;
    # anything else means "let DataFile decide".
    if type(args.chunk_param) in (int, float):
        chunk_size = args.chunk_param/1e3  # convert to GB
        chunk_shape = None
    elif type(args.chunk_param) == tuple:
        chunk_shape = args.chunk_param
        chunk_size = None
    else:
        chunk_shape = None
        chunk_size = None
    chunked_slice_size = args.chunked_slice_size

    # print("Type chunk_param" + str(type(args.chunk_param)))
    # print("Type chunked_slice_size" + str(type(args.chunked_slice_size)))
    # print("Overwrite OK is %s"%args.overwrite_OK)
    # print("Stats only is %s"%args.stats_only)
    # print("Delete is %s"%args.delete)
    # sys.exit()

    # Define DataFile instances - quit here if stats_only requested
    r_dfile = data_io.DataFile(input_fname, tiff=tiff_input,
                               data_tag=args.dataset_name,
                               VERBOSITY=args.verbosity)
    print("Input data stats:")
    r_dfile.show_stats()
    if args.stats_only:
        sys.exit()

    w_shape = r_dfile.d_shape  # future implementation must allow resampling dataset
    w_dtype = r_dfile.d_type  # future implementation must allow changing dtype (with renormalization)
    w_dfile = data_io.DataFile(output_fname, tiff=False,
                               data_tag=args.dataset_name,
                               VERBOSITY=args.verbosity,
                               d_shape=w_shape, d_type=w_dtype,
                               chunk_shape=chunk_shape,
                               chunk_size=chunk_size,
                               chunked_slice_size=chunked_slice_size)
    print("\nChunking scheme estimated as: %s"%str(w_dfile.chunk_shape))
    input("\nHDF5 file will be saved to the following location.\n%s\nPress any key to continue."%output_fname)
    w_dfile.create_new(overwrite=args.overwrite_OK)

    t0 = time.time()
    slice_start = 0
    print("\n")
    pbar = tqdm(total=r_dfile.d_shape[0])
    # Copy along axis 0 in memory-bounded chunks.
    # NOTE(review): `mem_thres` is a module-level global defined elsewhere
    # in this file — confirm it is set before main() runs.
    while slice_start < r_dfile.d_shape[0]:
        dd, s = r_dfile.read_chunk(axis=0, slice_start=slice_start,
                                   max_GB=mem_thres,
                                   chunk_shape=w_dfile.chunk_shape)
        w_dfile.write_chunk(dd, axis=0, s=s)
        slice_start = s.stop
        pbar.update(s.stop - s.start)
    pbar.close()

    total_time = (time.time() - t0)/60.0  # minutes
    print("\nTotal time: %.2f minutes"%(total_time))

    if args.delete:
        # Prompt-gated cleanup of the original input.
        input("Delete old file? Press any key")
        if tiff_input:
            rmtree(input_fname)
        else:
            os.remove(input_fname)
def arg_type_directory(string):
    """Argparse type: the given path must be an existing directory."""
    if isdir(string):
        return string
    raise ArgumentTypeError(string + " is not directory")
def validateIP(ip):
    """Return `ip` when ``socket.inet_aton`` accepts it.

    Raises ArgumentTypeError (with the FR/S color codes) on a malformed
    address.
    """
    try:
        packed = socket.inet_aton(ip)
    except socket.error:
        raise ArgumentTypeError('{}[x] Invalid ip provided{}'.format(FR, S))
    # inet_aton returns the packed (truthy) address on success.
    if packed:
        return ip
def __valid_dir_path(file_path: str) -> str:
    '''Resolve `file_path` to an absolute path and verify it is an
    existing directory; return the absolute path.'''
    resolved = path.abspath(file_path)
    if path.isdir(resolved):
        return resolved
    raise ArgumentTypeError('{} does not exist.'.format(resolved))
def __init__(self, regex, path_context=None):
    """Create the filter option, rejecting empty filters up front.

    Raises ArgumentTypeError when `regex` is falsy; otherwise delegates
    to the parent constructor.
    """
    if not regex:
        raise ArgumentTypeError("filter cannot be empty")
    super(NonEmptyFilterOption, self).__init__(regex, path_context)
def upload_limit(x):
    """Argparse type: a non-negative integer upload limit (0 allowed)."""
    limit = int(x)
    if limit >= 0:
        return limit
    raise ArgumentTypeError("invalid value: {}, must be positive".format(limit))
def _validate_path(value):
    """Argparse type: `value` must be an existing filesystem path."""
    if exists(value):
        return value
    raise ArgumentTypeError(f'Incorrect path {value}')
def readable_file_arg(filename):
    """Argparse type: ensure `filename` can be opened for reading.

    Returns the filename unchanged.

    Raises:
        ArgumentTypeError: with the OS error text when the file cannot
        be opened.
    """
    try:
        # The original opened the file and never closed it — a file
        # descriptor leak.  Use a context manager so the probe handle is
        # always released.
        with open(filename, 'rb'):
            pass
    except IOError as ex:
        raise ArgumentTypeError('%s: %s' % (filename, ex.args[1])) from ex
    return filename
def is_date_valid(date):
    """Argparse type: validate a ``YYYY-M(M)-D(D)`` date-format string.

    This is a format check only (digit counts), not a calendar check.
    Returns the validated string.

    Raises:
        ArgumentTypeError: if `date` does not match the format.
    """
    try:
        # `$` anchors the end: the original pattern accepted trailing
        # garbage such as "2020-01-02xyz" because re.match only anchors
        # the start.
        return match(r"[0-9]{4}-[0-9]{1,2}-[0-9]{1,2}$", date).group(0)
    except (AttributeError, TypeError):
        # AttributeError: no match (None has no .group);
        # TypeError: non-string input.  Replaces the bare `except:`.
        raise ArgumentTypeError("Invalid format for date provided")