def format_output(topics, info):
    """Aggregate per-host disk usage and return the report as JSON.

    `info` is a JSON object mapping "host_name : host_ip" keys to
    per-host usage samples (averaged via the module-level `average`).
    Builds a `common.Output` report, fills in each topic's absolute
    utilization size from its percentage, writes the CSV side file,
    and returns the report serialized as JSON.

    Relies on module globals: `agg_level`, `note`.
    """
    # FIX: the parsed JSON was previously bound to an unused name (`rm`),
    # leaving `host_data` undefined in the loop below.
    host_data = json.loads(info)
    host_names = ""
    host_ips = ""
    total_dus = 0
    for host in host_data:
        total_dus += average(host_data[host])
        # Keys look like "name : ip" — TODO confirm against producer.
        keys = host.split(" : ")
        host_names += keys[0] + " "
        host_ips += keys[1] + " "
    host_name = host_names
    host_ip = host_ips
    total_size = total_dus
    if agg_level == 0:
        # convert Bytes to GB
        # NOTE(review): the conversion is commented out, so this branch
        # currently leaves the size in bytes — confirm intent.
        #total_size = total_dus/(1024*1024*1024)
        total_size = total_dus
    inf = common.Output(host_name, host_ip, common.get_current_time(),
                        total_size, topics, note)
    for topic in inf.topics:
        # Absolute size for each topic from its percentage of the total.
        t_us = topic.percent_utilization * inf.total_disk_utilized * 1.0
        topic.utilization_size = round(t_us / 100.0, 2)
    create_csv_file(inf)
    return json.dumps(inf, default=lambda o: o.__dict__)
def complete_sources(self):
    """Write the collected iconv charset data as a Python module.

    Emits `iconv_data`, a list of groups: each group is a tuple whose
    first element is a comment string followed by one tuple per charset
    (charset name first, then its aliases).  No-op unless source
    generation was requested.
    """
    if not self.do_sources:
        return
    write = common.Output(self.SOURCES, 'Python').write
    write('\n'
          "# This is derived from Bruno Haible's `libiconv' package.\n"
          '\n'
          'iconv_data = [\n')
    # `commented` tracks whether a comment group is currently open so
    # its closing parenthesis can be emitted before starting the next.
    commented = False
    for comment, charset, aliases in self.data:
        # Entries appearing before the first explicit comment fall into
        # a default group.
        if not commented and not comment:
            comment = 'General character sets'
        if comment:
            if commented:
                write(' ),\n')
            write('\n'
                  ' (%r,\n'
                  '\n' % comment)
            commented = True
        if aliases:
            # Charset with aliases: one tuple, charset name first.
            write(' (%r' % charset)
            for alias in aliases:
                write(',\n %r' % alias)
            write('),\n')
        else:
            # Lone charset: a one-element tuple.
            write(' (%r,),\n' % charset)
    # Close the final open group, if any.
    if commented:
        write(' ),\n')
    write(']\n')
def main(*arguments):
    """Generate `inc-iconv.txt`, a ReST listing of libiconv charsets
    grouped by comment, with :tindex entries and alias sentences.
    """
    import re
    assert not arguments, arguments
    margin = ' '
    write = common.Output('inc-iconv.txt', 'ReST', margin=margin).write

    def tindexed(name):
        # Rewrite a trailing `:NN` qualifier as `(NN)` for :tindex lines.
        return re.sub(':([0-9]+)', r'(\1)', name)

    for data in libiconv.iconv_data:
        comment = data[0]
        write('\n' '%s+ *%s*\n' % (margin, comment))
        for group in data[1:]:
            charset = group[0]
            aliases = group[1:]
            write('\n' '%s :charset:`%s`\n' % (margin, charset))
            if not aliases:
                write('\n' '%s .. :tindex %s\n' % (margin, tindexed(charset)))
                continue
            write('\n' '%s .. :tindex %s, aliases\n'
                  % (margin, tindexed(charset)))
            for alias in aliases:
                write('%s .. :tindex %s\n' % (margin, tindexed(alias)))
            write('\n')
            if len(aliases) == 1:
                write('%s :charset:`%s` is an alias for this charset.\n'
                      % (margin, aliases[0]))
            else:
                write('%s :charset:`%s` and :charset:`%s` are aliases'
                      ' for this charset.\n'
                      % (margin,
                         '`, :charset:`'.join(aliases[:-1]),
                         aliases[-1]))
def main(self, *arguments):
    """Drive generation of `builtin.py` from the studied Python codecs."""
    assert not arguments, arguments
    self.study_python_modules()
    emit = common.Output('builtin.py', 'Python').write
    emit('\n'
         'import recode\n')
    self.write_aliases(emit)
    self.write_methods(emit)
def complete_rest(self):
    """Write the ReST documentation section for RFC 1345 charsets.

    For each charset collected in `self.remark_map`: a :charset: line,
    :tindex directives, an alias sentence when aliases are known, and
    the remark lines gathered from the RFC text.  The section heading
    is localised when `run.french_mode` is set.
    """
    margin = ' '
    if run.french_mode:
        write = common.Output('fr-%s' % self.REST, 'ReST',
                              margin=margin).write
        write('\n' '%s+ *Charsets provenant de RFC 1345*\n' % margin)
    else:
        write = common.Output(self.REST, 'ReST', margin=margin).write
        write('\n' '%s+ *Charsets described within RFC 1345*\n' % margin)
    # Python 2 idiom: keys() returns a list, sorted in place.
    charsets = self.remark_map.keys()
    charsets.sort()
    for charset in charsets:
        write('\n' '%s:charset:`%s`\n' % (margin, charset))
        write('\n' '%s .. :tindex %s, aliases and source\n'
              % (margin, re.sub(':([0-9]+)', r'(\1)', charset)))
        aliases = self.aliases_map[charset]
        if aliases:
            if len(aliases) == 1:
                if aliases[0]:  # FIXME: why is this sometimes empty?
                    write('%s .. :tindex %s\n'
                          % (margin,
                             re.sub(':([0-9]+)', r'(\1)', aliases[0])))
                    write('\n'
                          '%s :charset:`%s` is an alias'
                          ' for this charset.\n' % (margin, aliases[0]))
            else:
                for alias in aliases:
                    write('%s .. :tindex %s\n'
                          % (margin, re.sub(':([0-9]+)', r'(\1)', alias)))
                write('\n'
                      '%s :charset:`%s` and :charset:`%s` are aliases'
                      ' for this charset.\n'
                      % (margin,
                         '`, :charset:`'.join(aliases[:-1]),
                         aliases[-1]))
        else:
            write('\n')
        # Emit the collected remarks, capitalised and full-stopped.
        for line in self.remark_map[charset]:
            if line[0].islower():
                line = line[0].upper() + line[1:]
            write('%s %s' % (margin, line))
            if line[-1] != '.':
                write('.')
            write('\n')
def main(self, *arguments):
    """Collect codec declarations from the named Recode.* modules and
    write them out as `preset.py` (alias clusters, implied surfaces
    and the recode method table).

    `arguments` are module names under the `Recode` package; `-v`
    turns on verbose mode.
    """
    # Decode options.
    import getopt
    options, arguments = getopt.getopt(arguments, 'v')
    for option, value in options:
        if option == '-v':
            self.verbose = True
    # Import all modules.
    modules = [getattr(__import__('Recode.' + module_name), module_name)
               for module_name in arguments]
    # Register aliases into clusters.
    self.clusters = {}
    self.handle_declare(recode.UNICODE_STRING)
    self.handle_declare((recode.TRIVIAL_SURFACE, 'Data'))
    for module in modules:
        try:
            declares = module.declares
        except AttributeError:
            # A module without `declares` is reported but not fatal.
            sys.stderr.write("No `declares' in `%s'\n" % module.__file__)
        else:
            for declare in declares:
                self.handle_declare(declare)
    # Register implied surfaces.
    self.implied = {}
    for module in modules:
        if hasattr(module, 'implied_surfaces'):
            for alias, surface in module.implied_surfaces:
                self.implied[recode.cleaned_alias(alias)] = (
                    recode.cleaned_alias(surface))
    # Register recode methods.  A codec is any module attribute with
    # both `internal_coding` and `external_coding`.
    self.methods = {}
    for module, module_name in zip(modules, arguments):
        for name in dir(module):
            codec = getattr(module, name)
            if (hasattr(codec, 'internal_coding')
                    and hasattr(codec, 'external_coding')):
                self.handle_codec(module_name, name, codec)
    # Write out the Python source.
    write = common.Output('preset.py', 'Python').write
    write('\n' 'aliases = {\n')
    # Python 2 idiom: items() returns a list, sorted in place.
    items = self.clusters.items()
    items.sort()
    for alias, cluster in items:
        write(' %r: (%r, %r),\n'
              % (alias, cluster[0], self.implied.get(alias)))
    write(' }\n' '\n' 'methods = {\n')
    items = self.methods.items()
    items.sort()
    for (before, after), (module_name, codec_name, use_encode) in items:
        write(' (%r, %r): (%r, %r, %r),\n'
              % (before, after, module_name, codec_name, use_encode))
    write(' }\n')
def main(*arguments):
    """Write `inc-stamp.txt` with package name, version and the date of
    the most recently modified file among *arguments* (falls back to
    the current time when no files are given).
    """
    if arguments:
        mtime = max([os.path.getmtime(name) for name in arguments])
    else:
        # time.localtime(None) uses the current time.
        mtime = None
    stamp = time.strftime('%Y-%m-%d', time.localtime(mtime))
    common.Output('inc-stamp.txt', 'ReST').write(
        '\n'
        '.. |package| replace:: %s\n'
        '.. |version| replace:: %s\n'
        '.. |date| replace:: %s\n'
        % (version.package, version.version, stamp))
def format_output():
    """Build the per-topic disk-utilisation report and write it out as
    `<output_directory>/<generated-name>.<host>.json`.

    Relies on module globals: `folder_info`, `total_disk_utilized`,
    `note`, `output_directory`.
    """
    logging.info("Total Disk Utilized="+str(total_disk_utilized))
    topics = []
    for topic_name in folder_info:
        used = folder_info[topic_name]
        # Share of the total, in percent.
        share = (used*100.0)/(total_disk_utilized*1.0)
        logging.info("Topic Name="+topic_name+" ,Utilization="+
                     str(used)+" ,Percentage="+str(share))
        topics.append(common.Topic(topic_name, share, used))
    host_name, host_ip = common.get_Host_name_IP()
    report = common.Output(host_name, host_ip, common.get_current_time(),
                           total_disk_utilized, topics, note)
    serialized = json.dumps(report, default=lambda o: o.__dict__)
    target = (output_directory + "/" + common.get_file_name()
              + "." + host_name + ".json")
    common.create_output_file(target, serialized)
def complete_sources(self):
    """Dump the mnemonic table as a Python module: the forward mapping
    (code point -> mnemonic) and its inverse, both sorted by key.
    """
    write = common.Output(self.SOURCES, 'Python').write
    write('\n' 'max_mnemonic_length = %d\n' % self.MAX_MNEMONIC_LENGTH)
    write('\n' 'table = {\n')
    for code, mnemonic in sorted(self.mnemonic_map.items()):
        write(' 0x%04X: %r,\n' % (code, mnemonic))
    write(' }\n')
    write('\n' 'inverse = {\n')
    inverted = [(mnemonic, code)
                for code, mnemonic in self.mnemonic_map.items()]
    for mnemonic, code in sorted(inverted):
        write(' %r: 0x%04X,\n' % (mnemonic, code))
    write(' }\n')
def main(*arguments):
    """Translate the RFC 1345 mnemonic table into C source (`rfc1345.h`):
    a code-point-sorted `table[]` of entries, and a mnemonic-sorted
    `inverse[]` of indices back into `table[]`.
    """
    assert not arguments, arguments
    write = common.Output('rfc1345.h', 'C').write
    inverse_map = {}
    write('\n')
    write('#define TABLE_LENGTH %d\n' % len(rfc1345.table))
    write('#define MAX_MNEMONIC_LENGTH %d\n' % rfc1345.max_mnemonic_length)
    write('\n'
          'struct entry\n'
          ' {\n'
          ' recode_ucs2 code;\n'
          ' const char *rfc1345;\n'
          ' };\n'
          '\n'
          'static const struct entry table[TABLE_LENGTH] =\n'
          ' {\n')
    # Python 2 idiom: items() returns a list, sorted in place.
    items = rfc1345.table.items()
    items.sort()
    count = 0
    import re
    for unicode, mnemonic in items:
        # Escape double quotes so the mnemonic is a valid C string literal.
        write(' /* %4d */ {0x%04X, "%s"},\n'
              % (count, unicode, re.sub(r'([\"])', r'\\\1', mnemonic)))
        # Remember each mnemonic's position in table[].
        inverse_map[mnemonic] = count
        count += 1
    write(' };\n'
          '\n'
          'static const unsigned short inverse[TABLE_LENGTH] =\n'
          ' {')
    count = 0
    items = inverse_map.items()
    items.sort()
    # NOTE: `unicode` here is actually the table[] index stored above.
    for mnemonic, unicode in items:
        # Ten comma-separated values per output line.
        if count % 10 == 0:
            if count != 0:
                write(',')
            write('\n /* %4d */ ' % count)
        else:
            write(', ')
        write('%4d' % unicode)
        count += 1
    write('\n'
          ' };\n')
def main(self, *arguments):
    """Merge all charset strip tables into one shared UCS-2 pool and
    emit `strip.c`: the pool array, one `strip_data` structure per
    charset, and the module initialisation function that registers
    each charset with its aliases and implied surfaces.
    """
    assert not arguments, arguments
    # Rewrite strip data, merging common strips as we go.
    self.strips = []
    self.strip_index = {}
    # Strip 0 is all 0xFFFF — presumably the "no mapping" sentinel;
    # TODO confirm against add_strip's callers.
    self.add_strip(u'\uFFFF' * recode.STRIP_SIZE)
    strip_data = []
    for charset, data, indices in common.all_strip_data():
        strip_data.append(
            (recode.cleaned_alias(charset), charset,
             [self.add_strip(data[index:index+recode.STRIP_SIZE])
              for index in indices]))
    # Write the strip pool.
    write = common.Output('strip.c', 'C').write
    write('\n'
          '#include \"common.h\"\n'
          '\n'
          'const recode_ucs2 ucs2_data_pool[%d] =\n'
          ' {' % (len(self.strips) * recode.STRIP_SIZE))
    count = 0
    for strip in self.strips:
        for character in strip:
            # Eight comma-separated values per output line.
            if count % 8 == 0:
                if count != 0:
                    write(',')
                write('\n /* %4d */ ' % count)
            else:
                write(', ')
            write('0x%0.4X' % ord(character))
            count += 1
    write('\n'
          ' };\n')
    # Write out all strip codecs.
    strip_data.sort()
    ordinal = 0
    for key, charset, indices in strip_data:
        write('\n'
              '/* %s */\n'
              '\n'
              'static struct strip_data data_%d =\n'
              ' {\n'
              ' ucs2_data_pool,\n'
              ' {\n' % (charset, ordinal))
        count = 0
        for indice in indices:
            # Twelve indices per output line.
            if count % 12 == 0:
                if count != 0:
                    write(',\n')
                write(' ')
            else:
                write(', ')
            write('%4d' % indice)
            count += 1
        write('\n'
              ' }\n'
              ' };\n')
        ordinal += 1
    # Print the collectable initialisation function.
    write('\n'
          'bool\n'
          'module_strips (struct recode_outer *outer)\n'
          '{\n'
          ' RECODE_ALIAS alias;\n')
    # Group registered aliases by the charsets emitted above.
    charsets = {}
    for key, charset, indices in strip_data:
        charsets[charset] = []
    for alias, (charset, surface) in recode.registry.aliases.iteritems():
        if charset in charsets:
            charsets[charset].append((alias, surface))
    ordinal = 0
    for key, charset, indices in strip_data:
        write('\n'
              ' if (!declare_strip_data (outer, &data_%d, "%s"))\n'
              ' return false;\n' % (ordinal, charset))
        for alias, surface in charsets[charset]:
            if surface is None:
                write(' if (!declare_alias (outer, "%s", "%s"))\n'
                      ' return false;\n' % (alias, charset))
            else:
                # Keep the alias handle so its implied surface can be
                # attached right after the declaration.
                write(' if (alias = declare_alias (outer, "%s", "%s"),'
                      ' !alias)\n'
                      ' return false;\n' % (alias, charset))
                write(' if (!declare_implied_surface (outer, alias,'
                      ' outer->%s_surface))\n'
                      ' return false;\n' % surface)
        ordinal += 1
    write('\n'
          ' return true;\n'
          '}\n')
def init_write(self):
    """Lazily open the strip-table output file and emit its header.

    Does nothing when source generation is disabled or the writer has
    already been initialised.
    """
    if not self.do_sources or self.write:
        return
    # Table fragments will be produced while reading data tables.
    self.write = common.Output(self.STRIP, 'Python').write
    self.write('\n'
               'import recode\n'
               '\n'
               'declares = [\n')
def complete(self):
    """Write the Unicode character-name tables as a Python module,
    compressing the vocabulary so each word costs one or two bytes.

    Emits `number_of_singles`, `max_charname_length`,
    `number_of_charnames`, the `word` list (index = code) and the
    `charname` dict of octal-escaped code strings.  No-op unless
    source generation was requested.
    """
    if not self.do_sources:
        return
    if run.french_mode:
        write = common.Output('fr_%s' % self.SOURCES, 'Python').write
    else:
        write = common.Output(self.SOURCES, 'Python').write
    # Establish a mild compression scheme. Words word[0:singles]
    # will be represented by a single byte running from 1 to
    # singles. All remaining words will be represented by two
    # bytes, the first one running slowly from singles+1 to 255,
    # the second cycling faster from 1 to 255.
    sys.stderr.write(' sorting words...')
    # Most frequent words first (code_map holds usage counts here;
    # it is overwritten with the assigned codes below).
    pairs = [(-self.code_map[word], word) for word in self.code_map]
    pairs.sort()
    words = [pair[1] for pair in pairs]
    pairs = None
    sys.stderr.write(' %d of them\n' % len(words))
    count = len(words)
    # How many words can take a one-byte code while the remainder
    # still fits in the two-byte space.
    singles = (255 * 255 - count) // 254
    # Transmit a few values for further usage by the code.
    sys.stderr.write(' sorting names...')
    # Python 2 idiom: keys() returns a list, sorted in place.
    unicode_table = self.charname_map.keys()
    unicode_table.sort()
    sys.stderr.write(' %d of them\n' % len(unicode_table))
    write('\n'
          'number_of_singles = %d\n'
          'max_charname_length = %d\n'
          'number_of_charnames = %d\n'
          % (singles, self.max_length, len(unicode_table)))
    # Establish a mild compression scheme (one or two bytes per word).
    sys.stderr.write(" writing words\n")
    write('\n' 'word = [\n')
    char1 = 1
    char2 = 1
    # One-byte codes for the `singles` most frequent words.
    for counter in range(singles):
        word = words[counter]
        write(' %-28s# \\%0.3o\n' % ('%r,' % word, char1))
        self.code_map[words[counter]] = char1
        char1 += 1
    # Two-byte codes for the rest; char1 continues from singles+1.
    for counter in range(singles, count):
        word = words[counter]
        write(' %-28s# \\%0.3o\\%0.3o\n' % ('%r,' % word, char1, char2))
        self.code_map[words[counter]] = 256 * char1 + char2
        if char2 == 255:
            char1 += 1
            char2 = 1
        else:
            char2 += 1
    write(' ]\n')
    sys.stderr.write(" writing names\n")
    write('\n' 'charname = {\n')
    for unicode in unicode_table:
        write(' 0x%04X: "' % unicode)
        # Encode each word of the character name as its octal-escaped
        # one- or two-byte code.
        for word in self.charname_map[unicode].split():
            if word in self.code_map:
                code = self.code_map[word]
                if code < 256:
                    write('\\%0.3o' % code)
                else:
                    write('\\%0.3o\\%0.3o' % divmod(code, 256))
            else:
                # A word with no assigned code should not happen.
                sys.stderr.write('??? %s\n' % word)
        write('",\n')
    write(' }\n')