Example #1
def _render_tmpl_func(tmpl, func, **kwargs):
    # first, try to find the def named func in this template
    func = str(func)
    if tmpl.has_def(func):
        _t = tmpl.get_def(func)
    else:
        # otherwise, look for the def in the current template's parent
        from mako import util
        from mako.template import DefTemplate
        from mako.runtime import Context, _populate_self_namespace

        # create the context (covers the disable_unicode case)
        buf = util.StringIO()
        context = Context(buf, **kwargs)
        context._with_template = tmpl

        # put the current namespaces, e.g. 'self' and 'next', into the context
        func_render_body, context = _populate_self_namespace(context, tmpl)

        # find the current template's inherits
        self_ns = context['self']
        inherit_m = self_ns.inherits.module

        # look for the def in the inherits; compiled mako templates name defs render_xxx
        func_name = "render_%s" % func
        if hasattr(inherit_m, func_name):
            _t = DefTemplate(tmpl, getattr(inherit_m, func_name))
        else:
            _t = None

    if 'self' in kwargs:
        kwargs.pop('self')

    return _t.render(**kwargs) if _t else ''
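
A hypothetical call site for this helper (the inline template and argument names below are illustrative, not from the original project):

from mako.template import Template

tmpl = Template('<%def name="greet(name)">Hello, ${name}!</%def>')
print(_render_tmpl_func(tmpl, "greet", name="world"))  # prints "Hello, world!"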
Example #3
meta = [
    {
        'src': './plotly_streaming.pre.cpp',
        'destination': '../',
        'folder_prefix': 'plotly_streaming_'
    },
    {
        'src': './plotly_streaming.pre.h',
        'destination': '../',
        'folder_prefix': 'plotly_streaming_'
    },
    {
        'src': './simple.pre.ino',
        'destination': '../examples',
        'folder_prefix': ''
    },
]

for m in meta:
    for lib in libs:
        src = m['src']
        # Write file to a destination folder
        directory = m['destination'] + '/' + m['folder_prefix'] + lib.lower()
        if not os.path.exists(directory):
            os.makedirs(directory)
        fout = open(directory + '/' + src.replace('.pre', '_' + lib.lower()),
                    'w')

        mytemplate = Template(filename=src)
        ctx = Context(fout, lib=lib)
        mytemplate.render_context(ctx)
        fout.close()
"""from mako.template import Template
print Template("hello ${data}!").render(data="world")"""

"""from mako.template import Template

mytemplate = Template("hello ,${name}")
print mytemplate.render(name="Pavan")"""

from mako.template import Template
from mako.runtime import Context
from StringIO import StringIO

mytemplate = Template("hello, ${name}!")
buf = StringIO()
ctx = Context(buf, name="jack")
mytemplate.render_context(ctx)
print buf.getvalue()
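
These hello-world snippets target Python 2 (the StringIO module and the print statement). A sketch of the same buffered-render pattern under Python 3:

from io import StringIO

from mako.runtime import Context
from mako.template import Template

mytemplate = Template("hello, ${name}!")
buf = StringIO()
ctx = Context(buf, name="jack")
mytemplate.render_context(ctx)
print(buf.getvalue())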
Example #5
def main(argv: List[str]) -> None:
    got_input = False
    module_directory = None
    preprocessed_output = None
    dictionary = {}
    json_dict = {}
    got_output = False
    output_name = None
    got_preprocessed_input = False
    output_merged = None

    try:
        opts, args = getopt.getopt(argv, 'hM:m:o:t:P:')
    except getopt.GetoptError:
        out('Unknown option')
        showhelp()
        sys.exit(2)

    for opt, arg in opts:
        if opt == '-h':
            out('Displaying showhelp')
            showhelp()
            sys.exit()
        elif opt == '-o':
            if got_output:
                out('Got more than one output')
                showhelp()
                sys.exit(3)
            got_output = True
            output_name = arg
        elif opt == '-m':
            if module_directory is not None:
                out('Got more than one cache directory')
                showhelp()
                sys.exit(4)
            module_directory = arg
        elif opt == '-M':
            if output_merged is not None:
                out('Got more than one output merged path')
                showhelp()
                sys.exit(5)
            output_merged = arg
        elif opt == '-P':
            assert not got_preprocessed_input
            assert json_dict == {}
            with open(arg, 'rb') as dict_file:
                dictionary = pickle.load(dict_file)
            got_preprocessed_input = True

    cleared_dir = False
    for arg in args:
        got_input = True
        with open(arg) as f:
            srcs = list(yaml.load_all(f.read(), Loader=yaml.FullLoader))
        for src in srcs:
            if isinstance(src, str):
                assert len(srcs) == 1
                template = Template(src,
                                    filename=arg,
                                    module_directory=module_directory,
                                    lookup=TemplateLookup(directories=['.']))
                with open(output_name, 'w') as output_file:
                    render_template(template, Context(output_file,
                                                      **dictionary))
            else:
                # we have optional control data: this template represents
                # a directory
                if not cleared_dir:
                    if not os.path.exists(output_name):
                        pass
                    elif os.path.isfile(output_name):
                        os.unlink(output_name)
                    else:
                        shutil.rmtree(output_name, ignore_errors=True)
                    cleared_dir = True
                items = []
                if 'foreach' in src:
                    for el in dictionary[src['foreach']]:
                        if 'cond' in src:
                            args = dict(dictionary)
                            args['selected'] = el
                            if not eval(src['cond'], {}, args):
                                continue
                        items.append(el)
                    assert items
                else:
                    items = [None]
                for item in items:
                    args = dict(dictionary)
                    args['selected'] = item
                    item_output_name = os.path.join(
                        output_name,
                        Template(src['output_name']).render(**args))
                    if not os.path.exists(os.path.dirname(item_output_name)):
                        os.makedirs(os.path.dirname(item_output_name))
                    template = Template(
                        src['template'],
                        filename=arg,
                        module_directory=module_directory,
                        lookup=TemplateLookup(directories=['.']))
                    with open(item_output_name, 'w') as output_file:
                        render_template(template, Context(output_file, **args))

    if not got_input and not preprocessed_output:
        out('Got nothing to do')
        showhelp()
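
render_template is defined elsewhere in this tool and is not shown. A plausible minimal version, assuming it merely wraps render_context with Mako's error reporting:

from mako import exceptions

def render_template(template, context):
    # render, dumping Mako's annotated traceback on failure (assumed behavior)
    try:
        template.render_context(context)
    except Exception:
        print(exceptions.text_error_template().render())
        raise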
Example #6
 def serialize(self, resource):
     buf = StringIO()
     gresource = NepomukResource(resource)
     ctx = Context(buf, resource=gresource, mapper=self.mapper)
     self.tmpl.render_context(ctx)
     fileutil.appendStringToFile(buf.getvalue(), self.outputPath)
Example #7
 def context(self, new_context: dict):
     """ Replaces current context with new context and refreshes buffer """
     self._buffer = StringIO()
     self._context = Context(self._buffer, **new_context)
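
This setter is presumably paired with a property getter via @context.setter in the original class; a self-contained sketch of the full pattern (the class name and getter are assumptions):

from io import StringIO
from mako.runtime import Context

class RenderState:
    def __init__(self, initial: dict):
        self.context = initial  # routes through the setter below

    @property
    def context(self) -> Context:
        return self._context

    @context.setter
    def context(self, new_context: dict):
        """ Replaces current context with new context and refreshes buffer """
        self._buffer = StringIO()
        self._context = Context(self._buffer, **new_context)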
Example #8
#!/usr/bin/env python2

from mako.template import Template
from mako.runtime import Context
from StringIO import StringIO

mytemplate = Template("Hello , ${name}")
buf = StringIO()
ctx = Context(buf, name="atupal")
mytemplate.render_context(ctx)
print buf.getvalue()

Example #9
def main(argv):
    got_input = False
    module_directory = None
    preprocessed_output = None
    dictionary = {}
    json_dict = {}
    got_output = False
    plugins = []
    output_name = None
    got_preprocessed_input = False
    output_merged = None

    try:
        opts, args = getopt.getopt(argv, 'hM:m:d:o:p:t:P:w:')
    except getopt.GetoptError:
        out('Unknown option')
        showhelp()
        sys.exit(2)

    for opt, arg in opts:
        if opt == '-h':
            out('Displaying showhelp')
            showhelp()
            sys.exit()
        elif opt == '-o':
            if got_output:
                out('Got more than one output')
                showhelp()
                sys.exit(3)
            got_output = True
            output_name = arg
        elif opt == '-m':
            if module_directory is not None:
                out('Got more than one cache directory')
                showhelp()
                sys.exit(4)
            module_directory = arg
        elif opt == '-M':
            if output_merged is not None:
                out('Got more than one output merged path')
                showhelp()
                sys.exit(5)
            output_merged = arg
        elif opt == '-P':
            assert not got_preprocessed_input
            assert json_dict == {}
            sys.path.insert(
                0,
                os.path.abspath(
                    os.path.join(os.path.dirname(sys.argv[0]), 'plugins')))
            with open(arg, 'rb') as dict_file:
                dictionary = pickle.load(dict_file)
            got_preprocessed_input = True
        elif opt == '-d':
            assert not got_preprocessed_input
            with open(arg, 'r') as dict_file:
                bunch.merge_json(
                    json_dict,
                    yaml.load(dict_file.read(), Loader=yaml.SafeLoader))
        elif opt == '-p':
            plugins.append(import_plugin(arg))
        elif opt == '-w':
            preprocessed_output = arg

    if not got_preprocessed_input:
        for plugin in plugins:
            plugin.mako_plugin(json_dict)
        if output_merged:
            with open(output_merged, 'w') as yaml_file:
                yaml_file.write(yaml.dump(json_dict))
        for k, v in json_dict.items():
            dictionary[k] = bunch.to_bunch(v)

    if preprocessed_output:
        with open(preprocessed_output, 'wb') as dict_file:
            pickle.dump(dictionary, dict_file)

    cleared_dir = False
    for arg in args:
        got_input = True
        with open(arg) as f:
            srcs = list(yaml.load_all(f.read(), Loader=yaml.SafeLoader))
        for src in srcs:
            if isinstance(src, str):
                assert len(srcs) == 1
                template = Template(src,
                                    filename=arg,
                                    module_directory=module_directory,
                                    lookup=TemplateLookup(directories=['.']))
                with open(output_name, 'w') as output_file:
                    template.render_context(Context(output_file, **dictionary))
            else:
                # we have optional control data: this template represents
                # a directory
                if not cleared_dir:
                    if not os.path.exists(output_name):
                        pass
                    elif os.path.isfile(output_name):
                        os.unlink(output_name)
                    else:
                        shutil.rmtree(output_name, ignore_errors=True)
                    cleared_dir = True
                items = []
                if 'foreach' in src:
                    for el in dictionary[src['foreach']]:
                        if 'cond' in src:
                            args = dict(dictionary)
                            args['selected'] = el
                            if not eval(src['cond'], {}, args):
                                continue
                        items.append(el)
                    assert items
                else:
                    items = [None]
                for item in items:
                    args = dict(dictionary)
                    args['selected'] = item
                    item_output_name = os.path.join(
                        output_name,
                        Template(src['output_name']).render(**args))
                    if not os.path.exists(os.path.dirname(item_output_name)):
                        os.makedirs(os.path.dirname(item_output_name))
                    template = Template(
                        src['template'],
                        filename=arg,
                        module_directory=module_directory,
                        lookup=TemplateLookup(directories=['.']))
                    with open(item_output_name, 'w') as output_file:
                        template.render_context(Context(output_file, **args))

    if not got_input and not preprocessed_output:
        out('Got nothing to do')
        showhelp()
Example #10
def main():

    successful = False
    err = None

    global iniFilePath

    # obtain any command-line arguments
    # overriding any values set so far
    nextArg = ""
    for argv in sys.argv:
        if nextArg != "":
            if nextArg == "iniFilePath":
                iniFilePath = argv
            nextArg = ""
        else:
            if argv.lower() == "--inifilepath" or argv.lower() == "-inifilepath":
                nextArg = "iniFilePath"
        
    # expand any leading tilde
    # to the user's home path
    if iniFilePath.startswith("~"):
        iniFilePath = os.path.expanduser(iniFilePath)
    
    iniFilePath = os.path.abspath(iniFilePath)
    
    print ("Attempting to load INI file: %s" % iniFilePath)
    
    # if INI file path does not exist
    if not os.path.exists(iniFilePath):
        # output error message
        sys.stderr.write('iniFilePath does not exist: "%s"\n' % iniFilePath)
        # cease further processing
        sys.exit(0)
    
    # obtain the settings
    # from the INI file path
    config = configparser.ConfigParser(interpolation=ExtendedInterpolation(), delimiters=('='))
    config.optionxform = str # preserve the case of option names
    config.read(iniFilePath)
    
    # -----------------------------
    # Logging settings
    # -----------------------------

    logFilePathExpanded = 'PyDqsStatsGen.log'
    # if the log file
    # starts with a tilde (~)
    if logFilePathExpanded.startswith("~"):
        # derive the log file name's expanded path
        logFilePathExpanded = os.path.expanduser(logFilePathExpanded)
    
    # if the expanded log file name contains a folder prefix
    if os.path.dirname(logFilePathExpanded) != '':    
        # if the expanded log file's parent folder does not yet exist
        if not os.path.exists(os.path.dirname(logFilePathExpanded)):
            try:
                # create the log file's parent folder
                os.makedirs(os.path.dirname(logFilePathExpanded))
            except Exception as e:
                logging.error(str(e))
    
    # if the specified log file exists            
    if os.path.exists(logFilePathExpanded):
        # delete it
        os.remove(logFilePathExpanded)
    
    # maximum logging level that
    # will output to the STDOUT stream
    MAX_STDOUT_LEVEL = logging.INFO
    
    # obtain the [folders] section's settings from the INI file
    outFolder = config['folders'].get('outFolder', '~/temp')
    logSubFolder = config['folders'].get('logSubFolder', 'logFiles')
    tgtSubFolder = config['folders'].get('tgtSubFolder', 'tgtFiles')

    tgtFullPath = os.path.join(outFolder, tgtSubFolder)

    # obtain the [logging] section's settings from the INI file
    logFileName = config['logging'].get('logFileName', 'PyDqsStatsGen.log')
    maxStdOutLvl = config['logging'].get('MAX_STDOUT_LEVEL', 'info')
    
    # if log file name
    # was not specified    
    if logFileName == "":
        # default the log file name
        logFileName = "PyDqsStatsGen.log"

    logFullPathExpanded = logFileName
    # if the log file
    # starts with a tilde (~)
    if logFullPathExpanded.startswith("~"):
        # derive the log file name's expanded path
        logFullPathExpanded = os.path.expanduser(logFullPathExpanded)

    # if the expanded log file name does NOT contain a folder prefix
    if os.path.dirname(logFullPathExpanded) == '':
        # expand the output folder joined to the default log subfolder joined to the log file name
        logFullPathExpanded = os.path.expanduser(os.path.join(outFolder, logSubFolder, logFullPathExpanded))
    
    # if the expanded log file name contains a folder prefix
    if os.path.dirname(logFullPathExpanded) != '':    
        # if the expanded log file's parent folder does not yet exist
        if not os.path.exists(os.path.dirname(logFullPathExpanded)):
            try:
                # create the log file's parent folder
                os.makedirs(os.path.dirname(logFullPathExpanded))
            except Exception as e:
                logging.error(str(e))
                return successful, err
    
    # if the specified log file exists            
    if os.path.exists(logFullPathExpanded):
        # delete it
        os.remove(logFullPathExpanded)
    
    if maxStdOutLvl.lower() == 'info':
        MAX_STDOUT_LEVEL = logging.INFO
    elif maxStdOutLvl.lower() == 'debug':
        MAX_STDOUT_LEVEL = logging.DEBUG
    elif maxStdOutLvl.lower() == 'warning':
        MAX_STDOUT_LEVEL = logging.WARNING
    elif maxStdOutLvl.lower() == 'error':
        MAX_STDOUT_LEVEL = logging.ERROR
    elif maxStdOutLvl.lower() == 'critical':
        MAX_STDOUT_LEVEL = logging.CRITICAL
    else:
        MAX_STDOUT_LEVEL = logging.INFO
    
    # instantiate the logger object
    # logger = logging.getLogger(__name__)
    
    # remove any existing log handlers
    logging.getLogger('').handlers = []
    
    # set the default logger's values
    logging.basicConfig(level=MAX_STDOUT_LEVEL,
                        format='%(asctime)s\t%(levelname)s\t%(name)s\t%(message)s',
                        datefmt='%Y-%m-%d %H:%M',
                        filename=logFullPathExpanded,
                        filemode='w')
    
    # attach stdout to the logger
    # so that outputting to the log also
    # outputs to the stdout console
    logStdOut = logging.StreamHandler(sys.stdout)
    logStdOut.addFilter(MaxLogLevelFilter(MAX_STDOUT_LEVEL))
    logging.getLogger('').addHandler(logStdOut)                    
    
    # attach stderr to the logger
    # so that outputting to the log also
    # outputs to the stderr console
    logStdErr = logging.StreamHandler(sys.stderr)
    logStdErr.addFilter(MinLogLevelFilter(MAX_STDOUT_LEVEL))
    logging.getLogger('').addHandler(logStdErr)
    
    # output a message to the log file
    # with the log file's location info
    logging.info('Log file: %s', logFullPathExpanded)
    # logging.debug('DEBUG: Log file located at %s', logFilePathExpanded)
    # logging.warning('WARNING: Log file located at %s', logFilePathExpanded)
    # logging.error('Log file located at %s', logFilePathExpanded)

    # -------------------------------------------------------------------------

    # max data rows
    # to be processed
    # 0 means unlimited
    maxRows = int(config['DEFAULT'].get('maxRows', '0'))
    
    # show progress messages
    # every 'flushCount' number
    # of data rows
    flushCount = int(config['DEFAULT'].get('flushCount', '10000'))
    
    # maximum allowed column
    # count mismatches before
    # terminating the program
    maxColCountMisMatches = int(config['DEFAULT'].get('maxColCountMisMatches', '0'))
    
    # max number of HTML value
    # frequencies per column to report
    maxHtmlCount = int(config['DEFAULT'].get('maxHtmlCount', '5'))
    
    # max number of JDBC value
    # frequencies per column to report
    maxJdbcCount = int(config['DEFAULT'].get('maxJdbcCount', '10'))
    
    # the date of execution's output format string    
    runDateFormatString = config['DEFAULT'].get('runDateFormatString', '%A %d %b %Y %I:%M %p %Z')
    
    # MAKO template path
    # for HTML output generation
    makoHtmlTemplateName = config['DEFAULT'].get('makoHtmlTemplateName', 'DqsStatsHtml.mako')

    # MAKO template path
    # for JDBC output generation
    makoJdbcTemplateName = config['DEFAULT'].get('makoJdbcTemplateName', 'DqsStatsJdbc.mako')
     
    srcFullPath = config['srcSpecs'].get('srcFullPath')
    srcDelim = config['srcSpecs'].get('srcDelim', ',')
    srcHeaderRows = int(config['srcSpecs'].get('srcHeaderRows', '1'))
    # handle edge-values
    # of source header rows
    if srcHeaderRows <= 0:
        srcHeaderRows = 1
    # assume the source file
    # is minimally-quoted
    srcQuote = csv.QUOTE_MINIMAL

    # data provider's acronym    
    dataProvider = config['srcSpecs'].get('dataProvider', 'unspecified')
    
    # comma-delimited list of columns upon
    # which statistics are to be calculated,
    # an empty ACCEPT list will signal the
    # processing of ALL of the row's columns
    acceptColNames = {}
    acceptColNamesStr = config['srcSpecs'].get('acceptColNames', '')
    if acceptColNamesStr != '':
        tempValues = acceptColNamesStr.split(',')
        for tempValue in tempValues:
            acceptColNames[tempValue] = tempValue
    
    # comma-delimited IGNORE list will suppress calculations
    # of the value frequency statistics for the specified columns
    # ignoreColNames = 'voter_reg_num','ncid'
    ignoreColNames = {}
    ignoreColNamesStr = config['srcSpecs'].get('ignoreColNames','')
    if ignoreColNamesStr != '':
        tempValues = ignoreColNamesStr.split(',')
        for tempValue in tempValues:
            ignoreColNames[tempValue] = tempValue
    
    # comma-delimited UNIQUE list will suppress calculations
    # of the value frequency statistics for the specified columns
    # uniqueColNames = 'voter_reg_num','ncid'
    uniqueColNames = {}
    uniqueColNamesStr = config['srcSpecs'].get('uniqueColNames','')
    if uniqueColNamesStr != '':
        tempValues = uniqueColNamesStr.split(',')
        for tempValue in tempValues:
            uniqueColNames[tempValue] = tempValue
            
    bypassColNames = {}
    for colName in uniqueColNames.keys():
        if colName not in bypassColNames: 
            bypassColNames[colName] = colName
    for colName in ignoreColNames.keys():
        if colName not in bypassColNames: 
            bypassColNames[colName] = colName

    # obtain the JDBC database connection parameters    
    jdbcType = config['jdbcSpecs'].get('jdbcType', 'pgsql').lower()
    jdbcHost = config['jdbcSpecs'].get('jdbcHost', 'localhost')
    jdbcPort = int(config['jdbcSpecs'].get('jdbcPort', '5432')) # defaults to PostgreSQL's port
    jdbcDatabase = config['jdbcSpecs'].get('jdbcDatabase', 'dqsvalidator')
    jdbcUID = config['jdbcSpecs'].get('jdbcUID', 'dqsvalidator')
    jdbcPWD = config['jdbcSpecs'].get('jdbcPWD', '[redacted]')
    jdbcDropTableIfExistsCompliant = (config['jdbcSpecs'].get('jdbcDropTableIfExistsCompliant', 'True') == 'True')
    
    # tweak the connection parameters
    # depending upon the target database
    
    # is it MySQL
    if jdbcType == 'mysql':
        jdbcParms = {
            'host':jdbcHost,
            'port': jdbcPort,
            'database':jdbcDatabase,
            'user':jdbcUID,
            'password':jdbcPWD        
            }
    # is it SQL Server?
    elif jdbcType == 'mssql':
        jdbcParms = {
            'server':jdbcHost + ":" + str(jdbcPort),
            'port': jdbcPort,
            'database':jdbcDatabase,
            'user':jdbcUID,
            'password':jdbcPWD        
            }
    # otherwise
    else:
        # default to PostgreSQL
        jdbcParms = {
            'host':jdbcHost,
            'port': jdbcPort,
            'dbname':jdbcDatabase,
            'user':jdbcUID,
            'password':jdbcPWD        
            }

    executorName = getpass.getuser()
    runDate = time.strftime(runDateFormatString)
    
    srcPathExpanded = srcFullPath
    if srcFullPath.startswith('~'):
        srcPathExpanded = os.path.expanduser(srcFullPath)
    srcPathExpanded = os.path.abspath(srcPathExpanded)
    logging.info("SRC file: %s" % srcPathExpanded)
    if not os.path.exists(srcPathExpanded):
        logging.error("SRC file does NOT exist: %s" % srcPathExpanded)
        successful = False
        return successful, err

    tgtPathExpanded = tgtFullPath
    if tgtFullPath.startswith('~'):
        tgtPathExpanded = os.path.expanduser(tgtFullPath)
    tgtDqsStatsHtmlExpanded = os.path.join(tgtPathExpanded, os.path.splitext(os.path.basename(srcPathExpanded))[0] + ".html")
    tgtDqsStatsJdbcExpanded = os.path.join(tgtPathExpanded, os.path.splitext(os.path.basename(srcPathExpanded))[0] + ".sqlite")
    tgtDqsStatsHtmlExpanded = os.path.abspath(tgtDqsStatsHtmlExpanded)
    tgtDqsStatsJdbcExpanded = os.path.abspath(tgtDqsStatsJdbcExpanded)
    logging.info("TGT DQS Statistics HTML file: %s" % tgtDqsStatsHtmlExpanded)
    logging.info("TGT DQS Statistics JDBC file: %s" % tgtDqsStatsJdbcExpanded)
    if not os.path.exists(os.path.dirname(tgtDqsStatsHtmlExpanded)):
        os.makedirs(os.path.dirname(tgtDqsStatsHtmlExpanded))
    if not os.path.exists(os.path.dirname(tgtDqsStatsJdbcExpanded)):
        os.makedirs(os.path.dirname(tgtDqsStatsJdbcExpanded))

    # remove any pre-existing statistics files
    if os.path.exists(tgtDqsStatsHtmlExpanded):
        try:
            os.remove(tgtDqsStatsHtmlExpanded)
        except Exception as e:
            logging.error('Failed to remove pre-existing statistics file "%s"' % tgtDqsStatsHtmlExpanded)
            logging.error('Processing terminated immediately.')
            err = str(e)
            return successful,err
            
    # remove any pre-existing statistics files
    if os.path.exists(tgtDqsStatsJdbcExpanded):
        try:
            os.remove(tgtDqsStatsJdbcExpanded)
        except Exception as e:
            logging.error('Failed to remove pre-existing statistics file "%s"' % tgtDqsStatsJdbcExpanded)
            logging.error('Processing terminated immediately.')
            err = str(e)
            return successful,err
        
    makoHtmlPathExpanded = makoHtmlTemplateName
    if makoHtmlPathExpanded.startswith('~'):
        makoHtmlPathExpanded = os.path.expanduser(makoHtmlPathExpanded)
    makoHtmlPathExpanded = os.path.abspath(makoHtmlPathExpanded)
    logging.info("DQS Statistics HTML MAKO template file: %s" % makoHtmlPathExpanded)
    if not os.path.exists(makoHtmlPathExpanded):
        logging.error("MAKO template file for HTML output does NOT exist: %s" % makoHtmlPathExpanded)
        successful = False
        return successful, err
        
    makoJdbcPathExpanded = makoJdbcTemplateName
    if makoJdbcPathExpanded.startswith('~'):
        makoJdbcPathExpanded = os.path.expanduser(makoJdbcPathExpanded)
    makoJdbcPathExpanded = os.path.abspath(makoJdbcPathExpanded)
    logging.info("DQS Statistics JDBC MAKO template file: %s" % makoJdbcPathExpanded)
    if not os.path.exists(makoJdbcPathExpanded):
        logging.error("MAKO template file for JDBC output does NOT exist: %s" % makoJdbcPathExpanded)
        successful = False
        return successful, err
        
    colNames = []

    logging.info('Accept columns: %s' % list(set(acceptColNames.keys())))
    logging.info('Unique columns: %s' % list(set(uniqueColNames.keys())))
    logging.info('Ignore columns: %s' % list(set(ignoreColNames.keys())))
    
    # derive the columns for which NO value frequencies are to be calculated    
    # bypassColNames = list(set(uniqueColNames)|set(ignoreColNames))
    logging.info("Bypass value frequency processing for columns: %s" % list(set(bypassColNames)))
       
    # open the source file for reading
    srcFile = codecs.open(srcPathExpanded, 'r', 'cp1252')
    csvReader = csv.reader(srcFile, delimiter=srcDelim, quoting=srcQuote)

    bgnTime = time.time()
    
    fileRows = 0
    dataRows = 0
    for rowData in csvReader:
        fileRows += 1
        # if this is the last
        # of the header rows
        if fileRows == srcHeaderRows:
            colNames, err = analyzeHead(rowData, colNames, acceptColNames, ignoreColNames, uniqueColNames)
            if err:
                # cease further processing
                logging.error("Processing terminated due to incorrect 'acceptColNames', 'ignoreColNames', or 'uniqueColNames' INI file settings.")
                break
        # otherwise, if this is a data row
        elif fileRows > srcHeaderRows:
            dataRows += 1
            analyzeData(rowData, colNames, acceptColNames, bypassColNames, fileRows, dataRows)
        if maxRows > 0 and dataRows >= maxRows:
            break
        if dataRows > 0 and dataRows % flushCount == 0:
            endTime = time.time()
            seconds = endTime - bgnTime
            if seconds > 0:
                rcdsPerSec = dataRows / seconds
            else:
                rcdsPerSec = 0
            logging.info("Read {:,} data rows in {:,.0f} seconds @ {:,.0f} records/second".format(dataRows, seconds, rcdsPerSec))
        # if maximum column count mismatches value exceeded
        if maxColCountMisMatches > 0 and len(colCountMisMatches) >= maxColCountMisMatches:
            # cease further processing
            logging.error("Processing terminated due to the number of column count mismatches %d exceeding maximum allowed %d." % (colCountMisMatches, maxColCountMisMatches))
            break
            
    del csvReader
    srcFile.close()

    endTime = time.time()
    seconds = endTime - bgnTime
    if seconds > 0:
        rcdsPerSec = dataRows / seconds
    else:
        rcdsPerSec = 0
        
    logging.info('')
    logging.info("Read {:,} data rows in {:,.0f} seconds @ {:,.0f} records/second".format(dataRows, seconds, rcdsPerSec))

    # if error found
    if err:
        # bypass further processing
        return successful, err
        
    # column-by-column
    for colName in colNames:
        # if there were
        # rows of data found
        if dataRows > 0:
            # calculate the average width
            avgWidths[colName] = (totWidths[colName] * 1.0) / (dataRows * 1.0)
            # calculate the coverage percent
            cvgPrcnts[colName] = (nonBlanks[colName] * 1.0) / (dataRows * 1.0)
        else:
            avgWidths[colName] = 0.0
            cvgPrcnts[colName] = 0.0

    # column-by-column sort the value frequencies            
    frqValueAscs = collections.OrderedDict()
    for colName in colNames:
        frqValueAscs[colName] = {}
        # bypass unprocessed columns
        # since no value frequencies were tracked for them
        if not colName in bypassColNames:
            frqValueAscs[colName] = sorted(frqValues[colName].items(), key=lambda x:x[0])

    # column-by-column, sort the width frequencies            
    frqWidthAscs = collections.OrderedDict()
    for colName in colNames:
        frqWidthAscs[colName] = sorted(frqWidths[colName].items(), key=lambda x:x[0])

    # -------------------------------------------------------------------------
    # Output DQS statistics to HTML file               
    # -------------------------------------------------------------------------

    valueFreqs = collections.OrderedDict()
    for colName in colNames:
        valueFreqs[colName] = {}
        valueFreqs[colName]['frqValValAsc'] = {}
        valueFreqs[colName]['frqValFrqAsc'] = {}
        valueFreqs[colName]['frqValFrqDsc'] = {}
        if not colName in bypassColNames:
            if maxHtmlCount > 0:
                valueFreqs[colName]['frqValValAsc'] = heapq.nsmallest(maxHtmlCount, frqValues[colName].items(), key=lambda x:x[0])
                valueFreqs[colName]['frqValFrqAsc'] = heapq.nsmallest(maxHtmlCount, frqValues[colName].items(), key=lambda x:x[1])
                valueFreqs[colName]['frqValFrqDsc'] = heapq.nlargest(maxHtmlCount, frqValues[colName].items(), key=lambda x:x[1])
            else:
                valueFreqs[colName]['frqValValAsc'] = sorted(frqValues[colName].items(), key=lambda x:x[0])
                valueFreqs[colName]['frqValFrqAsc'] = sorted(frqValues[colName].items(), key=lambda x:x[1])
                valueFreqs[colName]['frqValFrqDsc'] = sorted(frqValues[colName].items(), key=lambda x:x[1], reverse=True)

    htmlWriter = codecs.open(tgtDqsStatsHtmlExpanded, 'w', 'cp1252')
                    
    makoHtmlTemplate = Template(filename=makoHtmlPathExpanded)
    buffer = StringIO()
    attrs = {}
    parms = {
        'attrs':attrs,
        'dataProvider': dataProvider,
        'executorName': executorName,
        'runDate': runDate,
        'srcPathExpanded':srcPathExpanded,
        'srcPathBaseName':os.path.basename(srcPathExpanded),
        'srcDelim':srcDelim,
        'srcHeaderRows':srcHeaderRows,
        'maxRows':maxRows,
        'maxHtmlCount':maxHtmlCount,
        'maxJdbcCount':maxJdbcCount,
        'tgtDqsStatsJdbcExpanded':tgtDqsStatsJdbcExpanded,
        'inputRows':dataRows,
        'inputCols':len(colNames),
        'colNames':colNames,
        'acceptColNames':acceptColNames,
        'ignoreColNames':ignoreColNames,
        'uniqueColNames':uniqueColNames,
        'nonBlanks':nonBlanks,
        'valueFreqs':valueFreqs,
        'minWidths':minWidths,
        'maxWidths':maxWidths,
        'avgWidths':avgWidths,
        'frqValueAscs':frqValueAscs,
        'frqWidthAscs':frqWidthAscs,
        'colCountMisMatches':colCountMisMatches
        }
    context = Context(buffer, **parms)
    makoHtmlTemplate.render_context(context)
    
    htmlWriter.write(buffer.getvalue())
    htmlWriter.close()

    # -------------------------------------------------------------------------
    # Output DQS statistics to JDBC file (SQLite)               
    # -------------------------------------------------------------------------

    valueFreqs.clear()
    for colName in colNames:
        valueFreqs[colName] = {}
        valueFreqs[colName]['frqValValAsc'] = {}
        valueFreqs[colName]['frqValFrqAsc'] = {}
        valueFreqs[colName]['frqValFrqDsc'] = {}
        # don't sort unprocessed columns as no
        # value frequencies were calculated for them
        if not colName in bypassColNames:
            if maxJdbcCount > 0:
                valueFreqs[colName]['frqValValAsc'] = heapq.nsmallest(maxJdbcCount, frqValues[colName].items(), key=lambda x:x[0])
                valueFreqs[colName]['frqValFrqAsc'] = heapq.nsmallest(maxJdbcCount, frqValues[colName].items(), key=lambda x:x[1])
                valueFreqs[colName]['frqValFrqDsc'] = heapq.nlargest(maxJdbcCount, frqValues[colName].items(), key=lambda x:x[1])
            else:
                valueFreqs[colName]['frqValValAsc'] = sorted(frqValues[colName].items(), key=lambda x:x[0])
                valueFreqs[colName]['frqValFrqAsc'] = sorted(frqValues[colName].items(), key=lambda x:x[1])
                valueFreqs[colName]['frqValFrqDsc'] = sorted(frqValues[colName].items(), key=lambda x:x[1], reverse=True)
    
    # TODO: implement SQL as parameterized queries, necessitating NOT using MAKO template

    # push statistic records to SQLite database
    sqliteConn = sqlite3.connect(tgtDqsStatsJdbcExpanded)
    sqliteCursor = sqliteConn.cursor()
                    
    makoJdbcTemplate = Template(filename=makoJdbcPathExpanded)
    buffer = StringIO()
    attrs = {}
    parms = {
        'attrs':attrs,
        'dataProvider': dataProvider,
        'executorName': executorName,
        'runDate': runDate,
        'srcPathExpanded':srcPathExpanded,
        'srcPathBaseName':os.path.basename(srcPathExpanded),
        'srcDelim':srcDelim,
        'srcHeaderRows':srcHeaderRows,
        'maxRows':maxRows,
        'maxHtmlCount':maxHtmlCount,
        'maxJdbcCount':maxJdbcCount,
        'tgtDqsStatsHtmlExpanded':tgtDqsStatsHtmlExpanded,
        'tgtDqsStatsJdbcExpanded':tgtDqsStatsJdbcExpanded,
        'inputRows':dataRows,
        'inputCols':len(colNames),
        'colNames':colNames,
        'acceptColNames':acceptColNames,
        'ignoreColNames':ignoreColNames,
        'uniqueColNames':uniqueColNames,
        'nonBlanks':nonBlanks,
        'valueFreqs':valueFreqs,
        'minWidths':minWidths,
        'maxWidths':maxWidths,
        'avgWidths':avgWidths,
        'frqValueAscs':frqValueAscs,
        'frqWidthAscs':frqWidthAscs,
        'colCountMisMatches':colCountMisMatches,
        'jdbcDropTableIfExistsCompliant': True # True for SQLite databases
        }
    context = Context(buffer, **parms)
    makoJdbcTemplate.render_context(context)
    
    sqlCmd = ''
    lines = buffer.getvalue().splitlines()  # Mako emits '\n' newlines regardless of platform
    for line in lines:
        sqlCmd = sqlCmd + line.strip()
        if line.strip().endswith(';'):
            # print (sqlCmd)
            # print ('')
            try:
                sqliteCursor.execute(sqlCmd)
            except Exception as e:
                logging.error(sqlCmd)
                logging.error(str(e))
                break
            sqlCmd = ''
    
    sqliteConn.commit()
    sqliteConn.close()
        
    # push statistic records to traditional database

    # if jdbcType not found
    # default to PostgreSQL
    jdbcConn = None
    if jdbcType.lower() == 'mysql':
        try:
            jdbcConn = mysql.connector.connect(**jdbcParms)
        except Exception as e:
            logging.error('Failed to connect to MySQL database')
            logging.error(jdbcParms)
            logging.error(str(e))
    elif jdbcType.lower() == 'mssql':
        try:
            jdbcConn = pymssql.connect(**jdbcParms)
        except Exception as e:
            logging.error('Failed to connect to SQL Server database')
            logging.error(jdbcParms)
            logging.error(str(e))
    else:
        try:
            jdbcConn = psycopg2.connect(**jdbcParms)
        except Exception as e:
            logging.error('Failed to connect to PostgreSQL database')
            logging.error(jdbcParms)
            logging.error(str(e))
    # bail out if no connection could be established above
    if jdbcConn is None:
        return successful, err
    jdbcCursor = jdbcConn.cursor()
                    
    makoJdbcTemplate = Template(filename=makoJdbcPathExpanded)
    buffer = StringIO()
    attrs = {}
    parms = {
        'attrs':attrs,
        'dataProvider': dataProvider,
        'executorName': executorName,
        'runDate': runDate,
        'srcPathExpanded':srcPathExpanded,
        'srcPathBaseName':os.path.basename(srcPathExpanded),
        'srcDelim':srcDelim,
        'srcHeaderRows':srcHeaderRows,
        'maxRows':maxRows,
        'maxHtmlCount':maxHtmlCount,
        'maxJdbcCount':maxJdbcCount,
        'tgtDqsStatsHtmlExpanded':tgtDqsStatsHtmlExpanded,
        'tgtDqsStatsJdbcExpanded':tgtDqsStatsJdbcExpanded,
        'inputRows':dataRows,
        'inputCols':len(colNames),
        'colNames':colNames,
        'acceptColNames':acceptColNames,
        'ignoreColNames':ignoreColNames,
        'uniqueColNames':uniqueColNames,
        'nonBlanks':nonBlanks,
        'valueFreqs':valueFreqs,
        'minWidths':minWidths,
        'maxWidths':maxWidths,
        'avgWidths':avgWidths,
        'frqValueAscs':frqValueAscs,
        'frqWidthAscs':frqWidthAscs,
        'colCountMisMatches':colCountMisMatches,
        'jdbcDropTableIfExistsCompliant': jdbcDropTableIfExistsCompliant # True for PostgreSQL and MySQL databases, False for SQL Server
        }
    context = Context(buffer, **parms)
    makoJdbcTemplate.render_context(context)
    
    sqlCmd = ''
    lines = buffer.getvalue().splitlines()  # Mako emits '\n' newlines regardless of platform
    for line in lines:
        sqlCmd = sqlCmd + line.strip()
        if line.strip().endswith(';'):
            # print (sqlCmd)
            # print ('')
            try:
                jdbcCursor.execute(sqlCmd)
            except Exception as e:
                logging.error(sqlCmd)
                logging.error(str(e))
                break
            sqlCmd = ''

    jdbcConn.commit()
    jdbcConn.close()

    successful = True
    return successful, err
Example #11
def main(argv):
    got_input = False
    module_directory = None
    dictionary = {}
    json_dict = {}
    got_output = False
    output_file = sys.stdout
    plugins = []

    try:
        opts, args = getopt.getopt(argv, 'hm:d:o:p:')
    except getopt.GetoptError:
        out('Unknown option')
        showhelp()
        sys.exit(2)

    for opt, arg in opts:
        if opt == '-h':
            out('Displaying showhelp')
            showhelp()
            sys.exit()
        elif opt == '-o':
            if got_output:
                out('Got more than one output')
                showhelp()
                sys.exit(3)
            got_output = True
            output_file = open(arg, 'w')
        elif opt == '-m':
            if module_directory is not None:
                out('Got more than one cache directory')
                showhelp()
                sys.exit(4)
            module_directory = arg
        elif opt == '-d':
            dict_file = open(arg, 'r')
            bunch.merge_json(json_dict, simplejson.loads(dict_file.read()))
            dict_file.close()
        elif opt == '-p':
            plugins.append(import_plugin(arg))

    for plugin in plugins:
        plugin.mako_plugin(json_dict)

    for k, v in json_dict.items():
        dictionary[k] = bunch.to_bunch(v)

    ctx = Context(output_file, **dictionary)

    for arg in args:
        got_input = True
        template = Template(filename=arg,
                            module_directory=module_directory,
                            lookup=TemplateLookup(directories=['.']))
        template.render_context(ctx)

    if not got_input:
        out('Got nothing to do')
        showhelp()

    output_file.close()
Example #12
    def _inject_stuff(self):
        try:
            with open(self._input_file, 'r') as infile:
                for thing in infile:
                    key, val = self._prop_to_yaml(thing.strip())
                    if key and val:
                        self._food += spacer + key + ': ' + val + '\n'

            buf = StringIO()
            t = Template(filename=self._template_file)

            if self._sns_topic_arn_found:
                sns_var_bits = sns_topic_arn
                sns_resource_bits = sns_subcription_resource
            else:
                sns_var_bits = ''
                sns_resource_bits = ''

            if self._trusted_service_found:
                trusted_service_var_bits = trusted_service
                trusted_service_resource_bits = trusted_service_resource
            else:
                trusted_service_var_bits = ''
                trusted_service_resource_bits = ''

            if self._schedule_found:
                schedule_var_bits = schedule_expression
                schedule_resource_bits = schedule_resource
            else:
                schedule_var_bits = ''
                schedule_resource_bits = ''

            if self._create_service:
                the_api_bits = get_the_api_chunk(region=self._region,
                                                 stage_name=self._stage_name,
                                                 short_name=self._short_name,
                                                 account=self._account)
            else:
                the_api_bits = ''

            if self._import_role:
                current_role_parameter_section = ''
                role = self._find_imported_csv(
                    self._stack_properties.get('role', None))
                role_specification = imported_role_spec.format(role)
            else:
                current_role_parameter_section = role_parameter_section
                role_specification = parameter_role_spec

            subnet_specification = None
            if self._import_subnets:
                current_subnets_parameter_section = ''
                subnets = self._find_imported_csv(
                    self._stack_properties.get('subnetIds', None))
                for subnet in subnets.split(','):
                    if subnet_specification:
                        subnet_specification = subnet_specification + \
                            '\n' + spacer + \
                            imported_subnets_spec.format(subnet)
                    else:
                        subnet_specification = imported_subnets_spec.format(
                            subnet)
            else:
                current_subnets_parameter_section = subnets_parameter_section
                subnet_specification = subnets_parameter_spec

            sg_specification = None
            if self._import_security_group:
                current_sg_parameter_section = ''
                sg_csv = self._find_imported_csv(
                    self._stack_properties.get('securityGroupIds', None))
                for sg in sg_csv.split(','):
                    if sg_specification:
                        sg_specification = sg_specification + \
                            '\n' + spacer + \
                            imported_sg_spec.format(sg)
                    else:
                        sg_specification = imported_sg_spec.format(sg)
            else:
                current_sg_parameter_section = sg_parameter_section
                sg_specification = sg_parameter_spec

            ctx = Context(
                buf,
                environment_section=self._food,
                snsTopicARN=sns_var_bits,
                snsSubscriptionResource=sns_resource_bits,
                trustedService=trusted_service_var_bits,
                trustedServiceResource=trusted_service_resource_bits,
                scheduleExpression=schedule_var_bits,
                scheduleResource=schedule_resource_bits,
                theAPI=the_api_bits,
                roleParameterSection=current_role_parameter_section,
                roleSpecification=role_specification,
                subnetsParameterSection=current_subnets_parameter_section,
                subnetIds=subnet_specification,
                sgParameterSection=current_sg_parameter_section,
                securityGroupIds=sg_specification)
            # securityGroupIds=sg_parameter_spec

            t.render_context(ctx)
            logging.info('writing template {}'.format(self._output_file))
            with open(self._output_file, "w") as outfile:
                outfile.write(buf.getvalue())
        except Exception as wtf:
            logging.error('Exception caught in inject_stuff(): {}'.format(wtf))
            traceback.print_exc(file=sys.stdout)
            sys.exit(1)
Example #13
def render(tmpl, dict1):
	buf = StringIO()
	ctx = Context(buf, **dict1)
	tmpl.render_context(ctx)
	return buf.getvalue()
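
The same helper with its elided imports made explicit (Python 3 spelling of StringIO assumed), plus a usage line with an illustrative template:

from io import StringIO

from mako.runtime import Context
from mako.template import Template

def render(tmpl, dict1):
    buf = StringIO()
    ctx = Context(buf, **dict1)
    tmpl.render_context(ctx)
    return buf.getvalue()

print(render(Template("Sum: ${a + b}"), {"a": 1, "b": 2}))  # prints "Sum: 3"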
Example #14
def create_citation_command_from_footnote_list(footnote_list):
    print("Getting information for footnote with information " +
          str(footnote_list))
    result = "EMPTY-CITATION-FIX-ME"
    if output_lyx is True:
        print("Outputting LyX citation command")
        mytemplate = Template(
            filename='lyx_template/lyx_citation_command_template.tmpl')
        buf = io.StringIO()

        # create context to impose on the template in the current iteration
        key_list = []
        if footnote_list is not None:
            for entry in footnote_list:
                if 'uri' in entry.keys():
                    key_list.append(
                        get_biblatex_cite_key_from_zotero_api(entry['uri']))

        print(','.join(key_list))

        pretext_list = []
        if footnote_list is not None:
            for entry in footnote_list:
                if 'prefix' in entry.keys():
                    pretext_list.append(
                        get_biblatex_cite_key_from_zotero_api(entry['uri']))
                    pretext_list.append(' ')
                    pretext_list.append(entry['prefix'].replace("\"", "\\\""))
                    pretext_list.append('\t')

        posttext_list = []
        if footnote_list is not None:
            for entry in footnote_list:
                if 'locator' in entry.keys():
                    posttext_list.append(
                        get_biblatex_cite_key_from_zotero_api(entry['uri']))
                    posttext_list.append(' ')
                    posttext_list.append(entry['locator'].replace(
                        "\"", "\\\""))
                    posttext_list.append('\t')

        ctx = Context(buf,
                      key_list=','.join(key_list),
                      pretext_list=''.join(pretext_list),
                      posttext_list=''.join(posttext_list))

        # render template with context we've just generated
        mytemplate.render_context(ctx)

        result = buf.getvalue()
        print(result)
    else:
        result = "EMPTY-CITATION-FIX-ME"
        if footnote_list is not None:
            result = "\\autocites"
            for entry in footnote_list:
                if 'prefix' in entry.keys():
                    result += ("[" + entry['prefix'] + "]")
                else:
                    result += "[]"
                if 'locator' in entry.keys():
                    result += ("[" + entry['locator'] + "]")
                else:
                    result += "[]"
                if 'uri' in entry.keys():
                    result += ("{" + get_biblatex_cite_key_from_zotero_api(
                        entry['uri']) + "}")
    return result
Example #15
def main():
    tree = etree.parse(os.path.join(document_temp_path, "word",
                                    "document.xml"))

    root = tree.getroot()

    document_content = []

    for child in root[0]:
        new_list_object = List_object()
        new_list_object.text = "\n\n"
        document_content.append(new_list_object)
        for child in child:
            for child in child:
                if etree.QName(child.tag).localname == 't':
                    if child.text is not None:
                        new_list_object = List_object()
                        new_list_object.footnote = False
                        if child.get(
                                '{http://www.w3.org/XML/1998/namespace}space'
                        ) is not None:
                            new_list_object.text = ' ' + child.text + ' '
                        else:
                            new_list_object.text = (
                                child.text).lstrip(' ').rstrip(' ')
                        document_content.append(new_list_object)

                else:
                    if child.get(
                            '{http://schemas.openxmlformats.org/wordprocessingml/2006/main}id'
                    ) is not None:
                        new_list_object = List_object()
                        new_list_object.footnote = True
                        new_list_object.id = str(
                            child.get(
                                '{http://schemas.openxmlformats.org/wordprocessingml/2006/main}id'
                            ))
                        new_list_object.footnote_information = get_footnote_information(
                            new_list_object.id)
                        document_content.append(new_list_object)
        new_list_object = List_object()
        new_list_object.text = '\n'
        document_content.append(new_list_object)

    if output_lyx is True:
        mytemplate = Template(filename='lyx_template/lyx_template.tmpl')
        buf = io.StringIO()

        content_list = []
        content_list.append("\\begin_layout Standard\n")

        for entry in document_content:
            try:
                if entry.text == "\n\n":
                    content_list.append("\\end_layout\n")
                    content_list.append("\\begin_layout Standard\n")
            except AttributeError:
                pass
            if entry.footnote is True:
                if FOOTNOTE_WRITING is True:
                    content_list.append(
                        str(
                            create_citation_command_from_footnote_list(
                                entry.footnote_information)))
                else:
                    content_list.append(
                        "\n\\color red\nDEBUG: Writing footnotes disabled\n\\color inherit\n"
                    )
            else:
                current_entry = str(entry)
                current_entry = current_entry.replace(
                    '“', """\\begin_inset Quotes eld
\\end_inset
""")
                current_entry = current_entry.replace(
                    '”', """\\begin_inset Quotes erd
\\end_inset
""")
                content_list.append(current_entry)
        content_list.append("\\end_layout\n")
        ctx = Context(buf, document_content=''.join(content_list))

        # render template with context we've just generated
        mytemplate.render_context(ctx)

        output_file = open("output.lyx", 'w')

        # write the string buffer to the file
        output_file.write(buf.getvalue())

        # close file handle
        output_file.close()

    else:
        output_file = open("output.tex", 'w')
        for entry in document_content:
            if entry.footnote is True:
                if FOOTNOTE_WRITING is True:
                    output_file.write(
                        str(
                            create_citation_command_from_footnote_list(
                                entry.footnote_information)))
                else:
                    output_file.write(
                        "\n\\textcolor{red}{DEBUG: Writing footnotes disabled}\n"
                    )
            else:
                current_entry = str(entry)
                current_entry = current_entry.replace("&", "\\&")
                current_entry = current_entry.replace("%", "\\%")
                current_entry = current_entry.replace("$", "\\$")

                current_entry = current_entry.replace("“", "``")
                current_entry = current_entry.replace("„", "``")
                current_entry = current_entry.replace("”", "\'\'")
                output_file.write(current_entry)

        output_file.close()
Example #16
    else:
        ntop.printHTMLHeader('Host Map: Region View', 1, 0)

        if totalHosts == 0:
            ntop.printFlagedWarning('No hosts have been detected by ntop yet')
        elif len(dictionaryCountries) == 0:
            ntop.printFlagedWarning(
                'No hosts have been successfully geo-located by ntop yet')
        else:
            try:
                basedir = os.getenv('DOCUMENT_ROOT', '.') + '/python/templates'
                mylookup = TemplateLookup(directories=[basedir],
                                          output_encoding='utf-8',
                                          input_encoding='latin1',
                                          encoding_errors='replace',
                                          default_filters=['decode.utf8'])
                myTemplate = mylookup.get_template('GeoPacketVisualizer.tmpl')
                buf = StringIO()
                ctx = Context(buf,
                              countries=dictionaryCountries,
                              totalHosts=totalHosts,
                              unknownCountries=unknownCountries,
                              unknownCities=unknownCities,
                              filename=os.path.basename(__file__))
                myTemplate.render_context(ctx)
                ntop.sendString(buf.getvalue())
            except:
                ntop.sendString(exceptions.html_error_template().render())

        ntop.printHTMLFooter()
Example #17
 def generator(self):
     self._buf = StringIO()
     ctx = Context(self._buf, java_class=self._java_class)
     self._template.render_context(ctx)
Example #18
def render_interop_html_report(client_langs, server_langs, test_cases,
                               auth_test_cases, http2_cases,
                               http2_badserver_cases,
                               client_langs_http2_badserver_cases, resultset,
                               num_failures, cloud_to_prod, prod_servers,
                               http2_interop):
    """Generate HTML report for interop tests."""
    template_file = 'tools/run_tests/interop/interop_html_report.template'
    try:
        mytemplate = Template(filename=template_file, format_exceptions=True)
    except NameError:
        print(
            'Mako template is not installed. Skipping HTML report generation.')
        return
    except IOError as e:
        print('Failed to find the template %s: %s' % (template_file, e))
        return

    sorted_test_cases = sorted(test_cases)
    sorted_auth_test_cases = sorted(auth_test_cases)
    sorted_http2_cases = sorted(http2_cases)
    sorted_http2_badserver_cases = sorted(http2_badserver_cases)
    sorted_client_langs_http2_badserver_cases = sorted(
        client_langs_http2_badserver_cases)
    sorted_client_langs = sorted(client_langs)
    sorted_server_langs = sorted(server_langs)
    sorted_prod_servers = sorted(prod_servers)

    args = {
        'client_langs': sorted_client_langs,
        'server_langs': sorted_server_langs,
        'test_cases': sorted_test_cases,
        'auth_test_cases': sorted_auth_test_cases,
        'http2_cases': sorted_http2_cases,
        'http2_badserver_cases': sorted_http2_badserver_cases,
        'client_langs_http2_badserver_cases': sorted_client_langs_http2_badserver_cases,
        'resultset': resultset,
        'num_failures': num_failures,
        'cloud_to_prod': cloud_to_prod,
        'prod_servers': sorted_prod_servers,
        'http2_interop': http2_interop
    }

    html_report_out_dir = 'reports'
    if not os.path.exists(html_report_out_dir):
        os.mkdir(html_report_out_dir)
    html_file_path = os.path.join(html_report_out_dir, 'index.html')
    try:
        with open(html_file_path, 'w') as output_file:
            mytemplate.render_context(Context(output_file, **args))
    except:
        print(exceptions.text_error_template().render())
        raise
Example n. 19
0
    #BlogPublished must execute before Published is reassigned
    BlogPublished = Published.strftime("%m%d")
    Published = Published.strftime("%B %d, %Y")
    #Parsing Artwork
    ArtHomePage = Art[:-3] + "html"

#Rendering the HTML frame
StoryHTMLFrame = Template(filename='0000-Mako-Template.html')

buf = StringIO()
ctx = Context(buf,
              Headline=Headline,
              Subject=Subject,
              Subtopics=Subtopics,
              Topic=Topic,
              Type=Type,
              Published=Published,
              ArtHomePage=ArtHomePage,
              Art=Art,
              Link=Link,
              Description=Description)

StoryHTMLFrame.render_context(ctx)

f = open("Delivery/" + FileName + ".html", "w")
f.write(buf.getvalue())
f.close()

#vestigial Function
#Rendering the Intro text for the blog
#First, rework the filename to include the day instead:
Example n. 20
0
                        default_filters=['decode.utf8'],
                        encoding_errors='replace')

#buffer = codecs.open(fileName, 'w', encoding='utf-8')
buffer = open(fileName, 'w')

BITFIELDs = ''
for i in reversed(range(0, data_width)):
    BITFIELDs += str(i) + '|'

REG_OV = _('Register Overview').encode('utf8')
ADDR   = _('Address').encode('utf8')
REG    = _('Register').encode('utf8')
RSTVAL = _('Reset value').encode('utf8')

buffer.write('.' + REG_OV + '\n')
buffer.write('[cols="^3e,^3s,'+ str(data_width) +'*^1,^5",options="header"]\n')
buffer.write('|===================================================\n')
buffer.write('| ' + ADDR + ' | ' + REG + ' |' + BITFIELDs + RSTVAL + '\n')

for reg in addressBlock.register:
    template = lookup.get_template('adoc_regs_overview.mako')
    ctx = Context(buffer,
                  reg=reg,
                  cfg=cfg,
                  addr_width=addr_width,
                  data_width=data_width)
    template.render_context(ctx)

buffer.write("\n|===================================================\n")
Example n. 21
0
def context(buffer=None, **maps):
    if not buffer:
        buffer = StringIO()

    ctx = Context(buffer, **maps)
    return ctx
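
# Usage sketch (assumed, not part of the original snippet): pass your own
# buffer so the rendered output can be read back afterwards; if the helper
# allocates the StringIO itself, the caller keeps no handle to it.
from mako.template import Template

buf = StringIO()
ctx = context(buf, greeting='hello')
Template("${greeting}, world!").render_context(ctx)
print buf.getvalue()  # -> hello, world!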
Example n. 22
0
 def reset_buffer(self):
     """ Refreshes the internal buffer and resets the context """
     self._buffer = StringIO()
     # Context.kwargs returns the keyword data the context was built with,
     # so the fresh context starts from the same variables as the old one
     self._context = Context(self._buffer, **self._context.kwargs)
Example n. 23
0
def begin():
    # Imports for mako
    try:
        from mako.template import Template
        from mako.runtime import Context
        from mako.lookup import TemplateLookup
        from mako import exceptions
    except:
        ntop.printHTMLHeader('ntop Python Configuration Error', 1, 1)
        ntop.sendString(
            "<b><center><font color=red>Please install <A HREF=http://www.makotemplates.org/>Mako</A> template engine</font><p></b><br>(1) 'sudo yum install python-setuptools' (on RedHat-like systems)<br>(2) 'sudo easy_install Mako'</font></center>"
        )
        ntop.printHTMLFooter()
        return
    # Fix encoding
    #reload(sys)
    #sys.setdefaultencoding("latin1")

    templateFilename = 'ipPortQuery.tmpl'

    #fb_DB = '/tmp/'               #ntop.getPreference ("fastbitDBPath");    #default location of the fastbit DB
    databasePath = ntop.getPreference("fastbit.DBPath")  #default location of the fastbit DB

    if databasePath is None or databasePath == '':
        ntop.printHTMLHeader('ntop Fastbit Configuration Error', 1, 1)
        ntop.sendString(
            "<b><center><font color=red>Please set fastbit.DBPath ntop preferences from <i>Admin/Configure/Preferences</i> menu (es: fastbit.DBPath=/tmp/)</b></font></center>"
        )
        ntop.printHTMLFooter()
        return

    #pathFastBit=os.path.join(databasePath,'fastbit'+os.path.sep)

    form = cgi.FieldStorage()
    #get the fromAuto parameter from the url (AJAX request to expand partitions)
    fromAuto = form.getvalue('fromAuto')

    if fromAuto:
        #print>>sys.stderr, "AJAX REQUEST FOR PARTITION IN  "+databasePath+" "+fromAuto
        jsonString = expandFrom(fromAuto, os.path.join(databasePath, ""))
        ntop.sendHTTPHeader(12)
        ntop.sendString(jsonString)
        return

    documentRoot = os.getenv('DOCUMENT_ROOT', '.')

    selectArg = 'PROTOCOL,IPV4_SRC_ADDR,L4_SRC_PORT,IPV4_DST_ADDR,L4_DST_PORT,IN_BYTES,IN_PKTS'
    fromArg = form.getvalue('partition')

    ipSrc = form.getvalue('ipSrc')
    ipDst = form.getvalue('ipDst')

    portSrc = form.getvalue('portSrc')
    portDst = form.getvalue('portDst')

    limit = int(form.getvalue('limit', 100))

    if limit < 0:
        limit = 0
    res = None  #variable to store the results of the query
    ntop.printHTMLHeader('IP-Port Query', 1, 0)
    #regex to check passed parameters
    ipV4Type = re.compile(
        r'(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)'
    )
    portType = re.compile(r'\d{1,5}$')  #anchored so trailing garbage is rejected

    #pprint.pprint(ipV4Type.match(str(ipSrc)), sys.stderr)
    formatErrorString = ''
    #without the partition do nothing
    if fromArg:
        whereArg = '1=1'  #to avoid leaving the where clause empty
        if ipSrc:
            #print>>sys.stderr, 'ECCO '+str(ipV4Type.match(ipSrc))
            if ipV4Type.match(ipSrc):
                whereArg = whereArg + ' AND IPV4_SRC_ADDR=' + str(
                    ipToNumber(ipSrc))
            else:
                formatErrorString = formatErrorString + 'Source IP format invalid! IPv4 format required. '
        if portSrc:
            if portType.match(portSrc):
                whereArg = whereArg + ' AND L4_SRC_PORT=' + str(portSrc)
            else:
                formatErrorString = formatErrorString + 'Source port format invalid! Number required. '
        if ipDst:
            if ipV4Type.match(ipDst):
                whereArg = whereArg + ' AND IPV4_DST_ADDR=' + str(
                    ipToNumber(ipDst))
            else:
                formatErrorString = formatErrorString + 'Destination IP format invalid! IPv4 format required. '
        if portDst:
            if portType.match(portDst):
                whereArg = whereArg + ' AND L4_DST_PORT=' + str(portDst)
            else:
                formatErrorString = formatErrorString + 'Destination port format invalid! Number required. '
        try:
            #pipe = subprocess.Popen (['ntop.getPreference ("fastbit.fbquery")', "-c", selectArg, "-d", fromArg, "-q", whereArg, "-P", "-L", limit],
            #print>>sys.stderr, "Query passed: SELECT %s FROM %s WHERE %s LIMIT %i" %(selectArg,os.path.join(databasePath, fromArg),  whereArg, limit)
            if formatErrorString == '':
                res = fastbit.query(os.path.join(databasePath, fromArg),
                                    selectArg, whereArg, limit)
            else:
                print >> sys.stderr, 'ipPortQuery: ERROR ' + formatErrorString
                ntop.sendString('<center><font color=red>%s</font></center>' %
                                formatErrorString)
            #print>>sys.stderr, 'Number of records: %i' % len(res['values'])
        except:
            print >> sys.stderr, 'ERROR Executing query: ' + (
                "SELECT %s FROM %s WHERE %s LIMIT %i" %
                (selectArg, os.path.join(databasePath,
                                         fromArg), whereArg, limit))
            res = {}
        if res is not None and 'columns' in res and 'values' in res:

            toFormat = getAddrCols(selectArg)  #list of address column indexes

            for x in res['values']:
                x[0] = getNameByProto(x[0])  #format protocol number as text
                for j in toFormat:
                    #format every address column as a dotted-quad IP and
                    #link it to the ntop host page
                    ipStr = numberToIp(x[j])
                    x[j] = '<a href="/%s.html" class="tooltip">%s</a>' % (ipStr, ipStr)
    #else:
    #    print >> sys.stderr, 'ipPortQuery: ERROR partition required'
    #    ntop.sendString('<center><font color=red>Partition field required!</font></center>')
    #pprint.pprint(res, sys.stderr)
    #if res is not None:
    #    res['columns']=['Protocol', 'IP Source Addr', 'IP Dest. Addr', 'Source Port', 'Dest. Port', 'Bytes Rcvd', 'Packets Rcvd']

    try:
        basedir = os.path.join(documentRoot, 'python/templates')
        mylookup = TemplateLookup(directories=[basedir])
        myTemplate = mylookup.get_template(templateFilename)
        buf = StringIO()

        ctx = Context(buf, results=res)

        myTemplate.render_context(ctx)
        ntop.sendString(buf.getvalue())
    except:
        ntop.sendString(exceptions.html_error_template().render())

    ntop.printHTMLFooter()
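
# The lookup/render/send sequence above recurs in several of these ntop
# scripts; a minimal helper capturing it would look like this (a sketch,
# not part of the original -- the name render_to_string is an assumption):
def render_to_string(basedir, template_name, **kwargs):
    mylookup = TemplateLookup(directories=[basedir])
    template = mylookup.get_template(template_name)
    buf = StringIO()
    template.render_context(Context(buf, **kwargs))
    return buf.getvalue()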
Example n. 24
0
            data['hours'] = 'Half an hour past sunrise to half an hour past sunset.'
        else:
            data['hours'] = 'Sunrise to Sunset'

    if 'parking' not in data:
        data['parking'] = ''

    if 'directions' not in data:
        data['directions'] = ''

    if 'address' not in data:
        data['address'] = ''
        data['directionsX'] = ''
        data['directionsY'] = ''

    if 'description' not in data:
        data['description'] = ''

    data['baseFileName'] = baseFileName

    #print(data)

    buf = StringIO()
    ctx = Context(buf, **data)
    mytemplate.render_context(ctx)

    outputFile = codecs.open(outputFileName, encoding='utf-8', mode='w')
    outputFile.write(buf.getvalue())
    outputFile.close()
Example n. 25
0
def begin():
    rrdFilesPath = os.path.join(ntop.getDBPath(), 'rrd')
    ''' Check the existence of the rrd database files that maintain the history of this script '''
    def updateDBS(time, resultsValue, durationValue):
        #global rrdFilesPath
        rrdAlarmDbPath = os.path.join(rrdFilesPath, 'rrdAlarm/')
        if not os.path.exists(rrdAlarmDbPath) or not os.path.isdir(
                rrdAlarmDbPath):
            os.mkdir(rrdAlarmDbPath)

        nameDuration = "duration"
        nameResults = "results"
        #nameDuration="duration.rrd"
        #nameResults="results.rrd"
        #archives=['RRA:AVERAGE:0.5:1:60',
        #         'RRA:AVERAGE:0.5:30:6']#1 hour and 3 hours data
        try:
            #rrdtool.update(os.path.join(rrdAlarmDbPath,nameDuration), 'N:'+str(durationValue))
            ntop.updateRRDGauge(rrdAlarmDbPath, nameDuration, durationValue, 0)
        #print>>sys.stderr, 'Updating'+str(durationValue)
        except:
            print >> sys.stderr, 'RRDAlarm: Error Updating rrdDB ' + nameDuration
            '''
            dataSources=['DS:duration:GAUGE:120:0:U']
            rrdtool.create(rrdAlarmDbPath+nameDuration, '--start', str(time), '--step', str(60), dataSources[0], archives[0], archives[1] )
            rrdtool.update(rrdAlarmDbPath+nameDuration,'N:'+str(durationValue))'''

        try:
            #rrdtool.update(os.path.join(rrdAlarmDbPath,nameResults), 'N:'+str(resultsValue))
            ntop.updateRRDGauge(rrdAlarmDbPath, nameResults, resultsValue, 0)
        except:
            print >> sys.stderr, 'RRDAlarm: Error Updating rrdDB ' + nameResults
            '''
            dataSources=['DS:results:GAUGE:120:0:U']
            rrdtool.create(rrdAlarmDbPath+nameResults, '--start', str(time), '--step', str(60), dataSources[0], archives[0], archives[1] )
            rrdtool.update(rrdAlarmDbPath+nameResults, 'N:'+str(resultsValue))'''

    '''Function that must be called as a new thread so that its execution time
       can be controlled and limited by the main thread.
       All the checks on the rrds are performed by this function.
       '''

    def controlT():
        #global rrdFilesPath
        ntopSpoolPath = ntop.getSpoolPath()
        nameFileConfig = 'rrdAlarmConfig.txt'
        tempFileName = 'rrdAlarmStart.tmp'

        configuration = None
        TWODECIMAL = decimal.Decimal(10)**-2
        timeStart = time.time()

        alarmsFired = 0
        checkedFiles = 0
        fmt = '%a, %d %b %Y %H:%M:%S'  #format of the time shown
        form = cgi.FieldStorage()
        #get the parameter passed via the url
        noHTML = bool(form.getvalue('noHTML'))
        configFile = form.getvalue('configFile')
        if configFile and len(configFile) > 0:
            nameFileConfig = str(configFile)
        try:
            tempFile = open(os.path.join(ntopSpoolPath, tempFileName), 'r')

            configuration = pickle.load(tempFile)
            tempFile.close()

            if configuration and (
                    timeStart < float(configuration['timeStart']) + float(60)):
                ntop.sendHTTPHeader(1)
                ntop.printHTMLHeader('RRD Alarm Called too early!', 1, 0)
                ntop.sendString(
                    "Wait at least a minute. Last time started: %s" %
                    time.strftime(fmt,
                                  time.localtime(configuration['timeStart'])))
                ntop.printHTMLFooter()
                return 0  #exit because the script was started less than one minute ago

            else:
                configuration['timeStart'] = timeStart

        except IOError:  #the tempFile does not exist or some other problem
            print >> sys.stderr, 'RRDAlarm: IOError while accessing tempfile ' + tempFileName
            configuration = createNewConfiguration(
                rrdFilesPath, os.path.join(ntopSpoolPath, nameFileConfig),
                timeStart)

        except (pickle.PickleError, pickle.UnpicklingError):
            print >> sys.stderr, "RRDAlarm: Problems during the UnPickling load, tempFile Delete..."
            os.remove(os.path.join(ntopSpoolPath, tempFileName))
            return -1

        if configuration['lastModified'] != os.path.getmtime(
                os.path.join(ntopSpoolPath, nameFileConfig)):
            #if the configuration file has changed, the temp file and the configuration dictionary must be rebuilt
            configuration = createNewConfiguration(
                rrdFilesPath, os.path.join(ntopSpoolPath, nameFileConfig),
                timeStart)

        listRows = []
        parameterDic = {}  #maps each action parameter to {'actionToPerform', 'textAlarm'}

        for threshold in configuration['listThresholds']:  #for all the thresholds
            listFiles = threshold.getListFilename()
            checkedFiles = checkedFiles + len(listFiles)

            for fileN in listFiles:  #for all the filenames referred by the threshold
                #rrd_argv=[fileN,'AVERAGE', '--start', threshold.getStartTime(), '--end', threshold.getStartTime()]
                #Return :((start, end, step), (name1, name2, ...), [(data1, data2, ..), ...])

                #print>>sys.stderr, '\nLOOK for the parameters '+str(threshold.getStartTime())+' '+str(threshold.getEndTime())+' '+str(fileN)
                rrdObj = ((0, 0, 0), (), [])  #empty object
                try:
                    #rrdObj=rrdtool.fetch(fileN, 'AVERAGE', '--start', threshold.getStartTime(), '--end', threshold.getEndTime())
                    rrdObj = ntop.rrd_fetch(fileN, 'AVERAGE',
                                            threshold.getStartTime(),
                                            threshold.getEndTime())
                except Exception as e:
                    print >> sys.stderr, 'start.py PyRRDTool exception ' + str(e)

                step = rrdObj[0][2]
                start = float(rrdObj[0][0])
                end = float(rrdObj[0][1])

                valueDataTuple = rrdObj[2]
                #check each returned value against the threshold (starting from the end) to decide whether an alarm must be fired
                i = len(valueDataTuple)

                while i > 0:
                    #for value in valueDataTuple:
                    timeA = (step * i) + start
                    i = i - 1
                    value = valueDataTuple[i]

                    if threshold.checkIfFire(value[0]):  #fires if the threshold was exceeded
                        alarmsFired = alarmsFired + 1
                        listRows.append(
                            (threshold.getUniqueId(), fileN, value[0],
                             threshold.getType(), threshold.getValue(),
                             time.strftime(fmt, time.localtime(timeA)), timeA,
                             threshold.getActionToPerform(), 'ALARM FIRED'))
                        strAlarm = '<ALARM>\nID: %i FILENAME: %s\nVALUE: %s TYPE: %s THRESHOLD VALUE: %f\n LOCALTIME: %s START: %s END: %s\n</ALARM>\n' % (
                            threshold.getUniqueId(), fileN, value[0],
                            threshold.getType(), threshold.getValue(),
                            time.strftime(fmt, time.localtime(timeA)),
                            threshold.getStartTime(), threshold.getEndTime())
                        param = threshold.getActionParameter()
                        if param in parameterDic:
                            parameterDic[param]['textAlarm'] += strAlarm
                        else:
                            parameterDic[param] = {
                                'actionToPerform': threshold.getActionToPerform(),
                                'textAlarm': strAlarm
                            }
                        break
                        #print>>sys.stderr, 'The type of the threshold was misconfigured!'
                else:
                    #while-else: no alarm fired for this file, record an OK row
                    listRows.append(
                        (threshold.getUniqueId(), fileN, '-',
                         threshold.getType(), threshold.getValue(),
                         time.strftime(fmt, time.localtime(end)), end, 'None',
                         'OK'))

        #save the information useful for future runs of this script
        saveTempFile(configuration, os.path.join(ntopSpoolPath, tempFileName))

        documentRoot = os.getenv('DOCUMENT_ROOT', '.')
        #perform all the actions for the alarms fired (if any)
        performActions(parameterDic, documentRoot)

        duration = decimal.Decimal(str(time.time() - timeStart)).quantize(
            TWODECIMAL, decimal.ROUND_CEILING)
        #update the rrds that trace the history of this script  TODO check where to place this
        updateDBS(int(timeStart), alarmsFired, duration)

        ntop.sendHTTPHeader(1)

        if not noHTML:  #if noHTML was passed, only the empty http response is sent
            ntop.printHTMLHeader('RRD Alarm Report', 1, 0)
            try:
                basedir = os.path.join(documentRoot, 'python/templates')
                mylookup = TemplateLookup(directories=[basedir])
                myTemplate = mylookup.get_template('rrdAlarmStart.tmpl')
                buf = StringIO()

                ctx = Context(buf,
                              listRows=listRows,
                              duration=duration,
                              checkedFiles=checkedFiles,
                              alarmsFired=alarmsFired)
                myTemplate.render_context(ctx)
                ntop.sendString(buf.getvalue())
            except:
                ntop.sendString(exceptions.html_error_template().render())
                return 1

            #finally:
            #condition.notify()
            ntop.printHTMLFooter()

        print >> sys.stderr, '%s CET Exit rrdAlarm' % time.strftime(fmt, time.localtime(time.time()))
        return 0
Example n. 26
0
def render_mako_str(template, context):
    t = Template(template)
    buf = StringIO()
    ctx = Context(buf, **context)
    t.render_context(ctx)
    return buf.getvalue()
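
# Usage sketch (assumed, not from the original source):
html = render_mako_str("hello, ${name}!", {'name': 'jack'})
print html  # -> hello, jack!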
Example n. 27
0
    def extract(self, async_extract=None, **kw):
        """
        Will return a list of all extracted files
        """
        if async_extract is True or (async_extract is None
                                     and ignore_fireworks()):
            return self.async_extract(**kw)

        parse_type = self.read_parse_type(self.filename)

        import os, os.path
        from mako.template import Template
        from mako.runtime import Context
        from StringIO import StringIO

        vol = urllib.quote(os.listdir('/Volumes')[0])
        prefix = 'file:///' + vol

        temp_dir = tempfile.mkdtemp()
        eid = 'FILE'

        data = {
            'in_file': prefix + os.path.abspath(self.filename),
            'out_dir': prefix + temp_dir + '/',
            'eid': eid
        }

        #script template
        here = os.path.dirname(os.path.dirname(__file__))
        script = os.path.join(here, 'backend', 'fireworks_export.js')
        mytemplate = Template(filename=script)
        buf = StringIO()
        ctx = Context(buf, **data)
        mytemplate.render_context(ctx)
        val = buf.getvalue()

        script, scname = self._get_tmp_file(None, dest_format='js')
        script.write(val)
        script.close()

        c = adobe.Fireworks()
        c.connect()

        error = None
        special_script = prefix + scname
        try:
            x = c.call('fw', 'runScript', special_script)
        except adobe.AdobeException as e:
            error = e
            if e.code != adobe.ERROR_DIED:
                raise
            print 'Fireworks died, but we can keep going.'

        os.remove(scname)

        regex = re.compile('FILE([0-9]+)')

        def key(s):
            m = regex.search(s)
            if m:
                return int(m.group(1))
            return None

        #now there should be files.
        files = os.listdir(temp_dir)
        files.sort(key=key)
        files = [
            ExtractedFile(os.path.join(temp_dir, f), EXTRACT_TYPE_FULL)
            for f in files if key(f) is not None
        ]

        if files:
            self.image = Image(files[0].filename)
            files += self.thumbnail()
            self.image.destroy()
        elif error:
            raise error  #ok, maybe it legitimately died.

        return files, ExtractStatus(type=parse_type, file_type='FWPNG')
Example n. 28
0
def render(template, **attrs):
    buf = StringIO()
    ctx = Context(buf, **attrs)
    template.render_context(ctx, **attrs)
    return buf.getvalue()
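
# Usage sketch (assumed): render a pre-compiled Template to a string. Note
# that attrs are passed both as Context data and as render_context() keyword
# arguments; the extra kwargs appear to matter only for templates declaring
# <%page args="..."/>, so for plain templates the second pass is redundant.
from mako.template import Template

print render(Template("x = ${x}"), x=1)  # -> x = 1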
Example n. 29
0
                                #failed both ipv6 and ipv4 conversion
                                print >> sys.stderr, "fastbit.py: IMPOSSIBLE TO FORMAT value: " + str(x[j]) + " TO IP ADDR"

                    #x[1]=socket.inet_ntop(socket.AF_INET,struct.pack('>L',x[1]))
                rows = res['values']
            #pprint.pprint(res, sys.stderr)

    try:
        basedir = os.path.join(documentRoot, 'python/templates')
        mylookup = TemplateLookup(directories=[basedir])
        myTemplate = mylookup.get_template(templateFilename)
        buf = StringIO()

        ctx = Context(buf,
                      columns=cols,
                      values=rows,
                      queryPar=queryPar,
                      queryHistory=history['history'])

        myTemplate.render_context(ctx)
        ntop.sendString(buf.getvalue())
    except:
        ntop.sendString(exceptions.html_error_template().render())

    ntop.printHTMLFooter()


'''HERE STARTS THE SCRIPT'''

begin()
Example n. 30
0
	month = dater[2] + dater[3]
	Months = {"01": "January",
	          "02": "February",
	          "03": "March",
	          "04": "April",
	          "05": "May",
	          "06": "June",
	          "07": "July",
	          "08": "August",
	          "09": "September",
	          "10": "October",
	          "11": "November",
	          "12": "December",
	          "00": " "}

	PhotoDate = Months[month] + " " + year

#Write the file	
#Put the file path and name back together
	PhotoPage = FilePath + PhotoPage
	PhotoFile = Photo


	buf = StringIO()
	ctx = Context(buf,
	              PhotoTitle=PhotoTitle,
	              PhotoDate=PhotoDate,
	              PhotoFile=PhotoFile,
	              PhotoDescription=PhotoDescription,
	              PhotoLocation=PhotoLocation,
	              PhotoKeywords=PhotoKeywords)
	PhotoFrame.render_context(ctx)
	f = open(PhotoPage, "w")
	f.write(buf.getvalue())
	f.close()
Example n. 31
0
		cur = con.cursor(mdb.cursors.DictCursor)
		cur.execute(dbCommand)
	except:
		print "Something ain't right with the db, program exiting"
		sys.exit(0)
	
	print "database o.k. Onward!"

	#Only after db is shown to work do we rewrite the file
	f = open(StoryFile, "w")

	for i in range(cur.rowcount):
		row = cur.fetchone()
		Link = "[BASE URL]" + row["FileLocation"] + row["FileName"] + ".html"
		Description = row["Description"]
		Headline = row["Headline"]
		Art = row["Art"]
		Published = row["Published"]
		Subject = row["Subject"]
		#Parsing the date
		Published = Published.strftime("%a, %d %b %Y")
		buf = StringIO()
		ctx = Context(buf,
		              Headline=Headline,
		              Link=Link,
		              Description=Description,
		              Published=Published,
		              Art=Art,
		              Subject=Subject)
		StoryListing.render_context(ctx)
		f.write(buf.getvalue())
	f.close() 

#DB connectivity code from http://zetcode.com/db/mysqlpython/
#Date format http://www.faqs.org/rfcs/rfc822.html

Example n. 32
0
def gen_nav(url):
    global test_url
    test_url = url

    h = helper_struct(url_for=get_test_url)
    c = context_struct(subsubmenu={})
    buf = StringIO()
    ctx = Context(buf, h=h, c=c)

    try:
        t.render_context(ctx)
    except:
        print exceptions.text_error_template().render()
        return

    full_html = buf.getvalue()
    sub_html_pt = full_html.find("<!-- Secondary navigation")
    pri_html = full_html[0:sub_html_pt]
    sec_html = full_html[sub_html_pt:-1]

    pri_text = [
        y for y in (x.strip()
                    for x in re.sub(r'<.*?>', '', pri_html).splitlines()) if y
    ]
    sec_text = [
        y for y in (x.strip()
                    for x in re.sub(r'<.*?>', '', sec_html).splitlines()) if y
    ]

    # put a flag "LINK" in each line with a url, strip it out later
    mangled_pri_links = re.sub(r'.*?href\s?=\s?[\'"](.*?)[\'"].*', r'LINK \1',
                               pri_html)
    pri_links = [
        x[5:].strip() for x in mangled_pri_links.splitlines()
        if x[0:5] == "LINK "
    ]
    mangled_sec_links = re.sub(r'.*?href\s?=\s?[\'"](.*?)[\'"].*', r'LINK \1',
                               sec_html)
    sec_links = [
        x[5:].strip() for x in mangled_sec_links.splitlines()
        if x[0:5] == "LINK "
    ]

    # Find the selected link, if there is one
    pri_sel = [
        re.sub(r'.*?href\s?=\s?[\'"](.*?)[\'"].*', r'\1', x)
        for x in pri_html.splitlines() if "selected" in x
    ]
    sec_sel = [
        re.sub(r'.*?href\s?=\s?[\'"](.*?)[\'"].*', r'\1', x)
        for x in sec_html.splitlines() if "selected" in x
    ]
    sec_sel_text = [
        re.sub(r'<.*?>', '', x).strip() for x in sec_html.splitlines()
        if "selected" in x
    ]

    return {
        'full_html': full_html,
        'pri_text': pri_text,
        'pri_links': pri_links,
        'sec_text': sec_text,
        'sec_links': sec_links,
        'pri_sel': pri_sel[0] if pri_sel else None,
        'sec_sel': sec_sel[0] if sec_sel else None,
        'sec_sel_text': sec_sel_text[0] if sec_sel_text else None
    }
Example n. 33
0
    project = sys.argv[1]


    langs = get_available_lang('swissguesser/static/%s/data/locale' % project)

    for lang in langs:

        with open('swissguesser/static/%s/data/locale/%s/translation.json' % (project, lang)) as data_file:
            data = json.load(data_file)
        #normalize keys: the json files use dashes, mako identifiers need underscores
        for key in data.keys():
            data[key.replace('-', '_')] = data.pop(key)

        data['lang'] = lang
        data['Preview_Url'] = u"images/preview.jpg" # relative url should work
        data['App_Url'] = u"http://storymaps.geo.admin.ch/storymaps/%s" % project
        buf = StringIO()
        ctx = Context(buf, **data)

        print ctx.keys()

        serve_template('index.html.mako', ctx)
        html = buf.getvalue().encode('utf-8')
        
        with open('swissguesser/static/%s/index.html.%s' % (project,lang), 'w') as f:
            f.write(html)