# Father of the Bride Part II
# NOTE(review): stray docstring terminator from the scraped example; the
# opening quotes are not in this chunk, so the ''' is commented out here.

# Map of movie slug -> TMDB poster filename.
poster_file = {
    'toy_story': 'rhIRbceoE9lR4veEXuwCC2wARtG.jpg',
    'jumanji': 'vzmL6fP7aPKNKPRTFnZmiUfciyV.jpg',
    'grumpier_old_men': '6ksm1sjKMFLbO7UY2i6G1ju9SML.jpg',
    'waiting_to_exhale': '16XOMpEaLWkrcPqSQqhTmeJuqQl.jpg',
    'father_of_the_bride': 'e64sOI48hQXyru7naBFyssKFxVd.jpg',
}

# WSGI application object.
# NOTE(review): `from flask import Flask` and `from string import Template`
# are not visible in this chunk -- confirm they appear earlier in the file.
app = Flask(__name__)

# Page body for /<some_file>; ${file_name} is filled by some_place_page()
# below with the poster filename taken from the URL path.
HTML_TEMPLATE = Template("""
<h1>Hello ${file_name}!</h1>

<img src="https://image.tmdb.org/t/p/w342/${file_name}" alt="poster for ${file_name}">

""")


@app.route('/')
def homepage():
    """Serve the static landing page used to smoke-test the app."""
    heading = "Test of dynamic poster display here Feb 2"
    return "<h1>{}</h1>".format(heading)


@app.route('/<some_file>')
def some_place_page(some_file):
    """Render the poster page for ``some_file``.

    ``some_file`` comes straight from the URL path, so it is untrusted
    input; it must be HTML-escaped before being interpolated into the
    page to prevent cross-site scripting.
    """
    from html import escape  # local import keeps this fix self-contained
    return HTML_TEMPLATE.substitute(file_name=escape(some_file))


def test_call(test_text):
# Example #2
# (score: 0)
class THPPlugin(CWrapPlugin):
    """cwrap plugin that emits the Python (THP*) C bindings for TH tensor
    functions.

    The class-level tables below map TH C type names to string.Template
    snippets that are spliced into the generated C source.
    """

    # How to extract the raw TH value from a Python object; $arg is the
    # PyObject* being unpacked.
    TYPE_UNPACK = {
        'THFloatTensor*': Template('((THPFloatTensor*)$arg)->cdata'),
        'THDoubleTensor*': Template('((THPDoubleTensor*)$arg)->cdata'),
        'THLongTensor*': Template('((THPLongTensor*)$arg)->cdata'),
        'THIntTensor*': Template('((THPIntTensor*)$arg)->cdata'),
        'THTensor*': Template('((THPTensor*)$arg)->cdata'),
        'THBoolTensor*': Template('((THPBoolTensor*)$arg)->cdata'),
        'THIndexTensor*': Template('((THPIndexTensor*)$arg)->cdata'),
        'THIntegerTensor*': Template('((THPIntegerTensor*)$arg)->cdata'),
        'THCudaTensor*': Template('((THCPFloatTensor*)$arg)->cdata'),
        'THCudaDoubleTensor*': Template('((THCPDoubleTensor*)$arg)->cdata'),
        'THCudaIntTensor*': Template('((THCPIntTensor*)$arg)->cdata'),
        'THCudaLongTensor*': Template('((THCPLongTensor*)$arg)->cdata'),
        'THSFloatTensor*': Template('((THSPFloatTensor*)$arg)->cdata'),
        'THSDoubleTensor*': Template('((THSPDoubleTensor*)$arg)->cdata'),
        'THSLongTensor*': Template('((THSPLongTensor*)$arg)->cdata'),
        'THSIntTensor*': Template('((THSPIntTensor*)$arg)->cdata'),
        'THSTensor*': Template('((THSPTensor*)$arg)->cdata'),
        'THSBoolTensor*': Template('((THSPBoolTensor*)$arg)->cdata'),
        'THSIndexTensor*': Template('((THSPIndexTensor*)$arg)->cdata'),
        'THLongStorage*': Template('((THPLongStorage*)$arg)->cdata'),
        'THStorage*': Template('((THPStorage*)$arg)->cdata'),
        'THGenerator*': Template('((THPGenerator*)$arg)->cdata'),
        # Sizes/strides are unpacked earlier into __size/__stride locals
        # (variables added in process_declarations()).
        'THSize*': Template('__size.get()'),
        'THStride*': Template('__stride.get()'),
        'void*': Template('THPUtils_unpackLong($arg)'),
        'long': Template('THPUtils_unpackLong($arg)'),
        'int': Template('THPUtils_unpackLong($arg)'),
        'bool': Template('($arg == Py_True ? true : false)'),
        'float': Template('THPFloatUtils_unpackReal($arg)'),
        'double': Template('THPDoubleUtils_unpackReal($arg)'),
        'real': Template('THPUtils_(unpackReal)($arg)'),
        'accreal': Template('THPUtils_(unpackAccreal)($arg)'),
    }

    # For each C type, a C boolean expression that tests whether $arg
    # (a PyObject*) can be unpacked as that type; used to select the
    # matching overload at runtime.
    TYPE_CHECK = {
        'THDoubleTensor*':
        Template('(PyObject*)Py_TYPE($arg) == THPDoubleTensorClass'),
        'THFloatTensor*':
        Template('(PyObject*)Py_TYPE($arg) == THPFloatTensorClass'),
        'THLongTensor*':
        Template('(PyObject*)Py_TYPE($arg) == THPLongTensorClass'),
        'THIntTensor*':
        Template('(PyObject*)Py_TYPE($arg) == THPIntTensorClass'),
        'THTensor*':
        Template('(PyObject*)Py_TYPE($arg) == THPTensorClass'),
        'THBoolTensor*':
        Template('(PyObject*)Py_TYPE($arg) == THPBoolTensorClass'),
        'THIndexTensor*':
        Template('(PyObject*)Py_TYPE($arg) == THPIndexTensorClass'),
        'THIntegerTensor*':
        Template('(PyObject*)Py_TYPE($arg) == THPIntegerTensorClass'),
        'THCudaTensor*':
        Template('(PyObject*)Py_TYPE($arg) == THCPFloatTensorClass'),
        'THCudaDoubleTensor*':
        Template('(PyObject*)Py_TYPE($arg) == THCPDoubleTensorClass'),
        'THCudaIntTensor*':
        Template('(PyObject*)Py_TYPE($arg) == THCPIntTensorClass'),
        'THCudaLongTensor*':
        Template('(PyObject*)Py_TYPE($arg) == THCPLongTensorClass'),
        'THSDoubleTensor*':
        Template('(PyObject*)Py_TYPE($arg) == THSPDoubleTensorClass'),
        'THSFloatTensor*':
        Template('(PyObject*)Py_TYPE($arg) == THSPFloatTensorClass'),
        'THSLongTensor*':
        Template('(PyObject*)Py_TYPE($arg) == THSPLongTensorClass'),
        'THSIntTensor*':
        Template('(PyObject*)Py_TYPE($arg) == THSPIntTensorClass'),
        'THSTensor*':
        Template('(PyObject*)Py_TYPE($arg) == THSPTensorClass'),
        'THSBoolTensor*':
        Template('(PyObject*)Py_TYPE($arg) == THSPBoolTensorClass'),
        'THSIndexTensor*':
        Template('(PyObject*)Py_TYPE($arg) == THSPIndexTensorClass'),
        'THLongStorage*':
        Template('(PyObject*)Py_TYPE($arg) == THPLongStorageClass'),
        'THStorage*':
        Template('(PyObject*)Py_TYPE($arg) == THPStorageClass'),
        'THGenerator*':
        Template('(PyObject*)Py_TYPE($arg) == THPGeneratorClass'),
        # These also unpack as a side effect, into the __size/__stride
        # locals declared by process_declarations().
        'THSize*':
        Template('THPUtils_tryUnpackLongs($arg, __size)'),
        'THStride*':
        Template('THPUtils_tryUnpackLongs($arg, __stride)'),
        'void*':
        Template('THPUtils_checkLong($arg)'),
        'long':
        Template('THPUtils_checkLong($arg)'),
        'int':
        Template('THPUtils_checkLong($arg)'),
        'bool':
        Template('PyBool_Check($arg)'),
        'float':
        Template('THPFloatUtils_checkReal($arg)'),
        'double':
        Template('THPDoubleUtils_checkReal($arg)'),
        'real':
        Template('THPUtils_(checkReal)($arg)'),
        'accreal':
        Template('THPUtils_(checkAccreal)($arg)'),
    }

    # Check used when a THSize* argument may instead be supplied as a
    # variable number of int arguments starting at position $idx
    # (selected in get_type_check()).
    SIZE_VARARG_CHECK = Template(
        'THPUtils_tryUnpackLongVarArgs(args, $idx, __size)')

    # For each C return type, a statement converting $result into the
    # wrapper's PyObject* return value.
    RETURN_WRAPPER = {
        'THTensor*': Template('return THPTensor_(New)($result);'),
        'THSTensor*': Template('return THSPTensor_(New)($result);'),
        'THIndexTensor*': Template('return THPIndexTensor_(New)($result);'),
        'THLongTensor*': Template('return THPLongTensor_New($result);'),
        'THLongStorage*': Template('return THPLongStorage_New($result);'),
        'THCudaIntTensor*': Template('return THCPIntTensor_New($result);'),
        'THCudaLongTensor*': Template('return THCPLongTensor_New($result);'),
        # TODO: make it smarter - it should return python long if result doesn't fit into an int
        'long': Template('return PyInt_FromLong($result);'),
        'accreal': Template('return THPUtils_(newAccreal)($result);'),
        # Returning 'self' increments its refcount rather than wrapping a
        # new TH object.
        'self': Template('Py_INCREF(self);\nreturn (PyObject*)self;'),
        'real': Template('return THPUtils_(newReal)($result);'),
    }

    # PyMethodDef table for the generated tensor methods; $sparse and
    # $stateless select which of the four tables is emitted
    # (see declare_methods()).
    TENSOR_METHODS_DECLARATION = Template("""
static PyMethodDef TH${sparse}PTensor_$stateless(methods)[] = {
    $methods
    {NULL}
};
""")

    # Per-declaration wrapper function. $options (each ending with its own
    # closing brace, hence the lone '}' below) dispatches on argument
    # counts/types; falling through reports invalid arguments.
    WRAPPER_TEMPLATE = Template("""\
PyObject * $name(PyObject *self, PyObject *args, PyObject *kwargs)
{
    HANDLE_TH_ERRORS
    int __tuplecount = args ? PyTuple_Size(args) : 0;
    int __dictcount = kwargs ? PyDict_Size(kwargs) : 0;
    int __argcount = __tuplecount + __dictcount;
    $variables
    $init

    $options
    }

    THPUtils_invalidArguments(args, kwargs, "$readable_name", $num_options, $expected_args);
    return NULL;
    END_HANDLE_TH_ERRORS
}
    """)

    # Allocates a fresh empty tensor named $name, guarded by a smart
    # pointer so it is released on error paths.
    ALLOCATE_TMPL = Template("""\
THP${type}TensorPtr _${name}_guard((THP${type}Tensor*) THP${type}Tensor_NewEmpty());
if (!_${name}_guard.get()) return NULL;
THP${type}Tensor* $name = _${name}_guard.get();
""")

    # Chooses between the CUDA and CPU variants of an allocation snippet
    # at C-preprocessing time.
    ALLOCATE_CUDA = Template("""\
#if IS_CUDA
${cuda}
#else
${cpu}
#endif
""")

    def _allocate(typename, tmpl, cuda_tmpl=None, sparse=False):
        """Instantiate an output-tensor allocation Template for ``typename``.

        Optionally wraps CPU/CUDA variants in an IS_CUDA guard and rewrites
        the prefixes for sparse tensors.  Called at class-body time to build
        ALLOCATE_TYPE below.
        """
        text = tmpl.safe_substitute(type=typename)
        # The generic tensor type uses the TH macro call form.
        if typename == '':
            text = text.replace('NewEmpty', '(NewEmpty)')
        if cuda_tmpl:
            text = cuda_tmpl.substitute(cuda=text.replace('THP', 'THCP'),
                                        cpu=text)
        if sparse:
            for plain, sparse_prefix in (('THP', 'THSP'), ('THCP', 'THCSP')):
                text = text.replace(plain, sparse_prefix)
        return Template(text)

    # Pre-instantiated allocation snippets per output tensor type,
    # built with _allocate() above.
    ALLOCATE_TYPE = {
        'THTensor*': _allocate('', ALLOCATE_TMPL),
        'THLongTensor*': _allocate('Long', ALLOCATE_TMPL),
        'THIntTensor*': _allocate('Int', ALLOCATE_TMPL),
        'THBoolTensor*': _allocate('Byte', ALLOCATE_TMPL, ALLOCATE_CUDA),
        'THIndexTensor*': _allocate('Long', ALLOCATE_TMPL, ALLOCATE_CUDA),
        'THIntegerTensor*': _allocate('Int', ALLOCATE_TMPL, ALLOCATE_CUDA),
        'THSTensor*': _allocate('', ALLOCATE_TMPL, sparse=True),
    }

    # Human-readable type names for argument descriptions in error
    # messages; '" X "' entries splice C string macros into the literal.
    TYPE_NAMES = {
        'THTensor*': '" THPTensorStr "',
        'THSTensor*': '" THSPTensorStr "',
        'THStorage*': '" THPStorageStr "',
        'THGenerator*': 'torch.Generator',
        'THLongStorage*': '" THPModuleStr "LongStorage',
        'THLongTensor*': '" THPModuleStr "LongTensor',
        'THIntTensor*': '" THPModuleStr "IntTensor',
        'THBoolTensor*': '" THPModuleStr "ByteTensor',
        'THIndexTensor*': '" THPModuleStr "LongTensor',
        'THIntegerTensor*': '" THPModuleStr "IntTensor',
        'THFloatTensor*': '" THPModuleStr "FloatTensor',
        'THDoubleTensor*': '" THPModuleStr "DoubleTensor',
        'THCudaTensor*': 'torch.cuda.FloatTensor',
        'THCudaDoubleTensor*': 'torch.cuda.DoubleTensor',
        'THCudaIntTensor*': 'torch.cuda.IntTensor',
        'THCudaLongTensor*': 'torch.cuda.LongTensor',
        'THSize*': 'torch.Size',
        'THStride*': 'tuple',
        'long': 'int',
        'real': '" RealStr "',
        'double': 'float',
        'accreal': '" RealStr "',
        'bool': 'bool',
        'const char*': 'bool',  # Can come only from bool option.
    }

    # Init snippet that fetches the optional "out" keyword argument;
    # an explicit None is treated the same as an absent one (and removed
    # from the argument counts used for overload matching).
    OUT_INIT = """
    __out = kwargs ? PyDict_GetItemString(kwargs, "out") : NULL;
    if (__out == Py_None) { __out = NULL; __dictcount--; __argcount--; }
    """

    def __init__(self):
        """Initialize the per-run accumulators."""
        # Filled by process_declarations(); consumed by declare_methods()
        # and the docstring generators.
        self.declarations = []
        self.stateless_declarations = []
        # NOTE(review): `docstrings` is not referenced anywhere else in
        # this chunk -- possibly vestigial.
        self.docstrings = []

    def get_type_unpack(self, arg, option):
        """Return the Template unpacking ``arg``'s C value, or None if unknown."""
        return self.TYPE_UNPACK.get(arg['type'])

    def get_type_check(self, arg, option):
        """Return the type-check Template for ``arg``, or None if unknown.

        A THSize* flagged with 'long_args' may be passed as a run of plain
        ints, so it gets the vararg size check instead.
        """
        wants_vararg_size = (arg['type'] == 'THSize*'
                             and arg.get('long_args', False))
        if wants_vararg_size:
            return self.SIZE_VARARG_CHECK
        return self.TYPE_CHECK.get(arg['type'])

    # TODO: argument descriptions shouldn't be part of THP, but rather a general cwrap thing
    def get_wrapper_template(self, declaration):
        """Build the Template for the declaration's C wrapper function.

        Fills every placeholder of WRAPPER_TEMPLATE except those left for a
        later pass ($name, $options), hence the Template wrapped around the
        safe_substitute result.
        """
        # Keys double as an ordered set of unique, human-readable argument
        # descriptions (used in the THPUtils_invalidArguments message).
        arg_desc = OrderedDict()

        def format_arg(arg, var_args=False):
            # 'long_args' arguments may be given as a variable number of ints.
            if var_args and arg.get('long_args', False):
                return 'int ... ' + arg['name']
            else:
                return self.TYPE_NAMES[arg['type']] + ' ' + arg['name']

        def format_args(args, var_args=False):
            option_desc = [
                format_arg(arg, var_args) for arg in args
                if not arg.get('ignore_check', False) and not arg.get('output')
            ]
            # Output arguments are described collectively as one trailing
            # '#<type> out' entry (a tuple type when there are several).
            output_args = list(filter(lambda a: a.get('output'), args))
            if output_args:
                if len(output_args) > 1:
                    out_type = 'tuple['
                    out_type += ', '.join(self.TYPE_NAMES[arg['type']]
                                          for arg in output_args)
                    out_type += ']'
                    option_desc += ['#' + out_type + ' out']
                else:
                    arg = output_args[0]
                    option_desc += [
                        '#' + self.TYPE_NAMES[arg['type']] + ' out'
                    ]

            if option_desc:
                return '({})'.format(', '.join(option_desc))
            else:
                return 'no arguments'

        # Record both the fixed-arity and the vararg rendering of each option;
        # duplicates collapse because they are dict keys.
        for option in declaration['options']:
            arg_desc[format_args(option['arguments'], False)] = True
            arg_desc[format_args(option['arguments'], True)] = True

        # Shortest description first in the error message.
        arg_desc = sorted(list(arg_desc.keys()), key=len)
        arg_desc = ['"' + desc + '"' for desc in arg_desc]
        arg_str = ', '.join(arg_desc)
        variables_str = '\n'.join(declaration.get('variables', []))
        init_str = '\n'.join(declaration.get('init', []))
        # Stateless variants are exposed as torch.<name>.
        if 'stateless' in declaration['name']:
            readable_name = 'torch.' + declaration['python_name']
        else:
            readable_name = declaration['python_name']
        return Template(
            self.WRAPPER_TEMPLATE.safe_substitute(readable_name=readable_name,
                                                  num_options=len(arg_desc),
                                                  expected_args=arg_str,
                                                  variables=variables_str,
                                                  init=init_str))

    def get_return_wrapper(self, option):
        """Return the Template converting the C result to a PyObject*, or None."""
        return self.RETURN_WRAPPER.get(option['return'])

    def get_arg_accessor(self, arg, option):
        """Return the C expression used to access ``arg`` in the wrapper.

        'self' maps to the method receiver; output arguments map either to
        their local allocation, the single __out object, or a slot of the
        __out tuple.  NOTE(review): plain arguments fall through and return
        None implicitly -- presumably the cwrap core then supplies a default
        accessor; confirm against the framework.
        """
        if arg['name'] == 'self':
            return 'self'
        if arg.get('output'):
            # Caller did not pass out=...: use the locally allocated tensor.
            if not option['output_provided']:
                return arg['name']
            if option['output_count'] == 1:
                return '__out'
            else:
                return 'PyTuple_GET_ITEM(__out, {})'.format(arg['output_idx'])

    def process_docstrings(self):
        """Attach C docstring variables to declarations that carry one.

        For each declaration with a 'docstring_method' (method variants) or
        'docstring_stateless' (stateless variants), store the escaped text
        under 'docstring_content' and the generated C variable name under
        'docstring_var'.  These are consumed by declare_methods() and the
        generate_docstrings_cpp()/_h() emitters.
        """
        # Fix: previously the stateless loop was mis-nested inside the
        # method loop with its body dedented out of it, clobbering the
        # loop variables and tagging the wrong declarations.  The two
        # lists are independent passes.
        for declaration in self.declarations:
            docstr = declaration.get('docstring_method')
            if docstr is None:
                continue
            declaration['docstring_content'] = docstr.replace('\n', '\\n')
            declaration['docstring_var'] = ('docstr_' +
                                            declaration['python_name'])
        for declaration in self.stateless_declarations:
            docstr = declaration.get('docstring_stateless')
            if docstr is None:
                continue
            declaration['docstring_content'] = docstr.replace('\n', '\\n')
            declaration['docstring_var'] = ('stateless_docstr_' +
                                            declaration['python_name'])

    def generate_out_options(self, declaration):
        """Expand each option into out=-provided / not-provided variants.

        Adds the OUT_INIT snippet to the declaration and, for every option
        with output arguments, emits two copies: one matching a caller that
        passed out=..., one that allocates the outputs itself.  Mutates
        ``declaration`` in place.
        """
        new_options = []
        declaration.setdefault('init', [])
        declaration['init'] += [self.OUT_INIT]
        for option in declaration['options']:
            # Indices of output arguments within this option.
            out_idx = []
            for i, arg in enumerate(option['arguments']):
                if arg.get('output'):
                    out_idx.append(i)
            if not out_idx:
                # No outputs: keep the option, but mark it so the checks in
                # process_all_checks() still verify __out was not passed.
                option['has_output'] = True
                option['output_provided'] = False
                new_options.append(option)
                continue
            for output_provided in (True, False):
                option_copy = deepcopy(option)
                option_copy['has_output'] = True
                option_copy['output_provided'] = output_provided
                option_copy['output_count'] = len(out_idx)
                for i, idx in enumerate(out_idx):
                    arg = option_copy['arguments'][idx]
                    arg['output_idx'] = i
                    if not output_provided:
                        # Outputs are allocated locally, so there is nothing
                        # to type-check in the Python arguments.
                        arg['ignore_check'] = True
                    else:
                        # out= counts as a single keyword argument no matter
                        # how many tensors it carries.
                        option_copy['argcount_offset'] = -len(out_idx) + 1
                        arg['no_kwargs'] = True
                        arg['no_idx'] = True
                new_options.append(option_copy)
        declaration['options'] = new_options

    def process_declarations(self, declarations):
        """Normalize, expand and record cwrap declarations.

        Adds defined_if guards, helper variables, out= option variants and
        stateless (torch.*) variants; accumulates results on
        self.declarations / self.stateless_declarations and returns the
        full list to be generated.
        """
        new_declarations = []

        def has_arg_type(declaration, type_name):
            return any(arg['type'] == type_name
                       for option in declaration['options']
                       for arg in option['arguments'])

        def has_long_args(declaration):
            return any(
                arg.get('long_args', False)
                for option in declaration['options']
                for arg in option['arguments'])

        def has_output_args(declaration):
            return any(
                arg.get('output') for option in declaration['options']
                for arg in option['arguments'])

        for declaration in declarations:
            # Disable all methods for THHalfTensor, unless cpu_half is True
            if not declaration.get('cpu_half', False):
                defined_if = '!defined(TH_REAL_IS_HALF)'
                if 'defined_if' in declaration:
                    defined_if += ' && (' + declaration['defined_if'] + ')'
                declaration['defined_if'] = defined_if

            # only_register declarations are added to the method tables but
            # get no generated wrapper body.
            if declaration.get('only_register', False):
                continue

            declaration.setdefault('python_name', declaration['name'])
            declaration.setdefault('variables', [])
            # Locals needed by the THSize*/THStride* unpack snippets.
            if has_arg_type(declaration, 'THSize*'):
                declaration['variables'] += ['THLongStoragePtr __size;']
            if has_arg_type(declaration, 'THStride*'):
                declaration['variables'] += ['THLongStoragePtr __stride;']
            if has_output_args(declaration):
                declaration['variables'] += ['PyObject *__out;']
                self.generate_out_options(declaration)
            # Vararg-int arguments cannot be matched by keyword.
            if has_long_args(declaration):
                declaration['no_kwargs'] = True
            for option in declaration['options']:
                option['cname'] = 'TH{}Tensor_({})'.format(
                    'S' if option.get('sparse', False) else '',
                    option['cname'])
            if declaration.get('with_stateless', False) or declaration.get(
                    'only_stateless', False):
                stateless_declaration = self.make_stateless(declaration)
                new_declarations.append(stateless_declaration)
                self.stateless_declarations.append(stateless_declaration)
            if declaration.get('only_stateless', False):
                continue

            self.declarations.append(declaration)
            declaration['name'] = 'TH{}PTensor_({})'.format(
                'S' if declaration.get('sparse', False) else '',
                declaration['name'])
            # 'self' is the method receiver -- already the right type.
            for option in declaration['options']:
                for arg in option['arguments']:
                    if arg['name'] == 'self':
                        arg['ignore_check'] = True

        # Registration-only entries still need to appear in the right
        # method table (stateful vs stateless).
        register_only = [
            d for d in declarations if d.get('only_register', False)
        ]
        declarations = [
            d for d in declarations
            if (not d.get('only_stateless', False)) and (
                not d.get('only_register', False))
        ]
        self.declarations.extend(
            filter(lambda x: not x.get('only_stateless', False),
                   register_only))
        self.stateless_declarations.extend(
            filter(lambda x: x.get('only_stateless', False), register_only))

        self.process_docstrings()

        all_declarations = declarations + new_declarations
        return all_declarations

    def make_stateless(self, declaration):
        """Return a deep copy of ``declaration`` renamed for the stateless
        (torch.*) variant, with the 'self' argument renamed to 'source'."""
        stateless = deepcopy(declaration)
        sparse_tag = 'S' if stateless.get('sparse', False) else ''
        stateless['name'] = 'TH{}PTensor_stateless_({})'.format(
            sparse_tag, stateless['name'])
        for option in stateless['options']:
            for arg in option['arguments']:
                if arg['name'] != 'self':
                    continue
                arg['assign_name'] = 'self'
                arg['name'] = 'source'
        return stateless

    def declare_methods(self, stateless, sparse):
        """Emit the PyMethodDef table for one (stateless, sparse) combination.

        Builds one entry per matching declaration, wraps entries in their
        defined_if preprocessor guards, and guards the whole sparse table
        against half-precision builds.
        """
        tensor_methods = ''
        for declaration in (self.declarations
                            if not stateless else self.stateless_declarations):
            if declaration.get('sparse', False) != sparse:
                continue
            flags = 'METH_VARARGS'
            # NOTE: the conditional expression spans the whole right-hand
            # side, so nothing is appended when 'method_flags' is absent.
            flags += ' | ' + declaration.get(
                'method_flags') if 'method_flags' in declaration else ''
            # Generated wrappers take kwargs; registration-only entries
            # may not.
            if not declaration.get('only_register'):
                flags += ' | METH_KEYWORDS'
            if declaration.get('override_method_flags'):
                flags = declaration['override_method_flags']
            entry = Template(
                '  {"$python_name", (PyCFunction)$name, $flags, $docstring},\n'
            ).substitute(python_name=declaration['python_name'],
                         name=declaration['name'],
                         flags=flags,
                         docstring=declaration.get('docstring_var', 'NULL'))
            if 'defined_if' in declaration:
                entry = self.preprocessor_guard(entry,
                                                declaration['defined_if'])
            tensor_methods += entry
        generated = self.TENSOR_METHODS_DECLARATION.substitute(
            methods=tensor_methods,
            stateless=('' if not stateless else 'stateless_'),
            sparse=('' if not sparse else 'S'),
        )
        if sparse:
            generated = '#ifndef TH_REAL_IS_HALF\n' + generated + '\n#endif\n\n'
        return generated

    def process_full_file(self, code):
        """Insert all four generated method tables into ``code``.

        Tables are placed at the marker comment, which sits before the
        file's #undef section.
        """
        marker = '// PUT DEFINITIONS IN HERE PLEASE'
        idx = code.find(marker)
        # Order: plain, stateless, sparse, sparse-stateless.
        tables = ''.join(
            self.declare_methods(stateless, sparse)
            for sparse in (False, True)
            for stateless in (False, True))
        return code[:idx] + tables + code[idx:]

    def preprocessor_guard(self, code, condition):
        """Wrap ``code`` in a #if <condition> ... #endif block."""
        return ''.join(('#if ', condition, '\n', code, '#endif\n'))

    def process_wrapper(self, code, declaration):
        """Guard a wrapper with the declaration's defined_if condition, if any."""
        if 'defined_if' not in declaration:
            return code
        return self.preprocessor_guard(code, declaration['defined_if'])

    def process_all_call_arg(self, code, option):
        """Prefix every TH call argument list with the LIBRARY_STATE macro."""
        return 'LIBRARY_STATE {}'.format(code)

    def process_all_checks(self, code, option):
        """Augment an option's argument-check expression.

        Adds __out presence/shape checks for out=-aware options, and relaxes
        the argument-count comparison for vararg-int options.  ``code`` is a
        C boolean expression; the added clauses are prepended with matching
        indentation for the generated source.
        """
        if option.get('has_output'):
            indent = " " * 10
            if option['output_provided']:
                checks = "__out != NULL &&\n" + indent
                if option['output_count'] > 1:
                    # Multiple outputs arrive as a tuple of exactly
                    # output_count tensors.
                    checks += "PyTuple_Check(__out) &&\n" + indent
                    length_check = "PyTuple_GET_SIZE(__out) == {} &&\n".format(
                        option['output_count'])
                    checks += length_check + indent
                code = checks + code
            else:
                code = "__out == NULL &&\n" + indent + code

        if any(arg.get('long_args', False) for arg in option['arguments']):
            # Vararg ints make the positional count open-ended; keyword
            # count stays exact (only out= may appear).
            code = code.replace('__argcount ==', '__argcount >=')
            expected = str(int(option.get('output_provided', False)))
            code = '__dictcount == ' + expected + ' &&\n          ' + code

        return code

    def process_pre_arg_assign(self, template, option):
        """Prepend allocation code for output tensors the caller did not provide."""
        prepend = []
        # When out= was not passed, each output tensor is allocated locally
        # before the TH call.
        if not option.get('output_provided', True):
            prepend = [
                self.ALLOCATE_TYPE[arg['type']].substitute(name=arg['name'])
                for arg in option['arguments'] if arg.get('output')
            ]
        return prepend + template

    def generate_docstrings_cpp(self):
        """Emit the C definitions of every generated docstring variable."""
        entry = Template('char* $name = "$content";')
        all_decls = chain(self.declarations, self.stateless_declarations)
        definitions = [
            entry.substitute(name=decl['docstring_var'],
                             content=decl['docstring_content'])
            for decl in all_decls if 'docstring_var' in decl
        ]
        return '\n\n'.join(definitions)

    def generate_docstrings_h(self):
        """Emit extern declarations for every generated docstring variable."""
        entry = Template('extern char* $name;')
        all_decls = chain(self.declarations, self.stateless_declarations)
        declarations = [
            entry.substitute(name=decl['docstring_var'])
            for decl in all_decls if 'docstring_var' in decl
        ]
        return '\n\n'.join(declarations)
    def generatecpp(self):
        """Generate the <name>Config.h configuration-manipulator header.

        Reads the ConfigType.h.template file, stamps line numbers and the
        template's own path into it, then writes the filled-in header with
        constants, member fields and parameter/group descriptors.

        NOTE(review): this method references attributes (dynconfpath,
        cpp_gen_dir, name, pkgname, constants, group, appendline,
        appendgroup, mkdirabs) that are not defined by the surrounding
        class in this chunk; it appears to belong to a
        dynamic_reconfigure-style ParameterGenerator.  Confirm placement.
        """
        # Read the configuration manipulator template and insert line numbers and file name into template.
        templatefile = os.path.join(self.dynconfpath, "templates", "ConfigType.h.template")
        templatelines = []
        templatefilesafe = templatefile.replace('\\', '\\\\')  # line directive does backslash expansion.
        # Fix: use a context manager so the template file is closed even if
        # substitution raises.  enumerate(f, 2) reproduces the original
        # counter, which was incremented before first use.
        with open(templatefile) as f:
            for curline, line in enumerate(f, 2):
                templatelines.append(Template(line).safe_substitute(linenum=curline, filename=templatefilesafe))
        template = ''.join(templatelines)

        # Accumulate the generated code sections, then write the header.
        self.mkdirabs(self.cpp_gen_dir)
        paramdescr = []
        groups = []
        members = []
        constants = []
        for const in self.constants:
            self.appendline(constants, "${cconsttype} ${configname}_${name} = $v;", const, "value")

        def write_params(group):
            # Emit this group's descriptor; the root group (id 0) is
            # parented on the Config type itself.
            if group.id == 0:
                paramdescr.append(Template("${configname}Config::GroupDescription<${configname}Config::${class}, ${configname}Config> ${name}(\"${name}\", \"${type}\", ${parent}, ${id}, ${cstate}, &${configname}Config::${lower});").safe_substitute(group.to_dict(), configname=self.name))
            else:
                paramdescr.append(Template("${configname}Config::GroupDescription<${configname}Config::${class}, ${configname}Config::${parentclass}> ${name}(\"${name}\", \"${type}\", ${parent}, ${id}, ${cstate}, &${configname}Config::${field});").safe_substitute(group.to_dict(), configname=self.name))
            for param in group.parameters:
                self.appendline(members, "${ctype} ${name};", param)
                self.appendline(paramdescr, "__min__.${name} = $v;", param, "min")
                self.appendline(paramdescr, "__max__.${name} = $v;", param, "max")
                self.appendline(paramdescr, "__default__.${name} = $v;", param, "default")
                self.appendline(
                    paramdescr,
                    group.to_dict()['name'] +
                    ".abstract_parameters.push_back(${configname}Config::AbstractParamDescriptionConstPtr(new ${configname}Config::ParamDescription<${ctype}>(\"${name}\", \"${type}\", ${level}, "
                    "\"${description}\", \"${edit_method}\", &${configname}Config::${name})));", param)
                self.appendline(
                    paramdescr,
                    "__param_descriptions__.push_back(${configname}Config::AbstractParamDescriptionConstPtr(new ${configname}Config::ParamDescription<${ctype}>(\"${name}\", \"${type}\", ${level}, "
                    "\"${description}\", \"${edit_method}\", &${configname}Config::${name})));", param)

            # Recurse into nested groups before sealing this one.
            for g in group.groups:
                write_params(g)

            self.appendline(paramdescr, "${name}.convertParams();", group.to_dict())
            if group.id == 0:
                self.appendline(paramdescr, "__group_descriptions__.push_back(${configname}Config::AbstractGroupDescriptionConstPtr(new ${configname}Config::GroupDescription<${configname}Config::${class}, ${configname}Config>(${name})));", group.to_dict())
            else:
                self.appendline(paramdescr, "${parentname}.groups.push_back(${configname}Config::AbstractGroupDescriptionConstPtr(new ${configname}Config::GroupDescription<${configname}Config::${class}, ${configname}Config::${parentclass}>(${name})));", group.to_dict())
                self.appendline(paramdescr, "__group_descriptions__.push_back(${configname}Config::AbstractGroupDescriptionConstPtr(new ${configname}Config::GroupDescription<${configname}Config::${class}, ${configname}Config::${parentclass}>(${name})));", group.to_dict())

        write_params(self.group)
        self.appendgroup(groups, self.group)

        paramdescr = '\n'.join(paramdescr)
        members = '\n'.join(members)
        constants = '\n'.join(constants)
        groups = '\n'.join(groups)
        # Fix: context manager guarantees the header is flushed and closed.
        with open(os.path.join(self.cpp_gen_dir, self.name + "Config.h"), 'w') as f:
            f.write(Template(template).substitute(
                uname=self.name.upper(),
                configname=self.name, pkgname=self.pkgname, paramdescr=paramdescr,
                members=members, groups=groups, doline=LINEDEBUG, constants=constants))
        print("Wrote header file in " + os.path.join(self.cpp_gen_dir, self.name + "Config.h"))
# Example #4
# (score: 0)
    def _generate_images(self, scene_list, video_name,
                         image_name_template='$VIDEO_NAME-Scene-$SCENE_NUMBER-$IMAGE_NUMBER',
                         output_dir=None):
        # type: (List[Tuple[FrameTimecode, FrameTimecode]]) -> None
        """Save ``self.num_images`` still frames from each scene.

        File names come from ``image_name_template`` ($VIDEO_NAME,
        $SCENE_NUMBER, $IMAGE_NUMBER and $FRAME_NUMBER are substituted);
        images are written under ``output_dir`` or, if None,
        ``self.output_directory``.  Recorded paths go to
        ``self.image_filenames``.

        Raises:
            ValueError: if self.num_images is not a positive integer.
        """
        if not scene_list:
            return
        if not self.options_processed:
            return
        if self.num_images <= 0:
            # Fix: give the error a message instead of a bare ValueError().
            raise ValueError('self.num_images must be a positive integer')
        self.check_input_open()

        imwrite_param = []
        if self.image_param is not None:
            imwrite_param = [self.imwrite_params[self.image_extension], self.image_param]

        # Reset video manager and downscale factor.
        self.video_manager.release()
        self.video_manager.reset()
        self.video_manager.set_downscale_factor(1)
        self.video_manager.start()

        # Setup flags and init progress bar if available.
        completed = True
        logging.info('Generating output images (%d per scene)...', self.num_images)
        progress_bar = None
        if tqdm and not self.quiet_mode:
            progress_bar = tqdm(
                total=len(scene_list) * self.num_images, unit='images')

        filename_template = Template(image_name_template)

        # Zero-padded format strings sized to the scene/image counts.
        scene_num_format = '%0'
        scene_num_format += str(max(3, math.floor(math.log(len(scene_list), 10)) + 1)) + 'd'
        image_num_format = '%0'
        image_num_format += str(math.floor(math.log(self.num_images, 10)) + 2) + 'd'

        fps = scene_list[0][0].framerate

        # Fix: removed a dead `timecode_list = dict()` that was immediately
        # overwritten by this comprehension.
        timecode_list = [
            [
                FrameTimecode(int(f), fps=fps) for f in [
                    # middle frames
                    a[len(a)//2] if (0 < j < self.num_images-1) or self.num_images == 1

                    # first frame
                    else min(a[0] + self.image_frame_margin, a[-1]) if j == 0

                    # last frame
                    else max(a[-1] - self.image_frame_margin, a[0])

                    # for each evenly-split array of frames in the scene list
                    for j, a in enumerate(np.array_split(r, self.num_images))
                ]
            ]
            for i, r in enumerate([
                # pad ranges to number of images
                r
                if 1+r[-1]-r[0] >= self.num_images
                else list(r) + [r[-1]] * (self.num_images - len(r))
                # create range of frames in scene
                for r in (
                    range(start.get_frames(), end.get_frames())
                    # for each scene in scene list
                    for start, end in scene_list
                    )
            ])
        ]

        self.image_filenames = {i: [] for i in range(len(timecode_list))}

        for i, scene_timecodes in enumerate(timecode_list):
            for j, image_timecode in enumerate(scene_timecodes):
                self.video_manager.seek(image_timecode)
                self.video_manager.grab()
                ret_val, frame_im = self.video_manager.retrieve()
                if ret_val:
                    file_path = '%s.%s' % (filename_template.safe_substitute(
                        VIDEO_NAME=video_name,
                        SCENE_NUMBER=scene_num_format % (i + 1),
                        IMAGE_NUMBER=image_num_format % (j + 1),
                        FRAME_NUMBER=image_timecode.get_frames()),
                                           self.image_extension)
                    self.image_filenames[i].append(file_path)
                    cv2.imwrite(
                        get_and_create_path(
                            file_path,
                            output_dir if output_dir is not None else self.output_directory),
                        frame_im, imwrite_param)
                else:
                    # Frame could not be decoded; stop processing this scene.
                    completed = False
                    break
                if progress_bar:
                    progress_bar.update(1)

        if not completed:
            logging.error('Could not generate all output images.')
# Example #5
# (score: 0)
import pathlib
from os import system, remove
from string import Template

# systemd service-unit template rendered once per registered package.
# Placeholders ($packagename, $entrypoint, $filename) are filled in by
# register() via Template.substitute().
UNIT_TEMPLATE = Template('''
[Unit]
Description=$packagename
After=syslog.target

[Service]
Type=simple
ExecStart=$entrypoint --command listen --filename $filename
SyslogIdentifier=$packagename
StandardOutput=syslog
StandardError=syslog
Restart=always
RestartSec=3

[Install]
WantedBy=multi-user.target
''')


def register(packagename: str, entrypoint: str, filename: str):
    """Install and enable a systemd service unit for *packagename*.

    Renders UNIT_TEMPLATE with the given values, writes it to
    /etc/systemd/system/<packagename>.service, then reloads systemd and
    enables the new unit.

    NOTE(review): writing under /etc/systemd/system already requires root,
    yet the systemctl calls are prefixed with sudo — confirm the intended
    privilege model with the caller.
    """
    import subprocess  # stdlib; local import keeps the module's import surface unchanged

    unit = UNIT_TEMPLATE.substitute(packagename=packagename, entrypoint=entrypoint, filename=filename)
    service = packagename + ".service"
    unit_file = pathlib.Path("/", "etc", "systemd", "system", service)
    unit_file.write_text(unit)
    # Pass argument lists rather than shell strings so a package name
    # containing spaces or shell metacharacters cannot inject commands.
    # check=False mirrors the original os.system() calls, which ignored
    # the exit status (best-effort behavior).
    subprocess.run(["sudo", "systemctl", "daemon-reload"], check=False)
    subprocess.run(["sudo", "systemctl", "enable", service], check=False)
Example #6
0
    def current_view_html(self, body=None):
        """Return ``(html, body_class)`` for the session's current view.

        Resolves the session's current view, makes sure a settlement is
        set where possible, wraps the view's template in the standard
        container markup, optionally appends the shared UI templates,
        and finally performs one big Template substitution pass over the
        assembled page. Raises for a view name it does not know.
        """

        # Resolve the view and make sure we have a settlement to work with.
        self.get_current_view()
        if not hasattr(self, "Settlement") or self.Settlement is None:
            self.set_current_settlement()

        # view name -> (body class, template name, append shared UI templates?)
        dispatch = {
            "dashboard": ("dashboard", 'dashboard', False),
            "new_settlement": ('create_new_settlement', 'new_settlement.html', True),
            "view_campaign": ('view_campaign_summary', 'campaign_summary', True),
            "view_settlement": ('view_settlement_sheet', 'settlement_sheet', True),
            "view_survivor": ('view_survivor_sheet', 'survivor_sheet', True),
        }

        if self.current_view not in dispatch:
            self.logger.error("[%s] requested unhandled view '%s'" %
                              (self.User, self.current_view))
            raise Exception("Unknown View!")

        body, template_name, include_ui_templates = dispatch[self.current_view]

        # Container markup wraps the view's own template.
        output = html.meta.start_container
        output += html.get_template(template_name)
        output += html.meta.close_container

        # Shared UI templates (modals etc.) for every view but the dashboard.
        if include_ui_templates:
            for tmpl_name in settings.get('application', 'ui_templates').split(','):
                output += html.get_template(tmpl_name.strip())

        # Single, monster variable-substitution pass over the whole page.
        output = Template(output).safe_substitute(
            api_url=api.get_api_url(),
            application_version=settings.get("application", "version"),
            application_age_rough_years=(datetime.now() -
                                         datetime(2015, 11, 10)).days / 365,
            user_id=self.User.user['_id'],
            user_login=self.User.user["login"],
            settlement_id=self.session['current_settlement'],
            survivor_id=self.session.get('current_asset', None),
            blog_api_key=settings_private.get('api', 'blog_api_key'),
        )

        return output, body
Example #7
0
# Default logging configuration (dictConfig YAML) for synapse.
# ${log_file} is the only placeholder; the caller fills it in via
# Template substitution before parsing the YAML.
DEFAULT_LOG_CONFIG = Template("""
version: 1

formatters:
    precise:
        format: '%(asctime)s - %(name)s - %(lineno)d - %(levelname)s - \
%(request)s - %(message)s'

filters:
    context:
        (): synapse.util.logcontext.LoggingContextFilter
        request: ""

handlers:
    file:
        class: logging.handlers.RotatingFileHandler
        formatter: precise
        filename: ${log_file}
        maxBytes: 104857600
        backupCount: 10
        filters: [context]
        encoding: utf8
    console:
        class: logging.StreamHandler
        formatter: precise
        filters: [context]

loggers:
    synapse:
        level: INFO

    synapse.storage.SQL:
        # beware: increasing this to DEBUG will make synapse log sensitive
        # information such as access tokens.
        level: INFO

root:
    level: INFO
    handlers: [file, console]
""")
    def gather_instance_info(self):
        """Survey how each predicate is used by the instance data.

        Lists the distinct, non-annotation predicates in the data (with
        optional labels and property types), drops any the configuration
        hides, then records per-predicate usage statistics, and finally
        prunes the result for inheritance and (optionally) applies SHACL
        coloring. Side effects: resets self.node_data, may populate
        self.data with a parsed Graph, and drives a console progress bar.
        """
        # Annotation predicates (type/label/definition) and anything from the
        # OWL/RDF/RDFS namespaces are excluded up front; label lookup falls
        # back to untagged literals via the lang() filter.
        predicate_query = Template("""
        prefix owl: <http://www.w3.org/2002/07/owl#>
        prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
        prefix xsd: <http://www.w3.org/2001/XMLSchema#>
        prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>
        prefix gist: <https://ontologies.semanticarts.com/gist/>
        prefix skos: <http://www.w3.org/2004/02/skos/core#>

        select distinct ?predicate ?label ?type where {
          {
            select distinct ?predicate where {
              ?s ?predicate ?o
              FILTER(?predicate NOT IN (rdf:type, rdfs:label, skos:prefLabel, skos:altLabel, skos:definition))
            }
          }
          FILTER (!STRSTARTS(STR(?predicate), 'http://www.w3.org/2002/07/owl#'))
          FILTER (!STRSTARTS(STR(?predicate), 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'))
          FILTER (!STRSTARTS(STR(?predicate), 'http://www.w3.org/2000/01/rdf-schema#'))
          OPTIONAL {
            ?predicate skos:prefLabel|rdfs:label ?label
            FILTER(lang(?label) = '$language' || lang(?label) = '')
          }
          OPTIONAL {
            values ?type { owl:DatatypeProperty owl:ObjectProperty }
            ?predicate a ?type
          }
        }
        """).substitute(language=self.label_lang)
        self.node_data = {}
        if self.repo:
            all_predicates = list(self.remote_select_query(predicate_query))
        else:
            # No remote endpoint: parse the specified files into one Graph
            # and query that locally instead.
            self.data = Graph()
            for file_path in self.files:
                filename = os.path.basename(file_path)
                logging.debug('Parsing %s for documentation', filename)
                self.data.parse(file_path, format=guess_format(file_path))
            all_predicates = list(self.local_select_query(predicate_query))

        # Drop predicates the configuration asks to hide.
        hidden_predicates = set(predicate['predicate']
                                for predicate in all_predicates
                                if self.hidden(predicate['predicate']))
        logging.debug("Hiding predicates: %s", hidden_predicates)
        all_predicates = [
            predicate for predicate in all_predicates
            if predicate['predicate'] not in hidden_predicates
        ]

        if not all_predicates:
            logging.warning('No interesting predicates found in %s', self.repo
                            or ' specified files')
            return

        self.build_class_hierarchy()

        for count, predicate_row in enumerate(all_predicates):
            predicate = predicate_row['predicate']
            # Prefer the human label when one was found; fall back to the
            # stripped URI fragment.
            predicate_str = predicate_row['label'] if predicate_row.get('label') \
                else self.strip_uri(predicate)

            self.print_progress_bar(count,
                                    len(all_predicates),
                                    prefix='Processing predicates:',
                                    suffix=predicate_str + ' ' * 20,
                                    length=50)
            pre_time = perf_counter()
            query_text = self.create_predicate_query(predicate,
                                                     predicate_row.get('type'),
                                                     self.limit)
            predicate_usage = list(self.select_query(query_text))
            logging.debug("%s items returned for %s", len(predicate_usage),
                          predicate)
            for usage in predicate_usage:
                # Skip rows with no source class or below the usage threshold.
                if 'src' not in usage or usage['src'] is None or int(
                        usage.get('num', 0)) < self.threshold:
                    continue
                self.record_predicate_usage(predicate, predicate_str, usage)

            logging.debug("Fetching %s took %d seconds", str(predicate_row),
                          perf_counter() - pre_time)

        self.print_progress_bar(len(all_predicates),
                                len(all_predicates),
                                prefix='Processing predicates:',
                                suffix='Complete',
                                length=50)

        self.prune_for_inheritance()

        if self.show_shacl:
            self.add_shacl_coloring()
    def build_class_hierarchy(self):
        """Build the class inheritance model used when attributing usage.

        Queries for subclass (and equivalent-class union/intersection)
        relationships, fills self.superclasses and self.class_names,
        computes a root-to-leaf evaluation order in self.inheritance
        (making superclass sets transitive along the way), and finally
        tallies instance counts per deepest class into self.class_counts.
        """
        # The UNION arm unrolls owl:equivalentClass/rdfs:subClassOf
        # expressions built from owl:unionOf / owl:intersectionOf lists.
        inheritance_query = Template("""
        prefix owl: <http://www.w3.org/2002/07/owl#>
        prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
        prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>
        prefix skos: <http://www.w3.org/2004/02/skos/core#>

        select distinct ?class ?c_label ?parent ?p_label where {
          {
              ?class rdfs:subClassOf ?parent .
          }
          UNION
          {
             ?class (owl:equivalentClass|rdfs:subClassOf)/(owl:unionOf|owl:intersectionOf)/rdf:rest*/rdf:first ?parent .
            ?parent a owl:Class
          }
          filter (!isblank(?class) && !isblank(?parent))
          OPTIONAL {
              ?class rdfs:label|skos:prefLabel ?c_label
              FILTER(lang(?c_label) = '$language' || lang(?c_label) = '')
          }
          OPTIONAL {
              ?parent rdfs:label|skos:prefLabel ?p_label
              FILTER(lang(?p_label) = '$language' || lang(?p_label) = '')
          }
        }
        """).substitute(language=self.label_lang)
        if self.repo:
            parents = list(self.remote_select_query(inheritance_query))
        else:
            parents = list(self.local_select_query(inheritance_query))

        for inheritance_info in parents:
            self.superclasses[inheritance_info['class']].add(
                inheritance_info['parent'])
            # Prefer the returned label; fall back to the stripped URI.
            self.class_names[inheritance_info['class']] = \
                inheritance_info.get('c_label') or self.strip_uri(inheritance_info['class'])
            self.class_names[inheritance_info['parent']] = \
                inheritance_info.get('p_label') or self.strip_uri(inheritance_info['parent'])

        # Determine evaluation order, root classes to leaves
        remaining_classes = set(self.superclasses.keys())
        # Roots are parents that never appear as subclasses themselves.
        root_classes = set(parent
                           for cls, parents in self.superclasses.items()
                           for parent in parents
                           if parent not in remaining_classes)
        eval_order = list(root_classes)
        while remaining_classes:
            # A class is ready once all of its parents are already ordered.
            next_set = set(cls for cls in remaining_classes if all(
                parent in eval_order for parent in self.superclasses[cls]))
            # Make superclasses transitive
            for cls in next_set:
                parents = set(self.superclasses[cls])
                for parent in parents:
                    self.superclasses[cls].update(
                        self.superclasses.get(parent, set()))
            eval_order.extend(next_set)
            remaining_classes.difference_update(next_set)

        logging.debug(
            'Inheritance evaluation order:\n%s', "\n".join(
                f"\t{self.strip_uri(cls)}: {list(self.strip_uri(sup) for sup in self.superclasses[cls])}"
                for cls in eval_order))
        self.inheritance = eval_order

        # Count instances per class, attributing each to its deepest class.
        class_query = self.create_class_count_query(self.limit)
        if self.repo:
            class_counts = list(self.remote_select_query(class_query))
        else:
            class_counts = list(self.local_select_query(class_query))
        for instance_info in class_counts:
            self.class_counts[self.deepest_class(instance_info['src'])] += int(
                instance_info['num'])
Example #10
0
def read_template(name, values):
    """Load ``<name>.plate`` from the canari template package and fill it in.

    *name* is the template's base filename (without the ``.plate`` suffix);
    *values* is a mapping of Template placeholders to replacement strings.
    Raises KeyError from Template.substitute when a placeholder is missing.
    """
    template_path = resource_filename('canari.resources.template',
                                      '%s.plate' % name)
    # Use a context manager so the handle is closed promptly; the original
    # used the Python 2 ``file()`` builtin and leaked the open file.
    with open(template_path) as fh:
        return Template(fh.read()).substitute(**values)
Example #11
0
 def get_data_at_sites(self, data_path, sites, site_idx, period):
     """Load each model variable's CSV and extract the requested sites.

     *data_path* is a string.Template path with a ``$v`` placeholder for
     the variable name. Populates self.data, keyed by the variable's
     index in cfg.SM_MODEL_VARNAMES.
     """
     self.data = {}
     for var_index, var_name in enumerate(cfg.SM_MODEL_VARNAMES):
         csv_path = Template(data_path).substitute(v=var_name)
         frame = self.load_from_csv(csv_path)
         self.data[var_index] = extract_sites(frame, sites, site_idx, period)
Example #12
0
# SOAP 1.1 / 1.2 envelope namespace URIs.
soapenv11_namespace = 'http://schemas.xmlsoap.org/soap/envelope/'
soapenv12_namespace = 'http://www.w3.org/2003/05/soap-envelope'

# WS-Security extension and utility namespaces (OASIS WSS 1.0).
wsse_namespace = 'http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-secext-1.0.xsd'
wsu_namespace = 'http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-wssecurity-utility-1.0.xsd'

# Prefix -> namespace map shared by all the XPath expressions below.
common_namespaces = {
    'soapenv': soapenv11_namespace,
    'wsse': wsse_namespace,
    'wsu': wsu_namespace,
    'zato': zato_namespace
}

# SOAP 1.1 envelope skeleton; $body is filled in via Template substitution.
soap_doc = Template(
    """<soap:Envelope xmlns:soap='%s'><soap:Body>$body</soap:Body></soap:Envelope>"""
    % soapenv11_namespace)

# Compiled XPaths for pulling the body and any fault out of a response.
soap_body_path = '/soapenv:Envelope/soapenv:Body'
soap_body_xpath = etree.XPath(soap_body_path, namespaces=common_namespaces)

soap_fault_path = '/soapenv:Envelope/soapenv:Body/soapenv:Fault'
soap_fault_xpath = etree.XPath(soap_fault_path, namespaces=common_namespaces)

# Only the cleartext UsernameToken password type is supported.
wsse_password_type_text = 'http://docs.oasis-open.org/wss/2004/01/oasis-200401-wss-username-token-profile-1.0#PasswordText'
supported_wsse_password_types = (wsse_password_type_text, )

wsse_username_path = '/soapenv:Envelope/soapenv:Header/wsse:Security/wsse:UsernameToken/wsse:Username'
wsse_username_xpath = etree.XPath(wsse_username_path,
                                  namespaces=common_namespaces)
Example #13
0
 def translate_path(self, path_template, **kwargs):
     """Expand *path_template* (a string.Template pattern) into a path.

     ``$account_code`` is always available to the template (taken from
     this client); any other placeholders are supplied via *kwargs*.
     Raises KeyError if the template references a missing placeholder.
     """
     # Copy kwargs before adding account_code, and substitute from that
     # same mapping.  The original aliased ``context = kwargs`` and then
     # substituted from ``kwargs`` — which only worked because the two
     # names pointed at the same dict.
     context = dict(kwargs)
     context['account_code'] = self.account_code
     return Template(path_template).substitute(**context)
Example #14
0
# Legacy Python 2 CGI handler: on a form POST it validates and stores a
# bookmark in SQLite, then renders the full bookmark list into an HTML
# template and prints the response.
req = Request()
f = req.form
# Values substituted into bookmarkform.html; 'message' carries any
# validation error text shown to the user.
value_dic = {'message': '', 'title': '', 'url': '', 'bookmarks': ''}

if f.has_key('post'):
    if not f.getvalue('title', '') or not f.getvalue('url', ''):
        # Validation failure: re-display the form with the submitted values.
        # (The message literal below says "Title and URL are required".)
        value_dic['message'] = u'タイトルとURLは必須項目です'
        value_dic['title'] = unicode(f.getvalue('title', ''), 'utf-8',
                                     'ignore')
        value_dic['url'] = f.getvalue('url', '')
    else:
        # Parameterized INSERT — form values never reach the SQL text.
        cur.execute("""INSERT INTO bookmark(title, url) VALUES (?, ?)""",
                    (unicode(f.getvalue('title', ''), 'utf-8',
                             'ignore'), f.getvalue('url', '')))
        con.commit()

# Build the <dt>/<dd> list of all stored bookmarks.
listbody = ''
cur.execute("SELECT title, url FROM bookmark")
for item in cur.fetchall():
    listbody += """<dt>%s</dt><dd>%s</dd>\n""" % (item)
listbody = """<ul>\n%s</ul>""" % listbody
value_dic['bookmarks'] = listbody

# Load the page template from next to this script and substitute the values.
res = Response()
f = open(path.join(path.dirname(__file__), 'bookmarkform.html'))
t = Template(unicode(f.read(), 'utf-8', 'ignore'))
body = t.substitute(value_dic)
res.set_body(body)
print res
Example #15
0
def get_output(data):
    """Render the plugin-finder XML response for one client request.

    *data* maps request parameters (mimetype, appID, appVersion, clientOS,
    chromeLocale) to values; every value is HTML-escaped up front and
    missing keys default to ''.  A mostly '-1' plugin record is returned
    when required parameters are missing or no known plugin matches the
    mimetype/OS combination; otherwise the record is filled in below and
    substituted into ``xml_template``.

    NOTE(review): uses the Python 2 dict.iteritems() API — this function
    predates Python 3 (.items() there).
    """
    g = defaultdict(str, [(k, jinja2.escape(v)) for k, v in data.iteritems()])

    required = ['mimetype', 'appID', 'appVersion', 'clientOS', 'chromeLocale']

    # Some defaults we override depending on what we find below.
    plugin = dict(mimetype='-1', name='-1', guid='-1', version='',
                  iconUrl='', XPILocation='', InstallerLocation='',
                  InstallerHash='', InstallerShowsUI='',
                  manualInstallationURL='', licenseURL='',
                  needsRestart='true')

    # Special case for mimetype if they are provided.
    plugin['mimetype'] = g['mimetype'] or '-1'

    output = Template(xml_template)

    for s in required:
        if s not in data:
            # A sort of 404, matching what was returned in the original PHP.
            return output.substitute(plugin)

    # Figure out what plugins we've got, and what plugins we know where
    # to get.

    # Begin our huge and embarrassing if-else statement.
    if (g['mimetype'] in ['application/x-shockwave-flash',
                          'application/futuresplash'] and
        re.match(flash_re, g['clientOS'])):

        # Tell the user where they can go to get the installer.

        plugin.update(
            name='Adobe Flash Player',
            manualInstallationURL='http://www.adobe.com/go/getflashplayer')

        # Offer Windows users a specific flash plugin installer instead.
        # Don't use a https URL for the license here, per request from
        # Macromedia.

        if g['clientOS'].startswith('Win'):
            plugin.update(
                guid='{4cfaef8a-a6c9-41a0-8e6f-967eb8f49143}',
                XPILocation='',
                iconUrl='http://fpdownload2.macromedia.com/pub/flashplayer/current/fp_win_installer.ico',
                needsRestart='false',
                InstallerShowsUI='true',
                version='13.0.0.214',
                InstallerHash='sha256:3429f62b63ea7e06298231be7a2cf80b15745ba09442a952cbf23942e1aeba42',
                InstallerLocation='http://download.macromedia.com/pub/flashplayer/pdc/fp_pl_pfs_installer.exe')

    elif (g['mimetype'] == 'application/x-director' and
          g['clientOS'].startswith('Win')):
        plugin.update(
            name='Adobe Shockwave Player',
            manualInstallationURL='http://get.adobe.com/shockwave/otherversions')

        # Even though the shockwave installer is not a silent installer, we
        # need to show its EULA here since we've got a slimmed down
        # installer that doesn't do that itself.
        if g['chromeLocale'] != 'ja-JP':
            plugin.update(
                licenseURL='http://www.adobe.com/go/eula_shockwaveplayer')
        else:
            plugin.update(
                licenseURL='http://www.adobe.com/go/eula_shockwaveplayer_jp')
        plugin.update(
            guid='{45f2a22c-4029-4209-8b3d-1421b989633f}',
            XPILocation='',
            version='12.1.1.151',
            InstallerHash='sha256:22105e72f390b72ee6120f43d83eec3d85f38df32d3a5d6b3d18dcf0cc46dbf7',
            InstallerLocation='http://fpdownload.macromedia.com/pub/shockwave/default/english/win95nt/latest/Shockwave_Installer_FF.exe',
            manualInstallationURL='http://get.adobe.com/shockwave/otherversions',
            needsRestart='false',
            InstallerShowsUI='false')

    elif (g['mimetype'] in ['audio/x-pn-realaudio-plugin',
                            'audio/x-pn-realaudio'] and
          re.match(r'^(Win|Linux|PPC Mac OS X)', g['clientOS'])):
        plugin.update(
            name='Real Player',
            version='10.5',
            manualInstallationURL='http://www.real.com')

        if g['clientOS'].startswith('Win'):
            plugin.update(
                XPILocation='http://forms.real.com/real/player/download.html?type=firefox',
                guid='{d586351c-cb55-41a7-8e7b-4aaac5172d39}')
        else:
            plugin.update(
                guid='{269eb771-59de-4702-9209-ca97ce522f6d}')

    elif (re.match(quicktime_re, g['mimetype']) and
          re.match(r'^(Win|PPC Mac OS X)', g['clientOS'])):

        # Well, we don't have a plugin that can handle any of those
        # mimetypes, but the Apple Quicktime plugin can. Point the user to
        # the Quicktime download page.

        plugin.update(
            name='Apple Quicktime',
            guid='{a42bb825-7eee-420f-8ee7-834062b6fefd}',
            InstallerShowsUI='true',
            manualInstallationURL='http://www.apple.com/quicktime/download/')

    elif (re.match(java_re, g['mimetype']) and
          re.match(r'^(Win|Linux|PPC Mac OS X)', g['clientOS'])):

        # We serve up the Java plugin for the following mimetypes:
        #
        # application/x-java-vm
        # application/x-java-applet;jpi-version=1.5
        # application/x-java-bean;jpi-version=1.5
        # application/x-java-applet;version=1.3
        # application/x-java-bean;version=1.3
        # application/x-java-applet;version=1.2.2
        # application/x-java-bean;version=1.2.2
        # application/x-java-applet;version=1.2.1
        # application/x-java-bean;version=1.2.1
        # application/x-java-applet;version=1.4.2
        # application/x-java-bean;version=1.4.2
        # application/x-java-applet;version=1.5
        # application/x-java-bean;version=1.5
        # application/x-java-applet;version=1.3.1
        # application/x-java-bean;version=1.3.1
        # application/x-java-applet;version=1.4
        # application/x-java-bean;version=1.4
        # application/x-java-applet;version=1.4.1
        # application/x-java-bean;version=1.4.1
        # application/x-java-applet;version=1.2
        # application/x-java-bean;version=1.2
        # application/x-java-applet;version=1.1.3
        # application/x-java-bean;version=1.1.3
        # application/x-java-applet;version=1.1.2
        # application/x-java-bean;version=1.1.2
        # application/x-java-applet;version=1.1.1
        # application/x-java-bean;version=1.1.1
        # application/x-java-applet;version=1.1
        # application/x-java-bean;version=1.1
        # application/x-java-applet
        # application/x-java-bean
        #
        #
        # We don't want to link users directly to the Java plugin because
        # we want to warn them about ongoing security problems first. Link
        # to SUMO.

        plugin.update(
            name='Java Runtime Environment',
            manualInstallationURL='https://support.mozilla.org/kb/use-java-plugin-to-view-interactive-content',
            needsRestart='false',
            guid='{fbe640ef-4375-4f45-8d79-767d60bf75b8}')

    elif (g['mimetype'] in ['application/pdf', 'application/vnd.fdf',
                            'application/vnd.adobe.xfdf',
                            'application/vnd.adobe.xdp+xml',
                            'application/vnd.adobe.xfd+xml'] and
          re.match(r'^(Win|PPC Mac OS X|Linux(?! x86_64))', g['clientOS'])):
        plugin.update(
            name='Adobe Acrobat Plug-In',
            guid='{d87cd824-67cb-4547-8587-616c70318095}',
            manualInstallationURL='http://www.adobe.com/products/acrobat/readstep.html')

    elif (g['mimetype'] == 'application/x-mtx' and
          re.match(r'^(Win|PPC Mac OS X)', g['clientOS'])):
        plugin.update(
            name='Viewpoint Media Player',
            guid='{03f998b2-0e00-11d3-a498-00104b6eb52e}',
            manualInstallationURL='http://www.viewpoint.com/pub/products/vmp.html')

    elif re.match(wmp_re, g['mimetype']):
        # We serve up the Windows Media Player plugin for the following
        # mimetypes:
        #
        # application/asx
        # application/x-mplayer2
        # audio/x-ms-wax
        # audio/x-ms-wma
        # video/x-ms-asf
        # video/x-ms-asf-plugin
        # video/x-ms-wm
        # video/x-ms-wmp
        # video/x-ms-wmv
        # video/x-ms-wmx
        # video/x-ms-wvx
        #
        # For all windows users who don't have the WMP 11 plugin, give them
        # a link for it.
        if g['clientOS'].startswith('Win'):
            plugin.update(
                name='Windows Media Player',
                version='11',
                guid='{cff1240a-fd24-4b9f-8183-ccd96e5300d0}',
                manualInstallationURL='http://port25.technet.com/pages/windows-media-player-firefox-plugin-download.aspx')

        # For OSX users -- added Intel to this since flip4mac is a UB.
        # Contact at MS was okay w/ this, plus MS points to this anyway.
        elif re.match(r'^(PPC|Intel) Mac OS X', g['clientOS']):
            plugin.update(
                name='Flip4Mac',
                version='2.1',
                guid='{cff0240a-fd24-4b9f-8183-ccd96e5300d0}',
                manualInstallationURL='http://www.flip4mac.com/wmv_download.htm')

    elif (g['mimetype'] == 'application/x-xstandard' and
          re.match(r'^(Win|PPC Mac OS X)', g['clientOS'])):
        plugin.update(
            name='XStandard XHTML WYSIWYG Editor',
            guid='{3563d917-2f44-4e05-8769-47e655e92361}',
            iconUrl='http://xstandard.com/images/xicon32x32.gif',
            XPILocation='http://xstandard.com/download/xstandard.xpi',
            InstallerShowsUI='false',
            manualInstallationURL='http://xstandard.com/download/',
            licenseURL='http://xstandard.com/license/')

    elif (g['mimetype'] == 'application/x-dnl' and
          g['clientOS'].startswith('Win')):
        plugin.update(
            name='DNL Reader',
            guid='{ce9317a3-e2f8-49b9-9b3b-a7fb5ec55161}',
            version='5.5',
            iconUrl='http://digitalwebbooks.com/reader/dwb16.gif',
            XPILocation='http://digitalwebbooks.com/reader/xpinst.xpi',
            InstallerShowsUI='false',
            manualInstallationURL='http://digitalwebbooks.com/reader/')

    elif (g['mimetype'] == 'application/x-videoegg-loader' and
          g['clientOS'].startswith('Win')):
        plugin.update(
            name='VideoEgg Publisher',
            guid='{b8b881f0-2e07-11db-a98b-0800200c9a66}',
            iconUrl='http://videoegg.com/favicon.ico',
            XPILocation='http://update.videoegg.com/Install/Windows/Initial/VideoEggPublisher.xpi',
            InstallerShowsUI='true',
            manualInstallationURL='http://www.videoegg.com/')

    elif (g['mimetype'] == 'video/vnd.divx' and
          g['clientOS'].startswith('Win')):
        plugin.update(
            name='DivX Web Player',
            guid='{a8b771f0-2e07-11db-a98b-0800200c9a66}',
            iconUrl='http://images.divx.com/divx/player/webplayer.png',
            XPILocation='http://download.divx.com/player/DivXWebPlayer.xpi',
            InstallerShowsUI='false',
            licenseURL='http://go.divx.com/plugin/license/',
            manualInstallationURL='http://go.divx.com/plugin/download/')

    elif (g['mimetype'] == 'video/vnd.divx' and
          re.match(r'^(PPC|Intel) Mac OS X', g['clientOS'])):
        plugin.update(
            name='DivX Web Player',
            guid='{a8b771f0-2e07-11db-a98b-0800200c9a66}',
            iconUrl='http://images.divx.com/divx/player/webplayer.png',
            XPILocation='http://download.divx.com/player/DivXWebPlayerMac.xpi',
            InstallerShowsUI='false',
            licenseURL='http://go.divx.com/plugin/license/',
            manualInstallationURL='http://go.divx.com/plugin/download/')

    # End ridiculously huge and embarrassing if-else block.
    return output.substitute(plugin)
    def create_predicate_query(self, predicate, predicate_type, limit):
        """Build the SPARQL query that summarizes usage of *predicate*.

        Selects one of three query shapes based on *predicate_type*:
        object properties count (source classes, target classes) pairs,
        datatype properties count (source classes, literal datatype)
        pairs, and untyped predicates take the UNION of both shapes.
        ``$pattern`` is filled from self.filtered_graph_pattern() and
        ``$limit`` caps the inner grouping.  Returns the query text.
        """
        if predicate_type == str(OWL.ObjectProperty):
            # Object property: group subject/object class memberships.
            type_query = """
                prefix owl: <http://www.w3.org/2002/07/owl#>
                prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
                prefix xsd: <http://www.w3.org/2001/XMLSchema#>
                prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>
                prefix gist: <https://ontologies.semanticarts.com/gist/>
                prefix skos: <http://www.w3.org/2004/02/skos/core#>

                select ?src ?tgt (COUNT(?src) as ?num) where {
                  {
                    select (group_concat(?src_c;separator=' ') as ?src) (group_concat(?tgt_c;separator=' ') as ?tgt) where {
                      $pattern
                      FILTER(!ISBLANK(?s))
                      ?s a ?src_c .
                      FILTER (!STRSTARTS(STR(?src_c), 'http://www.w3.org/2002/07/owl#'))
                      ?o a ?tgt_c .
                    } group by ?s ?o LIMIT $limit
                  }
                } group by ?src ?tgt
                """
        elif predicate_type == str(OWL.DatatypeProperty):
            # Datatype property: pair source classes with the literal's
            # datatype (defaulting to xsd:string for plain literals).
            type_query = """
                prefix owl: <http://www.w3.org/2002/07/owl#>
                prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
                prefix xsd: <http://www.w3.org/2001/XMLSchema#>
                prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>
                prefix gist: <https://ontologies.semanticarts.com/gist/>
                prefix skos: <http://www.w3.org/2004/02/skos/core#>

                select ?src ?dt (COUNT(?src) as ?num) where {
                  {
                    select (group_concat(?src_c;separator=' ') as ?src) (SAMPLE(COALESCE(?dtype, xsd:string)) as ?dt) where {
                      $pattern
                      FILTER(!ISBLANK(?s) && ISLITERAL(?o))
                      ?s a ?src_c .
                      FILTER (!STRSTARTS(STR(?src_c), 'http://www.w3.org/2002/07/owl#'))
                      BIND(DATATYPE(?o) as ?dtype) .
                    } group by ?s LIMIT $limit
                  }
                } group by ?src ?dt
                """
        else:
            # Unknown/untyped predicate: UNION of the two shapes above so
            # both object and literal usages are captured.
            type_query = """
                prefix owl: <http://www.w3.org/2002/07/owl#>
                prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
                prefix xsd: <http://www.w3.org/2001/XMLSchema#>
                prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#>
                prefix gist: <https://ontologies.semanticarts.com/gist/>
                prefix skos: <http://www.w3.org/2004/02/skos/core#>

                select ?src
                       ?tgt
                       ?dt
                       ?num
                where {
                  {
                    {
                      select ?src ?tgt (COUNT(?src) as ?num) where {
                        {
                            select (group_concat(?src_c;separator=' ') as ?src)
                                   (group_concat(?tgt_c;separator=' ') as ?tgt) where {
                              $pattern
                              FILTER(!ISBLANK(?s))
                              ?s a ?src_c .
                              FILTER (!STRSTARTS(STR(?src_c), 'http://www.w3.org/2002/07/owl#'))
                              ?o a ?tgt_c .
                            } group by ?s ?o LIMIT $limit
                        }
                      } group by ?src ?tgt
                    }
                  }
                  UNION
                  {
                    {
                      select ?src ?dt (COUNT(?src) as ?num) where {
                        {
                            select (group_concat(?src_c;separator=' ') as ?src)
                                   (SAMPLE(COALESCE(?dtype, xsd:string)) as ?dt) where {
                              $pattern
                              FILTER(!ISBLANK(?s) && ISLITERAL(?o))
                              ?s a ?src_c .
                              FILTER (!STRSTARTS(STR(?src_c), 'http://www.w3.org/2002/07/owl#'))
                              BIND(DATATYPE(?o) as ?dtype) .
                            } group by ?s LIMIT $limit
                        }
                      } group by ?src ?dt
                    }
                  }
                }
                """
        query_text = Template(type_query).substitute(
            pattern=self.filtered_graph_pattern(predicate), limit=limit)
        return query_text
Example #17
0
            alpha,
            "BETA":
            beta,
            "WRAP":
            wrap,
            "MALLOC":
            malloc,
            "SYNC": [0, 1][0 != sync],
            "JIT": [0, 1][0 != jit],
            "LIBXSMM_OFFLOAD_BUILD":
            ["", "\n#define LIBXSMM_OFFLOAD_BUILD"][0 != offload],
            "MNK_PREPROCESSOR_LIST":
            "",
        }

        template = Template(open(filename, "r").read())
        if fnmatch.fnmatch(filename, "*.h*"):
            if mnklist:
                first = mnklist[0]
            for mnk in mnklist:
                mnkstr = "_".join(map(str, mnk))
                if mnk != first:
                    substitute["MNK_PREPROCESSOR_LIST"] += "\n"
                if 2 != precision:
                    substitute["MNK_PREPROCESSOR_LIST"] += (
                        "#define LIBXSMM_SMM_" + mnkstr)
                if mnk != first or 0 == precision:
                    substitute["MNK_PREPROCESSOR_LIST"] += "\n"
                if 1 != precision:
                    substitute["MNK_PREPROCESSOR_LIST"] += (
                        "#define LIBXSMM_DMM_" + mnkstr)
Example #18
0
def getTemplate(input):
    """Coerce *input* to its string form and wrap it in a string.Template.

    Accepts any object; non-strings are converted with str() first so the
    caller can pass e.g. numbers or path objects.
    """
    text = str(input)
    return Template(text)
import smtplib
from email.message import EmailMessage
from pathlib import Path
from string import Template

# Render the HTML body from the template on disk, then deliver it over
# authenticated, TLS-upgraded SMTP.
html = Template(Path('index.html').read_text())

email = EmailMessage()
# Fill the standard headers; EmailMessage uses item assignment for these.
for header, value in (('from', 'W3bh4ck'),
                      ('to', '*****@*****.**'),
                      ('subject', 'Email subject')):
    email[header] = value

email.set_content(html.substitute({'name': 'uche'}), 'html')

# The context manager closes the SMTP connection even if sending fails.
with smtplib.SMTP(host='smtp.gmail.com', port=587) as smtp:
    smtp.ehlo()
    smtp.starttls()
    smtp.login('source email', 'password')
    smtp.send_message(email)
    print('email sent')
def main(argv=None):
    """Create or update Homebrew cask files for a SapMachine release tag.

    Clones the SAP/homebrew-SapMachine repo, renders JDK and JRE cask
    files from the (module-level) cask templates for the given tag, and
    pushes the result if anything changed.

    :param argv: optional argument list (for testing); defaults to
                 ``sys.argv[1:]`` when None.
    :return: 0 on success; may call ``sys.exit()`` on errors.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('-t',
                        '--tag',
                        help='the SapMachine tag',
                        metavar='TAG',
                        required=True)
    parser.add_argument(
        '-d',
        '--dual',
        help='this is going to be a dual architecture cask (x64 and aarch64)',
        action='store_true',
        default=False)
    # BUGFIX: honor the argv parameter. Previously parse_args() was called
    # with no arguments, so it always read sys.argv and main(argv=...) was
    # impossible to drive programmatically.
    args = parser.parse_args(argv)

    work_dir = join(os.getcwd(), 'cask_work')
    utils.remove_if_exists(work_dir)
    os.makedirs(work_dir)

    raw_tag = args.tag
    sapMachineTag = SapMachineTag.from_string(raw_tag)
    if sapMachineTag is None:
        print(str.format("Tag {0} seems to be invalid. Aborting...", args.tag))
        sys.exit(1)

    # Homebrew renamed the platform token 'osx' -> 'macos'; older
    # SapMachine releases still use the old name.
    os_name = 'osx' if sapMachineTag.get_major() < 17 or (
        sapMachineTag.get_major() == 17 and sapMachineTag.get_update() is None
        and sapMachineTag.get_build_number() < 21) else 'macos'
    prerelease = not sapMachineTag.is_ga()
    if prerelease:
        jdk_cask_file_name = str.format('sapmachine{0}-ea-jdk.rb',
                                        sapMachineTag.get_major())
        jre_cask_file_name = str.format('sapmachine{0}-ea-jre.rb',
                                        sapMachineTag.get_major())
        cask_tag = str.format('{0}-ea', sapMachineTag.get_major())
        cask_version = str.format(
            '{0},{1}', sapMachineTag.get_version_string_without_build(),
            sapMachineTag.get_build_number())
        ruby_version = 'version.before_comma'
        ea_ext = '-ea.'
        url_version1 = '#{version.before_comma}%2B#{version.after_comma}'
        url_version2 = '#{version.before_comma}-ea.#{version.after_comma}'
    else:
        jdk_cask_file_name = str.format('sapmachine{0}-jdk.rb',
                                        sapMachineTag.get_major())
        jre_cask_file_name = str.format('sapmachine{0}-jre.rb',
                                        sapMachineTag.get_major())
        cask_tag = str.format('{0}', sapMachineTag.get_major())
        cask_version = str.format(
            '{0}', sapMachineTag.get_version_string_without_build())
        ruby_version = 'version'
        ea_ext = '.'
        url_version1 = '#{version}'
        url_version2 = '#{version}'

    if args.dual:
        try:
            aarch_jdk_sha_url, aarch_jre_sha_url = utils.get_asset_url(
                raw_tag, os_name + '-aarch64', '.sha256.dmg.txt')
            intel_jdk_sha_url, intel_jre_sha_url = utils.get_asset_url(
                raw_tag, os_name + '-x64', '.sha256.dmg.txt')
        except Exception:
            # A dual cask needs both platforms; exit 0 so the pipeline can
            # simply retry later.
            print('Not both platforms ready yet')
            sys.exit(0)

        aarch_jdk_sha, code1 = utils.download_asset(aarch_jdk_sha_url)
        aarch_jre_sha, code2 = utils.download_asset(aarch_jre_sha_url)
        intel_jdk_sha, code3 = utils.download_asset(intel_jdk_sha_url)
        intel_jre_sha, code4 = utils.download_asset(intel_jre_sha_url)
        if code1 != 200 or code2 != 200 or code3 != 200 or code4 != 200:
            print('Download failed')
            sys.exit(1)
        # The .sha256.dmg.txt files are "<sha256> <filename>"; keep the hash.
        aarch_jdk_sha = aarch_jdk_sha.split(' ')[0]
        aarch_jre_sha = aarch_jre_sha.split(' ')[0]
        intel_jdk_sha = intel_jdk_sha.split(' ')[0]
        intel_jre_sha = intel_jre_sha.split(' ')[0]

        jdk_cask_content = Template(duplex_cask_template).substitute(
            CASK_TAG=cask_tag,
            IMAGE_TYPE='jdk',
            CASK_VERSION=cask_version,
            URL_VERSION1=url_version1,
            URL_VERSION2=url_version2,
            OS_NAME=os_name,
            INTELSHA256=intel_jdk_sha,
            AARCHSHA256=aarch_jdk_sha,
            RUBY_VERSION=ruby_version,
            EA_EXT=ea_ext)

        jre_cask_content = Template(duplex_cask_template).substitute(
            CASK_TAG=cask_tag,
            IMAGE_TYPE='jre',
            CASK_VERSION=cask_version,
            URL_VERSION1=url_version1,
            URL_VERSION2=url_version2,
            OS_NAME=os_name,
            INTELSHA256=intel_jre_sha,
            AARCHSHA256=aarch_jre_sha,
            RUBY_VERSION=ruby_version,
            EA_EXT=ea_ext)
    else:
        try:
            intel_jdk_sha_url, intel_jre_sha_url = utils.get_asset_url(
                raw_tag, os_name + '-x64', '.sha256.dmg.txt')
        except Exception:
            print('Asset not found')
            sys.exit(1)

        intel_jdk_sha, code1 = utils.download_asset(intel_jdk_sha_url)
        intel_jre_sha, code2 = utils.download_asset(intel_jre_sha_url)
        if code1 != 200 or code2 != 200:
            print('Download failed')
            sys.exit(1)
        intel_jdk_sha = intel_jdk_sha.split(' ')[0]
        intel_jre_sha = intel_jre_sha.split(' ')[0]
        try:
            intel_jdk_url, intel_jre_url = utils.get_asset_url(
                raw_tag, os_name + '-x64', '.dmg')
        except Exception:
            print('Asset not found')
            sys.exit(1)

        jdk_cask_content = Template(cask_template).substitute(
            CASK_TAG=cask_tag,
            IMAGE_TYPE='jdk',
            CASK_VERSION=cask_version,
            SHA256=intel_jdk_sha,
            URL_VERSION1=url_version1,
            URL_VERSION2=url_version2,
            OS_NAME=os_name,
            RUBY_VERSION=ruby_version,
            EA_EXT=ea_ext)

        jre_cask_content = Template(cask_template).substitute(
            CASK_TAG=cask_tag,
            IMAGE_TYPE='jre',
            CASK_VERSION=cask_version,
            SHA256=intel_jre_sha,
            URL_VERSION1=url_version1,
            URL_VERSION2=url_version2,
            OS_NAME=os_name,
            RUBY_VERSION=ruby_version,
            EA_EXT=ea_ext)

    homebrew_dir = join(work_dir, 'homebrew')
    utils.git_clone('github.com/SAP/homebrew-SapMachine', 'master',
                    homebrew_dir)

    jdk_replaced = replace_cask(jdk_cask_file_name, jdk_cask_content,
                                sapMachineTag, homebrew_dir)
    jre_replaced = replace_cask(jre_cask_file_name, jre_cask_content,
                                sapMachineTag, homebrew_dir)
    # Only push when at least one cask file actually changed.
    if jdk_replaced or jre_replaced:
        utils.git_push(homebrew_dir)
    utils.remove_if_exists(work_dir)

    return 0
Example #21
0
def make_nmonths_table_from_template(con,
                                     dataset,
                                     date_column,
                                     n_months,
                                     max_dist,
                                     template,
                                     load=False,
                                     columns='all'):
    '''
        Load inspections table matched with events that happened X months
        before. Returns pandas dataframe with the data loaded.

        Parameters:
            con         open psycopg2-style connection
            dataset     dataset name used in the table name and SQL template
            date_column column holding the event date in the dataset
            n_months    look-back window in months
            max_dist    maximum distance in meters
            template    SQL template filename under
                        $ROOT_FOLDER/blight_risk_prediction/features
            load        when True, read the table back and return a DataFrame;
                        otherwise the table is (created and) left in the DB
                        and None is returned
            columns     'all' or an explicit list of columns to load
    '''
    #Create a cursor
    cur = con.cursor()

    #Get the current schema
    cur.execute('SELECT current_schema;')
    current_schema = cur.fetchone()[0]

    #Build the table name
    table_name = ('insp2{dataset}_{n_months}months'
                  '_{max_dist}m').format(dataset=dataset,
                                         n_months=n_months,
                                         max_dist=max_dist)
    #Check if table already exists in current schema
    #If not, create it
    if table_name not in tables_in_schema(con, current_schema):
        logger.info(
            'Table {} does not exist... Creating it'.format(table_name))
        path_to_template = os.path.join(os.environ['ROOT_FOLDER'],
                                        'blight_risk_prediction', 'features',
                                        template)
        #Load template with SQL statement
        with open(path_to_template, 'r') as f:
            sql_script = Template(f.read())
        #Replace values in template
        sql_script = sql_script.substitute(TABLE_NAME=table_name,
                                           DATASET=dataset,
                                           DATE_COLUMN=date_column,
                                           N_MONTHS=n_months,
                                           MAX_DIST=max_dist)
        #Run the code using the connection
        #this is going to take a while
        cur.execute(sql_script)
        #Commit changes to db
        con.commit()

        #If table created has a geom column which type USER DEFINED,
        #delete it, we don't need it here
        cols = columns_for_table_in_schema(con, table_name, current_schema)
        if ('geom', 'USER-DEFINED') in cols:
            #Important: this is not production ready since it's
            #vulnerable to SQL injection; psycopg2 cannot parameterize
            #identifiers, so the table name is interpolated directly.
            q = ('ALTER TABLE {} DROP COLUMN geom').format(table_name)
            cur.execute(q)
            con.commit()
            logger.info('Table {} has a PostGIS column, deleting...'.format(
                table_name))
    else:
        logger.info('Table {} already exists. Skipping...'.format(table_name))

    cur.close()

    if columns == 'all':
        #Since the table contains a geom column, you need to subselect columns
        #to load otherwise pandas will complain
        cols = columns_for_table_in_schema(con, table_name, current_schema)
        cols_to_load = [name for name, col_type in cols
                        if col_type != 'USER-DEFINED']
    #If the user passed an array in the columns parameter, only
    #select those columns
    else:
        cols_to_load = columns

    if load:
        #FIX: the engine used to be created (and 'Loading...' logged) even
        #when load=False; both now happen only when data is actually read.
        e = create_engine(uri)
        logger.info('Loading {} month table...'.format(table_name))
        df = pd.read_sql_table(table_name,
                               e,
                               schema=current_schema,
                               columns=cols_to_load)
        return df
Example #22
0
                    help="password for exchange/outlook",
                    required=True)
parser.add_argument("-d",
                    "--deep",
                    help="Attemp a deep search (takes longer).",
                    action="store_true")
args = parser.parse_args()

url = args.url
user = args.user
#password = getpass.getpass("Password:"******"&&Dec2016"

xml_template = open("resolvenames_template.xml", "r").read()
xml = Template(xml_template)

rooms = {}

for prefix in args.prefix:
    rooms.update(findRooms(prefix))
    print "After searching for prefix '" + prefix + "' we found " + str(
        len(rooms)) + " rooms."

    deep = args.deep

    if deep:
        symbols = letters + digits
        for symbol in symbols:
            prefix_deep = prefix + " " + symbol
            rooms.update(findRooms(prefix_deep))
Example #23
0
    def process_input(self):
        # type: () -> None
        """ Process Input: Processes input video(s) and generates output as per CLI commands.

        Run after all command line options/sub-commands have been parsed.
        """
        logging.debug('Processing input...')
        if not self.options_processed:
            logging.debug('Skipping processing, CLI options were not parsed successfully.')
            return
        self.check_input_open()
        assert self.scene_manager.get_num_detectors() >= 0
        if self.scene_manager.get_num_detectors() == 0:
            logging.error(
                'No scene detectors specified (detect-content, detect-threshold, etc...),\n'
                ' or failed to process all command line arguments.')
            return

        # Handle scene detection commands (detect-content, detect-threshold, etc...).
        self.video_manager.start()
        base_timecode = self.video_manager.get_base_timecode()

        start_time = time.time()
        logging.info('Detecting scenes...')

        num_frames = self.scene_manager.detect_scenes(
            frame_source=self.video_manager, frame_skip=self.frame_skip,
            show_progress=not self.quiet_mode)

        # Handle case where video fails with multiple audio tracks (#179).
        # TODO: Is there a fix for this? See #179.
        if num_frames <= 0:
            logging.critical('\n'.join([
                'Failed to read any frames from video file. This could be caused'
                ' by the video having multiple audio tracks. If so, please try'
                ' removing the audio tracks or muxing to mkv via:'
                '      ffmpeg -i input.mp4 -c copy -an output.mp4'
                'or:'
                '      mkvmerge -o output.mkv input.mp4'
                ' For details, see https://pyscenedetect.readthedocs.io/en/latest/faq/']))
            return

        duration = time.time() - start_time
        logging.info('Processed %d frames in %.1f seconds (average %.2f FPS).',
                     num_frames, duration, float(num_frames)/duration)

        # Handle -s/--statsfile option.
        if self.stats_file_path is not None:
            if self.stats_manager.is_save_required():
                with open(self.stats_file_path, 'wt') as stats_file:
                    logging.info('Saving frame metrics to stats file: %s',
                                 os.path.basename(self.stats_file_path))
                    self.stats_manager.save_to_csv(
                        stats_file, base_timecode)
            else:
                logging.debug('No frame metrics updated, skipping update of the stats file.')

        # Get list of detected cuts and scenes from the SceneManager to generate the required output
        # files with based on the given commands (list-scenes, split-video, save-images, etc...).
        cut_list = self.scene_manager.get_cut_list(base_timecode)
        scene_list = self.scene_manager.get_scene_list(base_timecode)
        video_paths = self.video_manager.get_video_paths()
        video_name = os.path.basename(video_paths[0])
        if video_name.rfind('.') >= 0:
            video_name = video_name[:video_name.rfind('.')]

        # Ensure we don't divide by zero.
        if scene_list:
            logging.info('Detected %d scenes, average shot length %.1f seconds.',
                         len(scene_list),
                         sum([(end_time - start_time).get_seconds()
                              for start_time, end_time in scene_list]) / float(len(scene_list)))
        else:
            logging.info('No scenes detected.')

        # Handle list-scenes command.
        if self.scene_list_output:
            scene_list_filename = Template(self.scene_list_name_format).safe_substitute(
                VIDEO_NAME=video_name)
            if not scene_list_filename.lower().endswith('.csv'):
                scene_list_filename += '.csv'
            scene_list_path = get_and_create_path(
                scene_list_filename,
                self.scene_list_directory if self.scene_list_directory is not None
                else self.output_directory)
            logging.info('Writing scene list to CSV file:\n  %s', scene_list_path)
            with open(scene_list_path, 'wt') as scene_list_file:
                write_scene_list(scene_list_file, scene_list, cut_list)
        # Handle `list-scenes`.
        if self.print_scene_list:
            logging.info("""Scene List:
-----------------------------------------------------------------------
 | Scene # | Start Frame |  Start Time  |  End Frame  |   End Time   |
-----------------------------------------------------------------------
%s
-----------------------------------------------------------------------
""", '\n'.join(
    [' |  %5d  | %11d | %s | %11d | %s |' % (
        i+1,
        start_time.get_frames(), start_time.get_timecode(),
        end_time.get_frames(), end_time.get_timecode())
     for i, (start_time, end_time) in enumerate(scene_list)]))


        if cut_list:
            logging.info('Comma-separated timecode list:\n  %s',
                         ','.join([cut.get_timecode() for cut in cut_list]))

        # Handle save-images command.
        if self.save_images:
            self._generate_images(scene_list=scene_list, video_name=video_name,
                                  image_name_template=self.image_name_format,
                                  output_dir=self.image_directory)

        # Handle export-html command.
        if self.export_html:
            html_filename = Template(self.html_name_format).safe_substitute(
                VIDEO_NAME=video_name)
            if not html_filename.lower().endswith('.html'):
                html_filename += '.html'
            html_path = get_and_create_path(
                html_filename,
                self.image_directory if self.image_directory is not None
                else self.output_directory)
            logging.info('Exporting to html file:\n %s:', html_path)
            if not self.html_include_images:
                self.image_filenames = None
            write_scene_list_html(html_path, scene_list, cut_list,
                                  image_filenames=self.image_filenames,
                                  image_width=self.image_width,
                                  image_height=self.image_height)

        # Handle split-video command.
        if self.split_video:
            # Add proper extension to filename template if required.
            dot_pos = self.split_name_format.rfind('.')
            if self.split_mkvmerge and not self.split_name_format.endswith('.mkv'):
                self.split_name_format += '.mkv'
            # Don't add if we find an extension between 2 and 4 characters
            elif not (dot_pos >= 0) or (
                    dot_pos >= 0 and not
                    ((len(self.split_name_format) - (dot_pos+1) <= 4 >= 2))):
                self.split_name_format += '.mp4'

            output_file_prefix = get_and_create_path(
                self.split_name_format,
                self.split_directory if self.split_directory is not None
                else self.output_directory)
            mkvmerge_available = is_mkvmerge_available()
            ffmpeg_available = is_ffmpeg_available()
            if mkvmerge_available and (self.split_mkvmerge or not ffmpeg_available):
                if not self.split_mkvmerge:
                    logging.warning(
                        'ffmpeg not found, falling back to fast copy mode (split-video -c/--copy).')
                split_video_mkvmerge(video_paths, scene_list, output_file_prefix, video_name,
                                     suppress_output=self.quiet_mode or self.split_quiet)
            elif ffmpeg_available:
                if self.split_mkvmerge:
                    logging.warning('mkvmerge not found, falling back to normal splitting'
                                    ' mode (split-video).')
                split_video_ffmpeg(video_paths, scene_list, output_file_prefix,
                                   video_name, arg_override=self.split_args,
                                   hide_progress=self.quiet_mode,
                                   suppress_output=self.quiet_mode or self.split_quiet)
            else:
                if not (mkvmerge_available or ffmpeg_available):
                    error_strs = ["ffmpeg/mkvmerge is required for split-video [-c/--copy]."]
                else:
                    error_strs = [
                        "{EXTERN_TOOL} is required for split-video{EXTRA_ARGS}.".format(
                            EXTERN_TOOL='mkvmerge' if self.split_mkvmerge else 'ffmpeg',
                            EXTRA_ARGS=' -c/--copy' if self.split_mkvmerge else '')]
                error_strs += ["Install one of the above tools to enable the split-video command."]
                error_str = '\n'.join(error_strs)
                logging.debug(error_str)
                raise click.BadParameter(error_str, param_hint='split-video')
            if scene_list:
                logging.info('Video splitting completed, individual scenes written to disk.')
Example #24
0
# string.Template holding the full Java source of a generated JVpp plugin
# implementation class.  The generator substitutes $plugin_package,
# $plugin_name, $plugin_name_underscore, $json_filename and $methods;
# ${plugin_name} braces are used where the placeholder abuts an identifier.
# The Java text itself must not be edited casually: any stray `$` would be
# interpreted by Template.
_JVPP_IMPL_TEMPLATE = Template("""package $plugin_package;

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.StandardCopyOption;
import java.nio.file.attribute.PosixFilePermission;
import java.nio.file.attribute.PosixFilePermissions;
import java.util.Set;
import java.util.logging.Logger;
import java.util.logging.Level;
import io.fd.jvpp.callback.JVppCallback;
import io.fd.jvpp.VppConnection;
import io.fd.jvpp.JVppRegistry;

/**
 * <p>Default implementation of JVpp interface.
 * <br>It was generated by jvpp_impl_gen.py based on $json_filename.
 * <br>(python representation of api file generated by vppapigen)
 */
public final class JVpp${plugin_name}Impl implements $plugin_package.JVpp${plugin_name} {

    private final static Logger LOG = Logger.getLogger(JVpp${plugin_name}Impl.class.getName());
    private static final java.lang.String LIBNAME = "libjvpp_${plugin_name_underscore}.so";

    // FIXME using NativeLibraryLoader makes load fail could not find (WantInterfaceEventsReply).
    static {
        try {
            loadLibrary();
        } catch (Exception e) {
            LOG.severe("Can't find jvpp jni library: " + LIBNAME);
            throw new ExceptionInInitializerError(e);
        }
    }

    private static void loadStream(final InputStream is) throws IOException {
        final Set<PosixFilePermission> perms = PosixFilePermissions.fromString("rwxr-x---");
        final Path p = Files.createTempFile(LIBNAME, null, PosixFilePermissions.asFileAttribute(perms));
        try {
            Files.copy(is, p, StandardCopyOption.REPLACE_EXISTING);

            try {
                Runtime.getRuntime().load(p.toString());
            } catch (UnsatisfiedLinkError e) {
                throw new IOException("Failed to load library " + p, e);
            }
        } finally {
            try {
                Files.deleteIfExists(p);
            } catch (IOException e) {
            }
        }
    }

    private static void loadLibrary() throws IOException {
        try (final InputStream is = JVpp${plugin_name}Impl.class.getResourceAsStream('/' + LIBNAME)) {
            if (is == null) {
                throw new IOException("Failed to open library resource " + LIBNAME);
            }
            loadStream(is);
        }
    }

    private VppConnection connection;
    private JVppRegistry registry;

    private static native void init0(final JVppCallback callback, final long queueAddress, final int clientIndex);
    @Override
    public void init(final JVppRegistry registry, final JVppCallback callback, final long queueAddress, final int clientIndex) {
        this.registry = java.util.Objects.requireNonNull(registry, "registry should not be null");
        this.connection = java.util.Objects.requireNonNull(registry.getConnection(), "connection should not be null");
        connection.checkActive();
        init0(callback, queueAddress, clientIndex);
    }

    private static native void close0();
    @Override
    public void close() {
        close0();
    }

    @Override
    public int send(io.fd.jvpp.dto.JVppRequest request) throws io.fd.jvpp.VppInvocationException {
        return request.send(this);
    }

    @Override
    public final int controlPing(final io.fd.jvpp.dto.ControlPing controlPing) throws io.fd.jvpp.VppInvocationException {
        return registry.controlPing(JVpp${plugin_name}Impl.class);
    }
$methods
}
""")
Example #25
0
 def generate_docstrings_h(self):
     """Return the body of a docstrings header: one `extern char*`
     declaration per declaration dict that defines 'docstring_var',
     joined by blank lines."""
     externs = []
     for decl in chain(self.declarations, self.stateless_declarations):
         if 'docstring_var' in decl:
             externs.append('extern char* %s;' % decl['docstring_var'])
     return '\n\n'.join(externs)
Example #26
0
class Neo4jClient:
    """Small Neo4j helper.

    Wraps the bolt driver for chunked, transactional Cypher execution
    (run_in_tx and the batch helpers built on it) and uses the HTTP user
    API for one administrative task: changing the server password.
    """

    def __init__(self, neo4j_url, user, password):
        # Connection settings only; the driver is created lazily in connect().
        self.neo4j_url = neo4j_url
        self.neo4j_user = user
        self.neo4j_password = password
        self.neo4j_driver = None

    def connect(self):
        """Create the bolt driver with basic auth and remember it on self."""
        self.neo4j_driver = GraphDatabase.driver(self.neo4j_url,
                                                 auth=basic_auth(
                                                     self.neo4j_user,
                                                     self.neo4j_password))
        return self.neo4j_driver

    def run_in_tx(self, batch, chunk_count=None, dry_run=False):
        """Execute (query, params) work items from *batch* in transactions.

        *batch* may be a generator — each yield receives the previous
        chunk's consumed result via send() — or a plain iterable, which is
        wrapped in a generator.  Up to *chunk_count* items run per
        transaction (default: effectively unlimited).  With dry_run=True
        the work items are returned without touching the database.
        Returns a list of per-chunk result lists.
        """
        if not chunk_count:
            chunk_count = sys.maxsize
        if isinstance(batch, Generator):
            it = batch
        elif isinstance(batch, Iterable):

            # Wrap a plain iterable so the send() protocol below works.
            def gen():
                for j in batch:
                    yield j

            it = gen()
        else:
            err = "batch_job must be iterable or callable but {0} passed"
            err = err.format(type(batch))
            logger.error(err)
            raise ValueError(err)
        if dry_run:
            return list(it)
        session = self.neo4j_driver.session()
        try:
            result_set = []
            consumed_result = None
            more_chunks = True
            while more_chunks:
                logger.debug('neo4j transaction beginning')
                with session.begin_transaction() as tx:
                    chunk_i = 0
                    try:
                        while chunk_i < chunk_count:
                            # noinspection PyNoneFunctionAssignment
                            # First iteration sends None, the only legal
                            # value for starting a generator.
                            query, params = it.send(consumed_result)
                            logger.debug(
                                'chunk %s will run query %s'
                                'in transaction', chunk_i, query)
                            result = tx.run(query, params)
                            # Materialize the stream before the tx closes.
                            consumed_result = list(result)
                            result_set.append(consumed_result)
                            chunk_i += 1
                    except StopIteration:
                        more_chunks = False
                    # Mark the transaction for commit on context exit.
                    tx.success = True
                logger.debug('neo4j transaction committed')
            return result_set
        finally:
            session.close()

    # NOTE(review): "{props}" is the pre-Neo4j-4 Cypher parameter syntax
    # ($props in newer servers) — confirm against the deployed version.
    import_nodes_template = Template("UNWIND {props} AS map "
                                     "CREATE (n$labels) SET n = map")

    def import_nodes(self,
                     nodes: List[dict],
                     labels: List[str] = None,
                     chunk_size: int = 1000):
        """Bulk-create *nodes* (each a property dict), optionally labeled,
        sending them to the server in chunks of *chunk_size*."""
        node_labels = ':{0}'.format(':'.join(labels)) \
            if labels else ''
        query = self.import_nodes_template.safe_substitute(labels=node_labels)

        chunk_count = 1

        def batch():
            for i in range(0, len(nodes), chunk_size):
                logger.debug('starting chunk %s', i)
                result = (yield query, dict(props=nodes[i:i + chunk_size]))
                logger.debug(result)

        result = self.run_in_tx(batch(), chunk_count=chunk_count)
        return result

    def run_batch_query(self,
                        query: str,
                        labels: List[str] = None,
                        params: List[dict] = None,
                        chunk_size: int = 1000):
        """Run *query* once per chunk of *params*, UNWINDing each chunk;
        `$labels` in the query is replaced with the label string."""
        node_labels = ':{0}'.format(':'.join(labels)) \
            if labels else ''
        query_template = Template("UNWIND {params} AS params " + query)
        labeled_query = query_template.safe_substitute(labels=node_labels)

        chunk_count = 1

        def batch():
            for i in range(0, len(params), chunk_size):
                logger.debug('starting chunk %s', i)
                result = (yield labeled_query,
                          dict(params=params[i:i + chunk_size]))
                logger.debug(result)

        result = self.run_in_tx(batch(), chunk_count=chunk_count)
        return result

    def run_query(self,
                  query: str,
                  labels: List[str] = None,
                  params: dict = None) -> List:
        """Run a single query (with optional `$labels` substitution) in one
        transaction and return its consumed result list."""
        node_labels = ':{0}'.format(':'.join(labels)) \
            if labels else ''
        query_template = Template(query)
        labeled_query = query_template.safe_substitute(labels=node_labels)
        logger.debug('will run query %s', labeled_query)

        def batch():
            yield labeled_query, params

        result = self.run_in_tx(batch(), chunk_count=1)
        return result[0]

    def set_neo4j_password(self, new_password):
        """
        sets new password for the neo4j server updating the server

        :param new_password: new password to set
        :return: boolean: True if update is successful

        An excerpt from neo4j documentation on password change API:

        Changing the user password
        Given that you know the current password, you can ask the server to
        change a users password.
        You can choose any password you like, as long as it is different from
        the current password.

        Example request

        POST http://localhost:7474/user/neo4j/password
        Accept: application/json; charset=UTF-8
        Authorization: Basic bmVvNGo6bmVvNGo=
        Content-Type: application/json
        {
          "password" : "secret"
        }
        Example response

        200: OK
        """

        value_error_msg = ''

        # Validate before making the HTTP call.
        if new_password == self.neo4j_password:
            value_error_msg = "New password must not equal old password"
        elif not new_password:
            value_error_msg = "New password must not be empty"

        if value_error_msg:
            raise ValueError(value_error_msg)

        url = "{host_url}/user/{user_name}/password".format(
            host_url=self.neo4j_url, user_name=self.neo4j_user)
        headers = {
            "Accept": "application/json; charset=UTF-8",
            "Content-Type": "application/json"
        }
        payload = {'password': new_password}
        r = requests.post(url,
                          auth=(self.neo4j_user, self.neo4j_password),
                          headers=headers,
                          json=payload)
        if r.ok:
            # Keep the stored password in sync for subsequent calls.
            self.neo4j_password = new_password
            return True
        else:
            r.raise_for_status()
 def appendline(self, list, text, param, value=None):
     """Append *text* to *list*, prefixed with a line-debug directive.

     *text* is rendered with safe_substitute over *param*; ``$v`` expands
     to the C representation of ``param[value]`` (empty when *value* is
     None), ``$doline`` to the module-level LINEDEBUG directive and
     ``$configname`` to this config's name.
     """
     val = "" if value is None else self.crepr(param, param[value])
     line_template = Template('${doline} $srcline "$srcfile"\n      ' + text)
     rendered = line_template.safe_substitute(
         param, v=val, doline=LINEDEBUG, configname=self.name)
     list.append(rendered)
Example #28
0
# string.Template for a Psi4 coupled-cluster input deck used by QSoME.
# Placeholders filled by the driver: $MEMORY (MB), $CHARGE, $SPIN, $GEOM,
# $NCORE, $BASIS_TYPE, $BASIS and $METHOD.  The deck disables symmetry and
# reorientation so an external embedding potential stays aligned.
psi4_cc_template = Template('''#! Psi4 input generated by QSoME
memory $MEMORY MB  # total memory not per thread memory for parallel jobs

molecule acrolein {
  $CHARGE $SPIN
# acrolein geometry from MD snapshots 
  $GEOM
  units angstrom # default in psi4
  symmetry c1    # no symmetry with embedding potential
  noreorient     # prevent reorienting molecules 
  nocom          # prevent recentering molecules 
}

set {
  freeze_core false 
  #df_basis_scf aug-cc-pvdz-jkfit
  #df_basis_cc aug-cc-pvdz-ri
  #scf_type df
  #cc_type df
  scf_type pk
  CC_NUM_THREADS $NCORE
}

basis {
# generated by PySCF 
$BASIS_TYPE
$BASIS
}

energy('$METHOD')
''')
Example #29
0
import os
from string import Template

# Generate conf/nutcracker.yml

# Required settings abort with KeyError when missing; port/db have defaults.
target_port = os.environ['TARGET_PORT']
redis_host = os.environ['REDIS_HOST']
redis_port = os.environ.get('REDIS_PORT', 6379)
redis_db = os.environ.get('REDIS_DB', 0)
redis_auth = os.environ['REDIS_AUTH']

# FIX: the template file handle used to be leaked (opened inline, never
# closed); both files are now managed with `with` so they are always closed.
with open('/usr/src/twemproxy/conf/templates/nutcracker.yml.tmpl', 'r') as tmpl_file:
    nutcracker_yml_tmpl = tmpl_file.read()

nutcracker_yml_body = Template(nutcracker_yml_tmpl).substitute(
    target_port=target_port,
    redis_host=redis_host,
    redis_port=redis_port,
    redis_db=redis_db,
    redis_auth=redis_auth)

with open('/usr/src/twemproxy/conf/nutcracker.yml', 'w') as out_file:
    out_file.write(nutcracker_yml_body)
from string import Template

# The format uses placeholder names formed by $ with valid Python identifiers
#  (alphanumeric characters and underscores).
# Surrounding the placeholder with braces allows it to be followed
# by more alphanumeric letters with no intervening spaces.
# Writing $$ creates a single escaped $:
# Demo of string.Template: $name placeholders, ${name} to delimit the
# identifier inside surrounding text, and $$ for a literal dollar sign.
t = Template('${village}folks send $$10 to $cause.')

values = {'village': 'Nottingham', 'cause': 'the ditch fund'}
x = t.substitute(**values)

print(x)

t = Template('Return the $item to $owner.')
d = dict(item='unladen swallow')
# substitute() would raise KeyError: 'owner' here, since d lacks that key;
# safe_substitute() leaves unknown placeholders untouched instead.
t.safe_substitute(d)  # will return 'Return the unladen swallow to $owner.'