Example 1
 def __str__(self):
     if self.left or self.right:
         return '{}({},\n{},\n{}\n)'.format(type(self).__name__,
                 self.data,
                 textwrap.indent(str(self.left), ' '*4),
                 textwrap.indent(str(self.right), ' '*4))
     return repr(self)
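
A minimal, self-contained version of this recursive pretty-printing pattern (the Node class here is hypothetical, not from the source project):

import textwrap

class Node:
    def __init__(self, data, left=None, right=None):
        self.data, self.left, self.right = data, left, right

    def __repr__(self):
        return '{}({!r})'.format(type(self).__name__, self.data)

    def __str__(self):
        if self.left or self.right:
            # Render children recursively, shifted right by four spaces.
            return '{}({},\n{},\n{}\n)'.format(
                type(self).__name__, self.data,
                textwrap.indent(str(self.left), ' ' * 4),
                textwrap.indent(str(self.right), ' ' * 4))
        return repr(self)

print(Node(1, Node(2), Node(3)))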
Example 2
def print_scrape_results_http(results):
    """Print the results obtained by "http" method.

    Args:
        results: The results to be printed to stdout.
    """
    for t in results:
        for result in t:
            logger.info('{} links found. The search with the keyword "{}" yielded the result: "{}"'.format(
                len(result['results']), result['search_keyword'], result['num_results_for_kw']))
            import textwrap
            for result_set in ('results', 'ads_main', 'ads_aside'):
                if result_set in result.keys():
                    print('### {} link results for "{}" ###'.format(len(result[result_set]), result_set))
                    for link_title, link_snippet, link_url, link_position in result[result_set]:
                        try:
                            print('  Link: {}'.format(unquote(link_url.geturl())))
                        except AttributeError as ae:
                            print(ae)
                        if Config['GLOBAL'].getint('verbosity') > 1:
                            print(
                                '  Title: \n{}'.format(textwrap.indent('\n'.join(textwrap.wrap(link_title, 50)), '\t')))
                            print(
                                '  Description: \n{}\n'.format(
                                    textwrap.indent('\n'.join(textwrap.wrap(link_snippet, 70)), '\t')))
                            print('*' * 70)
                            print()
Example 3
    def _format_loop_exception(self, context, n):
        message = context.get('message', 'Unhandled exception in event loop')
        exception = context.get('exception')
        if exception is not None:
            exc_info = (type(exception), exception, exception.__traceback__)
        else:
            exc_info = None

        lines = []
        for key in sorted(context):
            if key in {'message', 'exception'}:
                continue
            value = context[key]
            if key == 'source_traceback':
                tb = ''.join(traceback.format_list(value))
                value = 'Object created at (most recent call last):\n'
                value += tb.rstrip()
            else:
                try:
                    value = repr(value)
                except Exception as ex:
                    value = ('Exception in __repr__ {!r}; '
                             'value type: {!r}'.format(ex, type(value)))
            lines.append('[{}]: {}\n\n'.format(key, value))

        if exc_info is not None:
            lines.append('[exception]:\n')
            formatted_exc = textwrap.indent(
                ''.join(traceback.format_exception(*exc_info)), '  ')
            lines.append(formatted_exc)

        details = textwrap.indent(''.join(lines), '    ')
        return '{:02d}. {}:\n{}\n'.format(n, message, details)
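
The core trick here, indenting a multi-line formatted traceback as a single block, can be reproduced in isolation:

import sys
import textwrap
import traceback

try:
    1 / 0
except ZeroDivisionError:
    # format_exception returns a list of lines; join them before indenting.
    formatted = ''.join(traceback.format_exception(*sys.exc_info()))

print(textwrap.indent(formatted, '  '))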
Example 4
def training_desc(info, verbosity, precision):
    """
    Textifies a single training history record. Returns a list of lines.
    """
    desc = [
        "Date: {}".format(date_desc(info['date'])),
        "Bot: {bot}".format(**info) +
                ", Trainer: {trainer} {config}".format(**info),
        "Dists: {}".format(indent(dists_desc(info['dists']), " " * 7).strip()),
        "Seeds: {}".format(seeds_desc(info['seeds'], verbosity)),
        "Level: {}".format(level_desc(info['level'])) +
                ", Runs: {}".format(info['runs']) +
                ", Time: {}".format(time_desc(info['time'], precision)),
        "Output: {output}, PRNGs: {prngs_seed}".format(**info) +
                ", Scores: {}".format(scores_desc(info['scores'], verbosity,
                                                  precision))
    ]
    if info['phases'] is not None:
        desc.insert(3, "Phases: {}".format(
                                    phases_desc(info['phases'], precision)))
    if info['emphases']:
        desc.insert(3, "Emphases: {}".format(emphases_desc(info['emphases'])))
    if info['param_scale']:
        desc.insert(5, "Scaled params: {}".format(
                                        param_scale_desc(info['param_scale'])))
    if info['param_freeze']:
        desc.insert(5, "Frozen params: {}".format(
                                        ", ".join(info['param_freeze'])))
    if info['param_map']:
        desc.insert(5, "Params map: {}".format(
                indent(param_map_desc(info['param_map']), " " * 12).strip()))
    return desc
Example 5
    def write_options_group(self, write, group):
        def is_positional_group(group):
            return any(not o.option_strings for o in group._group_actions)

        if is_positional_group(group):
            for option in group._group_actions:
                write(option.metavar)
                write(textwrap.indent(option.help or '', ' ' * 4))
            return

        opts = OrderedDict()

        for option in group._group_actions:
            if option.metavar:
                option_fmt = '%s ' + option.metavar
            else:
                option_fmt = '%s'
            option_str = ', '.join(option_fmt % s for s in option.option_strings)
            option_desc = textwrap.dedent((option.help or '') % option.__dict__)
            opts[option_str] = textwrap.indent(option_desc, ' ' * 4)

        padding = len(max(opts, key=len)) + 1

        for option, desc in opts.items():
            write(option.ljust(padding), desc)
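
The padding/ljust layout used above can be sketched standalone (the sample options are made up):

import textwrap
from collections import OrderedDict

opts = OrderedDict()
opts['-v, --verbose'] = textwrap.indent('increase output verbosity', ' ' * 4)
opts['-q, --quiet'] = textwrap.indent('suppress non-error output', ' ' * 4)

# Pad every option string to the width of the longest one.
padding = len(max(opts, key=len)) + 1
for option, desc in opts.items():
    print(option.ljust(padding), desc)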
Example 6
    def write_options_group(self, group, fp, with_title=True, base_indent=4):
        def is_positional_group(group):
            return any(not o.option_strings for o in group._group_actions)

        indent = ' ' * base_indent

        if is_positional_group(group):
            for option in group._group_actions:
                fp.write(option.metavar + '\n')
                fp.write(textwrap.indent(option.help or '', ' ' * base_indent) + '\n')
            return

        if not group._group_actions:
            return

        if with_title:
            fp.write('\n\n')
            fp.write(group.title + '\n')

        opts = OrderedDict()

        for option in group._group_actions:
            if option.metavar:
                option_fmt = '%s ' + option.metavar
            else:
                option_fmt = '%s'
            option_str = ', '.join(option_fmt % s for s in option.option_strings)
            option_desc = textwrap.dedent((option.help or '') % option.__dict__)
            opts[option_str] = textwrap.indent(option_desc, ' ' * 4)

        padding = len(max(opts, key=len)) + 1

        for option, desc in opts.items():
            fp.write(indent + option.ljust(padding) + desc + '\n')
Example 7
   def play(self):
      selection = None
      current_sleep = self.start
      current_round = 0

      while True:
         selection = choice_unduplicated(available_notes, selection)
         octave, note = selection.split('.')

         flute = getFlute(notes[selection])
         figlet_note = figlet('{}  {}'.format(octave, note))

         header = ' Interval: {:.2f} | Sleep: {:.2f} | Round {:d}/{:d}'.format(
            self.step, current_sleep, current_round, self.rounds)

         flute_padded = indent(flute, ' ' * 14)
         figlet_width = len(max(figlet_note.splitlines(), key=len))
         note_padded = indent(figlet_note, ' ' * (17 - int(figlet_width/2)))

         system('clear')
         print(header, "\n\n", note_padded, flute_padded, sep='')

         sleep(current_sleep)

         current_round += 1

         if (current_round >= self.rounds):
            current_round = 0
            current_sleep -= self.step

         if (current_sleep <= self.stop):
            break
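
The centering arithmetic (shift a block so its midpoint lands near a fixed column) works with any multi-line string:

from textwrap import indent

art = ' _\n| |\n|_|'
figlet_width = len(max(art.splitlines(), key=len))
# Shift the block so its centre sits near column 17.
print(indent(art, ' ' * (17 - int(figlet_width / 2))))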
Example 8
    def check():
        name = f.__name__
        message = ""
        if f.__doc__:
            message = "\n" + textwrap.indent(f.__doc__, '    """ ')
        try:
            f()
            return True
        except AssertionError as e:
            if e.args:
                message = e.args[0].strip()
            exception_class, exception, trace = sys.exc_info()
            frames = traceback.extract_tb(trace)
            last = frames[-1]

            message_hr = textwrap.indent(message, "    ")

            assertion = "{3}".format(*last)
            position = "{0}:{1}".format(*last)

            report("{} ({}):".format(name, position))
            if message_hr:
                report("    --------------------------------")
                report("{}".format(message_hr))
                report("    --------------------------------")
            report("    {}".format(assertion))
            report("")
            return False
        except Exception as e:
            report_exc("{}:{}".format(name, message), traceback.format_exc())
            return False
Example 9
def print_scrape_results_http(results, verbosity=1, view=False):
    """Print the results obtained by "http" method."""
    for t in results:
        for result in t:
            logger.info('{} links found! The search with the keyword "{}" yielded the result:{}'.format(
                len(result['results']), result['search_keyword'], result['num_results_for_kw']))
            if view:
                import webbrowser
                webbrowser.open(result['cache_file'])
            import textwrap

            for result_set in ('results', 'ads_main', 'ads_aside'):
                if result_set in result.keys():
                    print('### {} link results for "{}" ###'.format(len(result[result_set]), result_set))
                    for link_title, link_snippet, link_url, link_position in result[result_set]:
                        try:
                            print('  Link: {}'.format(urllib.parse.unquote(link_url.geturl())))
                        except AttributeError as ae:
                            print(ae)
                        if verbosity > 1:
                            print(
                                '  Title: \n{}'.format(textwrap.indent('\n'.join(textwrap.wrap(link_title, 50)), '\t')))
                            print(
                                '  Description: \n{}\n'.format(
                                    textwrap.indent('\n'.join(textwrap.wrap(link_snippet, 70)), '\t')))
                            print('*' * 70)
                            print()
Example 10
def galaxy(_galaxy):
    systems = []
    debug('showing the galaxy...')
    for c, s in _galaxy._systems.items():
        sys = indent(system(s), '  ')[2:]
        systems.append('{} {}'.format(c, sys))
    return 'Galaxy:\n{}'.format(indent('\n'.join(systems), '  '))
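
`indent(...)[2:]` is a compact way to get a hanging indent: every line receives the prefix, then it is sliced off the first line so a label can sit there instead:

from textwrap import indent

block = 'first line\nsecond line\nthird line'
hanging = indent(block, '  ')[2:]  # drop the prefix on line one only
print('A ' + hanging)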
Example 11
def check_negative(dirname, results, expected):
    """Checks the results with the expected results."""

    inputs  = (set(results['inputs']),  set(expected['!inputs']))
    outputs = (set(results['outputs']), set(expected['!outputs']))

    if not inputs[1].isdisjoint(inputs[0]):
        print(bcolors.ERROR + ': Found inputs that should not exist:')
        s = pprint.pformat(inputs[1] & inputs[0], width=1)
        print(textwrap.indent(s, '       '))
        return False

    if not outputs[1].isdisjoint(outputs[0]):
        print(bcolors.ERROR + ': Found outputs that should not exist:')
        s = pprint.pformat(outputs[1] & outputs[0], width=1)
        print(textwrap.indent(s, '       '))
        return False

    existing = [p for p in (os.path.join(dirname, p) for p in outputs[1])
                if os.path.exists(p)]

    if existing:
        print(bcolors.ERROR + ': Outputs exist on the file system, but should not:')
        pprint.pprint(existing)
        return False

    return True
Example 12
def check_positive(dirname, results, expected):
    """Checks the results with the expected results."""

    inputs  = (set(results['inputs']),  set(expected['inputs']))
    outputs = (set(results['outputs']), set(expected['outputs']))

    if not inputs[1].issubset(inputs[0]):
        print(bcolors.ERROR + ': Expected inputs are not a subset of the results.')
        print('       The following were not found in the results:')
        s = pprint.pformat(inputs[1] - inputs[0], width=1)
        print(textwrap.indent(s, '       '))
        print('       Instead, these were found:')
        s = pprint.pformat(inputs[0], width=1)
        print(textwrap.indent(s, '       '))
        return False

    if not outputs[1].issubset(outputs[0]):
        print(bcolors.ERROR + ': Expected outputs are not a subset of the results')
        print('       The following were not found in the results:')
        s = pprint.pformat(outputs[1] - outputs[0], width=1)
        print(textwrap.indent(s, '       '))
        print('       Instead, these were found:')
        s = pprint.pformat(outputs[0], width=1)
        print(textwrap.indent(s, '       '))
        return False

    missing = [p for p in (os.path.join(dirname, p) for p in outputs[0])
                if not os.path.exists(p)]

    if missing:
        print(bcolors.ERROR + ': Result outputs missing from file system:')
        pprint.pprint(missing)
        return False

    return True
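
The pprint.pformat(..., width=1) plus textwrap.indent combination prints one set element per line under a fixed margin; a standalone sketch:

import pprint
import textwrap

missing = {'out/a.txt', 'out/b.txt'}
s = pprint.pformat(missing, width=1)   # one element per line
print(textwrap.indent(s, '       '))   # align under a leading message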
Example 13
	def pretty_print(self, m=True, f=False):
		# Print the files and folders under root, with their memory usage if m (with_memory) is True.
		# Only print folders if f (folders_only) is True.
		base=3
		width=self.depth*base+50
		DFS=[self.root]
		while DFS:
			cur_node=DFS.pop()
			indent_size=cur_node.dist*base
			name=os.path.basename(cur_node.name)
			star_size=width-indent_size-len(name)
			try:
				if m:
					print(textwrap.indent(name, ' ' * indent_size), '.' * star_size, cur_node.memsize)
				else:
					print(textwrap.indent(name, ' ' * indent_size))
			except UnicodeEncodeError as err:
				logging.debug(err)
			for k in cur_node.kiddir:
				DFS.append(k)
			if not f:
				for k in cur_node.kidfile:
					indent_size=k.dist*base
					name=os.path.basename(k.name)
					star_size=width-indent_size-len(name)
					try:
						if m:
							print(textwrap.indent(name, ' ' * indent_size), '.' * star_size, k.memsize)
						else:
							print(textwrap.indent(name, ' ' * indent_size))
					except UnicodeEncodeError as err:
						print('Handling unicode encode error', err)
Example 14
 def __str__(self):
     o = self.fmt.format(**self.__dict__)
     for k, l in self.lines.items():
         o += textwrap.indent(str(l), prefix=k + ' ')
     for txt in self.texts:
         o += 'with string\n'
         o += textwrap.indent(str(txt), prefix='    ')
     return o
Example 15
def build_cpp_api_main(outputdir, rst_header, components):
    """Parse existing rst files (one for each class,
    + those for functions) generated for C++ API
    and collect them into cpp_api.rst
    in sphinx/reference directory.

    Parameters
    ----------
    outputdir : Path()
         sphinx directory which contains rst files
         generated for the api (e.g. by doxy2swig)
    rst_header : string
         text to put on top of the cpp_api file.
    components : list
         component names, used to order the output sections.
    """

    mainrst_filename = Path(outputdir, 'cpp_api.rst')
    # list documented (cpp) packages
    docpp_dir = Path(outputdir, 'cpp')
    packages = [f for f in docpp_dir.glob('*')]
    packages = [p.name for p in packages if os.listdir(p)]
    # trick to print components in the expected order.
    packages = [p for p in components if p in packages]
    indent = 4 * ' '
    class_diag = 'Class diagrams (UML view)'
    class_diag += '\n' + len(class_diag) * '=' + '\n\n'
    class_diag += ':doc:`/reference/class_diagrams`\n\n'

    with open(mainrst_filename, 'w') as f:
        label = '.. _siconos_cpp_reference:\n\n\n'
        title = 'Siconos C/C++ API reference'
        title += '\n' + len(title) * '#' + '\n\n'
        title += 'This is the documentation of C/C++ interface to Siconos.\n\n\n'
        f.write(label)
        f.write(title)
        f.write(rst_header)
        tab_directive = '.. csv-table::\n'
        tab_directive += textwrap.indent(':widths: 60 40\n\n', indent)
        column_titles = '**Classes and structs**, **Files**\n'
        tab_directive += textwrap.indent(column_titles, indent)

        f.write(class_diag)
        for p in packages:
            title = p.title() + ' component\n'
            title += len(title) * '=' + '\n\n'
            ppath = 'cpp/' + p
            f.write(title)
            pgm_listings = 'Check :ref:`' + p + '_pgm_listings`'
            pgm_listings += ' for a complete list of headers for this component.'
            f.write(pgm_listings + '\n\n')
            #f.write(tab_directive)
            directive = '.. include:: ' + ppath + '/autodoc_classes.rst'
            directive += '\n'#','
            directive += '.. include:: ' + ppath + '/autodoc_files.rst\n'
            indent = ''
            f.write(textwrap.indent(directive, indent))
            f.write('\n')
Example 16
File: git.py Project: jkloetzke/bob
    def status(self, workspacePath):
        status = ScmStatus()
        try:
            onCorrectBranch = False
            output = self.callGit(workspacePath, 'ls-remote' ,'--get-url')
            if output != self.__url:
                status.add(ScmTaint.switched,
                    "> URL: configured: '{}', actual: '{}'".format(self.__url, output))

            if self.__commit:
                output = self.callGit(workspacePath, 'rev-parse', 'HEAD')
                if output != self.__commit:
                    status.add(ScmTaint.switched,
                        "> commit: configured: '{}', actual: '{}'".format(self.__commit, output))
            elif self.__tag:
                output = self.callGit(workspacePath, 'tag', '--points-at', 'HEAD').splitlines()
                if self.__tag not in output:
                    actual = ("'" + ", ".join(output) + "'") if output else "not on any tag"
                    status.add(ScmTaint.switched,
                        "> tag: configured: '{}', actual: '{}'".format(self.__tag, actual))
            elif self.__branch:
                output = self.callGit(workspacePath, 'rev-parse', '--abbrev-ref', 'HEAD')
                if output != self.__branch:
                    status.add(ScmTaint.switched,
                        "> branch: configured: '{}', actual: '{}'".format(self.__branch, output))
                else:
                    output = self.callGit(workspacePath, 'log', '--oneline',
                        'refs/remotes/origin/'+self.__branch+'..HEAD')
                    if output:
                        status.add(ScmTaint.unpushed_main,
                            joinLines("> unpushed commits on {}:".format(self.__branch),
                                indent(output, '   ')))
                    onCorrectBranch = True

            # Check for modifications wrt. checked out commit
            output = self.callGit(workspacePath, 'status', '--porcelain')
            if output:
                status.add(ScmTaint.modified, joinLines("> modified:",
                    indent(output, '   ')))

            # The following shows all unpushed commits reachable by any ref
            # (local branches, stash, detached HEAD, etc).
            # Exclude HEAD if the configured branch is checked out to not
            # double-count them. Does not mark the SCM as dirty.
            what = ['--all', '--not', '--remotes']
            if onCorrectBranch: what.append('HEAD')
            output = self.callGit(workspacePath, 'log', '--oneline', '--decorate',
                *what)
            if output:
                status.add(ScmTaint.unpushed_local,
                    joinLines("> unpushed local commits:", indent(output, '   ')))

        except BuildError as e:
            status.add(ScmTaint.error, e.slogan)

        return status
Example 17
def slope_a(a, cell_size=1, kern=None, degrees=True, verb=False, keep=False):
    """Return slope in degrees for an input array using 3rd order
    finite difference method for a 3x3 moving window view into the array.

    Requires:
    ---------
    - a : an input 2d array. X and Y represent coordinates of the Z values
    - cell_size : cell size, must be in the same units as X and Y
    - kern : kernel to use
    - degrees : True, returns degrees otherwise radians
    - verb : True, to print results
    - keep : False, to remove/squeeze extra dimensions
    - filter :
        np.array([[1, 2, 1], [2, 0, 2], [1, 2, 1]]) **current default

    Notes:
    ------

    ::

        dzdx: sum(col2 - col0)/8*cellsize
        dzdy: sum(row2 - row0)/8*cellsize
        Assert the array is ndim=4 even if (1,z,y,x)
        general         dzdx      +    dzdy     =    dxyz
        [[a, b, c],  [[1, 0, 1],   [[1, 2, 1]       [[1, 2, 1]
         [d, e, f]    [2, 0, 2], +  [0, 0, 0]   =    [2, 0, 2],
         [g, h, i]    [1, 0, 1]]    [1, 2, 1]]       [1, 2, 1]]

    """
    frmt = """\n    :----------------------------------------:
    :{}\n    :input array...\n    {}\n    :slope values...\n    {!r:}
    :----------------------------------------:
    """
    # ---- stride the data and calculate slope for 3x3 sliding windows ----
    np.set_printoptions(edgeitems=10, linewidth=100, precision=1)
    a_s = stride(a, win=(3, 3), stepby=(1, 1))
    if a_s.ndim < 4:
        new_shape = (1,) * (4-len(a_s.shape)) + a_s.shape
        a_s = a_s.reshape(new_shape)
    #
    kern = kernels(kern)  # return the kernel if specified
    # ---- default filter, apply the filter to the array ----
    #
    dz_dx, dz_dy = filter_a(a_s, a_filter=kern, cell_size=cell_size)
    #
    s = np.sqrt(dz_dx**2 + dz_dy**2)
    if degrees:
        s = np.rad2deg(np.arctan(s))
    if not keep:
        s = np.squeeze(s)
    if verb:
        p = "    "
        args = ["Results for slope_a... ",
                indent(str(a), p), indent(str(s), p)]
        print(dedent(frmt).format(*args))
    return s
Example 18
 def __init__(self, module, objs, pckg_tree=None):
     self.name = module.split('.')
     if pckg_tree:
         pckg_tree.walk(self.name[1:], self)
     self._extractors = {}
     self._var_names = set()
     code = StringIO()
     for obj in objs:
         if isinstance(obj, RustFuncGen.Func):
             self._extractors[obj.name] = extractors = []
             params, pyconstructors, argnames = [], [], []
             pn = [x[0] for x in obj.parameters]
             for x, param in enumerate(obj.parameters):
                 argnames.append(param[0] + ', ')
                 pyarg_constructor = self._fn_arg.format(param[0])
                 p, _ = self.unwrap_types(param[1], "", "")
                 if p[0] != '(':
                     p = p[1:-1]
                 sig = param[0] + ': ' + p
                 if x + 1 != len(obj.parameters):
                     sig += ",\n"
                     pyarg_constructor += "\n"
                 params.append(indent(sig, tab * 3))
                 pyconstructors.append(indent(pyarg_constructor, tab * 3))
             signature, return_val = self.unwrap_types(
                 obj.rsreturn, "", "",
                 extractors=extractors,
                 arg_names=pn,
                 init=True,
                 parent="result")
             if signature[0] == '(':
                 return_type = "-> " + signature
             elif len(obj.rsreturn) == 1 \
                     and obj.rsreturn[0] == 'PyObject::None':
                 return_type, return_val = "", ""
             else:
                 return_type = "-> " + signature[1:-1]
             if len(argnames) > 0:
                 args = "(" + "".join(argnames) + "),"
             else:
                 args = "NoArgs,"
             body = self._fn_body.substitute(
                 name=obj.name,
                 convert="".join(pyconstructors),
                 args=args,
                 extract_values="".join(extractors),
                 return_value=return_val,
             )
             code.write(self._function.substitute(
                 name=obj.name,
                 parameters="".join(params),
                 return_type=return_type,
                 body=body,
             ))
     self._generated_functions = code
Example 19
 def fmt_field(key, value="", level=0):
     if "\n" in value:
         value = indent(dedent(value), get_setting("indent")).strip("\n")
         sep = "\n"
     elif value:
         sep = " "
     else:
         sep = ""
     key = str(key).replace("_", " ")
     leader = level * get_setting("indent")
     return indent(LabelColor(key + ":") + sep + value, leader)
Example 20
    def __call__(self, content=None, wd=None, sudo=False, sudoPwd=None):
        if self.path is not None:
            try:
                with open(self.path, 'r') as input_:
                    self._content = input_.read()
            except FileNotFoundError:
                pass

        content = self._content if content is None else content
        if isinstance(self.seek, str):
            rObj = re.compile(self.seek, re.DOTALL | re.MULTILINE)
            match = rObj.search(content)
            if match:
                tgt = match.string[match.start():match.end()]
                msg = log_message(
                    logging.DEBUG,
                    msg="Pattern {} matched {}".format(
                        rObj.pattern, tgt),
                    name=self._name)
                self._rv = "\n".join(
                    [rObj.sub(textwrap.indent(self.data, ' ' * self.indent),
                     content, count=0)] +
                    [""] * self.newlines
                )
            else:
                msg = log_message(
                    logging.WARNING,
                    msg="Pattern {} unmatched.".format(
                        rObj.pattern),
                    name=self._name)
                self._rv = ""

            yield msg

        elif self.seek:
            args = (
                [content] +
                [textwrap.indent(self.data, ' ' * self.indent)] +
                [""] * self.newlines
            )
            self._rv = "\n".join(args)
        else:
            args = (
                [textwrap.indent(self.data, ' ' * self.indent)] +
                [""] * self.newlines + [content]
            )
            self._rv = "\n".join(args)

        # yield log_message(logging.DEBUG, msg=self._rv, name=self._name)

        if self._rv is not None and self.path is not None:
            with open(self.path, 'w') as output:
                output.write(self._rv)
                output.flush()
Example 21
 def __str__(self):
     s = ""
     s += "frac_train = %s" % self.frac_train
     s += "\nrandomized = %s" % self.randomized
     s += "\nnum_trainings = %s" % self.num_trainings
     s += "\nreplace = %s" % self.replace
     s += "\nresults = %s" % self.results
     s += "\nmean = %s" % self.mean
     s = textwrap.indent(s, "    ")
     s = "CrossValidationResults:\n" + s
     return s
Example 22
 def visit_If(self, node):
     test = self.visit(node.test)
     spaces = ' ' * 4
     body = textwrap.indent('\n'.join(map(self.visit, node.body)), spaces)
     if node.orelse:
         orelse = textwrap.indent(
             '\n'.join(map(self.visit, node.orelse)),
             spaces
         )
         return 'if {}:\n{}\nelse:\n{}'.format(test, body, orelse)
     return 'if {}:\n{}'.format(test, body)
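
A self-contained reduction of this code-generation pattern (emit_if is a hypothetical helper, not part of the original visitor):

import textwrap

def emit_if(test, body_lines, orelse_lines=()):
    spaces = ' ' * 4
    body = textwrap.indent('\n'.join(body_lines), spaces)
    if orelse_lines:
        orelse = textwrap.indent('\n'.join(orelse_lines), spaces)
        return 'if {}:\n{}\nelse:\n{}'.format(test, body, orelse)
    return 'if {}:\n{}'.format(test, body)

print(emit_if('x > 0', ['y = 1'], ['y = -1']))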
Example 23
    def write_options(self, parser, fp):
        def is_positional_group(group):
            return any(not o.option_strings for o in group._group_actions)

        # HTML output:
        # A table using some column-spans

        def html_write(s):
            for line in s.splitlines():
                fp.write('    ' + line + '\n')

        rows = []
        for group in parser._action_groups:
            if group.title == 'Common options':
                # (no of columns used, columns, ...)
                rows.append((1, '.. class:: borg-common-opt-ref\n\n:ref:`common_options`'))
            else:
                if not group._group_actions:
                    continue
                group_header = '**%s**' % group.title
                if group.description:
                    group_header += ' — ' + group.description
                rows.append((1, group_header))
                if is_positional_group(group):
                    for option in group._group_actions:
                        rows.append((3, '', '``%s``' % option.metavar, option.help or ''))
                else:
                    for option in group._group_actions:
                        if option.metavar:
                            option_fmt = '``%s ' + option.metavar + '``'
                        else:
                            option_fmt = '``%s``'
                        option_str = ', '.join(option_fmt % s for s in option.option_strings)
                        option_desc = textwrap.dedent((option.help or '') % option.__dict__)
                        rows.append((3, '', option_str, option_desc))

        fp.write('.. only:: html\n\n')
        table = io.StringIO()
        table.write('.. class:: borg-options-table\n\n')
        self.rows_to_table(rows, table.write)
        fp.write(textwrap.indent(table.getvalue(), ' ' * 4))

        # LaTeX output:
        # Regular rST option lists (irregular column widths)
        latex_options = io.StringIO()
        for group in parser._action_groups:
            if group.title == 'Common options':
                latex_options.write('\n\n:ref:`common_options`\n')
                latex_options.write('    |')
            else:
                self.write_options_group(group, latex_options)
        fp.write('\n.. only:: latex\n\n')
        fp.write(textwrap.indent(latex_options.getvalue(), ' ' * 4))
Example 24
	def __repr__(self):
		lines = [
			'building_id: {}'.format(self.building_id),
			'name:        {}'.format(self.name),
			'container: \n{}'.format(indent(str(self.container), '  '))
			]
		for (index, resource_plant) in enumerate(self.resource_plants):
			lines.append('resource_plant {}:'.format(index))
			lines.append(indent(str(resource_plant), '  '))
		for (index, unit_factory) in enumerate(self.unit_factories):
			lines.append('unit_factory {}:'.format(index))
			lines.append(indent(str(unit_factory), '  '))
		return '\n'.join(lines)
Example 25
        def make_codeblock(s):
            """Make a codeblock that will render nicely in sphinx"""
            block = ["AreaDetector Component", "::", ""]

            lines = s.split("\n", 1)
            header, lines = lines[0], lines[1:]

            block.append(textwrap.indent(textwrap.dedent(header), prefix=" " * 4))

            lines = "\n".join(lines)
            block.append(textwrap.indent(textwrap.dedent(lines), prefix=" " * 4))
            block.append("")
            return "\n".join(block)
Example 26
 def visit_If(self, node):
     ts = self.visit(node.test)
     ys = []
     for n in node.body:
         ys.append(self.visit(n))
     yes = indent('\n'.join(ys), '    ')
     no = []
     for n in node.orelse:
         no.append(self.visit(n))
     if no:
         n = indent('\n'.join(no), '    ')
         return 'if (%s) {\n%s\n}\nelse {\n%s\n}\n' % (ts, yes, n)
     else:
         return 'if (%s) {\n%s\n}\n' % (ts, yes)
Example 27
    def run_test(self, *, source, spec, expected):
        ir = compiler.compile_to_ir(source, self.schema)

        path_scope = textwrap.indent(ir.scope_tree.pformat(), '    ')
        expected_scope = textwrap.indent(
            textwrap.dedent(expected).strip(' \n'), '    ')

        if path_scope != expected_scope:
            diff = '\n'.join(difflib.context_diff(
                expected_scope.split('\n'), path_scope.split('\n')))

            self.fail(
                f'Scope tree does not match the expected result.'
                f'\nEXPECTED:\n{expected_scope}\nACTUAL:\n{path_scope}'
                f'\nDIFF:\n{diff}')
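
The normalization step, dedent the expected literal, strip blank edges, then re-indent both sides identically, can be reproduced on its own:

import textwrap

expected = '''
    root
      child
'''
normalized = textwrap.indent(textwrap.dedent(expected).strip(' \n'), '    ')
print(normalized)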
Example 28
    def check_pickleable(self, test, err):
        # Ensure that sys.exc_info() tuples are picklable. This displays a
        # clear multiprocessing.pool.RemoteTraceback generated in the child
        # process instead of a multiprocessing.pool.MaybeEncodingError, making
        # the root cause easier to figure out for users who aren't familiar
        # with the multiprocessing module. Since we're in a forked process,
        # our best chance to communicate with them is to print to stdout.
        try:
            pickle.dumps(err)
        except Exception as exc:
            original_exc_txt = repr(err[1])
            original_exc_txt = textwrap.fill(original_exc_txt, 75)
            original_exc_txt = textwrap.indent(original_exc_txt, '    ')
            pickle_exc_txt = repr(exc)
            pickle_exc_txt = textwrap.fill(pickle_exc_txt, 75)
            pickle_exc_txt = textwrap.indent(pickle_exc_txt, '    ')
            if tblib is None:
                print("""

{} failed:

{}

Unfortunately, tracebacks cannot be pickled, making it impossible for the
parallel test runner to handle this exception cleanly.

In order to see the traceback, you should install tblib:

    pip install tblib
""".format(test, original_exc_txt))
            else:
                print("""

{} failed:

{}

Unfortunately, the exception it raised cannot be pickled, making it impossible
for the parallel test runner to handle it cleanly.

Here's the error encountered while trying to pickle the exception:

{}

You should re-run this test without the --parallel option to reproduce the
failure and get a correct traceback.
""".format(test, original_exc_txt, pickle_exc_txt))
            raise
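
The fill-then-indent idiom used for both tracebacks above works on any long single-line repr:

import textwrap

msg = repr(ValueError('a fairly long error message ' * 4))
msg = textwrap.fill(msg, 75)           # re-wrap to 75 columns
print(textwrap.indent(msg, '    '))    # then indent the wrapped block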
Example 29
def mk_build(targets, rule, inputs, implicit=(), order=(), **kwargs):
    targets = targets if isinstance(targets, str) else ' '.join(targets)
    inputs = inputs if isinstance(inputs, str) else ' '.join(inputs)
    implicit = ' | %s' % ' '.join(implicit) if implicit else ''
    order = ' || %s' % ' '.join(order) if order else ''
    bindings = '\n' + textwrap.indent(mk_bindings(**kwargs), '  ') if len(kwargs) > 0 else ''
    return 'build %s: %s %s%s%s%s' % (targets, rule, inputs, implicit, order, bindings)
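
A runnable sketch of the same build-line assembly (mk_bindings here is a hypothetical stand-in for the helper the snippet relies on):

import textwrap

def mk_bindings(**kwargs):
    # hypothetical helper: one 'key = value' line per binding
    return '\n'.join('{} = {}'.format(k, v) for k, v in kwargs.items())

bindings = '\n' + textwrap.indent(mk_bindings(cflags='-O2'), '  ')
print('build out.o: cc in.c' + bindings)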
Example 30
def convert_docstring(docstring):
    indentation = re.search(r'^\s*', docstring).group()
    data = parse_docstring(docstring)
    lines = []
    lines.append(data['description'])
    if len(data['params']) > 0:
        lines.append("")
        lines.append("Args:")
    for param_name, param_dict in data['params'].items():
        if len(param_dict['type']) > 0:
            lines.append(format_hanging(
                "%s (%s): %s" % (
                    param_name, param_dict['type'], param_dict['doc'])))
        else:
            lines.append(format_hanging(
                "%s: %s" % (param_name, param_dict['doc'])))
    if len(data['returns']) > 0 or len(data['rtype']) > 0:
        lines.append("")
        lines.append("Returns:")
        if len(data['rtype']) > 0:
            lines.append(format_hanging(
                "%s: %s" % (data['rtype'], data['returns'])))
        else:
            lines.append(format_hanging("%s" % data['returns']))
    lines.append(data['epilogue'])
    res = (data['delimiters'][0] + "\n".join(lines) + data['delimiters'][1])
    while "\n\n\n" in res:
        res = res.replace("\n\n\n", "\n\n")
    res = textwrap.indent(res, indentation)
    return res
Example 31
def main():
    parser = argparse.ArgumentParser(
        description=
        "Collects all hive-managed resources from a cluster and outputs them as yaml documents"
    )
    parser.add_argument(
        "--path",
        "-p",
        required=True,
        help="Destination file path for artifact [required]",
    )
    parser.add_argument(
        "--output",
        "-o",
        required=True,
        choices=["yaml", "configmap"],
        help="Output format of managed resource list [required]",
    )
    parser.add_argument(
        "--all",
        action="store_true",
        help=
        "Output ALL managed resources. By default, it only outputs namespaces.",
    )
    parser.add_argument(
        "--namespace",
        "-ns",
        default="openshift-monitoring",
        help="The namespace for the generated ConfigMap",
    )
    parser.add_argument(
        "--name",
        default="managed-namespaces",
        help="The name of the generated ConfigMap",
    )
    arguments = parser.parse_args()
    try:
        subprocess.check_output(["oc", "whoami"], stderr=subprocess.DEVNULL)
    except subprocess.CalledProcessError:
        sys.exit(
            "Must be logged into an OSD cluster to gather list of managed resources"
        )
    print("Collecting a list of hive-managed resources from cluster...")
    if arguments.all:
        kinds = get_api_resource_kinds()
    else:
        kinds = ["namespaces"]
    managed_resource_dict = collect_managed_resources(kinds)
    managed_resource_yaml = yaml.dump(managed_resource_dict)
    with open(arguments.path, "w") as f:
        if arguments.output == "yaml":
            f.write(managed_resource_yaml)
        else:
            # indent the yaml document into the configmap template
            f.write(
                CONFIGMAP_TEMPLATE.format(
                    arguments.name,
                    arguments.namespace,
                    textwrap.indent(managed_resource_yaml, "    "),
                ))
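
The indentation step matters because YAML block scalars are whitespace-sensitive; a sketch with a made-up CONFIGMAP_TEMPLATE (the real template is not shown in the snippet):

import textwrap

CONFIGMAP_TEMPLATE = """\
apiVersion: v1
kind: ConfigMap
metadata:
  name: {}
  namespace: {}
data:
  resources.yaml: |
{}"""

doc = 'managed:\n- ns-a\n- ns-b\n'
print(CONFIGMAP_TEMPLATE.format('managed-namespaces', 'openshift-monitoring',
                                textwrap.indent(doc, '    ')))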
Example 32
def write_sym_rst(sym, out_dir):
    # Writes documentation for 'sym' to <out_dir>/CONFIG_<sym.name>.rst

    kconf = sym.kconfig

    # List all prompts on separate lines
    prompt_str = "\n\n".join("*{}*".format(node.prompt[0])
                             for node in sym.nodes if node.prompt) \
                 or "*(No prompt -- not directly user assignable.)*"

    # String with the RST for the symbol page
    #
    # - :orphan: suppresses warnings for the symbol RST files not being
    #   included in any toctree
    #
    # - '.. title::' sets the title of the document (e.g. <title>). This seems
    #   to be poorly documented at the moment.
    sym_rst = ":orphan:\n\n" \
              ".. title:: {0}\n\n" \
              ".. option:: CONFIG_{0}\n\n" \
              "{1}\n\n" \
              "Type: ``{2}``\n\n" \
              .format(sym.name, prompt_str, kconfiglib.TYPE_TO_STR[sym.type])

    # Symbols with multiple definitions can have multiple help texts
    for node in sym.nodes:
        if node.help is not None:
            sym_rst += "Help\n" \
                       "====\n\n" \
                       "{}\n\n" \
                       .format(node.help)

    if sym.direct_dep is not kconf.y:
        sym_rst += "Direct dependencies\n" \
                   "===================\n\n" \
                   "{}\n\n" \
                   "*(Includes any dependencies from if's and menus.)*\n\n" \
                   .format(kconfiglib.expr_str(sym.direct_dep))

    if sym.defaults:
        sym_rst += "Defaults\n" \
                   "========\n\n"

        for value, cond in sym.defaults:
            default_str = kconfiglib.expr_str(value)
            if cond is not kconf.y:
                default_str += " if " + kconfiglib.expr_str(cond)
            sym_rst += " - {}\n".format(default_str)

        sym_rst += "\n"

    def add_select_imply_rst(type_str, expr):
        # Writes a link for each selecting symbol (if 'expr' is sym.rev_dep) or
        # each implying symbol (if 'expr' is sym.weak_rev_dep). Also adds a
        # heading at the top, derived from type_str ("select"/"imply").

        nonlocal sym_rst

        heading = "Symbols that ``{}`` this symbol".format(type_str)
        sym_rst += "{}\n{}\n\n".format(heading, len(heading)*"=")

        # The reverse dependencies from each select/imply are ORed together
        for select in kconfiglib.split_expr(expr, kconfiglib.OR):
            # - 'select/imply A if B' turns into A && B
            # - 'select/imply A' just turns into A
            #
            # In both cases, we can split on AND and pick the first
            # operand.
            sym_rst += " - :option:`CONFIG_{}`\n".format(
                kconfiglib.split_expr(select, kconfiglib.AND)[0].name)

        sym_rst += "\n"

    if sym.rev_dep is not kconf.n:
        add_select_imply_rst("select", sym.rev_dep)

    if sym.weak_rev_dep is not kconf.n:
        add_select_imply_rst("imply", sym.weak_rev_dep)

    def menu_path(node):
        path = ""

        menu = node.parent
        while menu is not kconf.top_node:
            # Fancy Unicode arrow. Added in '93, so ought to be pretty
            # safe.
            path = " → " + menu.prompt[0] + path
            menu = menu.parent

        return "(top menu)" + path

    heading = "Kconfig definition"
    if len(sym.nodes) > 1:
        heading += "s"
    sym_rst += "{}\n{}\n\n".format(heading, len(heading)*"=")

    sym_rst += "\n\n".join(
        "At ``{}:{}``, in menu ``{}``:\n\n"
        ".. parsed-literal::\n\n"
        "{}".format(node.filename, node.linenr, menu_path(node),
                    textwrap.indent(str(node), " "*4))
        for node in sym.nodes)

    sym_rst += "\n\n*(Definitions include propagated dependencies, " \
               "including from if's and menus.)*"

    write_if_updated(os.path.join(out_dir, "CONFIG_{}.rst".format(sym.name)),
                     sym_rst)
Example 33
    def wiki_file_writer(elem, myFile, prefix):
        global instance_id
        t = '\t'

        Instance = t + t + "<Instance "

        for ch_elem in elem:

            if (('id' in ch_elem.tag) and ('parentid' not in ch_elem.tag)):
                Instance = Instance + "Id=" + '"' + str(
                    wikiConverter.instance_id
                ) + '"' + " InstanceType=" + '"' + "Revision/Wiki" + '"' + " RevisionId=" + '"' + str(
                    ch_elem.text) + '"' + ">\n"
                myFile.write(Instance)
                '''
                RevisionId = t+t+t+"<RevisionId>"+ch_elem.text+"</RevisionId>\n"
                myFile.write(RevisionId)
                '''
            '''
            if(ch_elem.tag==prefix+'parentid'):
                ParentId = t+t+t+"<ParentId>"+ch_elem.text+"</ParentId>\n" 
                myFile.write(ParentId)
            '''
            '''
            Timestamp Information
            '''
            if ('timestamp' in ch_elem.tag):
                '''
                if(f_p!=1):
                    Instance = Instance+" InstanceType= "+'"'+"wiki/text"+'"'+">\n"
                    myFile.write(Instance)
                '''
                Timestamp = t + t + t + "<TimeStamp>\n"
                myFile.write(Timestamp)
                CreationDate = t + t + t + t + "<CreationDate>" + ch_elem.text[:
                                                                               -1] + '.0' + "</CreationDate>\n"
                myFile.write(CreationDate)
                Timestamp = t + t + t + "</TimeStamp>\n"
                myFile.write(Timestamp)
            '''
            Contributors information
            '''
            if ('contributor' in ch_elem.tag):
                Contributors = t + t + t + "<Contributors>\n"
                myFile.write(Contributors)
                for contrib in ch_elem:
                    if ('ip' in contrib.tag):
                        LastEditorUserName = t + t + t + t + "<OwnerUserName>" + html.escape(
                            contrib.text) + "</OwnerUserName>\n"
                        myFile.write(LastEditorUserName)
                    else:
                        if ('username' in contrib.tag):
                            try:
                                LastEditorUserName = t + t + t + t + "<OwnerUserName>" + html.escape(
                                    contrib.text) + "</OwnerUserName>\n"
                            except:
                                LastEditorUserName = t + t + t + t + "<OwnerUserName>None</OwnerUserName>\n"
                            myFile.write(LastEditorUserName)
                        if (('id' in contrib.tag)
                                and ('parentid' not in contrib.tag)):
                            LastEditorUserId = t + t + t + t + "<OwnerUserId>" + contrib.text + "</OwnerUserId>\n"
                            myFile.write(LastEditorUserId)

                Contributors = t + t + t + "</Contributors>\n"
                myFile.write(Contributors)
            '''
            Body/Text Information
            '''
            if ('text' in ch_elem.tag):
                Body = t + t + t + "<Body>\n"
                myFile.write(Body)
                if (ch_elem.attrib.get('bytes') != None):
                    text_field = t + t + t + t + "<Text Type=" + '"' + "wiki/text" + '"' + " Bytes=" + '"' + ch_elem.attrib[
                        'bytes'] + '">\n'
                elif (ch_elem.text != None):
                    text_field = t + t + t + t + "<Text Type=" + '"' + "wiki/text" + '"' + " Bytes=" + '"' + str(
                        len(ch_elem.text)) + '">\n'
                else:
                    text_field = t + t + t + t + "<Text Type=" + '"' + "wiki/text" + '"' + " Bytes=" + '"' + str(
                        0) + '">\n'
                myFile.write(text_field)
                if (ch_elem.text == None):
                    text_body = ""
                else:

                    text_body = textwrap.indent(text=ch_elem.text,
                                                prefix=t + t + t + t + t)
                    text_body = html.escape(text_body)
                Body_text = text_body + "\n"
                myFile.write(Body_text)
                text_field = t + t + t + t + "</Text>\n"
                myFile.write(text_field)
                Body = t + t + t + "</Body>\n"
                myFile.write(Body)

            if ('comment' in ch_elem.tag):
                Edit = t + t + t + "<EditDetails>\n"
                myFile.write(Edit)
                if (ch_elem.text == None):
                    text_body = ""
                else:
                    text_body = textwrap.indent(text=ch_elem.text,
                                                prefix=t + t + t + t + t)
                    text_body = html.escape(text_body)

                EditType = t + t + t + t + "<EditType>\n" + text_body + "\n" + t + t + t + t + "</EditType>\n"
                #Body_text = text_body+"\n"
                myFile.write(EditType)

                Edit = t + t + t + "</EditDetails>\n"
                myFile.write(Edit)

            if ('sha1' in ch_elem.tag):
                sha = ch_elem.text
                if sha is not None:
                    shaText = t + t + t + '<Knowl key="sha">' + sha + '</Knowl>\n'
                    myFile.write(shaText)
                else:
                    shaText = ''

        Instance = t + t + "</Instance>\n"
        myFile.write(Instance)
        wikiConverter.instance_id += 1
Example 34
def run(options):
    datadir = 'meson-private'
    if options.builddir is not None:
        datadir = os.path.join(options.builddir, datadir)
    if not os.path.isdir(datadir):
        print('Current directory is not a build dir. Please specify it or '
              'change the working directory to it.')
        return 1

    all_backends = options.all

    print('This is a dump of the internal unstable cache of meson. This is for debugging only.')
    print('Do NOT parse, this will change from version to version in incompatible ways')
    print('')

    coredata = cdata.load(options.builddir)
    backend = coredata.get_option(OptionKey('backend'))
    for k, v in sorted(coredata.__dict__.items()):
        if k in ('backend_options', 'base_options', 'builtins', 'compiler_options', 'user_options'):
            # use `meson configure` to view these
            pass
        elif k in ['install_guid', 'test_guid', 'regen_guid']:
            if all_backends or backend.startswith('vs'):
                print(k + ': ' + v)
        elif k == 'target_guids':
            if all_backends or backend.startswith('vs'):
                print(k + ':')
                dump_guids(v)
        elif k in ['lang_guids']:
            if all_backends or backend.startswith('vs') or backend == 'xcode':
                print(k + ':')
                dump_guids(v)
        elif k == 'meson_command':
            if all_backends or backend.startswith('vs'):
                print('Meson command used in build file regeneration: ' + ' '.join(v))
        elif k == 'pkgconf_envvar':
            print('Last seen PKGCONFIG environment variable value: ' + v)
        elif k == 'version':
            print('Meson version: ' + v)
        elif k == 'cross_files':
            if v:
                print('Cross File: ' + ' '.join(v))
        elif k == 'config_files':
            if v:
                print('Native File: ' + ' '.join(v))
        elif k == 'compilers':
            for for_machine in MachineChoice:
                print('Cached {} machine compilers:'.format(
                    for_machine.get_lower_case_name()))
                dump_compilers(v[for_machine])
        elif k == 'deps':
            def print_dep(dep_key, dep):
                print('  ' + dep_key[0][1] + ": ")
                print('      compile args: ' + repr(dep.get_compile_args()))
                print('      link args: ' + repr(dep.get_link_args()))
                if dep.get_sources():
                    print('      sources: ' + repr(dep.get_sources()))
                print('      version: ' + repr(dep.get_version()))

            for for_machine in iter(MachineChoice):
                items_list = list(sorted(v[for_machine].items()))
                if items_list:
                    print(f'Cached dependencies for {for_machine.get_lower_case_name()} machine')
                    for dep_key, deps in items_list:
                        for dep in deps:
                            print_dep(dep_key, dep)
        else:
            print(k + ':')
            print(textwrap.indent(pprint.pformat(v), '  '))
Example 35
def create_breathe_files(headers, srcdir, component_name, sphinx_directory,
                         doxygen_config_filename):
    """Create rst files for sphinx from xml (doxygen) outputs generated from headers.

    Parameters
    ----------

    headers : list (cmake like)
         headers files to parse
    srcdir : string
        absolute path to c/c++ sources (CMAKE_SOURCE_DIR)
    component_name : string
         component (numerics, kernel, ...) of interest
    sphinx_directory : string
        directory where rst files will be written
    doxygen_config_filename : string
         name (full path) of the doxygen configuration file

    Notes:
    * for each header, rst files (class, struct, file and source codes) will be generated
    * three other 'main' rst files will be produced :
       * breathe_api.rst, with the toctree for all classes and structs
       * files_list.rst, with the toctree for all files documentation
       * sources_list.rst with the toctree for all program listings

    """

    # Get all headers for the current component
    headers = bt.parse_cmake_list(headers)
    rst_files = []
    rst_programs = []
    # Parse doxygen config
    doxyconf = parse_doxygen_config(doxygen_config_filename)
    xmlconf = {}
    sphinx_directory = os.path.join(sphinx_directory, component_name)
    if not os.path.exists(sphinx_directory):
        os.makedirs(sphinx_directory)

    xmlconf['CASE_SENSE_NAMES'] = doxyconf['CASE_SENSE_NAMES'].find('YES') > -1
    xmlconf['XML_OUTPUT'] = os.path.join(doxyconf['OUTPUT_DIRECTORY'].lstrip(),
                                         doxyconf['XML_OUTPUT'].lstrip())

    all_index = {}
    # -- Create rst files for classes, structs and files found in xml directory --
    for hfile in headers:
        xml2rst(hfile, srcdir, component_name, sphinx_directory, xmlconf,
                all_index)

    # -- Create rst files to collect list of classes and files (i.e. files just created above) --
    class_and_struct_files = glob.glob(
        os.path.join(sphinx_directory, 'class*.rst'))
    class_and_struct_files += glob.glob(
        os.path.join(sphinx_directory, 'struct*.rst'))
    class_and_struct_files.sort()
    pgm_files = glob.glob(os.path.join(sphinx_directory, 'pgm_*.rst'))
    pgm_files.sort()
    rst_files = glob.glob(os.path.join(sphinx_directory, 'file_*.rst'))
    rst_files.sort()
    all_files = class_and_struct_files + rst_files
    all_files.sort()
    # -- Name of the main rst files for the current component --
    # usually : docs/sphinx/reference/cpp/component_name/autodoc_all.rst
    outputname = os.path.join(sphinx_directory, 'autodoc_all.rst')
    #title = component_name + ' component\n'
    #title += len(title) * '=' + '\n\n'
    indent = 4 * ' '
    basename = '/reference/cpp/' + component_name + '/'
    with open(outputname, 'wt') as out:
        opt = ':maxdepth: 4\n'
        for f in all_files:
            name = os.path.basename(f).split('.')[0]
            if name.find('class') > -1:
                shorttitle = name.split('class')[-1]
                text = '* :class:`' + shorttitle + '` : '
            elif name.find('struct') > -1:
                shorttitle = name.split('struct')[-1]
                text = '* :class:`' + shorttitle + '` : '
            elif name.find('file_') > -1:
                shorttitle = name.split('file_')[-1].replace('_', '.')
                name = basename + name
                text = '* :doc:`' + shorttitle + '<' + name + '>` : '
            try:
                text += all_index[shorttitle] + '\n'
            except KeyError:
                text += '\n'
            name = basename + name
            #gen = textwrap.indent(text, 4 * ' ')
            out.write(text)
        out.write('\n\n')

    # Classes and structs
    outputname = os.path.join(sphinx_directory, 'autodoc_classes.rst')
    subtitle = 'Classes and structs'
    subtitle += '\n' + len(subtitle) * '-' + '\n\n'
    with open(outputname, 'wt') as out:
        #out.write(title)
        out.write(subtitle)
        #out.write('.. toctree::\n')
        opt = ':maxdepth: 4\n'
        #out.write(textwrap.indent(opt, indent))
        #out.write('\n')
        for f in class_and_struct_files:
            name = os.path.basename(f).split('.')[0]
            if name.find('class') > -1:
                shorttitle = name.split('class')[-1]
            elif name.find('struct') > -1:
                shorttitle = name.split('struct')[-1]
            text = '* :class:`' + shorttitle + '` : '
            try:
                text += all_index[shorttitle] + '\n'
            except KeyError:
                text += '\n'
            name = basename + name
            #gen = textwrap.indent(text, 4 * ' ')
            out.write(text)
        out.write('\n\n')
    # Files doc
    outputname = os.path.join(sphinx_directory, 'autodoc_files.rst')
    subtitle = 'Files documentation\n'
    subtitle += len(subtitle) * '-' + '\n\n'
    with open(outputname, 'wt') as out:
        out.write(subtitle)
        #out.write('.. toctree::\n    :maxdepth: 2\n\n')
        for f in rst_files:
            name = os.path.basename(f).split('.')[0]
            shorttitle = name.split('file_')[-1].replace('_', '.')
            name = basename + name
            text = '* :doc:`' + shorttitle + '<' + name + '>` : '
            try:
                text += all_index[shorttitle] + '\n'
            except KeyError:
                text += '\n'
            #gen = textwrap.indent(shorttitle + '<' + name + '>\n', 4 * ' ')
            out.write(text)
        out.write('\n\n')
    # Programs listings
    outputname = os.path.join(sphinx_directory, 'autodoc_pgm.rst')
    label = '.. _' + component_name + '_pgm_listings:\n\n'
    title = component_name.title() + ' programs listings\n'
    title += len(title) * '-' + '\n\n'
    title = label + title
    with open(outputname, 'wt') as out:
        out.write(title)
        out.write('.. toctree::\n    :maxdepth: 2\n\n')
        for f in pgm_files:
            name = os.path.basename(f).split('.')[0]
            shorttitle = f.split(sphinx_directory)[-1].split('.')[0]
            splits = shorttitle.split('_')
            shorttitle = '/'.join(splits[2:-1]) + '.' + splits[-1]
            name = basename + name
            gen = textwrap.indent(shorttitle + '<' + name + '>\n', 4 * ' ')
            out.write(gen)
Example 36
    def process_solutions(self, doctree: nodes.document, src: str) -> None:
        """Handle any solutions contained in the document.

        This ensures that a ``*.py`` file is created in the ``resources`` directory
        containing the actual solution.

        It then also rewrites the given doctree to output a pair of code cells in
        the resulting notebook. The first is a prompt for the user to input their
        solution and the second contains a :magic:`ipython:load` declaration to
        give the user the option to load in the solution if they wish to see it.

        Parameters
        ----------
        doctree:
           The doctree to process
        src:
           The path to the file containing the document being processed
        """

        docpath = pathlib.Path(src)
        logger.debug("[tutorial]: processing solutions for: %s", docpath)
        basename = f"{docpath.stem}-soln"

        for idx, soln in enumerate(doctree.traverse(condition=solution)):

            name = f"{basename}-{idx+1:02d}.py"
            destination = pathlib.Path("resources", docpath.with_suffix(""), name)
            refuri = relative_uri(src, str(destination))

            # Convert the solution to a valid Python document that can be executed.
            document = new_document("<solution>")
            document += soln

            # Rather than go through the trouble of maintaining two document
            # translators, one for notebooks and another for Python files, let's
            # just use the notebook translator and do some post-processing on
            # the result - much easier.
            translator = NotebookTranslator(document)
            document.walkabout(translator)
            notebook = translator.asnotebook()

            blocks = []
            for cell in notebook.cells:
                source = cell.source

                # Comment out the lines containing markdown.
                if cell.cell_type == "markdown":
                    source = textwrap.indent(source, "# ")

                blocks.append(source)

            self.resources[str(destination)] = ("create", "\n".join(blocks))

            # TODO: Expose config options for these
            # TODO: Translations?
            your_soln = nodes.literal_block(
                "", "# Write your solution here...\n", language="python"
            )
            load_soln = nodes.literal_block(
                "",
                f"# Execute this cell to load the example solution\n%load {refuri}\n",
                language="python",
            )

            # Replace the actual solution with the 2 cells defined above.
            soln.children = [your_soln, load_soln]
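
The markdown-to-comment step above is plain textwrap.indent with a "# " prefix; a minimal, self-contained sketch of the same idea (the cell contents here are invented):

import textwrap

markdown_source = "## Exercise\nWrite a function that reverses a string."
# textwrap.indent prefixes every line containing non-whitespace with "# "
commented = textwrap.indent(markdown_source, "# ")
print(commented)
# # ## Exercise
# # Write a function that reverses a string.
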
Esempio n. 37
0
 def print_new_expected(s: str) -> None:
     print(f'{"":>12}"""\\', end="")
     for l in s.splitlines(keepends=False):
         print("\n" + textwrap.indent(repr(l)[1:-1], " " * 12), end="")
     print('"""\n')
Esempio n. 38
0
def gen_ctypes_wrappers(fcninfo, ofname):

    file_header = file_docstring.format(
        datetime=time.strftime("%d %b %Y %H:%M:%S", time.localtime()))

    contents = ''
    function_pointer_string = ''
    namedtuples_header = ' ' * 8 + '# Named tuples to contain the outputs of DLL calls\n'

    for fcn, data in sorted(six.iteritems(fcninfo)):

        function_pointer_string += ' ' * 8 + 'self._{name:s} = self._getfcn(self.dll, \'{name:s}\')\n'.format(
            name=fcn)

        i, o = [], []
        for arg in data['argnames']:
            if arg in data['input_args']:
                i.append(arg)
            elif arg in data['output_args']:
                o.append(arg)
            elif arg in data['inout_args']:
                i.append(arg)
                o.append(arg)

        proto = ','.join(o) + ' = ' + fcn + '(' + ','.join(i) + ')'
        headline = 'def {fname:s}(self,{inargs:s}):\n'.format(
            inargs=','.join(i), fname=fcn)
        body = '\"\"\"\n{proto:s}\n\"\"\"\n'.format(proto=proto)

        def gen_val(typ, dim, default=''):
            if typ == 'int' and dim == 0:
                return 'ct.c_long({default:s})'.format(default=default)
            elif typ == 'int' and dim > 0:
                return '({dim:d}*ct.c_long)()'.format(dim=dim)
            elif typ == 'double' and dim == 0:
                return 'ct.c_double({default:s})'.format(default=default)
            elif typ == 'double' and dim < 0:
                return '(len({default:s})*ct.c_double)(*{default:s})'.format(
                    default=default)
            elif typ == 'double' and dim > 0:
                return '({dim:d}*ct.c_double)()'.format(dim=dim)
            elif typ == 'char' and dim != 0 and default:
                return 'ct.create_string_buffer({default:s}.encode(\'utf-8\'),{dim:d})'.format(
                    default=default, dim=abs(dim))
            elif typ == 'char' and dim != 0 and not default:
                return 'ct.create_string_buffer({dim:d})'.format(dim=abs(dim))
            else:
                raise KeyError('Invalid type/dim pair:' + str((typ, dim)))

        # Add the type definitions
        for arg in data['argnames']:
            if arg in data['input_args'] or arg in data['inout_args']:
                typ, dim = data['input_args'].get(
                    arg, None) or data['inout_args'].get(arg, None)
                if dim == '*':
                    body += '{name:s} = '.format(
                        name=arg
                    ) + 'ct.create_string_buffer({default:s},len{default:s})'.format(
                        default=arg) + '\n'
                else:
                    body += '{name:s} = '.format(name=arg) + gen_val(
                        typ, dim, default=arg) + '\n'
            elif arg in data['output_args']:
                typ, dim = data['output_args'][arg]
                body += '{name:s} = '.format(name=arg) + gen_val(
                    typ, dim, default='') + '\n'

        # Add the function call
        string_lengths = []
        arg_strings = []
        for arg in data['argnames']:
            typ, dim = data['input_args'].get(
                arg, None) or data['inout_args'].get(
                    arg, None) or data['output_args'].get(arg, None)
            if dim == 0 and typ in ['int', 'double']:
                arg_strings.append('ct.byref({arg:s})'.format(arg=arg))
            else:
                arg_strings.append(arg)
            if typ == 'char':
                if dim == '*':
                    string_lengths.append(str(10000))
                else:
                    string_lengths.append(str(dim))

        body += '\nif self._{fcnname:s} is None: raise ValueError("The function {fcnname:s} could not be loaded from the shared library.") \n'.format(
            fcnname=fcn)
        if len(string_lengths) == 0:
            string_args = ''
        else:
            string_args = ',' + ','.join(string_lengths)

        body += 'self._{fcnname:s}({args:s}{string_args:s})\n'.format(
            fcnname=fcn, args=','.join(arg_strings),
            string_args=string_args) + '\n'

        # Add the return call
        arg_strings = []
        arg_names = []
        for arg in data['argnames']:
            if arg in data['output_args'] or arg in data['inout_args']:
                typ, dim = data['output_args'].get(
                    arg, None) or data['inout_args'].get(arg, None)
                arg_names.append('"' + arg + '"')
                if dim == 0 and typ in ['int', 'double']:
                    arg_strings.append('{arg:s}.value'.format(arg=arg))
                elif dim > 0 and typ in ['int', 'double']:
                    arg_strings.append('list({arg:s})'.format(arg=arg))
                elif typ == 'char':
                    arg_strings.append('trim({arg:s}.raw)'.format(arg=arg))
                else:
                    arg_strings.append(arg)
        if len(arg_strings) > 1:
            namedtuples_header += ' ' * 8 + 'self._{fcn:s}output_tuple = namedtuple(\'{fcn:s}output\',[{argnames:s}])\n'.format(
                fcn=fcn, argnames=','.join(arg_names))
            body += 'return self._{fcn:s}output_tuple({args:s})'.format(
                fcn=fcn, args=','.join(arg_strings)) + '\n\n'
        elif len(arg_strings) == 1:
            body += 'return {arg:s}'.format(arg=arg_strings[0]) + '\n\n'
        else:
            body += 'return\n\n'

        contents += textwrap.indent(headline, ' ' * 4) + textwrap.indent(
            body, ' ' * 8)

    # Write it into the output file
    with open(ofname, 'w') as fp:
        fp.write(file_header)
        fp.write(wrapper_header)
        fp.write(namedtuples_header + '\n')
        fp.write(function_pointer_string + '\n')
        fp.write(contents)
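
The closing contents += line is the core trick: indent the generated signature by four spaces and its body by eight, so the methods nest correctly under the wrapper class. The move in isolation (names invented):

import textwrap

headline = "def area(self, w, h):\n"
body = '"""w*h = area(w,h)"""\nreturn w * h\n'
# One indent level (4) for the signature, two (8) for the body
method = textwrap.indent(headline, " " * 4) + textwrap.indent(body, " " * 8)
print("class Shapes(object):\n" + method)
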
Esempio n. 39
0
def codegen_construct(class_header, class_name, class_name_elaborated, fields, has_post_ctor, meth=False):
    body = []
    for kind, field in fields:
        name = field.spelling
        spec = parse_codegen_spec(kind)
        if spec.is_meta:
            tpl = construct_meta_tpl
        if spec.is_plain:
            if spec.star:
                tpl = construct_plain_star_tpl
            else:
                tpl = construct_plain_tpl
        if spec.is_list:
            if spec.star:
                tpl = construct_list_star_tpl
            else:
                tpl = construct_list_tpl
        if spec.is_dlist:
            if spec.star:
                tpl = construct_dlist_star_tpl
            else:
                tpl = construct_dlist_tpl
        if spec.is_set:
            tpl = construct_set_tpl
        if spec.is_dict:
            tpl = construct_dict_tpl

        if spec.is_dict:
            key_type = spec.element_type[0]
            value_type = spec.element_type[1]
        else:
            key_type = None
            value_type = None

        body.append(
            tpl.format(name=name,
                       element_type=spec.element_type,
                       key_type=key_type,
                       value_type=value_type,
                       deref=spec.deref))

    if meth:
        function_tpl = construct_meth_tpl
        function_body_indent = 4
        if has_post_ctor:
            post_ctor = post_construct_meth_tpl
        else:
            post_ctor = ''
    else:
        function_tpl = construct_tpl
        function_body_indent = 0
        if has_post_ctor:
            post_ctor = post_construct_tpl
        else:
            post_ctor = ''

    code = function_tpl.format(class_header=class_header,
                               class_name=class_name,
                               class_name_elaborated=class_name_elaborated,
                               construct_body=textwrap.indent(''.join(body), ' ' * function_body_indent).strip(),
                               post_construct=post_ctor)
    return code
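
The generator's last step splices the accumulated field statements into a function template via textwrap.indent(...).strip(); a reduced sketch with an invented template (the real construct_*_tpl strings are defined elsewhere in this module):

import textwrap

construct_tpl = "void construct_{class_name}({class_name} *obj) {{\n    {construct_body}\n}}\n"
body = ["obj->x = 0;\n", "obj->y = 0;\n"]
code = construct_tpl.format(
    class_name="Point",
    # indent the joined statements, then strip so the first line
    # aligns with the indentation already present in the template
    construct_body=textwrap.indent("".join(body), " " * 4).strip())
print(code)
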
Esempio n. 40
0
    def create_dockerfile_object(self, directory: str) -> str:
        """
        Writes a dockerfile to the provided directory using the specified
        arguments on this Docker storage object.

        In order for the docker python library to build a container it needs a
        Dockerfile that it can use to define the container. This function takes the
        specified arguments then writes them to a temporary file called Dockerfile.

        *Note*: if `files` are added to this container, they will be copied to this directory as well.

        Args:
            - directory (str, optional): A directory where the Dockerfile will be created,
                if no directory is specified is will be created in the current working directory

        Returns:
            - str: the absolute file path to the Dockerfile
        """
        # Generate single pip install command for python dependencies
        pip_installs = "RUN pip install "
        if self.python_dependencies:
            for dependency in self.python_dependencies:
                pip_installs += "{} ".format(dependency)

        # Generate ENV variables to load into the image
        env_vars = ""
        if self.env_vars:
            white_space = " " * 20
            env_vars = "ENV " + " \ \n{}".format(white_space).join(
                "{k}={v}".format(k=k, v=v) for k, v in self.env_vars.items())

        # Copy user specified files into the image
        copy_files = ""
        if self.files:
            for src, dest in self.files.items():
                fname = os.path.basename(src)
                full_fname = os.path.join(directory, fname)
                if os.path.exists(full_fname) and not filecmp.cmp(
                        src, full_fname):
                    raise ValueError(
                        "File {fname} already exists in {directory}".format(
                            fname=full_fname, directory=directory))
                else:
                    shutil.copy2(src, full_fname)
                copy_files += "COPY {fname} {dest}\n".format(
                    fname=full_fname if self.dockerfile else fname, dest=dest)

        # Write all flows to file and load into the image
        copy_flows = ""
        for flow_name, flow_location in self.flows.items():
            clean_name = slugify(flow_name)
            flow_path = os.path.join(directory, "{}.flow".format(clean_name))
            with open(flow_path, "wb") as f:
                cloudpickle.dump(self._flows[flow_name], f)
            copy_flows += "COPY {source} {dest}\n".format(
                source=flow_path
                if self.dockerfile else "{}.flow".format(clean_name),
                dest=flow_location,
            )

        # Write all extra commands that should be run in the image
        extra_commands = ""
        for cmd in self.extra_commands:
            extra_commands += "RUN {}\n".format(cmd)

        # Write a healthcheck script into the image
        with open(os.path.join(os.path.dirname(__file__), "_healthcheck.py"),
                  "r") as healthscript:
            healthcheck = healthscript.read()

        healthcheck_loc = os.path.join(directory, "healthcheck.py")
        with open(healthcheck_loc, "w") as health_file:
            health_file.write(healthcheck)

        if self.dockerfile:
            with open(self.dockerfile, "r") as contents:
                base_commands = textwrap.indent("\n" + contents.read(),
                                                prefix=" " * 16)
        else:
            base_commands = "FROM {base_image}".format(
                base_image=self.base_image)

        file_contents = textwrap.dedent("""\
            {base_commands}

            RUN pip install pip --upgrade
            {extra_commands}
            {pip_installs}

            RUN mkdir -p /root/.prefect/
            {copy_flows}
            COPY {healthcheck_loc} /root/.prefect/healthcheck.py
            {copy_files}

            {env_vars}

            RUN python /root/.prefect/healthcheck.py '[{flow_file_paths}]' '{python_version}'
            """.format(
            base_commands=base_commands,
            extra_commands=extra_commands,
            pip_installs=pip_installs,
            copy_flows=copy_flows,
            healthcheck_loc=healthcheck_loc
            if self.dockerfile else "healthcheck.py",
            copy_files=copy_files,
            env_vars=env_vars,
            flow_file_paths=", ".join(
                ['"{}"'.format(k) for k in self.flows.values()]),
            python_version=(sys.version_info.major, sys.version_info.minor),
        ))

        file_contents = "\n".join(line.lstrip()
                                  for line in file_contents.split("\n"))
        dockerfile_path = os.path.join(directory, "Dockerfile")
        with open(dockerfile_path, "w+") as dockerfile:
            dockerfile.write(file_contents)
        return dockerfile_path
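
The indent(prefix=' ' * 16) on a user-supplied Dockerfile exists so that the dedent of the triple-quoted template and the final per-line lstrip treat the injected text and the template text uniformly. A toy reproduction of the interplay, independent of the storage object above:

import textwrap

base_commands = textwrap.indent("\nFROM python:3.9\nLABEL stage=base", prefix=" " * 16)
contents = textwrap.dedent("""\
    {base_commands}

    RUN pip install --upgrade pip
    """.format(base_commands=base_commands))
# lstrip each line, as the method above does, to flatten any residual indent
print("\n".join(line.lstrip() for line in contents.split("\n")))
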
Esempio n. 41
0
def _clean_output(output):
    output = output.replace("{", "{{").replace("}", "}}")
    return textwrap.indent(output, ' |  ')
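
The brace doubling pays off when the cleaned text is later embedded in a template that is itself passed through str.format; without it, the literal { } would be misread as placeholders:

template = "Output:\n" + _clean_output('data = {"a": 1}') + "\nstatus: {status}"
print(template.format(status="ok"))
# Output:
#  |  data = {"a": 1}
# status: ok
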
Esempio n. 42
0
 def append(self, txt: str, sep='\n') -> 'MultiLineFormatter':
     self.text += sep
     if txt:
         self.text += indent(dedent(txt), self.current_indent)
     return self
Esempio n. 43
0
def show_not_passed(e):
    print('\033[91m', "    Not passed", '\033[0m')
    print(textwrap.indent(str(e), "    "))
    rst.write("""\

:drc_rule:`{rt.template}`
{hd}

""".format(rt=rt, hd='-'*(len(rt.template)+len(':drc_rule:``'))))

    if rt.notes:
        rst.write("""\
.. note::

{}


""".format(textwrap.indent(rt.notes, prefix='    ')))

    headers = ('Name', 'Description', 'Flags', 'Value')
    headers_fmt = (':drc_rule:`Name`', 'Description', ':drc_flag:`Flags`', 'Value')

    rst.write("""\
.. list-table:: {rt.description}
   :header-rows: 1
   :stub-columns: 1
   :widths: 10 75 5 10

   * - {h}
""".format(rt=rt,h='\n     - '.join(headers_fmt)))


    def _fill_text(self, text, width, indent):
        return "\n".join([textwrap.fill(line, width)
                          for line in textwrap.indent(textwrap.dedent(text),
                                                      indent).splitlines()])
Esempio n. 46
0
async def _eval(ctx, *, body: str):
    '''Run python scripts on discord!'''
    await to_code_block(ctx, body)
    env = {
        'bot': bot,
        'ctx': ctx,
        'channel': ctx.message.channel,
        'author': ctx.message.author,
        'server': ctx.message.server,
        'message': ctx.message,
    }

    env.update(globals())

    body = cleanup_code(content=body)
    stdout = io.StringIO()

    to_compile = 'async def func():\n%s' % textwrap.indent(body, '  ')

    try:
        exec(to_compile, env)
    except SyntaxError as e:
        return await bot.say(get_syntax_error(e))

    func = env['func']
    try:
        with redirect_stdout(stdout):
            ret = await func()
    except Exception as e:
        value = stdout.getvalue()
        x = await bot.say('```py\n{}{}\n```'.format(value,
                                                    traceback.format_exc()))
        try:
            await bot.add_reaction(x, '\U0001f534')
        except Exception:
            pass
    else:
        value = stdout.getvalue()

        if TOKEN in value:
            value = value.replace(TOKEN, "[EXPUNGED]")

        if ret is None:
            if value:
                try:
                    x = await bot.say('```py\n%s\n```' % value)
                except Exception:
                    x = await bot.say('```py\n\'Result was too long.\'```')
                try:
                    await bot.add_reaction(x, '\U0001f535')
                except Exception:
                    pass
            else:
                try:
                    await bot.add_reaction(ctx.message, '\U0001f535')
                except Exception:
                    pass
        else:
            try:
                x = await bot.say('```py\n%s%s\n```' % (value, ret))
            except Exception:
                x = await bot.say('```py\n\'Result was too long.\'```')
            try:
                await bot.add_reaction(x, '\U0001f535')
            except Exception:
                pass
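
The same wrap-and-exec move, stripped of the Discord plumbing: indenting the pasted body by two spaces turns it into the suite of a generated coroutine, which exec then compiles into an awaitable (the body string here is invented):

import asyncio
import textwrap

body = "x = 6 * 7\nreturn x"
to_compile = "async def func():\n%s" % textwrap.indent(body, "  ")
env = {}
exec(to_compile, env)
print(asyncio.run(env["func"]()))  # 42
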
Esempio n. 47
0
    def AddScripts(self, codeArg, outputArg, mixerLevelArg):
        listCheckLineWhitoutSpace       = []
        listCheckLine                   = []
        countBackSlash                  = 0
        countScriptsAdded               = 0
        countLineAdded                  = 0
        countLine                       = 0
        checkLine                       = 0
        checkQuotePassing               = 0
        checkCharPassing                = 0
        countRecursFiles                = 0
        checkParenthesesCharPassing     = 0
        checkBracketsCharPassing        = 0
        checkBracesCharPassing          = 0
        
        if codeArg == "python":
            detectFile  = "py"
            blockDirs   = r"__pycache__"

        recursFiles = [f for f in glob.glob("{0}{1}**{1}*.{2}".format(outputArg, self.utils.Platform(), detectFile), recursive=True)]

        # -- Count the number of lines that will be checked before filling -- #
        for file in recursFiles:
            if re.match(blockDirs, file):
                continue
            else:
                with open(file , "r") as readFile:
                    readF = readFile.readlines()
                    for eachLine in readF:
                        if not eachLine:
                            continue
                        countLine += 1

        countRecursFiles = len(recursFiles)

        print("\n[+] Running adding of random scripts in {0} file(s)...\n".format(countRecursFiles))

        # -- Padding scripts added -- #
        with tqdm(total=countRecursFiles) as pbar:
            for file in recursFiles:
                pbar.update(1)
                if re.match(blockDirs, file):
                    continue
                else:
                    with fileinput.input(file, inplace=True) as inputFile:
                        for eachLine in inputFile:
                            print(eachLine)
                            if eachLine == "\n" or "coding" in eachLine:
                                continue
                            else:
                                if codeArg == "python":
                                    listCheckLine                       = [] # Initialize var
                                    spaces                              = len(eachLine) - len(eachLine.lstrip()) # Check line indent

                                    detectStringVar                     = r".*\w+\s*\={1}\s*r+[\"|\']{1}"
                                    noAddScript                         = r"^\@|\s+\@|\s+return|\s*def\s+.+\s*\:{1}|^class\s+.+\s*\:{1}|.*[\[|\(|\{|\,|\\]$|\s+[\)|\]|\}]$"
                                    addIndentScript                     = r".*\:{1}\s"
                                    quoteIntoVariable                   = r".*\={1}\s*\w*\.?\w*[\(|\.]{1}[\"|\']{3}|.*\={1}\s*[\"|\']{3}" # """ and ''' opening a variable assignment
                                    quoteOfCommentariesMultipleLines    = r"^\s*[\"|\']{3}$"        # """ and ''' not tied to a variable, for comments spanning multiple lines
                                    quoteOfEndCommentariesMultipleLines = r"^\s*[\"|\']{3}\)?\.?"   # same, when the multi-line comment ends with a .format() call
                                    
                                    for i in eachLine:
                                        listCheckLine.append(i)

                                    # -- Check line below after '\' backslash char -- #     
                                    if re.match(r".+\\$", eachLine):
                                        countBackSlash += 1
                                    if countBackSlash > 0:
                                        if re.match(r".+\\$", eachLine) is None:
                                            countBackSlash = 0
                                            continue

                                    # -- Check if end char in line is " or ' -- #
                                    if re.match(r"\"|\'", listCheckLine[-2]):
                                        try:
                                            if re.match(r"\'|\"", listCheckLine[-3]) and re.match(r"\'|\"", listCheckLine[-4]):
                                                pass
                                            else:
                                                if re.match(detectStringVar, eachLine):
                                                    pass
                                                else:
                                                    continue
                                        except IndexError:
                                            continue
        
                                    # -- Check code into """ or ''' -- #
                                    if re.match(quoteIntoVariable, eachLine):
                                        checkQuotePassing += 1
                                        continue
                                    elif re.match(quoteOfCommentariesMultipleLines, eachLine) or re.match(quoteOfEndCommentariesMultipleLines, eachLine):
                                        checkQuotePassing += 1
                                        if checkQuotePassing == 2:
                                            checkQuotePassing = 0
                                        continue

                                    if checkQuotePassing == 1:
                                        continue
                                    elif checkQuotePassing == 2:
                                        checkQuotePassing = 0
                                        continue
                                    else:
                                        checkQuotePassing = 0

                                # -- Add scripts -- #
                                if re.match(noAddScript, eachLine) is not None:
                                    continue
                                # Pad only lines indented by a multiple of 4, up to 60 spaces
                                elif spaces % 4 != 0 or spaces > 60:
                                    continue
                                elif re.match(addIndentScript, eachLine) is not None:
                                    # Line opens a block (ends with ':'): insert one level deeper
                                    print(textwrap.indent(Padding.ScriptsGenerator(self, codeArg, mixerLevelArg), " " * (spaces + 4)))
                                    countScriptsAdded += 1
                                else:
                                    # Otherwise insert at the current line's indentation
                                    print(textwrap.indent(Padding.ScriptsGenerator(self, codeArg, mixerLevelArg), " " * spaces))
                                    countScriptsAdded += 1
                                
        # -- Check padding has added in output script -- #
        for file in recursFiles:
            if re.match(blockDirs, file):
                continue
            else:
                with open(file , "r") as readFile:
                    readF = readFile.readlines()
                    for eachLine in readF:
                        if not eachLine:
                            continue    
                        checkLine += 1
        
        countLineAdded = checkLine - countLine

        if (self.remove.LineBreaks(codeArg, outputArg) == 0):
            if checkLine > countLine:    
                print("\n-> {0} scripts added in {1} file(s)\n".format(countScriptsAdded, countRecursFiles))
                print("-> {0} lines added in {1} file(s)\n".format(countLineAdded, countRecursFiles))
                return EXIT_SUCCESS
                
            else:
                return EXIT_FAILURE
        else:
            return EXIT_FAILURE
Esempio n. 48
0
        # Patch the socket module
        socks.setdefaultproxy(
            socks.PROXY_TYPE_SOCKS5, proxy_host, int(proxy_port), rdns=True
        )  # rdns is True by default. Never use rdns=False with TOR, otherwise you are screwed!
        socks.wrap_module(socket)
        socket.create_connection = create_connection

    if args.deep_scrape:
        results = deep_scrape(args.query)
    else:
        results = scrape(args.query, args.num_results_per_page, args.num_pages)

    for result in results:
        logger.info(
            '{} links found! The search with the keyword "{}" yielded the result: {}'
            .format(len(result['results']), result['search_keyword'],
                    result['num_results_for_kw']))
        if args.view:
            import webbrowser
            webbrowser.open(result['cache_file'])
        for link_title, link_snippet, link_url in result['results']:
            print('Link: {}'.format(urllib.parse.unquote(link_url.geturl())))
            if args.verbosity > 1:
                import textwrap
                print('Title: \n{}'.format(
                    textwrap.indent('\n'.join(textwrap.wrap(link_title, 50)),
                                    '\t')))
                print('Description: \n{}\n'.format(
                    textwrap.indent('\n'.join(textwrap.wrap(link_snippet, 70)),
                                    '\t')))
                print('*' * 70)
Esempio n. 49
0
def module_docstrings2rst(component_name, module_path, module_name,
                          sphinx_directory, swig_working_dir):
    """Import a module and create 'rst' (autodoc)
    file for each documented (docstrings) object.

    Parameters
    ----------

    component_name : string
         name of the current component (e.g. kernel)
    module_path : string
        current module path, relative to swig working dir
        (usually wrap/siconos), e.g. mechanics/collision for module bodies.
    module_name : string
         name of the module (e.g. sensor)
    sphinx_directory : string
         directory (absolute) where rst files will
         be written. 
    swig_working_dir : string
         directory (absolute) where python modules
         are generated using swig

    Notes:
       * module_path is required for modules like sensor,
         located in the siconos/control directory,
         to build module names like siconos.control.sensor
       * Usually: sphinx_directory
         = binary_dir/docs/sphinx/reference/python/module_name
       * Results:
          * pyclass*.rst or pyfunction*.rst in sphinx_directory
          * pyclass.rst to collect (doctree toc) all pyclass*
          * pyfunctions.rst to collect all pyfunc*
    """

    # Test case with submodules (e.g. sensor in control)
    if module_path == '.' or module_path == '':
        module_name = 'siconos.' + module_name
    else:
        module_path = module_path.replace(r'/', r'.')
        module_name = 'siconos.' + module_path + '.' + module_name

    comp = importlib.import_module(module_name)
    sphinx_directory = sphinx_directory + module_name.replace(r'.', '_')
    if not os.path.exists(sphinx_directory):
        os.makedirs(sphinx_directory)

    features_filename = comp.__file__.split(
        component_name)[0] + component_name + '.pickle'
    with open(features_filename, 'rb') as f:
        features = pickle.load(f)

    # We have to remove pyfiles each time the function is called,
    # because files are opened in 'a' (append) mode when written below
    pyfiles = glob.glob(os.path.join(sphinx_directory, '*pyfile.rst'))
    pyfiles += glob.glob(os.path.join(sphinx_directory, '*pyclass.rst'))
    for file in pyfiles:
        os.remove(file)

    class_files = []
    pyfunc_files = []
    objlist = [obj for obj in dir(comp) if not obj.startswith('_')]
    for obj in objlist:
        current = getattr(comp, obj)
        needs_doc = hasattr(current, '__doc__') and current.__doc__ is not None
        if needs_doc and len(current.__doc__.strip()) > 0:
            gen, kind, name = create_autodoc(current, module_name)
            if kind == 'pyclass':  # one rst file per class
                outputname = name + '_' + kind + '.rst'
                outputname = os.path.join(sphinx_directory, outputname)
                class_files.append(outputname)
            elif kind == 'pyfunction':  # one rst file for all functions from a given header
                if name in features:
                    featname = features[name]
                elif name.replace('_', '::', 1) in features:
                    # - when two classes wrapped with swig have
                    # the same method (same name),
                    # swig create class1_methodname and class2_methodname.
                    # while features name is class1::methodname.
                    # - the same kind of thing happens for static class methods.
                    # We have to take these into account ... and the fact
                    # that some methods names may contain '_'
                    # (maxreplace=1 in replace below)
                    featname = features[name.replace('_', '::', 1)]
                elif name.split('::')[-1] in features:
                    # another way for swig to deal with
                    # namespaces ...
                    featname = features[name.split('::')[-1]]

                else:
                    keys = list(features.keys())
                    for k in keys:
                        if name in k:
                            featname = features[k]
                            break
                    else:
                        raise Exception('Unknown feature name : ', name)
                    #featname = name
                outputname = os.path.basename(featname).split('.')[0]
                outputname = outputname + '_pyfile.rst'
                outputname = os.path.join(sphinx_directory, outputname)
                pyfunc_files.append(outputname)
            else:
                # pydata, processed later.
                continue
            with open(outputname, 'a+') as out:
                out.write(gen)
                out.write('\n')

    # -- Create rst files to collect list of classes and files (i.e. files just created above) --
    #class_files.sort()
    pyfunc_files = list(set(pyfunc_files))
    #pyfunc_files.sort()

    # Insert title (required to be taken into account in toctree ...)
    for fname in pyfunc_files:
        shortname = os.path.basename(fname).split('.')[0]
        label = '.. _' + shortname + ':\n\n'

        title = shortname.split('_pyfile')[0] + ' (functions)'
        lenname = len(title)
        title = label + title + '\n' + lenname * '-' + '\n\n'
        with open(fname, 'r+') as f:
            lines = f.read()
            f.seek(0, 0)
            f.write(title + lines)

    all_index_filename = os.path.join(swig_working_dir,
                                      component_name + '_index.pickle')
    with open(all_index_filename, 'rb') as f:
        all_index = pickle.load(f)

    allfiles = class_files + pyfunc_files
    allfiles.sort()
    submodule_name = module_name.split('.')[-1]
    outputname = os.path.join(sphinx_directory, 'autodoc_all.rst')
    title = module_name + '\n'
    title += len(title) * '=' + '\n\n'
    indent = 4 * ' '
    basename = '/reference/python/' + module_name.replace(r'.', '_')

    # A few lines to illustrate module usage
    header = '**Usage example** :\n\n.. code-block:: python\n\n'
    importname = 's' + module_name.split('.')[-1][0]
    code = 'import ' + module_name + ' as ' + importname
    code += '\n \nhelp(' + importname + '.SomeClass)\n\n'
    header += textwrap.indent(code, '    ')
    header += '**Classes and functions**\n\n'

    with open(outputname, 'wt') as out:
        out.write(title)
        out.write(header)
        #out.write('.. toctree::\n    :maxdepth: 2\n\n')
        gen = '* :doc:`Enums and constants <' + os.path.join(
            basename, 'autodoc_pydata') + '>`\n'
        for f in allfiles:
            name = os.path.basename(f).split('.')[0]
            text = ''
            if name.count('_pyclass') > 0:
                realname = name.split('_pyclass')[0]
                shorttitle = realname + ' (class) '
                #text = '* :doc:`' + shorttitle + '<' + basename + name + '>` : '
                text = '* :py:class:`' + module_name + '.' + realname + '` : '
                try:
                    text += all_index[realname] + '\n'
                except KeyError:
                    text += ' \n'
            elif name.count('_pyfile') > 0:
                realname = name.split('_pyfile')[0]
                shorttitle = realname + ' (functions) '
                text = '* :doc:`' + shorttitle + '<' + os.path.join(
                    basename, name) + '>` : '
                if realname + '.h' in all_index:
                    text += all_index[realname + '.h'] + ' \n'
                elif realname + '.hpp' in all_index:
                    text += all_index[realname + '.hpp'] + ' \n'
                else:
                    text += ' \n'
            else:
                shorttitle = ''

            gen += text
        # out.write(textwrap.indent(gen, indent))
        out.write(gen + '\n')

    # It might be necessary to parse some latex from doxygen and convert it to sphinx ...
    latex_dir = swig_working_dir + 'tmp_' + component_name
    replace_latex(outputname, latex_dir)

    # Process enums
    # Get saved enums for the current module
    outputname = os.path.join(sphinx_directory, 'autodoc_pydata.rst')
    title = module_name + ' constants (Python API)\n'
    title += len(title) * '-' + '\n\n'
    title += 'All the predefined global constants in ' + module_name
    title += ' (generated from C++ enums, global variables, ...) \n\n'
    enumskeys = [k for k in features if k.find('pydata') > -1]
    enums = [features[key] for key in enumskeys]
    header = '**Usage** :\n\n.. code-block:: python\n\n'
    importname = 's' + module_name.split('.')[-1][0]
    code = 'import ' + module_name + ' as ' + importname
    code += '\n \nprint(' + importname + '.CONSTNAME)\n\n'
    header += textwrap.indent(code, '    ')
    title += header
    title += '\n-----\n\n**List and descriptions of available constants** :\n\n'

    with open(outputname, 'wt') as out:
        out.write(title)
        for key in enumskeys:
            enums = features[key]
            for ename in enums:
                # Document only data available in python API
                if hasattr(comp, ename):
                    # and only data with a description
                    if len(enums[ename][1].strip()) > 0:
                        gen = ''
                        gen += '.. _pydata_' + ename + ':\n\n'
                        gen += '.. py:data:: ' + ename + '\n\n'
                        if len(enums[ename][0]) > 0:
                            # Add initializer value if set
                            gen += '    {0} ({1})\n\n'.format(
                                enums[ename][1].strip(), enums[ename][0])
                        else:
                            gen += '    {0} \n\n'.format(
                                enums[ename][1].strip())
                        out.write(gen)
    # It might be necessary to parse some latex from doxygen and convert it to sphinx ...
    replace_latex(outputname, latex_dir)
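
Both rst usage headers above embed Python code under a code-block directive by shifting the snippet four spaces; the move in isolation (the snippet itself is illustrative):

import textwrap

code = "import siconos.kernel as sk\nhelp(sk.SomeClass)"
rst = "**Usage example** :\n\n.. code-block:: python\n\n" + textwrap.indent(code, "    ")
print(rst)
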
Esempio n. 50
0
    module_name, func_name = func.split(":", 1)
    module = import_module(module_name)
    func = getattr(module, func_name)
    return func


def parse_static_item(static_item):
    assert "=" in static_item, "{!r} should have format 'path=directory'"
    return static_item.split("=", 1)


def main():
    sys.path.insert(0, os.getcwd())
    # Parse the args.
    kwargs = vars(parser.parse_args(sys.argv[1:]))
    application = import_func(kwargs.pop("application"))
    static = list(map(parse_static_item, kwargs.pop("static")))
    # Set up logging.
    verbosity = (kwargs.pop("verbose") - kwargs.pop("quiet")) * 10
    logging.basicConfig(level=max(logging.ERROR - verbosity, logging.DEBUG),
                        format="%(message)s")
    logging.getLogger("aiohttp").setLevel(
        max(logging.INFO - verbosity, logging.DEBUG))
    logger.setLevel(max(logging.INFO - verbosity, logging.DEBUG))
    # Serve!
    serve(application, static=static, **kwargs)


__doc__ = __doc__.format(help=textwrap.indent(parser.format_help(), "    "),
                         **HELP)
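
Formatting parser.format_help() into __doc__ keeps the --help output and the module docstring in sync; the four-space indent renders the help as a literal block under the rst "::" marker. A minimal stand-alone sketch (the parser here is a stand-in for the one defined earlier in the module):

import argparse
import textwrap

__doc__ = """Serve an application.

Command line::

{help}
"""

parser = argparse.ArgumentParser(prog="serve")
parser.add_argument("--port", type=int, default=8080)
__doc__ = __doc__.format(help=textwrap.indent(parser.format_help(), "    "))
print(__doc__)
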
Esempio n. 51
0
def replace_latex(pyfile, latex_dir):
    """Post processing of latex forms in docstrings.
    
    Parameters
    ----------
    pyfile : string
       name (full path) of the python file to process
    latex_dir : string
       directory which contains pickle files with latex forms
       (result of do_latex or do_verbatim call in sicodoxy2swig)

    Usually : pyfile = some_component.py (e.g. numerics.py)
    and latex_dir = wrap/siconos/tmp_component_name.

    """
    # Parse latex_dir and get all pickle files
    formfiles = glob.glob(os.path.join(latex_dir, 'latex_*.pickle'))

    # Runner : use sed rather than python replace. But less portable (?)
    #runner = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'replace_latex.sh')

    # temp file for outputs.
    target = pyfile + '.copy'
    shutil.copyfile(pyfile, target)

    # Read input (.py)
    with open(pyfile, "r") as f:
        source_lines = f.readlines()

    rst = []
    # Parse and replace :
    # for each formula found in each pickle file,
    # replace FORMULA_Id with the proper string
    # in temp list.
    for fname in formfiles:
        with open(fname, 'rb') as f:
            latex_dict = pickle.load(f)
            for form in latex_dict:
                idf = 'FORMULA' + str(form) + '_'
                # Escape backslashes (we must keep \\dot in the rst output
                # from doxygen, else there is a confusion with graphviz's dot).
                formula = latex_dict[form]["latex"].replace('\\', '\\\\')
                formula_type = latex_dict[form]["label"]  # inline or not
                #formula = ''.join(formula)
                for line in source_lines:
                    if formula_type == 'inline':
                        rst.append(line.replace(idf, formula))
                    else:
                        indent = len(line) - len(line.lstrip())
                        rst.append(
                            line.replace(
                                idf, textwrap.indent(formula, indent * ' ')))
                source_lines = list(rst)
                rst = []
                #cmd = [runner, idf, formula, source, target]
                #print(cmd)
                #subprocess.call(cmd)
                #shutil.copyfile(target, source)

    # Replace .py with new results.
    with open(target, 'w') as f:
        for line in source_lines:
            f.write(line)
    shutil.move(target, pyfile)
Esempio n. 52
0
 def as_text(self) -> str:
     return '\n'.join('* DistributionPoint:\n%s' % textwrap.indent(dp.as_text(), '  ')
                      for dp in self.value)
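
Indenting each nested as_text() by two spaces turns the flat join into a readable bullet tree; with invented values standing in for the real distribution points:

import textwrap

entries = ["Full Name: URI:http://example.com/crl",
           "Relative Name: CN=ca"]
print('\n'.join('* DistributionPoint:\n%s' % textwrap.indent(e, '  ')
                for e in entries))
# * DistributionPoint:
#   Full Name: URI:http://example.com/crl
# * DistributionPoint:
#   Relative Name: CN=ca
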
Esempio n. 53
0
    def query_iter(self,
                   ids=None,
                   patterns=None,
                   children=False,
                   parents=False,
                   objects_per_report=0):
        """
        Match and fetch objects from the database, in object number-limited
        chunks.

        Args:
            ids:                A dictionary of object list names, and lists
                                of IDs of objects to match. None means empty
                                dictionary.
            patterns:           A dictionary of object list names, and lists
                                of LIKE patterns, for IDs of objects to match.
                                None means empty dictionary.
            children:           True if children of matched objects should be
                                matched as well.
            parents:            True if parents of matched objects should be
                                matched as well.
            objects_per_report: An integer number of objects per each returned
                                report data, or zero for no limit.

        Returns:
            An iterator returning report JSON data adhering to the latest I/O
            schema version, each containing at most the specified number of
            objects.

        Raises:
            `IncompatibleSchema` if the dataset schema is incompatible with
            the latest I/O schema.
        """
        # Calm down, we'll get to it,
        # pylint: disable=too-many-locals,too-many-statements
        assert ids is None or isinstance(ids, dict)
        if ids is None:
            ids = dict()
        assert all(
            isinstance(k, str) and isinstance(v, list) and all(
                isinstance(e, str) for e in v) for k, v in ids.items())

        assert patterns is None or isinstance(patterns, dict)
        if patterns is None:
            patterns = dict()
        assert all(
            isinstance(k, str) and isinstance(v, list) and all(
                isinstance(e, str) for e in v) for k, v in patterns.items())

        assert isinstance(objects_per_report, int)
        assert objects_per_report >= 0

        major, minor = self.get_schema_version()
        if major != io.schema.LATEST.major:
            raise IncompatibleSchema(major, minor)

        # A dictionary of object list names and tuples containing a SELECT
        # statement and the list of its parameters, returning IDs of the
        # objects to fetch.
        obj_list_queries = {
            # JOIN selecting objects with IDs matching the patterns
            obj_list_name: [
                f"SELECT id FROM UNNEST(?) AS id\n" \
                f"UNION DISTINCT\n" \
                f"SELECT {obj_list_name}.id AS id " \
                f"FROM {obj_list_name} " \
                f"INNER JOIN UNNEST(?) AS id_pattern " \
                f"ON {obj_list_name}.id LIKE id_pattern\n",
                [
                    bigquery.ArrayQueryParameter(
                        None, "STRING", ids.get(obj_list_name, [])
                    ),
                    bigquery.ArrayQueryParameter(
                        None, "STRING", patterns.get(obj_list_name, [])
                    )
                ]
            ]
            for obj_list_name in io.schema.LATEST.tree if obj_list_name
        }

        # Add referenced parents if requested
        if parents:

            def add_parents(obj_list_name):
                """Add parent IDs to query results"""
                obj_name = obj_list_name[:-1]
                query = obj_list_queries[obj_list_name]
                for child_list_name in io.schema.LATEST.tree[obj_list_name]:
                    add_parents(child_list_name)
                    child_query = obj_list_queries[child_list_name]
                    query[0] += \
                        f"UNION DISTINCT\n" \
                        f"SELECT {child_list_name}.{obj_name}_id AS id " \
                        f"FROM {child_list_name} " + \
                        "INNER JOIN (\n" + \
                        textwrap.indent(child_query[0], " " * 4) + \
                        ") USING(id)\n"
                    query[1] += child_query[1]

            for obj_list_name in io.schema.LATEST.tree[""]:
                add_parents(obj_list_name)

        # Add referenced children if requested
        if children:

            def add_children(obj_list_name):
                """Add child IDs to query results"""
                obj_name = obj_list_name[:-1]
                query = obj_list_queries[obj_list_name]
                for child_list_name in io.schema.LATEST.tree[obj_list_name]:
                    child_query = obj_list_queries[child_list_name]
                    child_query[0] += \
                        f"UNION DISTINCT\n" \
                        f"SELECT {child_list_name}.id AS id " \
                        f"FROM {child_list_name} " + \
                        "INNER JOIN (\n" + \
                        textwrap.indent(query[0], " " * 4) + \
                        f") AS {obj_list_name} ON " \
                        f"{child_list_name}.{obj_name}_id = " \
                        f"{obj_list_name}.id\n"
                    child_query[1] += query[1]
                    add_children(child_list_name)

            for obj_list_name in io.schema.LATEST.tree[""]:
                add_children(obj_list_name)

        # Fetch the data
        obj_num = 0
        data = io.new()
        for obj_list_name, query in obj_list_queries.items():
            query_parameters = query[1]
            query_string = \
                f"SELECT * FROM {obj_list_name} INNER JOIN (\n" + \
                textwrap.indent(query[0], " " * 4) + \
                ") USING(id)\n"
            LOGGER.debug("Query string: %s", query_string)
            LOGGER.debug("Query params: %s", query_parameters)
            job_config = bigquery.job.QueryJobConfig(
                query_parameters=query_parameters,
                default_dataset=self.dataset_ref)
            query_job = self.client.query(query_string, job_config=job_config)
            obj_list = None
            for row in query_job:
                if obj_list is None:
                    obj_list = []
                    data[obj_list_name] = obj_list
                obj_list.append(Client._unpack_node(dict(row.items())))
                obj_num += 1
                if objects_per_report and obj_num >= objects_per_report:
                    assert io.schema.is_valid_latest(data)
                    yield data
                    obj_num = 0
                    data = io.new()
                    obj_list = None

        if obj_num:
            assert io.schema.is_valid_latest(data)
            yield data
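The example above composes nested SQL by indenting each subquery with textwrap.indent() before splicing it into its parent, which keeps the generated statement readable when logged. A minimal, self-contained sketch of that pattern (table and column names are illustrative, not taken from the example):

import textwrap

inner = "SELECT id FROM checkouts\nWHERE origin = @origin\n"
outer = (
    "SELECT * FROM builds INNER JOIN (\n"
    + textwrap.indent(inner, " " * 4)
    + ") USING(id)\n"
)
# Prints the subquery indented four spaces inside its parent query.
print(outer)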
Esempio n. 54
0
    c.KubeSpawner.init_containers.append(ip_block_container)

if get_config('debug.enabled', False):
    c.JupyterHub.log_level = 'DEBUG'
    c.Spawner.debug = True

extra_config = get_config('hub.extraConfig', {})
if isinstance(extra_config, str):
    from textwrap import indent, dedent
    msg = dedent("""
    hub.extraConfig should be a dict of strings,
    but found a single string instead.
    extraConfig as a single string is deprecated
    as of the jupyterhub chart version 0.6.
    The keys can be anything identifying the
    block of extra configuration.
    Try this instead:
        hub:
          extraConfig:
            myConfig: |
              {}
    This configuration will still be loaded,
    but you are encouraged to adopt the nested form
    which enables easier merging of multiple extra configurations.
    """)
    print(msg.format(indent(extra_config, ' ' * 10).lstrip()), file=sys.stderr)
    extra_config = {'deprecated string': extra_config}

for key, config_py in sorted(extra_config.items()):
    print("Loading extra config: %s" % key)
    exec(config_py)
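The deprecation message above relies on a common pairing: textwrap.dedent() strips the template's leading indentation, while indent(...).lstrip() re-indents the embedded block to the placeholder's depth and lets its first line sit directly on the placeholder. A minimal sketch of the same pairing (template and block contents are illustrative):

from textwrap import dedent, indent

block = "first line\nsecond line"
template = dedent("""
    Try this instead:
        myConfig: |
          {}
    """)
# After dedent() the '{}' placeholder sits at a 6-space depth, so the
# block is re-indented by 6 and the prefix stripped from its first line.
print(template.format(indent(block, " " * 6).lstrip()))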
Esempio n. 55
0
def _execute_nomad_job(job, previews, params, movie_file_path):
    import nomad
    import zlib
    import json

    preview_ids = [
        preview["id"] for preview in previews if preview["extension"] == "mp4"
    ]
    input_bytes = zlib.compress(bytes(json.dumps(preview_ids), "utf-8"))
    input_string = base64.b64encode(input_bytes).decode("ascii")
    bucket_prefix = config.FS_BUCKET_PREFIX
    params = {
        "version": "1",
        "bucket_prefix": bucket_prefix,
        "output_filename": Path(movie_file_path).name,
        "output_key": file_store.make_key("playlists", job["id"]),
        "input": input_string,
        "width": params.width,
        "height": params.height,
        "fps": params.fps,
        "FS_BACKEND": config.FS_BACKEND,
    }
    # Add object storage information
    if config.FS_BACKEND == "s3":
        params.update({
            "S3_ENDPOINT": config.FS_S3_ENDPOINT,
            "AWS_DEFAULT_REGION": config.FS_S3_REGION,
            "AWS_ACCESS_KEY_ID": config.FS_S3_ACCESS_KEY,
            "AWS_SECRET_ACCESS_KEY": config.FS_S3_SECRET_KEY,
        })
    elif config.FS_BACKEND == "swift":
        params.update({
            "OS_USERNAME": config.FS_SWIFT_USER,
            "OS_PASSWORD": config.FS_SWIFT_KEY,
            "OS_AUTH_URL": config.FS_SWIFT_AUTHURL,
            "OS_TENANT_NAME": config.FS_SWIFT_TENANT_NAME,
            "OS_REGION_NAME": config.FS_SWIFT_REGION_NAME,
        })

    # Don't use 'app.config' here: the webapp doesn't use this variable,
    # only the RQ worker does.
    nomad_job = os.getenv("JOB_QUEUE_NOMAD_PLAYLIST_JOB", "zou-playlist")
    nomad_host = os.getenv("JOB_QUEUE_NOMAD_HOST", "zou-nomad-01.zou")
    data = json.dumps(params).encode("utf-8")
    payload = base64.b64encode(data).decode("utf-8")
    ncli = nomad.Nomad(host=nomad_host, timeout=5)

    response = ncli.job.dispatch_job(nomad_job, payload=payload)
    nomad_jobid = response["DispatchedJobID"]

    while True:
        summary = ncli.job.get_summary(nomad_jobid)
        task_group = list(summary["Summary"])[0]
        status = summary["Summary"][task_group]
        if status["Failed"] != 0 or status["Lost"] != 0:
            logger.debug("Nomad job %r failed: %r", nomad_jobid, status)
            out, err = _get_nomad_job_logs(ncli, nomad_jobid)
            out = textwrap.indent(out, "\t")
            err = textwrap.indent(err, "\t")
            raise Exception("Job %s is 'Failed' or 'Lost':\nStatus: "
                            "%s\nerr:\n%s\nout:\n%s" %
                            (nomad_jobid, status, err, out))
        if status["Complete"] == 1:
            logger.debug("Nomad job %r: complete", nomad_jobid)
            break
        # There is no explicit timeout here, but the enclosing Python RQ job
        # has one, and Nomad jobs have their own timeout as well.
        time.sleep(1)

    # fetch movie from object storage
    with open(movie_file_path, "wb") as movie_file:
        for chunk in file_store.open_movie("playlists", job["id"]):
            movie_file.write(chunk)
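On failure, the function above indents the captured stdout and stderr with a tab before embedding them in the exception message, keeping the multi-line logs visually separated from the status line. A minimal sketch of that formatting (log content is illustrative):

import textwrap

out = "frame 1 rendered\nframe 2 rendered"
err = "codec warning: deprecated option"
msg = ("Job %s is 'Failed' or 'Lost':\nerr:\n%s\nout:\n%s"
       % ("job-123", textwrap.indent(err, "\t"), textwrap.indent(out, "\t")))
print(msg)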
Esempio n. 56
0
import textwrap

# The original snippet assumes a pre-existing multi-line string named
# sample_text; a small placeholder is defined here so the example runs.
sample_text = '''
    The textwrap module can be used to format text for output when
    pretty-printing is desired. It offers functionality similar to
    the paragraph wrapping or filling features found in many text
    editors.
    '''

dedented_text = textwrap.dedent(sample_text)
print("Dedented:")
print(dedented_text)

# Controlling the paragraph width
dedented_text = textwrap.dedent(sample_text).strip()
for width in [45, 60]:
    print("{} Columns:\n".format(width))
    print(textwrap.fill(dedented_text, width=width))  # note that fill() takes a width parameter
    print()

# Indenting Blocks
dedented_text = textwrap.dedent(sample_text)
wrapped = textwrap.fill(dedented_text, width=50)
wrapped += "\n\nSecond paragraph after a blank line"
final = textwrap.indent(wrapped, "> ")  # indent() prepends the given prefix to every line

print("Quoted block:\n")
print(final)

print()

# Prefix lines whose stripped length is even with "EVEN "
def should_indent(line):
    print("Indent {!r}?".format(line))
    return len(line.strip()) % 2 == 0


dedented_text = textwrap.dedent(sample_text)
wrapped = textwrap.fill(dedented_text, width=50)
final = textwrap.indent(wrapped, "EVEN ", predicate=should_indent)
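Worth noting about the predicate above: by default, indent() adds the prefix only to lines that contain non-whitespace characters; supplying a predicate overrides that check entirely. An always-true predicate therefore prefixes blank lines as well:

import textwrap

text = "first\n\nsecond\n"
print(repr(textwrap.indent(text, "> ")))  # '> first\n\n> second\n'
print(repr(textwrap.indent(text, "> ", predicate=lambda line: True)))  # '> first\n> \n> second\n'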
Esempio n. 57
0
import textwrap


def indent4(a_string):
    """Indent every line of a_string by four spaces."""
    return textwrap.indent(a_string, ' ' * 4)
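A quick usage check of this helper:

print(indent4("alpha\nbeta"))
# prints:
#     alpha
#     beta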
Esempio n. 58
0
    result = run_script(script_name=script_key, config_pathname=config_file)

    # Inform the user when the script is finished, mostly for
    # completeness.
    core.error.ifas_info("FINISH! The script `{script}` using the "
                         "configuration file `{config}` has been completed."
                         .format(script=script_key, config=config_file))
    # All done.
    return result

if (__name__ == '__main__'):
    # Creating the argument parser. textwrap is used to format the help text.
    parser = argparse.ArgumentParser(
        description=textwrap.indent(textwrap.dedent(
            '''
            Execute an IfA-Smeargle script function. Select the
            script by name and set the configuration parameters
            via the configuration file.
            '''), prefix='  '),
        epilog=textwrap.indent(textwrap.dedent(
            '''
            The script to show all available scripts is:
                python -m ifa_smeargle script_special_list_scripts none
            '''), prefix='  '),
        formatter_class=argparse.RawDescriptionHelpFormatter)

    # Adding the three required arguments for all pipelines. These 
    # are always constant requirements and thus are positional.
    parser.add_argument("script_key", 
                        help=("the name of the script function to be run"),
                        type=str)
    parser.add_argument("configuration_path", 
Esempio n. 59
0
"""
# TODO: remove this warning once Tui is delta-driven
# https://github.com/cylc/cylc-flow/issues/3527

from textwrap import indent

from urwid import html_fragment

from cylc.flow.option_parsers import CylcOptionParser as COP
from cylc.flow.terminal import cli_function
from cylc.flow.tui import (TUI)
from cylc.flow.tui.app import (TuiApp, TREE_EXPAND_DEPTH
                               # ^ a nasty solution
                               )

__doc__ += indent(TUI, '           ')


def get_option_parser():
    parser = COP(
        __doc__,
        argdoc=[('REG', 'Workflow name')],
        # auto_add=False,  NOTE: at present auto_add can not be turned off
        color=False)

    parser.add_option(
        '--display',
        help=('Specify the display technology to use.'
              ' "raw" for interactive in-terminal display.'
              ' "html" for non-interactive html output.'),
        action='store',
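The "__doc__ += indent(TUI, ...)" line above appends pre-rendered help text to the module docstring with a fixed left margin, so it shows up indented in the generated command help. A minimal standalone sketch of that idea (the help text is illustrative; run it as a script so __doc__ is set):

"""Demo module docstring."""
from textwrap import indent

HELP_TEXT = "up      scroll up\ndown    scroll down"
__doc__ += "\n" + indent(HELP_TEXT, "    ")
print(__doc__)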
Esempio n. 60
0
def debug_print_env(path):
    """Log each entry of a PATH-like string on its own indented line."""
    import textwrap
    logger.info('PATH:\n{}'.format(textwrap.indent(path.replace(os.pathsep, '\n'), '    ')))
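A hedged usage sketch: the fragment leaves 'logger' and 'os' undefined, so a minimal setup is assumed here.

import logging
import os

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Logs the current PATH, one entry per line, indented four spaces.
debug_print_env(os.environ.get("PATH", ""))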