Example #1
def save_csv(result, option):

    def max_ccn(f):
        cyclomatic_complexities = [func.cyclomatic_complexity for func in f.function_list]
        return max(cyclomatic_complexities) if cyclomatic_complexities else 0

    result = [f for f in result if f.function_list]
    total_files_count = len(result)
    nloc_unhealthy_files = [f for f in result if f.nloc > option.length]
    fanout_unhealthy_files = [f for f in result if len(f.dependency_list) > option.DEP]
    ccn_unhealthy_files = [ccn for ccn in map(max_ccn, result) if ccn > option.CCN]
    cpd_unhealthy_files = {f['file'] for info in option.cpd_infos for f in info['files']}

    avg_nloc_severity = sum(f.nloc for f in nloc_unhealthy_files) / (option.length * len(nloc_unhealthy_files)) if nloc_unhealthy_files else 1
    avg_fanout_severity = sum(len(f.dependency_list) for f in fanout_unhealthy_files) / (option.DEP * len(fanout_unhealthy_files)) if fanout_unhealthy_files else 1
    avg_ccn_severity = sum(ccn_unhealthy_files) / (option.CCN * len(ccn_unhealthy_files)) if ccn_unhealthy_files else 1
    avg_cpd_severity = sum(info['tokens'] for info in option.cpd_infos) / (option.cpd_tokens * len(option.cpd_infos)) if option.cpd_infos else 1

    percent_nloc_warning = len(nloc_unhealthy_files) / total_files_count
    percent_fanout_warning = len(fanout_unhealthy_files) / total_files_count
    percent_ccn_warning = len(ccn_unhealthy_files) / total_files_count
    percent_cpd_warning = len(cpd_unhealthy_files) / total_files_count

    # today = os.popen("git log -1 --pretty=format:'%ad' --date=short").read()
    today = str(datetime.date.today())
    try:
        with open(option.csv_file, 'r') as f:
            f_csv = csv.DictReader(f)
            rows = [row for row in f_csv if row['date'] != today]
    except IOError:
        rows = []
Example #2
def convert_node(fields, x, names={}, import_data=None):
    name = x.__class__.__name__
    conv = lambda x: convert_node(fields, x, names=names, import_data=import_data)
    if name == 'Str':
        return x.s.decode('utf-8') if isinstance(x.s, bytes) else x.s
    elif name == 'Num':
        return x.n
    elif name in {'Set', 'List', 'Tuple'}:
        func = {'Set':set, 'List':list, 'Tuple':tuple}[name]
        return func(map(conv, x.elts))
    elif name == 'Dict':
        keys, values = map(conv, x.keys), map(conv, x.values)
        return dict(zip(keys, values))
    elif name == 'Call':
        if len(x.args) != 1 and len(x.keywords) != 0:
            raise TypeError('Unsupported function call for fields: %s' % (fields,))
        return tuple(map(conv, x.args))[0]
    elif name == 'Name':
        if x.id not in names:
            if import_data is not None and x.id in import_data[0]:
                return get_import_data(x.id, import_data[0][x.id], *import_data[1:])
            raise ValueError('Could not find name %s for fields: %s' % (x.id, fields))
        return names[x.id]
    elif name == 'BinOp':
        if x.right.__class__.__name__ == 'Str':
            return x.right.s.decode('utf-8') if isinstance(x.right.s, bytes) else x.right.s
    raise TypeError('Unknown datatype %s for fields: %s' % (x, fields))
Example #3
def pad_larger(*arrays):
    """Pad the smallest of `n` arrays.

    Parameters
    ----------
    arrays : tuple of array_like

    Raises
    ------
    AssertionError

    Returns
    -------
    ret : tuple
        Tuple of zero padded arrays.
    """
    assert all(map(isinstance, arrays, itools.repeat(np.ndarray))), \
        ("all arguments must be instances of ndarray or implement the ndarray"
         " interface")
    if len(arrays) == 2:
        return pad_larger2(*arrays)

    sizes = np.fromiter(map(operator.attrgetter('size'), arrays), int)
    lsize = sizes.max()

    ret = []

    for array, size in zip(arrays, sizes):
        size_diff = lsize - size
        ret.append(np.pad(array, (0, size_diff), 'constant',
                          constant_values=(0,)))
    ret.append(lsize)

    return ret
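
A hedged usage sketch, assuming numpy is imported as np as in the module above; the smaller arrays are zero padded to the longest size, which is appended as the final element:

a, b, c = np.arange(3), np.arange(5), np.arange(2)
*padded, size = pad_larger(a, b, c)
assert size == 5
assert [p.size for p in padded] == [5, 5, 5]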
Example #4
def initialize_constants():
    src = read_cal_file('constants.py')
    nv = re.search(r'numeric_version\s+=\s+\((\d+), (\d+), (\d+)\)', src)
    calibre_constants['version'] = '%s.%s.%s' % (nv.group(1), nv.group(2), nv.group(3))
    calibre_constants['appname'] = re.search(r'__appname__\s+=\s+(u{0,1})[\'"]([^\'"]+)[\'"]', src).group(2)
    epsrc = re.compile(r'entry_points = (\{.*?\})', re.DOTALL).search(read_cal_file('linux.py')).group(1)
    entry_points = eval(epsrc, {'__appname__': calibre_constants['appname']})

    def e2b(ep):
        return re.search(r'\s*(.*?)\s*=', ep).group(1).strip()

    def e2s(ep, base='src'):
        return (base + os.path.sep + re.search(r'.*=\s*(.*?):', ep).group(1).replace('.', '/') + '.py').strip()

    def e2m(ep):
        return re.search(r'.*=\s*(.*?)\s*:', ep).group(1).strip()

    def e2f(ep):
        return ep[ep.rindex(':') + 1:].strip()

    calibre_constants['basenames'] = basenames = {}
    calibre_constants['functions'] = functions = {}
    calibre_constants['modules'] = modules = {}
    calibre_constants['scripts'] = scripts = {}
    for x in ('console', 'gui'):
        y = x + '_scripts'
        basenames[x] = list(map(e2b, entry_points[y]))
        functions[x] = list(map(e2f, entry_points[y]))
        modules[x] = list(map(e2m, entry_points[y]))
        scripts[x] = list(map(e2s, entry_points[y]))

    src = read_cal_file('ebooks/__init__.py')
    be = re.search(r'^BOOK_EXTENSIONS\s*=\s*(\[.+?\])', src, flags=re.DOTALL | re.MULTILINE).group(1)
    calibre_constants['book_extensions'] = json.loads(be.replace("'", '"'))
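
The e2b/e2m/e2f helpers above pick apart entry point strings of the form 'name = module:function'. A standalone sketch of what each regex extracts, using a made-up entry point:

import re

ep = 'calibre = calibre.gui_launch:calibre'
assert re.search(r'\s*(.*?)\s*=', ep).group(1).strip() == 'calibre'              # basename
assert re.search(r'.*=\s*(.*?)\s*:', ep).group(1).strip() == 'calibre.gui_launch'  # module
assert ep[ep.rindex(':') + 1:].strip() == 'calibre'                              # function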
Example #5
    def __init__(self, sfnt):
        self.sfnt = sfnt

        self.head = self.sfnt[b"head"]
        hhea = self.sfnt[b"hhea"]
        hhea.read_data(self.sfnt[b"hmtx"])
        self.ascent = hhea.ascender
        self.descent = hhea.descender
        self.bbox = (self.head.x_min, self.head.y_min, self.head.x_max, self.head.y_max)
        self._advance_widths = hhea.advance_widths
        self.cmap = self.sfnt[b"cmap"]
        self.units_per_em = self.head.units_per_em
        self.os2 = self.sfnt[b"OS/2"]
        self.os2.read_data()
        self.post = self.sfnt[b"post"]
        self.post.read_data()
        self.names = get_all_font_names(self.sfnt[b"name"].raw, raw_is_table=True)
        self.is_otf = "CFF " in self.sfnt.tables
        self._sig = hash(self.sfnt[b"name"].raw)

        # Metrics for embedding in PDF
        pdf_scale = self.pdf_scale = lambda x: int(round(x * 1000.0 / self.units_per_em))
        self.pdf_ascent, self.pdf_descent = map(pdf_scale, (self.os2.typo_ascender, self.os2.typo_descender))
        self.pdf_bbox = tuple(map(pdf_scale, self.bbox))
        self.pdf_capheight = pdf_scale(getattr(self.os2, "cap_height", self.os2.typo_ascender))
        self.pdf_avg_width = pdf_scale(self.os2.average_char_width)
        self.pdf_stemv = 50 + int((self.os2.weight_class / 65.0) ** 2)
Example #6
def convert_node(fields, x, names={}, import_data=None):
    name = x.__class__.__name__
    conv = lambda x: convert_node(fields, x, names=names, import_data=import_data)
    if name == "Str":
        return x.s.decode("utf-8") if isinstance(x.s, bytes) else x.s
    elif name == "Num":
        return x.n
    elif name in {"Set", "List", "Tuple"}:
        func = {"Set": set, "List": list, "Tuple": tuple}[name]
        return func(map(conv, x.elts))
    elif name == "Dict":
        keys, values = map(conv, x.keys), map(conv, x.values)
        return dict(zip(keys, values))
    elif name == "Call":
        if len(x.args) != 1 and len(x.keywords) != 0:
            raise TypeError("Unsupported function call for fields: %s" % (fields,))
        return tuple(map(conv, x.args))[0]
    elif name == "Name":
        if x.id not in names:
            if import_data is not None and x.id in import_data[0]:
                return get_import_data(x.id, import_data[0][x.id], *import_data[1:])
            raise ValueError("Could not find name %s for fields: %s" % (x.id, fields))
        return names[x.id]
    elif name == "BinOp":
        if x.right.__class__.__name__ == "Str":
            return x.right.s.decode("utf-8") if isinstance(x.right.s, bytes) else x.right.s
    raise TypeError("Unknown datatype %s for fields: %s" % (x, fields))
Example #7
    def __init__(self, sfnt):
        for table in (b'head', b'hhea', b'hmtx', b'cmap', b'OS/2', b'post',
                      b'name'):
            if table not in sfnt:
                raise UnsupportedFont('This font has no %s table'%table)
        self.sfnt = sfnt

        self.head = self.sfnt[b'head']
        hhea = self.sfnt[b'hhea']
        hhea.read_data(self.sfnt[b'hmtx'])
        self.ascent = hhea.ascender
        self.descent = hhea.descender
        self.bbox = (self.head.x_min, self.head.y_min, self.head.x_max,
                     self.head.y_max)
        self._advance_widths = hhea.advance_widths
        self.cmap = self.sfnt[b'cmap']
        self.units_per_em = self.head.units_per_em
        self.os2 = self.sfnt[b'OS/2']
        self.os2.read_data()
        self.post = self.sfnt[b'post']
        self.post.read_data()
        self.names = get_all_font_names(self.sfnt[b'name'].raw, raw_is_table=True)
        self.is_otf = 'CFF ' in self.sfnt.tables
        self._sig = hash(self.sfnt[b'name'].raw)

        # Metrics for embedding in PDF
        pdf_scale = self.pdf_scale = lambda x:int(round(x*1000./self.units_per_em))
        self.pdf_ascent, self.pdf_descent = map(pdf_scale,
                        (self.os2.typo_ascender, self.os2.typo_descender))
        self.pdf_bbox = tuple(map(pdf_scale, self.bbox))
        self.pdf_capheight = pdf_scale(getattr(self.os2, 'cap_height',
                                               self.os2.typo_ascender))
        self.pdf_avg_width = pdf_scale(self.os2.average_char_width)
        self.pdf_stemv = 50 + int((self.os2.weight_class / 65.0) ** 2)
Example #8
def plugin_to_index(plugin, count):
    title = '<h3><img src="/plugin-icon.png"><a href=%s title="Plugin forum thread">%s</a></h3>' % (  # noqa
        quoteattr(plugin["thread_url"]),
        escape(plugin["name"]),
    )
    released = datetime(*tuple(map(int, re.split(r"\D", plugin["last_modified"])))[:6]).strftime("%e %b, %Y").lstrip()
    details = [
        "Version: <b>%s</b>" % escape(".".join(map(str, plugin["version"]))),
        "Released: <b>%s</b>" % escape(released),
        "Author: %s" % escape(plugin["author"]),
        "History: %s" % escape("Yes" if plugin["history"] else "No"),
        "calibre: %s" % escape(".".join(map(str, plugin["minimum_calibre_version"]))),
        "Platforms: %s" % escape(", ".join(sorted(plugin["supported_platforms"]) or ["all"])),
    ]
    if plugin["uninstall"]:
        details.append("Uninstall: %s" % escape(", ".join(plugin["uninstall"])))
    if plugin["donate"]:
        details.append('<a href=%s title="Donate">Donate</a>' % quoteattr(plugin["donate"]))
    block = []
    for li in details:
        if li.startswith("calibre:"):
            block.append("<br>")
        block.append("<li>%s</li>" % li)
    block = "<ul>%s</ul>" % ("\n".join(block))
    downloads = ('\xa0<span class="download-count">[%d total downloads]</span>' % count) if count else ""
    zipfile = '<div class="end"><a href=%s title="Download plugin" download=%s>Download plugin \u2193</a>%s</div>' % (
        quoteattr(plugin["file"]),
        quoteattr(plugin["name"] + ".zip"),
        downloads,
    )
    desc = plugin["description"] or ""
    if desc:
        desc = "<p>%s</p>" % desc
    return "%s\n%s\n%s\n%s\n\n" % (title, desc, block, zipfile)
Example #9
def plugin_to_index(plugin):
    title = '<h3><img src="http://icons.iconarchive.com/icons/oxygen-icons.org/oxygen/32/Apps-preferences-plugin-icon.png"><a href=%s title="Plugin forum thread">%s</a></h3>' % (  # noqa
        quoteattr(plugin['thread_url']), escape(plugin['name']))
    released = datetime(*tuple(map(int, re.split(r'\D', plugin['last_modified'])))[:6]).strftime('%e %b, %Y').lstrip()
    details = [
        'Version: <b>%s</b>' % escape('.'.join(map(str, plugin['version']))),
        'Released: <b>%s</b>' % escape(released),
        'Author: %s' % escape(plugin['author']),
        'History: %s' % escape('Yes' if plugin['history'] else 'No'),
        'calibre: %s' % escape('.'.join(map(str, plugin['minimum_calibre_version']))),
        'Platforms: %s' % escape(', '.join(sorted(plugin['supported_platforms']) or ['all'])),
    ]
    if plugin['uninstall']:
        details.append('Uninstall: %s' % escape(', '.join(plugin['uninstall'])))
    if plugin['donate']:
        details.append('<a href=%s title="Donate">Donate</a>' % quoteattr(plugin['donate']))
    block = []
    for li in details:
        if li.startswith('calibre:'):
            block.append('<br>')
        block.append('<li>%s</li>' % li)
    block = '<ul>%s</ul>' % ('\n'.join(block))
    zipfile = '<div class="end"><a href=%s title="Download plugin" download=%s>Download plugin \u2193</a></div>' % (
        quoteattr(plugin['file']), quoteattr(plugin['name'] + '.zip'))
    desc = plugin['description'] or ''
    if desc:
        desc = '<p>%s</p>' % desc
    return '%s\n%s\n%s\n%s\n\n' % (title, desc, block, zipfile)
Example #10
    def test_collate(self):
        """Test collate_iters function"""
        indices = [index(i) for i in [0, 1, 2, 3]]
        helper = lambda i: indices[i]

        makeiter1 = lambda: iter(indices)
        makeiter2 = lambda: map(helper, [0, 1, 3])
        makeiter3 = lambda: map(helper, [1, 2])

        outiter = patchdir.collate_iters([makeiter1(), makeiter2()])
        assert Iter.equal(outiter,
                          iter([(indices[0], indices[0]),
                                (indices[1], indices[1]),
                                (indices[2], None),
                                (indices[3], indices[3])]))

        assert Iter.equal(patchdir.collate_iters([makeiter1(),
                                                 makeiter2(),
                                                 makeiter3()]),
                          iter([(indices[0], indices[0], None),
                                (indices[1], indices[1], indices[1]),
                                (indices[2], None, indices[2]),
                                (indices[3], indices[3], None)]), 1)

        assert Iter.equal(patchdir.collate_iters([makeiter1(), iter([])]),
                          map(lambda i: (i, None), indices))
        assert Iter.equal(map(lambda i: (i, None), indices),
                          patchdir.collate_iters([makeiter1(), iter([])]))
Example #11
def unique_justseen(iterable, key=None):
    """List unique elements, preserving order. Remember only the element just \
            seen.

    Parameters
    ----------
    iterable : `collections.abc.Iterable`
        Iterables to check.

    key : `collections.abc.Callable`, `None`, optional
        If ``None`` the values are taken as they are. If it's a callable the
        callable is applied to the value before comparing it.
        Default is ``None``.

    Returns
    -------
    iterable : generator
        An iterable containing all unique values just seen in the iterable.

    Examples
    --------
    >>> from nddata.utils.itertools_recipes import unique_justseen
    >>> list(unique_justseen('AAAABBBCCDAABBB'))
    ['A', 'B', 'C', 'D', 'A', 'B']

    >>> list(unique_justseen('ABBCcAD', str.lower))
    ['A', 'B', 'C', 'A', 'D']
    """
    return map(next, map(itemgetter(1), groupby(iterable, key)))
Example #12
    def createMorphology(self, nmlcell, moosecell, symmetric=False):
        """Create the MOOSE compartmental morphology in `moosecell` using the
        segments in NeuroML2 cell `nmlcell`. Create symmetric
        compartments if `symmetric` is True.

        """
        morphology = nmlcell.morphology
        segments = morphology.segment
        id_to_segment = {seg.id: seg for seg in segments}
        if symmetric:
            compclass = moose.SymCompartment
        else:
            compclass = moose.Compartment
        # segment names are used as compartment names - assuming
        # naming convention does not clash with that in MOOSE
        cellpath = moosecell.path
        id_to_comp = {}
        for seg in segments:
            if seg.name is not None:
                id_to_comp[seg.id] = compclass('%s/%s' % (cellpath, seg.name))
            else:
                id_to_comp[seg.id] = compclass('%s/comp_%s' % (cellpath, seg.id))
        # Now assign the positions and connect up axial resistance
        if not symmetric:
            src, dst = 'axial', 'raxial'
        else:
            src, dst = 'proximal', 'distal'
        for segid, comp in id_to_comp.items():
            segment = id_to_segment[segid]
            try:
                parent = id_to_segment[str(segment.parent.segment)]
            except AttributeError:
                parent = None
            self.moose_to_nml[comp] = segment
            self.nml_to_moose[segment] = comp            
            p0 = segment.proximal            
            if p0 is None:
                if parent:
                    p0 = parent.distal
                else:
                    raise Exception('No proximal point and no parent segment for segment: name=%s, id=%s' % (segment.name, segment.id))
            comp.x0, comp.y0, comp.z0 = (x * self.lunit for x in map(float, (p0.x, p0.y, p0.z)))
            p1 = segment.distal
            comp.x, comp.y, comp.z = (x * self.lunit for x in map(float, (p1.x, p1.y, p1.z)))
            comp.length = np.sqrt((comp.x - comp.x0)**2
                                  + (comp.y - comp.y0)**2
                                  + (comp.z - comp.z0)**2)
            # This can pose problem with moose where both ends of
            # compartment have same diameter. We are averaging the two
            # - may be splitting the compartment into two is better?
            comp.diameter = (float(p0.diameter)+float(p1.diameter)) * self.lunit / 2
            if parent:
                pcomp = id_to_comp[parent.id]
                moose.connect(comp, src, pcomp, dst)
        sg_to_segments = {}        
        for sg in morphology.segmentGroup:
            sg_to_segments[sg.id] = [id_to_segment[str(m.segment)] for m in sg.member]
        self._cell_to_sg[nmlcell] = sg_to_segments
        return id_to_comp, id_to_segment, sg_to_segments
Example #13
def _generate_constant_defs(macro_defs, macro_type_map):
    constants = '\n'.join(
        '    constexpr {type} const {name} = {value};'.format(
            type=macro_type_map[name], name=name,
            value=list(macro.get_tokens())[1].spelling)
        for name, macro in macro_defs.items() if name in macro_type_map)
    if 'OFP_NO_BUFFER' not in macro_defs:
        return '\n'.join([constants, '    constexpr std::uint32_t const OFP_NO_BUFFER = 0xffffffff;'])
    return constants
Example #14
    def __init__(self, items, level1=DEFAULT_LEVEL1, level2=DEFAULT_LEVEL2, level3=DEFAULT_LEVEL3):
        items = (normalize('NFC', str(x)) for x in items if x)
        items = tuple(x.encode('utf-8') for x in items)
        sort_keys = tuple(map(primary_sort_key, items))

        speedup, err = plugins['matcher']
        if speedup is None:
            raise RuntimeError('Failed to load the matcher plugin with error: %s' % err)
        self.m = speedup.Matcher(items, sort_keys, level1.encode('utf-8'), level2.encode('utf-8'), level3.encode('utf-8'))
Example #15
def _generate_enum_decls(enum_decls):
    return '\n\n'.join(
"""\
    enum {name}
    {{
{members}
    }};\
""".format(name=name, members=_generate_enum_members(enum.get_children()))
        for name, enum in enum_decls.items())
Example #16
 def __call__(self, data):
     self.beginResetModel()
     self.files = data['files']
     self.total_size = sum(map(itemgetter(3), self.files))
     self.images_size = sum(map(itemgetter(3), (f for f in self.files if f.category == 'image')))
     self.fonts_size = sum(map(itemgetter(3), (f for f in self.files if f.category == 'font')))
     psk = numeric_sort_key
     self.sort_keys = tuple((psk(entry.dir), psk(entry.basename), entry.size, psk(self.CATEGORY_NAMES.get(entry.category, '')))
                            for entry in self.files)
     self.endResetModel()
Example #17
def stringtotime(timestring):
    """Return time in seconds from w3 or duplicity timestring

    If there is an error parsing the string, or it doesn't look
    like a valid datetime string, return None.
    """
    try:
        date, daytime = timestring[:19].split("T")
        if len(timestring) == 16:
            # new format for filename time
            year, month, day = map(int,
                                   [date[0:4], date[4:6], date[6:8]])
            hour, minute, second = map(int,
                                       [daytime[0:2], daytime[2:4], daytime[4:6]])
        else:
            # old format for filename time
            year, month, day = map(int, date.split("-"))
            hour, minute, second = map(int,
                                       daytime.split(globals.time_separator))
        assert 1900 < year < 2100, year
        assert 1 <= month <= 12
        assert 1 <= day <= 31
        assert 0 <= hour <= 23
        assert 0 <= minute <= 59
        assert 0 <= second <= 61  # leap seconds
        # We want to return the time in units of seconds since the
        # epoch. Unfortunately the only function that does this
        # works in terms of the current timezone and we have a
        # timezone offset in the string.
        timetuple = (year, month, day, hour, minute, second, -1, -1, 0)

        if len(timestring) == 16:
            # As documented, time.gmtime() and calendar.timegm() are each
            # other's inverse. Since the new filename format stores times
            # in UTC, do not rely on the system's DST and tzdata settings;
            # use functions that work directly with UTC.
            utc_in_secs = calendar.timegm(timetuple)
        else:
            # mktime assumes the tuple is a local time. Compensate
            # by subtracting the value for the current timezone.
            # We don't need to worry about DST here because we turned it
            # off in the tuple
            local_in_secs = time.mktime(timetuple)
            utc_in_secs = local_in_secs - time.timezone
        # Now apply the offset that we were given in the time string
        # This gives the correct number of seconds from the epoch
        # even when we're not in the same timezone that wrote the
        # string
        if len(timestring) == 16:
            return int(utc_in_secs)
        else:
            return int(utc_in_secs + tzdtoseconds(timestring[19:]))
    except (TypeError, ValueError, AssertionError):
        return None
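
A minimal sketch of the two behaviors, assuming calendar is imported as in the module above:

# 16 character filename form, interpreted as UTC
assert stringtotime('20010203T040506Z') == calendar.timegm(
    (2001, 2, 3, 4, 5, 6, -1, -1, 0))
# anything unparseable returns None
assert stringtotime('not a timestring') is None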
Example #18
 def apply_color_space(self, color, pattern, stroke=False):
     wl = self.current_page.write_line
     if color is not None and pattern is None:
         wl(' '.join(map(fmtnum, color)) + (' RG' if stroke else ' rg'))
     elif color is None and pattern is not None:
         wl('/Pattern %s /%s %s'%('CS' if stroke else 'cs', pattern,
                                  'SCN' if stroke else 'scn'))
     elif color is not None and pattern is not None:
         col = ' '.join(map(fmtnum, color))
         wl('/PCSp %s %s /%s %s'%('CS' if stroke else 'cs', col, pattern,
                                  'SCN' if stroke else 'scn'))
Example #19
 def __init__(self, qwe):
     self.qwe = qwe
     self.attributes = {str(x): str(qwe.attribute(x)) for x in
             qwe.attributeNames()}
     self.input_controls = list(map(Control, qwe.findAll('input')))
     rc = [y for y in self.input_controls if y.type == 'radio']
     self.input_controls = [ic for ic in self.input_controls if ic.type != 'radio']
     rc_names = {x.name for x in rc}
     self.radio_controls = {name:RadioControl(name, [z.qwe for z in rc if z.name == name]) for name in rc_names}
     selects = list(map(SelectControl, qwe.findAll('select')))
     self.select_controls = {x.name:x for x in selects}
Example #20
def _generate_adapt_structs(collector):
    return '\n\n'.join(
"""\
BOOST_FUSION_ADAPT_STRUCT(
    canard::network::openflow::v{version}::v{version}_detail::{name},
{member_ppseq}
)\
""".format(version=collector.version, name=name, member_ppseq=_generate_member_ppseq(
    collector.version, struct.members, is_skip_ofp_match=(collector.version == 13)))
        for name, struct in collector.struct_decls.items())
Example #21
def pyimplementation():
    if hasattr(_platform, 'python_implementation'):
        return _platform.python_implementation()
    elif sys.platform.startswith('java'):
        return 'Jython ' + sys.platform
    elif hasattr(sys, 'pypy_version_info'):
        v = '.'.join(map(str, sys.pypy_version_info[:3]))
        if sys.pypy_version_info[3:]:
            v += '-' + ''.join(map(str, sys.pypy_version_info[3:]))
        return 'PyPy ' + v
    else:
        return 'CPython'
Example #22
def pyimplementation():
    if hasattr(_platform, "python_implementation"):
        return _platform.python_implementation()
    elif sys.platform.startswith("java"):
        return "Jython " + sys.platform
    elif hasattr(sys, "pypy_version_info"):
        v = ".".join(map(str, sys.pypy_version_info[:3]))
        if sys.pypy_version_info[3:]:
            v += "-" + "".join(map(str, sys.pypy_version_info[3:]))
        return "PyPy " + v
    else:
        return "CPython"
Example #23
def get(ctx, rd, what, book_id, library_id):
    book_id, rest = book_id.partition('_')[::2]
    try:
        book_id = int(book_id)
    except Exception:
        raise HTTPNotFound('Book with id %r does not exist' % book_id)
    db = get_db(ctx, rd, library_id)
    if db is None:
        raise HTTPNotFound('Library %r not found' % library_id)
    with db.safe_read_lock:
        if not ctx.has_id(rd, db, book_id):
            raise BookNotFound(book_id, db)
        library_id = db.server_library_id  # in case library_id was None
        if what == 'thumb':
            sz = rd.query.get('sz')
            w, h = 60, 80
            if sz is None:
                if rest:
                    try:
                        w, h = map(int, rest.split('_'))
                    except Exception:
                        pass
            elif sz == 'full':
                w = h = None
            elif 'x' in sz:
                try:
                    w, h = map(int, sz.partition('x')[::2])
                except Exception:
                    pass
            else:
                try:
                    w = h = int(sz)
                except Exception:
                    pass
            return cover(ctx, rd, library_id, db, book_id, width=w, height=h)
        elif what == 'cover':
            return cover(ctx, rd, library_id, db, book_id)
        elif what == 'opf':
            mi = db.get_metadata(book_id, get_cover=False)
            rd.outheaders['Content-Type'] = 'application/oebps-package+xml; charset=UTF-8'
            rd.outheaders['Last-Modified'] = http_date(timestampfromdt(mi.last_modified))
            return metadata_to_opf(mi)
        elif what == 'json':
            from calibre.srv.ajax import book_to_json
            data, last_modified = book_to_json(ctx, rd, db, book_id)
            rd.outheaders['Last-Modified'] = http_date(timestampfromdt(last_modified))
            return json(ctx, rd, get, data)
        else:
            try:
                return book_fmt(ctx, rd, library_id, db, book_id, what.lower())
            except NoSuchFormat:
                raise HTTPNotFound('No %s format for the book %r' % (what.lower(), book_id))
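
The book_id parsing above relies on str.partition's three-tuple; a tiny standalone demo of the `[::2]` slice idiom used here and elsewhere in these examples:

book_id, rest = '123_60_80'.partition('_')[::2]
assert (book_id, rest) == ('123', '60_80')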
Example #24
 def numbers(self, name, step=0, type=int, counts=False):
     """Generate decoded numeric term values, optionally with frequency counts.
     
     :param name: field name
     :param step: precision step to select terms
     :param type: int or float
     :param counts: include frequency counts
     """
     convert = util.NumericUtils.sortableLongToDouble if issubclass(type, float) else int
     termsenum = index.MultiFields.getTerms(self.indexReader, name).iterator(None)
     termsenum = search.PrefixTermsEnum(termsenum, util.BytesRef(chr(ord(" ") + step)))
     values = map(convert, map(util.NumericUtils.prefixCodedToLong, termsenum))
     return ((value, termsenum.docFreq()) for value in values) if counts else values
Example #25
def _generate_struct_decls(struct_decls, is_skip_ofp_match=False):
    return '\n\n'.join(
"""\
        struct {name}
        {{
{members}
        }};
        static_assert(sizeof({name}) == {size}, "");\
""".format(
    name=name,
    members=_generate_struct_members(struct.members, is_skip_ofp_match),
    size=_struct_size(struct, is_skip_ofp_match))
        for name, struct in struct_decls.items())
Example #26
 def testInterface(self):
     "Distributed reading and writing."
     resources = client.Resources(self.hosts, limit=1)
     assert resources.unicast('GET', '/')
     assert not resources.unicast('POST', '/terms')
     responses = resources.broadcast('GET', '/')
     assert len(responses) == len(resources)
     for response in responses:
         (directory, count), = response().items()
         assert count == 0 and directory.startswith('org.apache.lucene.store.RAMDirectory@')
     responses = resources.broadcast('PUT', '/fields/text')
     assert all(response() == {'index': 'ANALYZED', 'store': 'NO', 'termvector': 'NO'} for response in responses)
     responses = resources.broadcast('PUT', '/fields/name', {'store': 'yes', 'index': 'not_analyzed'})
     assert all(response() == {'index': 'NOT_ANALYZED', 'store': 'YES', 'termvector': 'NO'} for response in responses)
     doc = {'name': 'sample', 'text': 'hello world'}
     responses = resources.broadcast('POST', '/docs', [doc])
     assert all(response() is None for response in responses)
     response = resources.unicast('POST', '/docs', [doc])
     assert response() is None
     responses = resources.broadcast('POST', '/update')
     assert all(response() >= 1 for response in responses)
     responses = resources.broadcast('GET', '/search?q=text:hello')
     docs = []
     for response in responses:
         result = response()
         assert result['count'] >= 1
         docs += result['docs']
     assert len(docs) == len(resources) + 1
     assert len(set(doc['__id__'] for doc in docs)) == 2
     self.stop(self.servers.pop(0))
     self.assertRaises(socket.error, resources.broadcast, 'GET', '/')
     assert resources.unicast('GET', '/')()
     del resources[self.hosts[0]]
     assert all(resources.broadcast('GET', '/'))
     assert list(map(len, resources.values())) == [1, 1]
     time.sleep(self.config['server.socket_timeout'] + 1)
     assert resources.unicast('GET', '/')
     counts = list(map(len, resources.values()))
     assert set(counts) == set([0, 1])
     assert resources.broadcast('GET', '/')
     assert list(map(len, resources.values())) == counts[::-1]
     host = self.hosts[1]
     resource = resources.request(host, 'GET', '/')
     resource.getresponse = lambda: getresponse(socket.error)
     self.assertRaises(socket.error, resources.getresponse, host, resource)
     resource = resources.request(host, 'GET', '/')
     resource.getresponse = lambda: getresponse(httplib.BadStatusLine)
     assert resources.getresponse(host, resource) is None
     resources.clear()
     self.assertRaises(ValueError, resources.unicast, 'GET', '/')
Example #27
 def __init__(self, items, level1=DEFAULT_LEVEL1, level2=DEFAULT_LEVEL2, level3=DEFAULT_LEVEL3, scorer=None):
     with wlock:
         if not workers:
             requests, results = Queue(), Queue()
             w = [Worker(requests, results) for i in range(max(1, cpu_count()))]
             for x in w:
                 x.start()
             workers.extend(w)
     items = (normalize('NFC', str(x)) for x in items if x)
     self.items = items = tuple(items)
     tasks = split(items, len(workers))
     self.task_maps = [{j:i for j, (i, _) in enumerate(task)} for task in tasks]
     scorer = scorer or default_scorer
     self.scorers = [scorer(tuple(map(itemgetter(1), task_items))) for task_items in tasks]
     self.sort_keys = None
Example #28
    def dump(self, items, out_stream, pdf_metadata):
        opts = self.opts
        self.outline = Outline(self.toc, items)
        page_size = get_page_size(self.opts)
        xdpi, ydpi = self.view.logicalDpiX(), self.view.logicalDpiY()
        ml, mr = opts.margin_left, opts.margin_right
        margin_side = min(ml, mr)
        ml, mr = ml - margin_side, mr - margin_side
        self.doc = PdfDevice(out_stream, page_size=page_size, left_margin=ml,
                             top_margin=0, right_margin=mr, bottom_margin=0,
                             xdpi=xdpi, ydpi=ydpi, errors=self.log.error,
                             debug=self.log.debug,
                             compress=not opts.uncompressed_pdf)

        self.page.setViewportSize(QSize(self.doc.width(), self.doc.height()))
        self.render_queue = items
        self.total_items = len(items)

        # TODO: Test margins
        mt, mb = map(self.doc.to_px, (opts.margin_top, opts.margin_bottom))
        ms = self.doc.to_px(margin_side, vertical=False)
        self.margin_top, self.margin_size, self.margin_bottom = map(
            lambda x: int(floor(x)), (mt, ms, mb))

        self.painter = QPainter(self.doc)
        self.doc.set_metadata(title=pdf_metadata.title,
                              author=pdf_metadata.author,
                              tags=pdf_metadata.tags)
        self.painter.save()
        try:
            if self.cover_data is not None:
                p = QPixmap()
                p.loadFromData(self.cover_data)
                if not p.isNull():
                    draw_image_page(QRect(0, 0, self.doc.width(), self.doc.height()),
                            self.painter, p,
                            preserve_aspect_ratio=self.opts.preserve_cover_aspect_ratio)
                    self.doc.end_page()
        finally:
            self.painter.restore()

        QTimer.singleShot(0, self.render_book)
        self.loop.exec_()

        # TODO: Outline and links
        self.painter.end()

        if self.doc.errors_occurred:
            raise Exception('PDF Output failed, see log for details')
Example #29
def sort_q_values(header_val):
    'Get sorted items from an HTTP header of type: a;q=0.5, b;q=0.7...'
    if not header_val:
        return []
    def item(x):
        e, r = x.partition(';')[::2]
        p, v = r.partition('=')[::2]
        q = 1.0
        if p == 'q' and v:
            try:
                q = max(0.0, min(1.0, float(v.strip())))
            except Exception:
                pass
        return e.strip(), q
    return tuple(map(itemgetter(0), sorted(map(item, parse_http_list(header_val)), key=itemgetter(1), reverse=True)))
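
A small hedged check, assuming parse_http_list and itemgetter are imported as in the surrounding module (urllib.request.parse_http_list and operator.itemgetter, respectively); items without an explicit q default to 1.0 and sort first:

assert sort_q_values('gzip;q=0.5, br;q=0.9, identity') == ('identity', 'br', 'gzip')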
Example #30
 def refresh_ids(self, ids):
     self.cache.clear_caches(book_ids=ids)
     try:
         return list(map(self.id_to_index, ids))
     except ValueError:
         pass
     return None
Example #31
def ddg_search(terms,
               site=None,
               br=None,
               log=prints,
               safe_search=False,
               dump_raw=None,
               timeout=60):
    # https://duck.co/help/results/syntax
    terms = map(ddg_term, terms)
    terms = [quote_term(t) for t in terms]
    if site is not None:
        terms.append(quote_term(('site:' + site)))
    q = '+'.join(terms)
    url = 'https://duckduckgo.com/html/?q={q}&kp={kp}'.format(
        q=q, kp=1 if safe_search else -1)
    log('Making ddg query: ' + url)
    br = br or browser()
    root = query(br, url, 'ddg', dump_raw, timeout=timeout)
    ans = []
    for a in root.xpath(
            '//*[@class="results"]//*[@class="result__title"]/a[@href and @class="result__a"]'
    ):
        ans.append(Result(ddg_href(a.get('href')), tostring(a), None))
    return ans, url
Example #32
def find_identical_books(mi, data):
    author_map, aid_map, title_map, lang_map = data
    found_books = None
    for a in mi.authors:
        author_ids = author_map.get(icu_lower(a))
        if author_ids is None:
            return set()
        books_by_author = {
            book_id
            for aid in author_ids for book_id in aid_map.get(aid, ())
        }
        if found_books is None:
            found_books = books_by_author
        else:
            found_books &= books_by_author
        if not found_books:
            return set()

    ans = set()
    titleq = fuzzy_title(mi.title)
    for book_id in found_books:
        title = title_map.get(book_id, '')
        if fuzzy_title(title) == titleq:
            ans.add(book_id)

    langq = tuple(
        filter(lambda x: x and x != 'und',
               map(canonicalize_lang, mi.languages or ())))
    if not langq:
        return ans

    def lang_matches(book_id):
        book_langq = lang_map.get(book_id)
        return not book_langq or langq == book_langq

    return {book_id for book_id in ans if lang_matches(book_id)}
Example #33
def _in(d):
    ret = []
    neg = False
    for i in d:
        if i[0] == 'range':
            subs = map(chr, range(i[1][0], i[1][1] + 1))
            if neg:
                for char in subs:
                    try:
                        ret.remove(char)
                    except ValueError:
                        pass
            else:
                ret.extend(subs)
        elif i[0] == 'literal':
            if neg:
                try:
                    ret.remove(chr(i[1]))
                except ValueError:
                    pass
            else:
                ret.append(chr(i[1]))
        elif i[0] == 'category':
            subs = CATEGORIES.get(i[1], [''])
            if neg:
                for char in subs:
                    try:
                        ret.remove(char)
                    except ValueError:
                        pass
            else:
                ret.extend(subs)
        elif i[0] == 'negate':
            ret = list(CATEGORIES['category_any'])
            neg = True
    return ret
Example #34
def _in(d):
    ret = []
    neg = False
    for i in d:
        if i[0] == sre_parse.RANGE:
            subs = map(chr, range(i[1][0], i[1][1] + 1))
            if neg:
                for char in subs:
                    try:
                        ret.remove(char)
                    except ValueError:
                        pass
            else:
                ret.extend(subs)
        elif i[0] == sre_parse.LITERAL:
            if neg:
                try:
                    ret.remove(chr(i[1]))
                except ValueError:
                    pass
            else:
                ret.append(chr(i[1]))
        elif i[0] == sre_parse.CATEGORY:
            subs = CATEGORIES.get(i[1], [''])
            if neg:
                for char in subs:
                    try:
                        ret.remove(char)
                    except ValueError:
                        pass
            else:
                ret.extend(subs)
        elif i[0] == sre_parse.NEGATE:
            ret = list(CATEGORIES['category_any'])
            neg = True
    return ret
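
A hedged sketch of the expected input, using the standard library parser that produces these token pairs (the CATEGORIES mapping is only consulted for category and negate tokens, so this call avoids it):

import sre_parse

op, payload = sre_parse.parse('[a-cx]')[0]   # op is sre_parse.IN
assert _in(payload) == ['a', 'b', 'c', 'x']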
Example #35
    def write_widths(self, objects):
        glyphs = sorted(self.used_glyphs | {0})
        widths = {
            g: self.metrics.pdf_scale(w)
            for g, w in zip(glyphs, self.metrics.glyph_widths(glyphs))
        }
        counter = Counter()
        for g, w in widths.items():
            counter[w] += 1
        most_common = counter.most_common(1)[0][0]
        self.descendant_font['DW'] = most_common
        widths = {g: w for g, w in widths.items() if w != most_common}

        groups = Array()
        for k, g in groupby(enumerate(widths.keys()),
                            lambda i_x: i_x[0] - i_x[1]):
            group = list(map(itemgetter(1), g))
            gwidths = [widths[g] for g in group]
            if len(set(gwidths)) == 1 and len(group) > 1:
                w = (min(group), max(group), gwidths[0])
            else:
                w = (min(group), Array(gwidths))
            groups.extend(w)
        self.descendant_font['W'] = objects.add(groups)
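
The groupby key above is the classic consecutive-run recipe: index minus value is constant within a run of consecutive integers. A standalone demo with made-up glyph ids:

from itertools import groupby
from operator import itemgetter

glyphs = [1, 2, 3, 7, 8, 10]
runs = [list(map(itemgetter(1), g))
        for _, g in groupby(enumerate(glyphs), lambda i_x: i_x[0] - i_x[1])]
assert runs == [[1, 2, 3], [7, 8], [10]]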
Example #36
def main():
    t = int(input())
    for _ in range(t):
        n = int(input())
        a = list(map(int, input().split()))

        # Collect the positions of the ones, then sum the gaps between
        # consecutive ones.
        ones = [i for i in range(n) if a[i] == 1]
        ans = 0
        for prev, cur in zip(ones, ones[1:]):
            ans += cur - prev - 1
        print(ans)
Example #37
 def select_rows(self,
                 identifiers,
                 using_ids=True,
                 change_current=True,
                 scroll=True):
     '''
     Select rows identified by identifiers. identifiers can be a set of ids,
     row numbers or QModelIndexes.
     '''
     rows = {x.row() if hasattr(x, 'row') else x for x in identifiers}
     if using_ids:
         rows = set()
         identifiers = set(identifiers)
         m = self.model()
         for row in range(m.rowCount(QModelIndex())):
             if m.id(row) in identifiers:
                 rows.add(row)
     rows = sorted(rows)
     if rows:
         row = rows[0]
         if change_current:
             self.set_current_row(row, select=False)
         if scroll:
             self.scroll_to_row(row)
     sm = self.selectionModel()
     sel = QItemSelection()
     m = self.model()
     max_col = m.columnCount(QModelIndex()) - 1
     # Create a range based selector for each set of contiguous rows
     # as supplying selectors for each individual row causes very poor
     # performance if a large number of rows has to be selected.
     for k, g in itertools.groupby(enumerate(rows), lambda i_x: i_x[0] - i_x[1]):
         group = list(map(operator.itemgetter(1), g))
         sel.merge(
             QItemSelection(m.index(min(group), 0),
                            m.index(max(group), max_col)), sm.Select)
Example #38
 def parse_part():
     line = buf.readline()
     if not line:
         raise ValueError('Premature end of message')
     if not line.startswith(b'--' + sep):
         raise ValueError('Malformed start of multipart message: %s' % reprlib.repr(line))
     if line.endswith(b'--'):
         return None
     headers = read_headers(buf.readline)
     cr = headers.get('Content-Range')
     if not cr:
         raise ValueError('Missing Content-Range header in sub-part')
     if not cr.startswith('bytes '):
         raise ValueError('Malformed Content-Range header in sub-part, no prefix')
     try:
         start, stop = map(lambda x: int(x.strip()), cr.partition(' ')[-1].partition('/')[0].partition('-')[::2])
     except Exception:
         raise ValueError('Malformed Content-Range header in sub-part, failed to parse byte range')
     content_length = stop - start + 1
     ret = buf.read(content_length)
     if len(ret) != content_length:
         raise ValueError('Malformed sub-part, length of body not equal to length specified in Content-Range')
     buf.readline()
     return (start, ret)
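
The byte-range extraction above is dense; a hypothetical standalone check of just that expression on a sample header value:

cr = 'bytes 0-99/1234'
start, stop = map(lambda x: int(x.strip()),
                  cr.partition(' ')[-1].partition('/')[0].partition('-')[::2])
assert (start, stop) == (0, 99)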
Example #39
def extract_Pframe_all(tI, hIlm, listmodes):
    # Build precessing-frame waveform - uses GWFrames
    T_data = tI
    LM_data = np.array(list(map(list, listmodes)), dtype=np.int32)
    mode_data = np.array([hIlm[lm] for lm in listmodes])
    W_v3 = GWFrames.Waveform(T_data, LM_data, mode_data)
    W_v3.SetFrameType(1)
    W_v3.SetDataType(1)
    W_v3.TransformToCoprecessingFrame()

    # Time series for the dominant radiation vector
    quat = W_v3.Frame()
    logquat = np.array([q.log() for q in quat])
    quatseries = np.array([np.array([q[0], q[1], q[2], q[3]]) for q in quat])
    logquatseries = np.array([np.array([q[0], q[1], q[2], q[3]]) for q in logquat])
    Vfseries = np.array([Vf_from_quat(q) for q in quat])
    eulerVfseries = np.array([euler_from_quat(q) for q in quat])
    alphaVfseries = np.unwrap(eulerVfseries[:, 0])
    betaVfseries = eulerVfseries[:, 1]
    gammaVfseries = np.unwrap(eulerVfseries[:, 2])
    eulerVfseries = np.array([alphaVfseries, betaVfseries, gammaVfseries]).T
    return [eulerVfseries, quatseries, logquatseries, Vfseries]
Example #40
def hist2d(x,
           y,
           bins=20,
           range=None,
           weights=None,
           levels=None,
           smooth=None,
           ax=None,
           color=None,
           plot_datapoints=True,
           plot_density=True,
           plot_contours=True,
           no_fill_contours=False,
           fill_contours=False,
           contour_kwargs=None,
           contourf_kwargs=None,
           data_kwargs=None,
           **kwargs):
    """
    Plot a 2-D histogram of samples.

    Parameters
    ----------
    x : array_like[nsamples,]
       The samples.

    y : array_like[nsamples,]
       The samples.

    levels : array_like
        The contour levels to draw.

    ax : matplotlib.Axes
        An axes instance on which to add the 2-D histogram.

    plot_datapoints : bool
        Draw the individual data points.

    plot_density : bool
        Draw the density colormap.

    plot_contours : bool
        Draw the contours.

    no_fill_contours : bool
        Add no filling at all to the contours (unlike setting
        ``fill_contours=False``, which still adds a white fill at the densest
        points).

    fill_contours : bool
        Fill the contours.

    contour_kwargs : dict
        Any additional keyword arguments to pass to the `contour` method.

    contourf_kwargs : dict
        Any additional keyword arguments to pass to the `contourf` method.

    data_kwargs : dict
        Any additional keyword arguments to pass to the `plot` method when
        adding the individual data points.

    """
    if ax is None:
        ax = plt.gca()

    # Set the default range based on the data range if not provided.
    if range is None:
        if "extent" in kwargs:
            logging.warning("Deprecated keyword argument 'extent'. "
                            "Use 'range' instead.")
            range = kwargs["extent"]
        else:
            range = [[x.min(), x.max()], [y.min(), y.max()]]

    # Set up the default plotting arguments.
    if color is None:
        color = "k"

    # Choose the default "sigma" contour levels.
    if levels is None:
        levels = 1.0 - np.exp(-0.5 * np.arange(0.5, 2.1, 0.5)**2)

    # This is the color map for the density plot, over-plotted to indicate the
    # density of the points near the center.
    density_cmap = LinearSegmentedColormap.from_list("density_cmap",
                                                     [color, (1, 1, 1, 0)])

    # This color map is used to hide the points at the high density areas.
    white_cmap = LinearSegmentedColormap.from_list("white_cmap", [(1, 1, 1),
                                                                  (1, 1, 1)],
                                                   N=2)

    # This "color map" is the list of colors for the contour levels if the
    # contours are filled.
    rgba_color = colorConverter.to_rgba(color)
    contour_cmap = [list(rgba_color) for l in levels] + [rgba_color]
    for i, l in enumerate(levels):
        contour_cmap[i][-1] *= float(i) / (len(levels) + 1)

    # We'll make the 2D histogram to directly estimate the density.
    try:
        H, X, Y = np.histogram2d(x.flatten(),
                                 y.flatten(),
                                 bins=bins,
                                 range=list(map(np.sort, range)),
                                 weights=weights)
    except ValueError:
        raise ValueError("It looks like at least one of your sample columns "
                         "have no dynamic range. You could try using the "
                         "'range' argument.")

    if smooth is not None:
        if gaussian_filter is None:
            raise ImportError("Please install scipy for smoothing")
        H = gaussian_filter(H, smooth)

    # Compute the density levels.
    Hflat = H.flatten()
    inds = np.argsort(Hflat)[::-1]
    Hflat = Hflat[inds]
    sm = np.cumsum(Hflat)
    sm /= sm[-1]
    V = np.empty(len(levels))
    for i, v0 in enumerate(levels):
        try:
            V[i] = Hflat[sm <= v0][-1]
        except IndexError:
            V[i] = Hflat[0]
    V.sort()
    m = np.diff(V) == 0
    if np.any(m):
        logging.warning("Too few points to create valid contours")
    while np.any(m):
        V[np.where(m)[0][0]] *= 1.0 - 1e-4
        m = np.diff(V) == 0
    V.sort()

    # Compute the bin centers.
    X1, Y1 = 0.5 * (X[1:] + X[:-1]), 0.5 * (Y[1:] + Y[:-1])

    # Extend the array for the sake of the contours at the plot edges.
    H2 = H.min() + np.zeros((H.shape[0] + 4, H.shape[1] + 4))
    H2[2:-2, 2:-2] = H
    H2[2:-2, 1] = H[:, 0]
    H2[2:-2, -2] = H[:, -1]
    H2[1, 2:-2] = H[0]
    H2[-2, 2:-2] = H[-1]
    H2[1, 1] = H[0, 0]
    H2[1, -2] = H[0, -1]
    H2[-2, 1] = H[-1, 0]
    H2[-2, -2] = H[-1, -1]
    X2 = np.concatenate([
        X1[0] + np.array([-2, -1]) * np.diff(X1[:2]),
        X1,
        X1[-1] + np.array([1, 2]) * np.diff(X1[-2:]),
    ])
    Y2 = np.concatenate([
        Y1[0] + np.array([-2, -1]) * np.diff(Y1[:2]),
        Y1,
        Y1[-1] + np.array([1, 2]) * np.diff(Y1[-2:]),
    ])

    if plot_datapoints:
        if data_kwargs is None:
            data_kwargs = dict()
        data_kwargs["color"] = data_kwargs.get("color", color)
        data_kwargs["ms"] = data_kwargs.get("ms", 2.0)
        data_kwargs["mec"] = data_kwargs.get("mec", "none")
        data_kwargs["alpha"] = data_kwargs.get("alpha", 0.1)
        ax.plot(x, y, "o", zorder=-1, rasterized=True, **data_kwargs)

    # Plot the base fill to hide the densest data points.
    if (plot_contours or plot_density) and not no_fill_contours:
        ax.contourf(X2,
                    Y2,
                    H2.T, [V.min(), H.max()],
                    cmap=white_cmap,
                    antialiased=False)

    if plot_contours and fill_contours:
        if contourf_kwargs is None:
            contourf_kwargs = dict()
        contourf_kwargs["colors"] = contourf_kwargs.get("colors", contour_cmap)
        contourf_kwargs["antialiased"] = contourf_kwargs.get(
            "antialiased", False)
        ax.contourf(X2, Y2, H2.T,
                    np.concatenate([[0], V, [H.max() * (1 + 1e-4)]]),
                    **contourf_kwargs)

    # Plot the density map. This can't be plotted at the same time as the
    # contour fills.
    elif plot_density:
        ax.pcolor(X, Y, H.max() - H.T, cmap=density_cmap)

    # Plot the contour edge colors.
    if plot_contours:
        if contour_kwargs is None:
            contour_kwargs = dict()
        contour_kwargs["colors"] = contour_kwargs.get("colors", color)
        ax.contour(X2, Y2, H2.T, V, **contour_kwargs)

    ax.set_xlim(range[0])
    ax.set_ylim(range[1])
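
A hedged usage sketch, assuming the module-level imports this function relies on (numpy as np, matplotlib.pyplot as plt, LinearSegmentedColormap, colorConverter) are present:

rng = np.random.default_rng(0)
x, y = rng.normal(size=(2, 1000))
hist2d(x, y, bins=30)
plt.show()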
Example #41
 def process_node(node):
     name = node['dest']
     if name and node['id'] not in seen_map[name]:
         ans[name].append({'id': node['id'], 'frag': node['frag']})
         seen_map[name].add(node['id'])
     for child in node['children']:
         process_node(child)
Example #42
 def display(self):
     """Returns a :func:`list` of screen lines as unicode strings."""
     return [
         "".join(map(operator.attrgetter("data"), line))
         for line in self.buffer
     ]
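
A hypothetical standalone check of the attrgetter join used above, with a minimal stand-in for the screen buffer's character cells:

from collections import namedtuple
from operator import attrgetter

Char = namedtuple('Char', 'data')
line = [Char('h'), Char('i')]
assert ''.join(map(attrgetter('data'), line)) == 'hi'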
Example #43
def corner(xs,
           bins=20,
           params_range=None,
           weights=None,
           cov=None,
           color="k",
           smooth=None,
           smooth1d=None,
           labels=None,
           label_kwargs=None,
           show_histograms=True,
           show_titles=False,
           title_fmt=".2f",
           title_kwargs=None,
           truths=None,
           add_truths=None,
           truth_color="#4682b4",
           add_truth_colors=None,
           cov_color=None,
           scale_hist=False,
           quantiles=None,
           show_quantiles=True,
           verbose=False,
           figaxes=None,
           max_n_ticks=5,
           top_ticks=False,
           use_math_text=False,
           hist_kwargs=None,
           **hist2d_kwargs):
    """
    Make a *sick* corner plot showing the projections of a data set in a
    multi-dimensional space. kwargs are passed to hist2d() or used for
    `matplotlib` styling.

    Parameters
    ----------
    xs : array_like[nsamples, ndim]
        The samples. This should be a 1- or 2-dimensional array. For a 1-D
        array this results in a simple histogram. For a 2-D array, the zeroth
        axis is the list of samples and the next axis are the dimensions of
        the space.

    bins : int or array_like[ndim,]
        The number of bins to use in histograms, either as a fixed value for
        all dimensions, or as a list of integers for each dimension.

    weights : array_like[nsamples,]
        The weight of each sample. If `None` (default), samples are given
        equal weight.

    color : str
        A ``matplotlib`` style color for all histograms.

    smooth, smooth1d : float
       The standard deviation for Gaussian kernel passed to
       `scipy.ndimage.gaussian_filter` to smooth the 2-D and 1-D histograms
       respectively. If `None` (default), no smoothing is applied.

    labels : iterable (ndim,)
        A list of names for the dimensions. If ``xs`` is a
        ``pandas.DataFrame``, labels will default to column names.

    label_kwargs : dict
        Any extra keyword arguments to send to the `set_xlabel` and
        `set_ylabel` methods.

    show_histograms : bool
        Displays 1-D histogram showing marginalized distributions and quantiles.

    show_titles : bool
        Displays a title above each 1-D histogram showing the 0.5 quantile
        with the upper and lower errors supplied by the quantiles argument.

    title_fmt : string
        The format string for the quantiles given in titles. If you explicitly
        set ``show_titles=True`` and ``title_fmt=None``, the labels will be
        shown as the titles. (default: ``.2f``)

    title_kwargs : dict
        Any extra keyword arguments to send to the `set_title` command.

    params_range : iterable (ndim,)
        A list where each element is either a length 2 tuple containing
        lower and upper bounds or a float in range (0., 1.)
        giving the fraction of samples to include in bounds, e.g.,
        [(0.,10.), (1.,5), 0.999, etc.].
        If a fraction, the bounds are chosen to be equal-tailed.

    truths : iterable (ndim,)
        A list of reference values to indicate on the plots.  Individual
        values can be omitted by using ``None``.

    truth_color : str
        A ``matplotlib`` style color for the ``truths`` markers.

    scale_hist : bool
        Should the 1-D histograms be scaled in such a way that the zero line
        is visible?

    quantiles : iterable
        A list of fractional quantiles to show on the 1-D histograms as
        vertical dashed lines.

    verbose : bool
        If true, print the values of the computed quantiles.

    plot_contours : bool
        Draw contours for dense regions of the plot.

    use_math_text : bool
        If true, then axis tick labels for very large or small exponents will
        be displayed as powers of 10 rather than using `e`.

    max_n_ticks : int
        Maximum number of ticks to try to use.

    top_ticks : bool
        If true, label the top ticks of each axis

    figaxes : (matplotlib.Figure, array of matplotlib.Axes)
        Overplot onto the provided figure and axes.

    hist_kwargs : dict
        Any extra keyword arguments to send to the 1-D histogram plots.

    **hist2d_kwargs
        Any remaining keyword arguments are sent to `corner.hist2d` to generate
        the 2-D histogram plots.

    """
    if quantiles is None:
        quantiles = []
    if title_kwargs is None:
        title_kwargs = dict()
    if label_kwargs is None:
        label_kwargs = dict()

    # Try filling in labels from pandas.DataFrame columns.
    if labels is None:
        try:
            labels = xs.columns
        except AttributeError:
            pass

    # Deal with 1D sample lists.
    xs = np.atleast_1d(xs)
    if len(xs.shape) == 1:
        xs = np.atleast_2d(xs)
    else:
        assert len(xs.shape) == 2, "The input sample array must be 1- or 2-D."
        xs = xs.T
    assert xs.shape[0] <= xs.shape[1], "I don't believe that you want more " \
                                       "dimensions than samples!"

    # Parse the weight array.
    if weights is not None:
        weights = np.asarray(weights)
        if weights.ndim != 1:
            raise ValueError("Weights must be 1-D")
        if xs.shape[1] != weights.shape[0]:
            raise ValueError("Lengths of weights must match number of samples")

    # Parse the parameter ranges.
    if params_range is None:
        if "extents" in hist2d_kwargs:
            logging.warning("Deprecated keyword argument 'extents'. "
                            "Use 'params_range' instead.")
            params_range = hist2d_kwargs.pop("extents")
        else:
            params_range = [[x.min(), x.max()] for x in xs]
            # Check for parameters that never change.
            m = np.array([e[0] == e[1] for e in params_range], dtype=bool)
            if np.any(m):
                raise ValueError(
                    ("It looks like the parameter(s) in "
                     "column(s) {0} have no dynamic range. "
                     "Please provide a `params_range` argument.").format(
                         ", ".join(map("{0}".format,
                                       np.arange(len(m))[m]))))

    else:
        # If any of the extents are percentiles, convert them to ranges.
        # Also make sure it's a normal list.
        params_range = list(params_range)
        for i, pr in enumerate(params_range):
            if pr is None:
                params_range[i] = [xs[i].min(), xs[i].max()]
            else:
                try:
                    emin, emax = params_range[i]
                except TypeError:
                    q = [
                        0.5 - 0.5 * params_range[i],
                        0.5 + 0.5 * params_range[i]
                    ]
                    params_range[i] = quantile(xs[i], q, weights=weights)
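                    # e.g. a fractional entry of 0.95 yields q = [0.025, 0.975],
                    # i.e. the central 95% of the (weighted) samples.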

    if len(params_range) != xs.shape[0]:
        raise ValueError("Dimension mismatch between samples and params_range")

    # Parse the bin specifications.
    try:
        bins = [int(bins) for _ in params_range]
    except TypeError:
        if len(bins) != len(params_range):
            raise ValueError(
                "Dimension mismatch between bins and params_range")

    # Some magic numbers for pretty axis layout.
    K = len(xs)
    if show_histograms:  # total number of plots: grid nplot*nplot
        nplot = K
    else:
        nplot = K - 1
    factor = 2.0  # size of one side of one panel
    lbdim = 0.5 * factor  # size of left/bottom margin
    trdim = 0.2 * factor  # size of top/right margin
    whspace = 0.05  # w/hspace size
    plotdim = factor * nplot + factor * (nplot - 1.) * whspace
    dim = lbdim + plotdim + trdim
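    # e.g. K = 3 with histograms: nplot = 3, plotdim = 2*3 + 2*2*0.05 = 6.2,
    # so dim = 1.0 + 6.2 + 0.4 = 7.6 (inches per side).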

    # Create a new figure if one wasn't provided.
    if figaxes is None:
        fig, axes = plt.subplots(nplot, nplot, figsize=(dim, dim))
    else:
        fig, axes = figaxes

    # The idea is to pass in a covariance matrix; otherwise one could concoct
    # something from the 1-sigma ranges.

    # Format the figure.
    lb = lbdim / dim
    tr = (lbdim + plotdim) / dim
    fig.subplots_adjust(left=lb,
                        bottom=lb,
                        right=tr,
                        top=tr,
                        wspace=whspace,
                        hspace=whspace)

    # If asked to show the 1D histograms
    if show_histograms:
        # Set up the default histogram keywords.
        if hist_kwargs is None:
            hist_kwargs = dict()
        hist_kwargs["color"] = hist_kwargs.get("color", color)
        if smooth1d is None:
            hist_kwargs["histtype"] = hist_kwargs.get("histtype", "step")
        for i, x in enumerate(xs):
            # Deal with masked arrays.
            if hasattr(x, "compressed"):
                x = x.compressed()

            if np.shape(xs)[0] == 1:
                ax = axes
            else:
                ax = axes[i, i]
            # Plot the histograms.
            if smooth1d is None:
                n, _, _ = ax.hist(x,
                                  bins=bins[i],
                                  weights=weights,
                                  density=True,
                                  range=np.sort(params_range[i]),
                                  **hist_kwargs)
            else:
                if gaussian_filter is None:
                    raise ImportError("Please install scipy for smoothing")
                n, b = np.histogram(x,
                                    bins=bins[i],
                                    weights=weights,
                                    density=True,
                                    range=np.sort(params_range[i]))
                n = gaussian_filter(n, smooth1d)
                x0 = np.array(list(zip(b[:-1], b[1:]))).flatten()
                y0 = np.array(list(zip(n, n))).flatten()
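                # x0/y0 repeat each bin edge/value so that ax.plot traces the
                # smoothed histogram as a step outline.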
                ax.plot(x0, y0, **hist_kwargs)

            if truths is not None and truths[i] is not None:
                ax.axvline(truths[i], color=truth_color)

            if add_truths is not None:
                for add_truth, add_truth_color in zip(add_truths,
                                                      add_truth_colors):
                    if add_truth[i] is not None:
                        ax.axvline(add_truth[i], color=add_truth_color)

            # Plot quantiles if wanted.
            if show_quantiles:
                if len(quantiles) > 0:
                    qvalues = quantile(x, quantiles, weights=weights)
                    for q in qvalues:
                        ax.axvline(q, ls="dashed", color=color)

                    if verbose:
                        print("Quantiles:")
                        for item in zip(quantiles, qvalues):
                            print(item)

            if show_titles:
                title = None
                if title_fmt is not None:
                    # Compute the quantiles for the title. This might redo
                    # unneeded computation but who cares.
                    q_16, q_50, q_84 = quantile(x, [0.16, 0.5, 0.84],
                                                weights=weights)
                    q_m, q_p = q_50 - q_16, q_84 - q_50

                    # Format the quantile display.
                    fmt = "{{0:{0}}}".format(title_fmt).format
                    title = r"${{{0}}}_{{-{1}}}^{{+{2}}}$"
                    title = title.format(fmt(q_50), fmt(q_m), fmt(q_p))
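                    # e.g. with the default ".2f" this renders as
                    # "${0.50}_{-0.12}^{+0.34}$" before the label is prepended.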

                    # Add in the column name if it's given.
                    if labels is not None:
                        title = "{0} = {1}".format(labels[i], title)

                elif labels is not None:
                    title = "{0}".format(labels[i])

                if title is not None:
                    ax.set_title(title, **title_kwargs)

            # Set up the axes.
            ax.set_xlim(params_range[i])
            if scale_hist:
                maxn = np.max(n)
                ax.set_ylim(-0.1 * maxn, 1.1 * maxn)
            else:
                ax.set_ylim(0, 1.1 * np.max(n))
            ax.set_yticklabels([])
            ax.xaxis.set_major_locator(MaxNLocator(max_n_ticks, prune="lower"))

            if i < K - 1:
                if top_ticks:
                    ax.xaxis.set_ticks_position("top")
                    [l.set_rotation(45) for l in ax.get_xticklabels()]
                else:
                    ax.set_xticklabels([])
            else:
                [l.set_rotation(45) for l in ax.get_xticklabels()]
                if labels is not None:
                    ax.set_xlabel(labels[i], **label_kwargs)
                    ax.xaxis.set_label_coords(0.5, -0.3)

                # use MathText for axes ticks
                ax.xaxis.set_major_formatter(
                    ScalarFormatter(useMathText=use_math_text))

            for j, y in enumerate(xs):
                if np.shape(xs)[0] == 1:
                    ax = axes
                else:
                    ax = axes[i, j]
                if j > i:
                    ax.set_frame_on(False)
                    ax.set_xticks([])
                    ax.set_yticks([])
                    continue
                elif j == i:
                    continue

                # Deal with masked arrays.
                if hasattr(y, "compressed"):
                    y = y.compressed()

                hist2d(y,
                       x,
                       ax=ax,
                       range=[params_range[j], params_range[i]],
                       weights=weights,
                       color=color,
                       smooth=smooth,
                       bins=[bins[j], bins[i]],
                       **hist2d_kwargs)

                # Add an error ellipse based on the provided covariance matrix
                if cov is not None:
                    # Center the ellipse on the true parameter values.
                    if truths is None:
                        raise ValueError(
                            'Error: truths is None but is required to plot Fisher ellipses.'
                        )
                    cx = truths[j]  # note: individual entries may still be None
                    cy = truths[i]
                    N_thetas = 60
                    dtheta = 2.0 * math.pi / (N_thetas - 1)
                    thetas = np.arange(0, (2.0 * math.pi + dtheta), dtheta)
                    ang = -math.pi / 4.
                    root = cov[i, j] / math.sqrt(cov[i, i] * cov[j, j])
                    root = max(-1.0, min(1.0, root))  # clamp correlation to [-1, 1]
                    acoeff = math.sqrt(1 - root)
                    bcoeff = math.sqrt(1 + root)
                    xcoeff = math.sqrt(cov[j, j])
                    ycoeff = math.sqrt(cov[i, i])
                    if "levels" in hist2d_kwargs:
                        levels = hist2d_kwargs["levels"]
                    else:
                        levels == 1.0 - np.exp(
                            -0.5 * np.arange(0.5, 2.1, 0.5)**2)
                    for xlev in levels:
                        # Convert the credibility limit to a "sigma" limit for
                        # a 2-D normal; this becomes a scale factor for the
                        # error ellipse:
                        # y = 1 - exp(-x^2/2)  =>  x^2 = -2*log(1 - y)
                        lev_fac = math.sqrt(-2 * math.log(1 - xlev))
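                        # e.g. the 1-sigma 2-D level xlev = 1 - exp(-0.5) ~ 0.3935
                        # gives lev_fac = sqrt(-2*log(0.6065...)) = 1.0.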
                        elxs = [
                            cx + lev_fac * xcoeff *
                            (acoeff * math.cos(th) * math.cos(ang) -
                             bcoeff * math.sin(th) * math.sin(ang))
                            for th in thetas
                        ]
                        elys = [
                            cy + lev_fac * ycoeff *
                            (acoeff * math.cos(th) * math.sin(ang) +
                             bcoeff * math.sin(th) * math.cos(ang))
                            for th in thetas
                        ]
                        if cov_color is None:
                            cov_color = 'k'
                        ax.plot(elxs, elys, color=cov_color)

                if truths is not None:
                    if truths[i] is not None and truths[j] is not None:
                        ax.plot(truths[j], truths[i], "s", color=truth_color)
                    if truths[j] is not None:
                        ax.axvline(truths[j], color=truth_color)
                    if truths[i] is not None:
                        ax.axhline(truths[i], color=truth_color)

                if add_truths is not None:
                    for add_truth, add_truth_color in zip(
                            add_truths, add_truth_colors):
                        if add_truth[i] is not None and add_truth[
                                j] is not None:
                            ax.plot(add_truth[j],
                                    add_truth[i],
                                    "s",
                                    color=add_truth_color)
                        if add_truth[j] is not None:
                            ax.axvline(add_truth[j], color=add_truth_color)
                        if add_truth[i] is not None:
                            ax.axhline(add_truth[i], color=add_truth_color)

                ax.xaxis.set_major_locator(
                    MaxNLocator(max_n_ticks, prune="lower"))
                ax.yaxis.set_major_locator(
                    MaxNLocator(max_n_ticks, prune="lower"))

                if i < K - 1:
                    ax.set_xticklabels([])
                else:
                    [l.set_rotation(45) for l in ax.get_xticklabels()]
                    if labels is not None:
                        ax.set_xlabel(labels[j], **label_kwargs)
                        ax.xaxis.set_label_coords(0.5, -0.3)

                    # use MathText for axes ticks
                    ax.xaxis.set_major_formatter(
                        ScalarFormatter(useMathText=use_math_text))

                if j > 0:
                    ax.set_yticklabels([])
                else:
                    [l.set_rotation(45) for l in ax.get_yticklabels()]
                    if labels is not None:
                        ax.set_ylabel(labels[i], **label_kwargs)
                        ax.yaxis.set_label_coords(-0.3, 0.5)

                    # use MathText for axes ticks
                    ax.yaxis.set_major_formatter(
                        ScalarFormatter(useMathText=use_math_text))

    # If the 1-D histograms were not requested, plot only the 2-D panels.
    else:
        for i, x in enumerate(xs):
            if i == 0:
                continue
            # Deal with masked arrays.
            if hasattr(x, "compressed"):
                x = x.compressed()

            for j, y in enumerate(xs):
                if j == K - 1:
                    continue
                if np.shape(xs)[0] == 2:
                    ax = axes
                else:
                    ax = axes[i - 1, j]
                if j > i - 1:
                    ax.set_frame_on(False)
                    ax.set_xticks([])
                    ax.set_yticks([])
                    continue
                elif j == i:
                    continue

                # Deal with masked arrays.
                if hasattr(y, "compressed"):
                    y = y.compressed()

                hist2d(y,
                       x,
                       ax=ax,
                       range=[params_range[j], params_range[i]],
                       weights=weights,
                       color=color,
                       smooth=smooth,
                       bins=[bins[j], bins[i]],
                       **hist2d_kwargs)

                # Add an error ellipse based on the provided covariance matrix
                if cov is not None:
                    # Center the ellipse on the true parameter values.
                    if truths is None:
                        raise ValueError(
                            'Error: truths is None but is required to plot Fisher ellipses.'
                        )
                    cx = truths[j]  # note: individual entries may still be None
                    cy = truths[i]
                    N_thetas = 60
                    dtheta = 2.0 * math.pi / (N_thetas - 1)
                    thetas = np.arange(0, (2.0 * math.pi + dtheta), dtheta)
                    ang = -math.pi / 4.
                    root = cov[i, j] / math.sqrt(cov[i, i] * cov[j, j])
                    root = max(-1.0, min(1.0, root))  # clamp correlation to [-1, 1]
                    acoeff = math.sqrt(1 - root)
                    bcoeff = math.sqrt(1 + root)
                    xcoeff = math.sqrt(cov[j, j])
                    ycoeff = math.sqrt(cov[i, i])
                    if "levels" in hist2d_kwargs:
                        levels = hist2d_kwargs["levels"]
                    else:
                        levels == 1.0 - np.exp(
                            -0.5 * np.arange(0.5, 2.1, 0.5)**2)
                    for xlev in levels:
                        # Convert the credibility limit to a "sigma" limit for
                        # a 2-D normal; this becomes a scale factor for the
                        # error ellipse:
                        # y = 1 - exp(-x^2/2)  =>  x^2 = -2*log(1 - y)
                        lev_fac = math.sqrt(-2 * math.log(1 - xlev))
                        elxs = [
                            cx + lev_fac * xcoeff *
                            (acoeff * math.cos(th) * math.cos(ang) -
                             bcoeff * math.sin(th) * math.sin(ang))
                            for th in thetas
                        ]
                        elys = [
                            cy + lev_fac * ycoeff *
                            (acoeff * math.cos(th) * math.sin(ang) +
                             bcoeff * math.sin(th) * math.cos(ang))
                            for th in thetas
                        ]
                        if cov_color is None:
                            cov_color = 'k'
                        ax.plot(elxs, elys, color=cov_color)

                if truths is not None:
                    if truths[i] is not None and truths[j] is not None:
                        ax.plot(truths[j], truths[i], "s", color=truth_color)
                    if truths[j] is not None:
                        ax.axvline(truths[j], color=truth_color)
                    if truths[i] is not None:
                        ax.axhline(truths[i], color=truth_color)

                if add_truths is not None:
                    for add_truth, add_truth_color in zip(
                            add_truths, add_truth_colors):
                        if add_truth[i] is not None and add_truth[
                                j] is not None:
                            ax.plot(add_truth[j],
                                    add_truth[i],
                                    "s",
                                    color=add_truth_color)
                        if add_truth[j] is not None:
                            ax.axvline(add_truth[j], color=add_truth_color)
                        if add_truth[i] is not None:
                            ax.axhline(add_truth[i], color=add_truth_color)

                ax.xaxis.set_major_locator(
                    MaxNLocator(max_n_ticks, prune="lower"))
                ax.yaxis.set_major_locator(
                    MaxNLocator(max_n_ticks, prune="lower"))

                if i < K - 1:
                    ax.set_xticklabels([])
                else:
                    [l.set_rotation(45) for l in ax.get_xticklabels()]
                    if labels is not None:
                        ax.set_xlabel(labels[j], **label_kwargs)
                        ax.xaxis.set_label_coords(0.5, -0.3)

                    # use MathText for axes ticks
                    ax.xaxis.set_major_formatter(
                        ScalarFormatter(useMathText=use_math_text))

                if j > 0:
                    ax.set_yticklabels([])
                else:
                    [l.set_rotation(45) for l in ax.get_yticklabels()]
                    if labels is not None:
                        ax.set_ylabel(labels[i], **label_kwargs)
                        ax.yaxis.set_label_coords(-0.3, 0.5)

                    # use MathText for axes ticks
                    ax.yaxis.set_major_formatter(
                        ScalarFormatter(useMathText=use_math_text))

    return fig, axes
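A minimal usage sketch for the corner-style plotting routine above. The function name `corner_fisher` is a placeholder (only the body is shown in this example), and the keyword defaults are assumptions:

# Hypothetical usage; 'corner_fisher' is a placeholder name for the
# routine whose body appears above.
import numpy as np

samples = np.random.randn(5000, 3)  # 5000 samples in 3 dimensions
fig, axes = corner_fisher(samples,
                          labels=["a", "b", "c"],
                          quantiles=[0.16, 0.5, 0.84],
                          show_histograms=True,
                          show_titles=True)
fig.savefig("corner.png")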
Example #44
0
def uindex(index):
    "Convert an index (a tuple of path parts) to unicode for printing"
    if index:
        return os.path.join(*map(ufn, index))
    else:
        return u'.'
Example #45
0
def is_namespace(name):
    "Is the name either a valid campaign name or core?"
    return name in map(os.path.basename, scopelist())
Example #46
0
def get_categories(dbcache,
                   sort='name',
                   book_ids=None,
                   first_letter_sort=False):
    if sort not in CATEGORY_SORTS:
        raise ValueError('sort ' + sort + ' not a valid value')

    fm = dbcache.field_metadata
    book_rating_map = dbcache.fields['rating'].book_value_map
    lang_map = dbcache.fields['languages'].book_value_map

    categories = {}
    book_ids = frozenset(book_ids) if book_ids else book_ids
    pm_cache = {}

    def get_metadata(book_id):
        ans = pm_cache.get(book_id)
        if ans is None:
            ans = pm_cache[book_id] = dbcache._get_proxy_metadata(book_id)
        return ans

    bids = None
    first_letter_sort = bool(first_letter_sort)

    for category, is_multiple, is_composite in find_categories(fm):
        tag_class = create_tag_class(category, fm)
        sort_on, reverse = sort, False
        if is_composite:
            if bids is None:
                bids = dbcache._all_book_ids(
                ) if book_ids is None else book_ids
            cats = dbcache.fields[category].get_composite_categories(
                tag_class, book_rating_map, bids, is_multiple, get_metadata)
        elif category == 'news':
            cats = dbcache.fields['tags'].get_news_category(
                tag_class, book_ids)
        else:
            cat = fm[category]
            brm = book_rating_map
            dt = cat['datatype']
            if dt == 'rating':
                if category != 'rating':
                    brm = dbcache.fields[category].book_value_map
                if sort_on == 'name':
                    sort_on, reverse = 'rating', True
            cats = dbcache.fields[category].get_categories(
                tag_class, brm, lang_map, book_ids)
            if (category != 'authors' and dt == 'text' and cat['is_multiple']
                    and cat['display'].get('is_names', False)):
                for item in cats:
                    item.sort = author_to_author_sort(item.sort)
        cats.sort(key=category_sort_keys[first_letter_sort][sort_on],
                  reverse=reverse)
        categories[category] = cats

    # Needed for legacy databases that have multiple ratings that
    # map to n stars
    for r in categories['rating']:
        for x in tuple(categories['rating']):
            if r.name == x.name and r.id != x.id:
                r.id_set |= x.id_set
                r.count = len(r.id_set)
                categories['rating'].remove(x)
                break
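    # e.g. two rating items with the same display name but different internal
    # ids collapse into a single node whose id_set/count cover both.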

    # User categories
    user_categories = clean_user_categories(dbcache).copy()

    # First add any grouped search terms to the user categories
    muc = dbcache.pref('grouped_search_make_user_categories', [])
    gst = dbcache.pref('grouped_search_terms', {})
    for c in gst:
        if c not in muc:
            continue
        user_categories[c] = []
        for sc in gst[c]:
            for t in categories.get(sc, ()):
                user_categories[c].append([t.name, sc, 0])

    if user_categories:
        # We want to use same node in the user category as in the source
        # category. To do that, we need to find the original Tag node. There is
        # a time/space tradeoff here. By converting the tags into a map, we can
        # do the verification in the category loop much faster, at the cost of
        # temporarily duplicating the categories lists.
        taglist = {}
        for c, items in categories.iteritems():
            taglist[c] = dict(map(lambda t: (icu_lower(t.name), t), items))

        # Add the category values to the user categories
        for user_cat in sorted(user_categories.iterkeys(), key=sort_key):
            items = []
            names_seen = {}
            user_cat_is_gst = user_cat in gst
            for name, label, ign in user_categories[user_cat]:
                n = icu_lower(name)
                if label in taglist and n in taglist[label]:
                    if user_cat_is_gst:
                        # for gst items, make copy and consolidate the tags by name.
                        if n in names_seen:
                            # We must combine this node into a previous one with
                            # the same name ignoring case. As part of the process,
                            # remember the source categories and correct the
                            # average rating
                            t = names_seen[n]
                            other_tag = taglist[label][n]
                            t.id_set |= other_tag.id_set
                            t.count = len(t.id_set)
                            t.original_categories.add(other_tag.category)

                            total_rating = 0
                            count = 0
                            for id_ in t.id_set:
                                rating = book_rating_map.get(id_, 0)
                                if rating:
                                    total_rating += rating / 2
                                    count += 1
                            if total_rating and count:
                                t.avg_rating = total_rating / count
                        else:
                            # Must deepcopy so we don't share the id_set between nodes
                            t = copy.deepcopy(taglist[label][n])
                            t.original_categories = {t.category}
                            names_seen[n] = t
                            items.append(t)
                    else:
                        items.append(taglist[label][n])
                # else: do nothing, so as not to include nodes with zero counts
            cat_name = '@' + user_cat  # add the '@' to avoid name collision
            items.sort(key=category_sort_keys[False][sort])
            categories[cat_name] = items

    # Finally, the saved searches category.
    items = []
    queries = dbcache._search_api.saved_searches.queries
    for srch in sorted(queries, key=sort_key):
        items.append(
            Tag(srch,
                sort=srch,
                search_expression=queries[srch],
                category='search',
                is_editable=False))
    if len(items):
        categories['search'] = items

    return categories
Example #47
0
    def ajax_books_in(self,
                      category,
                      item,
                      sort='title',
                      num=25,
                      offset=0,
                      sort_order='asc',
                      get_additional_fields=''):
        '''
        Return the books (as list of ids) present in the specified category.
        '''
        try:
            dname, ditem = map(decode_name, (category, item))
        except:
            raise cherrypy.HTTPError(404,
                                     'Invalid encoded param: %r' % category)

        try:
            num = int(num)
        except:
            raise cherrypy.HTTPError(404, "Invalid num: %r" % num)
        try:
            offset = int(offset)
        except:
            raise cherrypy.HTTPError(404, "Invalid offset: %r" % offset)

        if sort_order not in ('asc', 'desc'):
            sort_order = 'asc'

        sfield = self.db.data.sanitize_sort_field_name(sort)
        if sfield not in self.db.field_metadata.sortable_field_keys():
            raise cherrypy.HTTPError(404,
                                     '%s is not a valid sort field' % sort)

        if dname in ('allbooks', 'newest'):
            ids = self.search_cache('')
        elif dname == 'search':
            try:
                ids = self.search_cache('search:"%s"' % ditem)
            except:
                raise cherrypy.HTTPError(404,
                                         'Search: %r not understood' % ditem)
        else:
            try:
                cid = int(ditem)
            except:
                raise cherrypy.HTTPError(
                    404, 'Category id %r not an integer' % ditem)

            if dname == 'news':
                dname = 'tags'
            ids = self.db.get_books_for_category(dname, cid)
            all_ids = set(self.search_cache(''))
            # Implement restriction
            ids = ids.intersection(all_ids)

        ids = list(ids)
        self.db.data.multisort(fields=[(sfield, sort_order == 'asc')],
                               subsort=True,
                               only_ids=ids)
        total_num = len(ids)
        ids = ids[offset:offset + num]

        result = {
            'total_num':
            total_num,
            'sort_order':
            sort_order,
            'offset':
            offset,
            'num':
            len(ids),
            'sort':
            sort,
            'base_url':
            absurl(self.opts.url_prefix,
                   '/ajax/books_in/%s/%s' % (category, item)),
            'book_ids':
            ids
        }

        if get_additional_fields:
            additional_fields = {}
            for field in get_additional_fields.split(','):
                field = field.strip()
                if field:
                    flist = additional_fields[field] = []
                    for id_ in ids:
                        flist.append(
                            self.db.new_api.field_for(field,
                                                      id_,
                                                      default_value=None))
            if additional_fields:
                result['additional_fields'] = additional_fields
        return result
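For reference, the dict returned by this handler has the following shape (values illustrative; the numeric-looking path segments stand in for the encoded category/item names decoded by ``decode_name``):

# {
#     'total_num': 1000,
#     'sort_order': 'asc',
#     'offset': 0,
#     'num': 25,
#     'sort': 'title',
#     'base_url': '/ajax/books_in/74616773/3132',
#     'book_ids': [5, 11, 42],
# }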
Example #48
0
from future_builtins import map

__license__   = 'GPL v3'
__copyright__ = '2008, Kovid Goyal [email protected]'
__docformat__ = 'restructuredtext en'
__appname__   = u'calibre'
numeric_version = (2, 58, 0)
__version__   = u'.'.join(map(unicode, numeric_version))
__author__    = u"Kovid Goyal <*****@*****.**>"

'''
Various run time constants.
'''

import sys, locale, codecs, os, importlib, collections

_plat = sys.platform.lower()
iswindows = 'win32' in _plat or 'win64' in _plat
isosx     = 'darwin' in _plat
isnewosx  = isosx and getattr(sys, 'new_app_bundle', False)
isfreebsd = 'freebsd' in _plat
isnetbsd = 'netbsd' in _plat
isdragonflybsd = 'dragonfly' in _plat
isbsd = isfreebsd or isnetbsd or isdragonflybsd
islinux   = not(iswindows or isosx or isbsd)
isfrozen  = hasattr(sys, 'frozen')
isunix = isosx or islinux
isportable = os.environ.get('CALIBRE_PORTABLE_BUILD', None) is not None
ispy3 = sys.version_info.major > 2
isxp = iswindows and sys.getwindowsversion().major < 6
is64bit = sys.maxsize > (1 << 32)
Example #49
0
def mi():
    return map(int, input().strip().split(" "))
Example #50
0
def do_split(split_point, log, before=True):
    '''
    Split tree into a *before* and an *after* tree at ``split_point``.

    :param split_point: The Element at which to split
    :param before: If True tree is split before split_point, otherwise after split_point
    :return: before_tree, after_tree
    '''
    if before:
        # We cannot adjust for after since moving an after split point to a
        # parent will cause breakage if the parent contains any content
        # after the original split point
        split_point = adjust_split_point(split_point, log)
    tree = split_point.getroottree()
    path = tree.getpath(split_point)

    tree, tree2 = copy.deepcopy(tree), copy.deepcopy(tree)
    root, root2 = tree.getroot(), tree2.getroot()
    body, body2 = map(get_body, (root, root2))
    split_point = root.xpath(path)[0]
    split_point2 = root2.xpath(path)[0]

    def nix_element(elem, top=True):
        # Remove elem unless top is False in which case replace elem by its
        # children
        parent = elem.getparent()
        if top:
            parent.remove(elem)
        else:
            index = parent.index(elem)
            parent[index:index + 1] = list(elem.iterchildren())

    # Tree 1
    hit_split_point = False
    keep_descendants = False
    split_point_descendants = frozenset(split_point.iterdescendants())
    for elem in tuple(body.iterdescendants()):
        if elem is split_point:
            hit_split_point = True
            if before:
                nix_element(elem)
            else:
                # We want to keep the descendants of the split point in
                # Tree 1
                keep_descendants = True
                # We want the split point element, but not its tail
                elem.tail = '\n'

            continue
        if hit_split_point:
            if keep_descendants:
                if elem in split_point_descendants:
                    # elem is a descendant keep it
                    continue
                else:
                    # We are out of split_point, so prevent further set
                    # lookups of split_point_descendants
                    keep_descendants = False
            nix_element(elem)

    # Tree 2
    ancestors = frozenset(XPath('ancestor::*')(split_point2))
    for elem in tuple(body2.iterdescendants()):
        if elem is split_point2:
            if not before:
                # Keep the split point element's tail, if it contains non-whitespace
                # text
                tail = elem.tail
                if tail and not tail.isspace():
                    parent = elem.getparent()
                    idx = parent.index(elem)
                    if idx == 0:
                        parent.text = (parent.text or '') + tail
                    else:
                        sib = parent[idx - 1]
                        sib.tail = (sib.tail or '') + tail
                # Remove the element itself
                nix_element(elem)
            break
        if elem in ancestors:
            # We have to preserve the ancestors as they could have CSS
            # styles that are inherited/applicable, like font or
            # width. So we only remove the text, if any.
            elem.text = '\n'
        else:
            nix_element(elem, top=False)

    body2.text = '\n'

    return tree, tree2
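The helpers used above (``adjust_split_point``, ``get_body``, ``XPath``) come from the surrounding module, so ``do_split`` is not runnable in isolation. The following self-contained sketch illustrates the same deepcopy-and-prune idea on a bare lxml tree:

# Self-contained illustration of the split technique: duplicate the tree,
# then prune everything at-or-after (resp. before) the split point in each
# copy.  Uses plain lxml, not calibre's helpers.
import copy
from lxml import etree

markup = '<body><p id="a">one</p><p id="b">two</p><p id="c">three</p></body>'
tree = etree.fromstring(markup).getroottree()
path = tree.getpath(tree.getroot().xpath('//p[@id="b"]')[0])

t1, t2 = copy.deepcopy(tree), copy.deepcopy(tree)
sp1 = t1.getroot().xpath(path)[0]
sp2 = t2.getroot().xpath(path)[0]

# Tree 1 keeps only what comes before the split point.
hit = False
for elem in tuple(t1.getroot().iterdescendants()):
    if elem is sp1:
        hit = True
    if hit:
        elem.getparent().remove(elem)

# Tree 2 keeps the split point and everything after it.
for elem in tuple(t2.getroot().iterdescendants()):
    if elem is sp2:
        break
    elem.getparent().remove(elem)

print(etree.tostring(t1))  # b'<body><p id="a">one</p></body>'
print(etree.tostring(t2))  # b'<body><p id="b">two</p><p id="c">three</p></body>'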
Example #51
0
def chars(lines):
    return ["".join(map(operator.attrgetter("data"), line)) for line in lines]
Example #52
0
def msi():
    return map(str, input().strip().split(" "))
Example #53
0
    def virtualize_resources(self):

        changed = set()
        link_uid = self.book_render_data['link_uid']
        resource_template = link_uid + '|{}|'
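        # e.g. with link_uid 'uid123' (illustrative), a virtualized URL
        # becomes 'uid123|<encoded name+fragment>|'.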
        xlink_xpath = XPath('//*[@xl:href]')
        link_xpath = XPath('//h:a[@href]')
        res_link_xpath = XPath('//h:link[@href]')

        def link_replacer(base, url):
            if url.startswith('#'):
                frag = urlunquote(url[1:])
                if not frag:
                    return url
                changed.add(base)
                return resource_template.format(encode_url(base, frag))
            purl = urlparse(url)
            if purl.netloc or purl.query:
                return url
            if purl.scheme and purl.scheme != 'file':
                return url
            if not purl.path or purl.path.startswith('/'):
                return url
            url, frag = purl.path, purl.fragment
            name = self.href_to_name(url, base)
            if name:
                if self.has_name(name):
                    frag = urlunquote(frag)
                    url = resource_template.format(encode_url(name, frag))
                else:
                    if isinstance(name, unicode):
                        name = name.encode('utf-8')
                    url = 'missing:' + force_unicode(quote(name), 'utf-8')
                changed.add(base)
            return url

        ltm = self.book_render_data['link_to_map']

        for name, mt in self.mime_map.iteritems():
            mt = mt.lower()
            if mt in OEB_STYLES:
                replaceUrls(self.parsed(name), partial(link_replacer, name))
                self.virtualized_names.add(name)
            elif mt in OEB_DOCS:
                self.virtualized_names.add(name)
                root = self.parsed(name)
                for link in res_link_xpath(root):
                    ltype = (link.get('type') or 'text/css').lower()
                    rel = (link.get('rel') or 'stylesheet').lower()
                    if ltype != 'text/css' or rel != 'stylesheet':
                        # This link will not be loaded by the browser anyway
                        # and will cause the resource load check to hang
                        link.attrib.clear()
                        changed.add(name)
                rewrite_links(root, partial(link_replacer, name))
                for a in link_xpath(root):
                    href = a.get('href')
                    if href.startswith(link_uid):
                        a.set('href', 'javascript:void(0)')
                        parts = decode_url(href.split('|')[1])
                        lname, lfrag = parts[0], parts[1]
                        ltm.setdefault(lname,
                                       {}).setdefault(lfrag or '',
                                                      set()).add(name)
                        a.set(
                            'data-' + link_uid,
                            json.dumps({
                                'name': lname,
                                'frag': lfrag
                            },
                                       ensure_ascii=False))
                    else:
                        a.set('target', '_blank')
                        a.set('rel', 'noopener noreferrer')
                    changed.add(name)
            elif mt == 'image/svg+xml':
                self.virtualized_names.add(name)
                changed.add(name)
                xlink = XLINK('href')
                for elem in xlink_xpath(self.parsed(name)):
                    elem.set(xlink, link_replacer(name, elem.get(xlink)))

        for name, amap in ltm.iteritems():
            for k, v in tuple(amap.iteritems()):
                amap[k] = tuple(v)  # needed for JSON serialization

        tuple(map(self.dirty, changed))
Example #54
0
 def utc_trns( loctime ):
     terms			= loctime.translate( trtab ).split()
     if len( terms ) == 7:
         # convert .123 into 123000 microseconds
         terms[6]               += '0' * ( 6 - len( terms[6] ))
     return datetime.datetime( *map( int, terms ), tzinfo=pytz.utc )
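For reference, the fractional-second handling above is plain string padding; ``trtab`` is a module-level translation table (not shown) that presumably maps the date/time punctuation to spaces:

# e.g. '2014-04-24 08:00:00.123' -> ['2014','04','24','08','00','00','123']
# (7 terms); the last term is padded: '123' + '0' * (6 - len('123')) ==
# '123000', i.e. 123000 microseconds for the datetime constructor.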
Example #55
0
    def _load_index(self):
        'Load the index, automatically removing incorrectly sized thumbnails and pruning to fit max_size'
        try:
            os.makedirs(self.location)
        except OSError as err:
            if err.errno != errno.EEXIST:
                self.log('Failed to make thumbnail cache dir:',
                         as_unicode(err))
        self.total_size = 0
        self.items = OrderedDict()
        order = self._read_order()

        def listdir(*args):
            try:
                return os.listdir(os.path.join(*args))
            except EnvironmentError:
                return ()  # not a directory or no permission or whatever

        entries = ('/'.join((parent, subdir, entry))
                   for parent in listdir(self.location)
                   for subdir in listdir(self.location, parent)
                   for entry in listdir(self.location, parent, subdir))

        invalidate = set()
        try:
            with open(os.path.join(self.location, 'invalidate'), 'rb') as f:
                raw = f.read()
        except EnvironmentError as err:
            if getattr(err, 'errno', None) != errno.ENOENT:
                self.log('Failed to read thumbnail invalidate data:',
                         as_unicode(err))
        else:
            try:
                os.remove(os.path.join(self.location, 'invalidate'))
            except EnvironmentError as err:
                self.log('Failed to remove thumbnail invalidate data:',
                         as_unicode(err))
            else:

                def record(line):
                    try:
                        uuid, book_id = line.partition(' ')[0::2]
                        book_id = int(book_id)
                        return (uuid, book_id)
                    except Exception:
                        return None

                invalidate = {record(x) for x in raw.splitlines()}
        items = []
        try:
            for entry in entries:
                try:
                    uuid, name = entry.split('/')[0::2]
                    book_id, timestamp, size, thumbnail_size = name.split('-')
                    book_id, timestamp, size = int(book_id), float(
                        timestamp), int(size)
                    thumbnail_size = tuple(
                        map(int,
                            thumbnail_size.partition('x')[0::2]))
                except (ValueError, TypeError, IndexError, KeyError,
                        AttributeError):
                    continue
                key = (uuid, book_id)
                path = os.path.join(self.location, entry)
                if self.thumbnail_size == thumbnail_size and key not in invalidate:
                    items.append(
                        (key, Entry(path, size, timestamp, thumbnail_size)))
                    self.total_size += size
                else:
                    self._do_delete(path)
        except EnvironmentError as err:
            self.log('Failed to read thumbnail cache dir:', as_unicode(err))

        self.items = OrderedDict(
            sorted(items, key=lambda x: order.get(hash(x[0]), 0)))
        self._apply_size()
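Cache entries encode their metadata directly in the file name, which the loop above parses back apart. An illustrative entry (values made up):

# name = '1234-1527648000.0-20480-60x80'   # book_id-timestamp-size-WxH
# name.split('-')              -> ['1234', '1527648000.0', '20480', '60x80']
# '60x80'.partition('x')[0::2] -> ('60', '80'), so thumbnail_size == (60, 80)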
Example #56
0
def test_history_timestamp_abbreviations():
    """Test timezone abbreviation support. """
    abbrev			= timestamp.support_abbreviations( 'CA', reset=True )
    assert sorted( abbrev ) == ['ADT', 'AST', 'CDT', 'CST', 'EDT', 'EST', 'MDT', 'MST', 'NDT', 'NST', 'PDT', 'PST']

    # Perform all the remaining timezone abbreviation tests relative to a known range of times, to
    # avoid differences in the future due to timezone changes.
    ts				= timestamp( "2014-04-24 08:00:00 MDT" )
    assert near( ts.value, 1398348000.0 )

    # Try to add all of the Americas to the CA abbreviations already supported; can't be done (too
    # many inconsistencies)
    try:
        abbrev			= timestamp.support_abbreviations( 'America' )
        assert False, "Many zones should have been ambiguously abbreviated"
    except AmbiguousTimeZoneError as exc:
        assert "America/Mazatlan" in str( exc )

    exclude			= [
        'America/Mazatlan', 'America/Merida', 'America/Mexico_City', 'America/Monterrey',
        'America/Bahia_Banderas', 'America/Cancun', 'America/Chihuahua', 'America/Havana',
        'America/Santa_Isabel', 'America/Grand_Turk', 'America/Cayman', 'America/Port-au-Prince',
        'America/Metlakatla',
    ]
    #print()
    #print( "America, w/o %r" % ( exclude ))
    abbrev			= timestamp.support_abbreviations( 'America', exclude=exclude )
    #print( sorted( abbrev ))
    #print( reprlib.repr( timestamp._tzabbrev ))
    pytz_version		= tuple( map( int, pytz.__version__.split( '.' )))
    if pytz_version < (2015,4):
        logging.warning( "pytz < 2015.4; HADT/HAST vs. HDT/HST" )
        assert sorted( abbrev ) == ['ACT', 'AKDT', 'AKST', 'AMST', 'AMT', 'ART', 'BOT', 'BRST', 'BRT', 'CLST', 'CLT',
                                    'COT', 'ECT', 'EGST', 'EGT', 'FNT', 'GFT', 'GMT', 'GYT', 'HADT', 'HAST',
                                    'PET', 'PMDT', 'PMST', 'PYST', 'PYT', 'SRT', 'UYST', 'UYT', 'VET', 'WGST', 'WGT']
    elif pytz_version < (2015,7):
        logging.warning( "pytz < 2015.7; had UYST" )
        assert sorted( abbrev ) == ['ACT', 'AKDT', 'AKST', 'AMST', 'AMT', 'ART', 'BOT', 'BRST', 'BRT', 'CLST', 'CLT',
                                    'COT', 'ECT', 'EGST', 'EGT', 'FNT', 'GFT', 'GMT', 'GYT', 'HDT', 'HST',
                                    'PET', 'PMDT', 'PMST', 'PYST', 'PYT', 'SRT', 'UYST', 'UYT', 'VET', 'WGST', 'WGT']
    elif pytz_version < (2017,2):
        assert sorted( abbrev ) == ['ACT', 'AKDT', 'AKST', 'AMST', 'AMT', 'ART', 'BOT', 'BRST', 'BRT', 'CLST', 'CLT',
                                    'COT', 'ECT', 'EGST', 'EGT', 'FNT', 'GFT', 'GMT', 'GYT', 'HDT', 'HST',
                                    'PET', 'PMDT', 'PMST', 'PYST', 'PYT', 'SRT', 'UYT', 'VET', 'WGST', 'WGT']
    else:
        # As of pytz 2017.2, a lot of these zones now use time zones consistent with CA; only a few are added.
        assert sorted( abbrev ) == ['AKDT', 'AKST', 'GMT', 'HDT', 'HST']

    # We *can* add Europe/Berlin
    abbrev			= timestamp.support_abbreviations( 'Europe/Berlin' )
    assert sorted( abbrev ) == ['CEST', 'CET']
    assert 'CEST' in timestamp._tzabbrev
    assert 'EEST' not in timestamp._tzabbrev

    # And all of Europe, w/o some troublesome time zones
    exclude			= [ 'Europe/Simferopol', 'Europe/Istanbul', 'Europe/Minsk', 'Europe/Chisinau', 'Europe/Dublin' ]
    #print()
    #print( "Europe, w/o %r" % ( exclude ))
    abbrev			= timestamp.support_abbreviations( 'Europe', exclude=exclude )
    #print( sorted( abbrev ))
    if pytz_version < (2015,2):
        assert sorted( abbrev ) == ['BST', 'EEST', 'EET', 'MSK', 'SAMT', 'WEST', 'WET']
    elif pytz_version < (2016,3):
        assert sorted( abbrev ) == ['BST', 'EEST', 'EET', 'IST', 'MSK', 'SAMT', 'WEST', 'WET']
    elif pytz_version < (2016,7):
        assert sorted( abbrev ) == ['BST', 'EEST', 'EET', 'IST', 'MSK', 'SAMT', 'WEST', 'WET']
    elif pytz_version < (2018,5):
        assert sorted( abbrev ) == ['BST', 'EEST', 'EET', 'IST', 'MSK', 'WEST', 'WET']
    else:
        assert sorted( abbrev ) == ['BST', 'EEST', 'EET', 'MSK', 'WEST', 'WET']
        
    assert 'EEST' in timestamp._tzabbrev
    try:
        timestamp.support_abbreviations( 'Asia' )
        assert False, "Asia/Jerusalem IST should have mismatched Europe/Dublin IST"
    except AmbiguousTimeZoneError as exc:
        assert "Asia/Jerusalem" in str( exc )

    assert near( parse_offset( '< 1:00:00.001' ),	-3600.001 )
    assert near( parse_offset( '<:1.001' ), 		   -1.001 )
    assert near( parse_offset( '>1:0.001' ),		   60.001 )
    assert near( parse_offset( '>1' ), 			    1 )

    # While Asia is internally very inconsistent (eg. EEST), countries should be internally consistent
    abbrev			= timestamp.support_abbreviations( 'JO', reset=True ) # Jordan
    #print( sorted( abbrev ))
    assert sorted( abbrev ) == [ 'EEST', 'EET']
    z,dst,off			= timestamp._tzabbrev['EEST']
    assert str(z) == 'Asia/Amman'	and dst == True  and format_offset( timedelta_total_seconds( off ), ms=None ) == "> 3:00:00"
    abbrev			= timestamp.support_abbreviations( 'IE', reset=True ) # Ireland
    #print( sorted( abbrev ))
    assert sorted( abbrev ) == [ 'GMT', 'IST' ]
    # Jordan, Ireland and Lebanon only work if we pick a region to exclude, for one EEST definition
    abbrev			= timestamp.support_abbreviations( ['JO', 'IE', 'LB'],
                                                                   exclude=[ 'Asia/Amman' ], reset=True )
    #print( sorted( abbrev ))
    assert sorted( abbrev ) == [ 'EEST', 'EET', 'GMT', 'IST' ]
    z,dst,off			= timestamp._tzabbrev['EEST']
    assert str(z) == 'Asia/Beirut'	and dst == True  and format_offset( timedelta_total_seconds( off ), ms=None ) == "> 3:00:00"

    # Australia zones incompatible with a bunch of other timezone abbreviations, eg. CST; reset
    abbrev			= timestamp.support_abbreviations( 'Australia', reset=True )
    #print( sorted( abbrev ))
    #print( repr( timestamp._tzabbrev ))
    if pytz_version < (2017,2):
        assert sorted( abbrev ) == ['ACDT', 'ACST', 'ACWST', 'AEDT', 'AEST', 'AWST', 'LHDT', 'LHST']
        z,dst,off		= timestamp._tzabbrev['LHST']
        assert str(z) == 'Australia/Lord_Howe'	and dst == False and format_offset( timedelta_total_seconds( off ), ms=None ) == ">10:30:00"
    else:
        assert sorted( abbrev ) ==  ['ACDT', 'ACST', 'AEDT', 'AEST', 'AWST']


    # Ensure that non-ambiguous (DST-specific) zone abbreviations override ambiguous (no longer
    # relevant, as pytz >= 2014.7 no longer contains dst == None for some of the Australian zones
    # without DST)
    abbrev			= timestamp.support_abbreviations( [ 'Australia/Adelaide' ], reset=True )
    assert sorted( abbrev ) == [ 'ACDT', 'ACST' ]
    z,dst,off			= timestamp._tzabbrev['ACST']
    assert str(z) == 'Australia/Adelaide'	and dst == False and format_offset( timedelta_total_seconds( off ), ms=None ) == "> 9:30:00"
    abbrev			= timestamp.support_abbreviations( [ 'Australia/Adelaide', 'Australia/Darwin' ], reset=True )
    #print( sorted( abbrev ))
    #print( repr( timestamp._tzabbrev ))
    z,dst,off			= timestamp._tzabbrev['ACST']
    assert str(z) in ( 'Australia/Darwin',
                       'Australia/Adelaide' ) and dst == False and format_offset( timedelta_total_seconds( off ), ms=None ) == "> 9:30:00"

    # Check that zones with complete, permanent offset changes (not just DST) are handled.  We know
    # that within a year of 2014-04-28, the America/Eirunepe (west Amazonas) zone had such a change
    # (pre pytz 2017.2, anyway...)
    if pytz_version < (2017,2):
        abbrev			= timestamp.support_abbreviations( [ 'America/Eirunepe' ], at=datetime.datetime( 2014, 4, 28 ), reset=True)
        #print( sorted( abbrev ))
        assert sorted( abbrev ) == [ 'ACT', 'AMT' ]
        z,dst,off			= timestamp._tzabbrev['ACT']
        assert str(z) == 'America/Eirunepe'		and dst == False and format_offset( timedelta_total_seconds( off ), ms=None ) == "< 5:00:00"
        z,dst,off			= timestamp._tzabbrev['AMT']
        assert str(z) == 'America/Eirunepe'		and dst == False and format_offset( timedelta_total_seconds( off ), ms=None ) == "< 4:00:00"
Example #57
0
def main( argv=None ):
    parser			= argparse.ArgumentParser(
        formatter_class=argparse.RawDescriptionHelpFormatter,
        epilog = """\
    If an address starting with '/' is provided (eg. --address /dev/ttyS1), then
    a Modbus/RTU serial framer is assumed.  Otherwise, a Modbus/TCP framer is used.

    The --evil option takes the following:
      truncate         -- return only part of the response
      delay[:#.#[-#.#]]-- delay response by #.#[-#.#] seconds (default == 5)
      corrupt[:<shat>] -- corrupt Modbus/TCP protocol response (default == "transaction")
         :transaction    -- Transaction ID (only relevant to Modbus/TCP servers)
         :protocol       -- Protocol ID
         :unit           -- Unit number
         :function       -- Function code
         :registers      -- Amount of response data

    Register range(s) and value(s) must be supplied:

      <begin>[-<end>][=<val>[,<val>]]

    EXAMPLE

      modbus_sim.py --address localhost:7502 --evil delay:2.5 40001-40100

        Starts a simulated Modbus/TCP PLC serving Holding registers 40001-40100
        == 0, on port 7502 on interface 'localhost', which delays all responses
        for 2.5 seconds.

      modbus_sim.py --address /dev/ttyS0 --evil delay:.01-.1 40001-40100

        Starts a simulated Modbus/RTU PLC serving Holding registers 40001-40100
        == 0, on serial port /dev/ttyS0, which delays all responses for between
        .01-.1 seconds.

    """ )
    parser.add_argument( '-v', '--verbose',
                         default=1, action="count", help="Display logging information." )
    parser.add_argument('-l', '--log',
                        type=str, default=None, help="Direct log output to the specified file" )
    parser.add_argument( '-a', '--address', default="0.0.0.0:502",
                         help="Default [interface][:port] to bind to (default: any, port 502)" )
    parser.add_argument( '-r', '--range',	default=1,
                         help="Number of ports to try, if busy       (default: 1)" )
    parser.add_argument( '-e', '--evil',	default=None,
                         help="Evil Modbus/TCP protocol framer       (default: None)" )
    parser.add_argument( '-c', '--config',	default=None,
                         help="""JSON config data for Modbus framer (eg. {"baudrate":19200}) (default: None)""" )
    parser.add_argument( 'registers', nargs="+" )
    args			= parser.parse_args( argv )

    # Deduce logging level and target file (if any)
    levelmap			= {
        0: logging.CRITICAL,
        1: logging.ERROR,
        2: logging.WARNING,
        3: logging.INFO,
        4: logging.DEBUG,
        }
    cpppo.log_cfg['level']	= ( levelmap[args.verbose]
                                    if args.verbose in levelmap
                                    else logging.DEBUG )
    if args.log:
        cpppo.log_cfg['filename']= args.log # log rotation not supported

    logging.basicConfig( **cpppo.log_cfg )

    #---------------------------------------------------------------------------#
    # run the server you want
    #---------------------------------------------------------------------------#

    # Deduce interface:port to bind, and correct types.  Interface defaults to
    # '' (INADDR_ANY) if only :port is supplied.  Port defaults to 502 if only
    # interface is supplied.  After this block, 'address' is always a tuple like
    # ("interface",502).  If '/', then start a Modbus/RTU serial server,
    # otherwise a Modbus/TCP network server.  Create an address_sequence
    # yielding all the relevant target addresses we might need to try.

    # We must initialize 'framer' here (even if it's the same as the 'starter'
    # default), because we may make an Evil...() derived class below...
    starter_kwds		= {}
    if args.address.startswith( '/' ):
        starter			= StartRtuServerLogging
        framer			= modbus_rtu_framer_collecting
        try:
            import serial
        except ImportError:
            logging.error( "Modbus/RTU not supported; ensure PySerial is available" )
            raise
        starter_kwds		= {
            # Default serial configs; may be overridden, eg:
            #     --config '{"baudrate":19200, "slaves":[1,2,3]}'
            'stopbits':			1,
            'bytesize':			8,
            'parity':			serial.PARITY_NONE,
            'baudrate':			4800,
            'timeout':			0.5,
            'slaves':			None,
            'ignore_missing_slaves':	True,
        }
        address_sequence	= [ args.address ]
        assert args.range == 1, \
            "A range of serial ports is unsupported"
    else:
        starter			= StartTcpServerLogging
        framer			= ModbusSocketFramer
        address			= args.address.split(':')
        assert 1 <= len( address ) <= 2
        address			= (
            str( address[0] ),
            int( address[1] ) if len( address ) > 1 else Defaults.Port )
        log.info( "--server '%s' produces address=%r", args.address, address )
        address_sequence	= (
            (address[0],port)
            for port in range( address[1], address[1] + int( args.range ))
        )
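        # eg. --address localhost:7502 ==> ('localhost', 7502); ":1502" ==> ('', 1502)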

    #---------------------------------------------------------------------------#
    # Evil Framers, manipulate packets resulting from underlying Framers
    #---------------------------------------------------------------------------#
    if args.evil == "truncate":

        class EvilFramerTruncateResponse( framer ):
            def buildPacket(self, message):
                ''' Creates a *truncated* ready to send modbus packet, dropping from 1
                byte up to the entire packet before returning the response.

                :param message: The populated request/response to send
                '''
                packet		= super( EvilFramerTruncateResponse, self ).buildPacket( message )
                datalen		= len( packet )
                corrlen		= datalen - random.randint( 1, datalen )

                log.info( "Corrupting response; truncating from %d to %d bytes", datalen, corrlen )

                return packet[:corrlen]

        framer			= EvilFramerTruncateResponse
        log.info( "--evil '%s' uses EvilFramerTruncateResponse", args.evil )

    elif args.evil and args.evil.startswith( 'delay' ):

        class EvilFramerDelayResponse( framer ):
            delay		= 5

            def buildPacket(self, message):
                ''' Creates a ready to send modbus packet but delays the return.

                :param message: The populated request/response to send
                '''
                packet		= super( EvilFramerDelayResponse, self ).buildPacket( message )

                log.info( "Delaying response for %s seconds", self.delay )
                delay		= self.delay
                if isinstance( delay, (list,tuple) ):
                    delay	= random.uniform( *delay )
                time.sleep( delay )

                return packet

        framer			= EvilFramerDelayResponse
        # If a "--evil delay:1.5" is provided, pull out the number and change
        # the ModbusSockerFramerDelayResponse class' .delay value to the specified
        # number of seconds
        req			= args.evil.split( ':', 1 )
        assert 1 <= len( req ) <= 2
        if len( req ) == 2:
            # Specified delay value or range
            delay		= tuple( map( float, req[1].split( '-' )))
            assert 1 <= len( delay ) <= 2
            EvilFramerDelayResponse.delay = delay if len( delay ) > 1 else delay[0]
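        # eg. "--evil delay:2.5" ==> delay == 2.5; "--evil delay:.01-.1" ==> (0.01, 0.1)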
        log.info( "--evil '%s' uses EvilFramerDelayResponse, which delays responses for %s seconds",
                args.evil, EvilFramerDelayResponse.delay )

    elif args.evil and args.evil.startswith( 'corrupt' ):

        class EvilFramerCorruptResponse( framer ):
            what			= "transaction"

            def buildPacket(self, message):
                ''' Creates a *corrupted* ready to send modbus packet, perturbing the
                field selected by self.what before returning the response.

                :param message: The populated request/response to send

                WARNING: pymodbus seems to swallow any exceptions thrown by these
                methods.  This seems like a defect; it should log them, at least.
                '''
                packet		= b''	# fall back to an empty packet if corruption below fails
                try:
                    log.info( "Encoding message" )
                    message.encode()

                    if self.what == "transaction":
                        message.transaction_id ^= 0xFFFF
                        packet	= super( EvilFramerCorruptResponse, self ).buildPacket( message )
                        message.transaction_id ^= 0xFFFF
                    elif self.what == "registers":
                        if isinstance( message, ReadRegistersResponseBase ):
                            # These have '.registers' attribute, which is a list.
                            # Add/remove some
                            saveregs		= message.registers
                            if len( message.registers ) == 0 or random.randint( 0, 1 ):
                                message.registers += [999]
                            else:
                                message.registers = message.registers[:-1]
                            packet		= super( EvilFramerCorruptResponse, self ).buildPacket( message )
                            message.registers	= saveregs
                        elif isinstance( message, WriteSingleRegisterResponse ):
                            # Flip the response's address bits and then flip them back.
                            message.address    ^= 0xFFFF
                            packet		= super( EvilFramerCorruptResponse, self ).buildPacket( message )
                            message.address    ^= 0xFFFF
                        elif isinstance( message, WriteMultipleRegistersResponse ):
                            # Flip the response's address bits and then flip them back.
                            message.address    ^= 0xFFFF
                            packet		= super( EvilFramerCorruptResponse, self ).buildPacket( message )
                            message.address    ^= 0xFFFF
                        else:
                            raise NotImplementedException(
                                "Unhandled class for register corruption; not implemented" )
                    elif self.what == "protocol":
                        message.protocol_id    ^= 0xFFFF
                        packet			= super( EvilFramerCorruptResponse, self ).buildPacket( message )
                        message.protocol_id    ^= 0xFFFF
                    elif self.what == "unit":
                        message.unit_id	       ^= 0xFF
                        packet			= super( EvilFramerCorruptResponse, self ).buildPacket( message )
                        message.unit_id	       ^= 0xFF
                    elif self.what == "function":
                        message.function_code  ^= 0xFF
                        packet			= super( EvilFramerCorruptResponse, self ).buildPacket( message )
                        message.function_code  ^= 0xFF
                    else:
                        raise NotImplementedException(
                            "Unknown corruption specified; not implemented" )
                except Exception:
                    log.info( "Could not build corrupt packet: %s", traceback.format_exc() )
                return packet

        framer			= EvilFramerCorruptResponse
        # If a "--evil corrupt:<what>" is provided, corrupt the specified entry.
        req			= args.evil.split(":", 1)
        assert 1 <= len( req ) <= 2
        if len( req ) == 2:
            EvilFramerCorruptResponse.what = req[1]
        log.info( "--evil '%s' uses EvilFramerCorruptResponse, which corrupts the responses %s entry",
                args.evil, EvilFramerCorruptResponse.what )

    elif args.evil:

        log.error( "Unrecognized --evil argument: %s", args.evil )
        return 1

    if args.config:
        try:
            starter_kwds.update( **json.loads( args.config ))
        except Exception as exc:
            log.error( "Failed to parse JSON --config Modbus Framer config: %s; %s", args.config, exc )
            raise

    #---------------------------------------------------------------------------#
    # Start the PLC simulation engine on a port in the range; will serve forever
    #---------------------------------------------------------------------------#
    for address in address_sequence:
        try:
            for k in sorted( starter_kwds.keys() ):
                log.info( "config: %24s: %s", k, starter_kwds[k] )
            starter( registers=args.registers, framer=framer, address=address, **starter_kwds )
        except KeyboardInterrupt:
            return 1
        except Exception:
            log.info( "Couldn't start PLC simulator on %s: %s",
                    address, traceback.format_exc() )

    log.error( "Failed to start PLC simulator on %s, over a range of %s ports",
               args.address, args.range )
    return 1
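A quick way to exercise the simulator is a tiny read client.  This sketch
assumes the classic pymodbus 2.x synchronous API (the import moved to
pymodbus.client in 3.x) and the localhost:7502 address from the EXAMPLE text
above; none of it comes from the original listing.

from pymodbus.client.sync import ModbusTcpClient

client			= ModbusTcpClient( 'localhost', port=7502 )
client.connect()
# Holding register 40001 corresponds to PDU address 0
reply			= client.read_holding_registers( 0, count=10, unit=1 )
print( reply.registers if not reply.isError() else reply )
client.close()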
Example #58
0
d['tweak_fmt_order'] = ['EPUB', 'AZW3']
d['update_metadata_from_calibre'] = True
d['nestable_dock_widgets'] = False
d['dock_top_left'] = 'horizontal'
d['dock_top_right'] = 'horizontal'
d['dock_bottom_left'] = 'horizontal'
d['dock_bottom_right'] = 'horizontal'
d['preview_serif_family'] = 'Liberation Serif'
d['preview_sans_family'] = 'Liberation Sans'
d['preview_mono_family'] = 'Liberation Mono'
d['preview_standard_font_family'] = 'serif'
d['preview_base_font_size'] = 18
d['preview_mono_font_size'] = 14
d['preview_minimum_font_size'] = 8
d['remove_existing_links_when_linking_sheets'] = True
d['charmap_favorites'] = list(map(ord, '\xa0\u2002\u2003\u2009\xad' '‘’“”‹›«»‚„' '—–§¶†‡©®™' '→⇒•·°±−×÷¼½½¾' '…µ¢£€¿¡¨´¸ˆ˜' 'ÀÁÂÃÄÅÆÇÈÉÊË' 'ÌÍÎÏÐÑÒÓÔÕÖØ' 'ŒŠÙÚÛÜÝŸÞßàá' 'âãäåæçèéêëìí' 'îïðñòóôõöøœš' 'ùúûüýÿþªºαΩ∞'))  # noqa
d['folders_for_types'] = {'style':'styles', 'image':'images', 'font':'fonts', 'audio':'audio', 'video':'video'}
d['pretty_print_on_open'] = False
d['disable_completion_popup_for_search'] = False
d['saved_searches'] = []
d['insert_tag_mru'] = ['p', 'div', 'li', 'h1', 'h2', 'h3', 'h4', 'em', 'strong', 'td', 'tr']
d['spell_check_case_sensitive_sort'] = False
d['inline_spell_check'] = True
d['custom_themes'] = {}
d['remove_unused_classes'] = False
d['global_book_toolbar'] = [
    'new-file', 'open-book', 'save-book', None, 'global-undo', 'global-redo',
    'create-checkpoint', None, 'donate', 'user-manual'
]
d['global_tools_toolbar'] = [
    'check-book', 'spell-check-book', 'edit-toc', 'insert-character',
    'manage-fonts', 'smarten-punctuation', 'remove-unused-css', 'show-reports'
]
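These assignments build up a defaults mapping d.  The hypothetical sketch
below (Prefs is illustrative, not calibre's actual implementation) shows the
usual consumption pattern for such a dict: stored user values shadow the
shipped defaults.

class Prefs(object):

    def __init__(self, defaults, stored=None):
        self.defaults = defaults
        self.stored = dict(stored or {})

    def __getitem__(self, key):
        # user-customized value if present, else the shipped default
        return self.stored.get(key, self.defaults[key])

prefs = Prefs(d, stored={'preview_base_font_size': 20})
assert prefs['preview_base_font_size'] == 20   # user override
assert prefs['pretty_print_on_open'] is False  # falls back to d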
Example #59
0
    def parse_graminit_c(self, filename):
        """Parse the .c file written by pgen.  (Internal)

        The file looks as follows.  The first two lines are always this:

        #include "pgenheaders.h"
        #include "grammar.h"

        After that come four blocks:

        1) one or more state definitions
        2) a table defining dfas
        3) a table defining labels
        4) a struct defining the grammar

        A state definition has the following form:
        - one or more arc arrays, each of the form:
          static arc arcs_<n>_<m>[<k>] = {
                  {<i>, <j>},
                  ...
          };
        - followed by a state array, of the form:
          static state states_<s>[<t>] = {
                  {<k>, arcs_<n>_<m>},
                  ...
          };

        """
        try:
            f = open(filename)
        except IOError as err:
            print("Can't open %s: %s" % (filename, err))
            return False
        # The code below essentially uses f's iterator-ness!
        lineno = 0

        # Expect the two #include lines
        lineno, line = lineno + 1, next(f)
        assert line == '#include "pgenheaders.h"\n', (lineno, line)
        lineno, line = lineno + 1, next(f)
        assert line == '#include "grammar.h"\n', (lineno, line)

        # Parse the state definitions
        lineno, line = lineno + 1, next(f)
        allarcs = {}
        states = []
        while line.startswith("static arc "):
            while line.startswith("static arc "):
                mo = re.match(r"static arc arcs_(\d+)_(\d+)\[(\d+)\] = {$",
                              line)
                assert mo, (lineno, line)
                n, m, k = map(int, mo.groups())
                arcs = []
                for _ in range(k):
                    lineno, line = lineno + 1, next(f)
                    mo = re.match(r"\s+{(\d+), (\d+)},$", line)
                    assert mo, (lineno, line)
                    i, j = map(int, mo.groups())
                    arcs.append((i, j))
                lineno, line = lineno + 1, next(f)
                assert line == "};\n", (lineno, line)
                allarcs[(n, m)] = arcs
                lineno, line = lineno + 1, next(f)
            mo = re.match(r"static state states_(\d+)\[(\d+)\] = {$", line)
            assert mo, (lineno, line)
            s, t = map(int, mo.groups())
            assert s == len(states), (lineno, line)
            state = []
            for _ in range(t):
                lineno, line = lineno + 1, next(f)
                mo = re.match(r"\s+{(\d+), arcs_(\d+)_(\d+)},$", line)
                assert mo, (lineno, line)
                k, n, m = map(int, mo.groups())
                arcs = allarcs[n, m]
                assert k == len(arcs), (lineno, line)
                state.append(arcs)
            states.append(state)
            lineno, line = lineno + 1, next(f)
            assert line == "};\n", (lineno, line)
            lineno, line = lineno + 1, next(f)
        self.states = states

        # Parse the dfas
        dfas = {}
        mo = re.match(r"static dfa dfas\[(\d+)\] = {$", line)
        assert mo, (lineno, line)
        ndfas = int(mo.group(1))
        for i in range(ndfas):
            lineno, line = lineno + 1, next(f)
            mo = re.match(r'\s+{(\d+), "(\w+)", (\d+), (\d+), states_(\d+),$',
                          line)
            assert mo, (lineno, line)
            symbol = mo.group(2)
            number, x, y, z = map(int, mo.group(1, 3, 4, 5))
            assert self.symbol2number[symbol] == number, (lineno, line)
            assert self.number2symbol[number] == symbol, (lineno, line)
            assert x == 0, (lineno, line)
            state = states[z]
            assert y == len(state), (lineno, line)
            lineno, line = lineno + 1, next(f)
            mo = re.match(r'\s+("(?:\\\d\d\d)*")},$', line)
            assert mo, (lineno, line)
            first = {}
            rawbitset = eval(mo.group(1))
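            # The octal-escaped C string literal evaluates to a byte string; set
            # bit j of byte i (decoded below) marks label i*8 + j as in FIRST.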
            for i, c in enumerate(rawbitset):
                byte = ord(c)
                for j in range(8):
                    if byte & (1 << j):
                        first[i * 8 + j] = 1
            dfas[number] = (state, first)
        lineno, line = lineno + 1, next(f)
        assert line == "};\n", (lineno, line)
        self.dfas = dfas

        # Parse the labels
        labels = []
        lineno, line = lineno + 1, next(f)
        mo = re.match(r"static label labels\[(\d+)\] = {$", line)
        assert mo, (lineno, line)
        nlabels = int(mo.group(1))
        for i in range(nlabels):
            lineno, line = lineno + 1, next(f)
            mo = re.match(r'\s+{(\d+), (0|"\w+")},$', line)
            assert mo, (lineno, line)
            x, y = mo.groups()
            x = int(x)
            if y == "0":
                y = None
            else:
                y = eval(y)
            labels.append((x, y))
        lineno, line = lineno + 1, next(f)
        assert line == "};\n", (lineno, line)
        self.labels = labels

        # Parse the grammar struct
        lineno, line = lineno + 1, next(f)
        assert line == "grammar _PyParser_Grammar = {\n", (lineno, line)
        lineno, line = lineno + 1, next(f)
        mo = re.match(r"\s+(\d+),$", line)
        assert mo, (lineno, line)
        ndfas = int(mo.group(1))
        assert ndfas == len(self.dfas)
        lineno, line = lineno + 1, next(f)
        assert line == "\tdfas,\n", (lineno, line)
        lineno, line = lineno + 1, next(f)
        mo = re.match(r"\s+{(\d+), labels},$", line)
        assert mo, (lineno, line)
        nlabels = int(mo.group(1))
        assert nlabels == len(self.labels), (lineno, line)
        lineno, line = lineno + 1, next(f)
        mo = re.match(r"\s+(\d+)$", line)
        assert mo, (lineno, line)
        start = int(mo.group(1))
        assert start in self.number2symbol, (lineno, line)
        self.start = start
        lineno, line = lineno + 1, next(f)
        assert line == "};\n", (lineno, line)
        try:
            lineno, line = lineno + 1, next(f)
        except StopIteration:
            pass
        else:
            assert 0, (lineno, line)
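For reference, here is a self-contained sketch of the arc-array stage above,
run on a synthetic fragment (the numbers are invented; only the structure
matches a real graminit.c):

import re

fragment = '''static arc arcs_0_0[1] = {
\t{2, 1},
};
'''
lines = iter(fragment.splitlines(True))
mo = re.match(r"static arc arcs_(\d+)_(\d+)\[(\d+)\] = {$", next(lines))
n, m, k = map(int, mo.groups())
arcs = []
for _ in range(k):
    mo = re.match(r"\s+{(\d+), (\d+)},$", next(lines))
    arcs.append(tuple(map(int, mo.groups())))
print((n, m), arcs)  # (0, 0) [(2, 1)]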
Example #60
0
def getter(w):
    return list(
        map(unicode, (w.item(i).text() for i in xrange(w.count()))))
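A usage sketch with a hypothetical stand-in for the Qt list widget normally
passed as w (unicode and xrange mark the original as Python 2; substitute str
and range on Python 3):

class FakeItem(object):
    def __init__(self, text):
        self._text = text
    def text(self):
        return self._text

class FakeListWidget(object):
    def __init__(self, texts):
        self._items = [FakeItem(t) for t in texts]
    def count(self):
        return len(self._items)
    def item(self, i):
        return self._items[i]

print(getter(FakeListWidget(['p', 'div'])))  # [u'p', u'div'] on Python 2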