Example #1
    def getEventDetail(self, uri, format=None):
        """
        Gets event detail information.

        :type uri: str
        :param uri: Event identifier as either an EMSC event unique identifier,
            e.g. ``"19990817_0000001"`` or a QuakeML-formatted event URI, e.g.
            ``"quakeml:eu.emsc/event#19990817_0000001"``.
        :type format: ``'list'``, ``'xml'`` or ``'catalog'``, optional
        :param format: Format of returned results. Defaults to ``'xml'``.
        :rtype: :class:`~obspy.core.event.Catalog`, list or str
        :return: Method will return either an ObsPy
            :class:`~obspy.core.event.Catalog` object, a list of event
            dictionaries or a QuakeML string depending on the ``format``
            keyword.

        .. seealso:: http://www.seismicportal.eu/services/event/detail/info/

        .. rubric:: Example

        >>> from obspy.neries import Client
        >>> client = Client()
        >>> result = client.getEventDetail("19990817_0000001", 'list')
        >>> len(result)  # Number of calculated origins
        12
        >>> result[0]  # Details about first calculated origin  #doctest: +SKIP
        {'author': u'EMSC', 'event_id': u'19990817_0000001',
         'origin_id': 1465935, 'longitude': 29.972,
         'datetime': UTCDateTime(1999, 8, 17, 0, 1, 35), 'depth': -10.0,
         'magnitude': 6.7, 'magnitude_type': u'mw', 'latitude': 40.749}
        """
        # deprecation warning if format is not set
        if format is None:
            msg = "The default setting format='xml' for obspy.neries." + \
                "Client.getEventDetail() will be changed in the future to " + \
                "format='catalog'. Please call this function with the " + \
                "format keyword in order to hide this deprecation warning."
            warnings.warn(msg, category=DeprecationWarning)
            format = "xml"
        # parse parameters
        kwargs = {}
        if format == 'list':
            kwargs['format'] = 'json'
        else:
            kwargs['format'] = 'xml'
        if str(uri).startswith('quakeml:'):
            # QuakeML-formatted event URI
            kwargs['uri'] = str(uri)
        else:
            # EMSC event unique identifier
            kwargs['unid'] = str(uri)
        # fetch data
        data = self._fetch("/services/event/detail", **kwargs)
        # format output
        if format == "list":
            return self._json2list(data.decode())
        elif format == "catalog":
            return readEvents(compatibility.BytesIO(data), 'QUAKEML')
        else:
            return data
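A minimal usage sketch, per the docstring (event ID taken from the example above):

from obspy.neries import Client

client = Client()
origins = client.getEventDetail("19990817_0000001", format='list')    # list of dicts
quakeml = client.getEventDetail("19990817_0000001", format='xml')     # raw QuakeML
catalog = client.getEventDetail("quakeml:eu.emsc/event#19990817_0000001",
                                format='catalog')                     # obspy Catalog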
Example #2
 def __str__(self):
     ret = ("Network {id} {description}\n"
            "\tStation Count: {selected}/{total} (Selected/Total)\n"
            "\t{start_date} - {end_date}\n"
            "\tAccess: {restricted} {alternate_code}{historical_code}\n")
     ret = ret.format(
         id=self.code,
         description="(%s)" % self.description if self.description else "",
         selected=self.selected_number_of_stations,
         total=self.total_number_of_stations,
         start_date=str(self.start_date),
         end_date=str(self.end_date) if self.end_date else "",
         restricted=self.restricted_status,
         alternate_code="Alternate Code: %s " % self.alternate_code if
         self.alternate_code else "",
         historical_code="historical Code: %s " % self.historical_code if
         self.historical_code else "")
     contents = self.get_contents()
     ret += "\tContains:\n"
     ret += "\t\tStations (%i):\n" % len(contents["stations"])
     ret += "\n".join(["\t\t\t%s" % _i for _i in contents["stations"]])
     ret += "\n"
     ret += "\t\tChannels (%i):\n" % len(contents["channels"])
     ret += "\n".join(textwrap.wrap(", ".join(
         contents["channels"]), initial_indent="\t\t\t",
         subsequent_indent="\t\t\t", expand_tabs=False))
     return ret
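A sketch of how this summary is produced in practice, assuming a recent ObsPy with its bundled example inventory:

from obspy import read_inventory

inv = read_inventory()   # example StationXML shipped with ObsPy
print(inv[0])            # first Network, rendered by the __str__ above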
Example #3
    def test_object_implements_py2_unicode_method(self):
        my_unicode_str = u'Unicode string: \u5b54\u5b50'
        class A(object):
            def __str__(self):
                return my_unicode_str
        a = A()
        self.assertEqual(len(str(a)), 18)
        if utils.PY2:
            self.assertTrue(hasattr(a, '__unicode__'))
        else:
            self.assertFalse(hasattr(a, '__unicode__'))
        self.assertEqual(str(a), my_unicode_str)
        self.assertTrue(isinstance(str(a).encode('utf-8'), bytes))
        if utils.PY2:
            self.assertTrue(type(unicode(a)) == unicode)
            self.assertEqual(unicode(a), my_unicode_str)

        # Manual equivalent on Py2 without the decorator:
        if not utils.PY3:
            class B(object):
                def __unicode__(self):
                    return u'Unicode string: \u5b54\u5b50'
                def __str__(self):
                    return unicode(self).encode('utf-8')
            b = B()
            assert str(a) == str(b)
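The decorator this test alludes to is future.utils.python_2_unicode_compatible; a minimal sketch:

from future.utils import python_2_unicode_compatible

@python_2_unicode_compatible
class C(object):
    def __str__(self):
        # Define __str__ once returning text; on Py2 the decorator moves it
        # to __unicode__ and synthesizes a bytes-returning __str__.
        return u'Unicode string: \u5b54\u5b50'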
Example #4
def update_single_item(item):
    """
    Synchronize single item (movie or episode) with next-episode-net

    :param item: video item
    :type item: dict
    """
    data = {
        'user': {
            'username': addon.getSetting('username'),
            'hash': addon.getSetting('hash')
        }}
    if item['type'] == 'episode':
        data['tvshows'] = [{
            'thetvdb_id': get_tvdb_id(item['tvshowid']),
            'season': str(item['season']),
            'episode': str(item['episode']),
            'watched': '1' if item['playcount'] else '0'
            }]
    elif item['type'] == 'movie':
        data['movies'] = [{
            'watched': '1' if item['playcount'] else '0'
        }]
        if 'tt' in item['imdbnumber']:
            data['imdb_id'] = item['imdbnumber']
        else:
            data['imdb_id'] = item['uniqueid']['imdb']
    log_data_sent(data)
    send_data(data)
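Hypothetical item dicts carrying exactly the fields the function reads:

episode_item = {'type': 'episode', 'tvshowid': 42, 'season': 1,
                'episode': 3, 'playcount': 1}
movie_item = {'type': 'movie', 'imdbnumber': 'tt0133093',
              'playcount': 0, 'uniqueid': {'imdb': 'tt0133093'}}
update_single_item(episode_item)
update_single_item(movie_item)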
Example #5
def _write_io_units(parent, obj):
    sub = etree.SubElement(parent, "InputUnits")
    etree.SubElement(sub, "Name").text = str(obj.input_units)
    etree.SubElement(sub, "Description").text = str(obj.input_units_description)
    sub = etree.SubElement(parent, "OutputUnits")
    etree.SubElement(sub, "Name").text = str(obj.output_units)
    etree.SubElement(sub, "Description").text = str(obj.output_units_description)
Example #6
def origin__geo_interface__(self):
    """
    __geo_interface__ method for GeoJSON-type GIS protocol

    :return: dict of valid GeoJSON

    Reference
    ---------
    Python geo_interface specifications:
    https://gist.github.com/sgillies/2217756

    """
    time = None
    update_time = None
    
    coords = [self.longitude, self.latitude]
    if self.depth is not None:
        coords.append(self.depth)
    if isinstance(self.time, UTCDateTime):
        time = str(self.time)
        coords.append(self.time.timestamp)
    
    if self.creation_info and self.creation_info.creation_time is not None:
        update_time = str(self.creation_info.creation_time)

    point = {
        "type": "Point",
        "coordinates": coords,
        "id": str(self.resource_id),
        }
    props = {
        "time": time,
        "updated": update_time,
        }
    return {"type": "Feature", "properties": props, "geometry": point}
Example #7
def _order_totals(context):
    """
    Add shipping/tax/discount/order types and totals to the template
    context. Use the context's completed order object for email
    receipts, or the cart object for checkout.
    """
    fields = ["shipping_type", "shipping_total", "discount_total",
              "tax_type", "tax_total"]
    if "order" in context:
        for field in fields + ["item_total"]:
            context[field] = getattr(context["order"], field)
    else:
        context["item_total"] = context["request"].cart.total_price()
        if context["item_total"] == 0:
            # Ignore session if cart has no items, as cart may have
            # expired sooner than the session.
            context["tax_total"] = 0
            context["discount_total"] = 0
            context["shipping_total"] = 0
        else:
            for field in fields:
                context[field] = context["request"].session.get(field, None)
    context["order_total"] = context.get("item_total", None)
    if context.get("shipping_total", None) is not None:
        context["order_total"] += Decimal(str(context["shipping_total"]))
    if context.get("discount_total", None) is not None:
        context["order_total"] -= Decimal(str(context["discount_total"]))
    if context.get("tax_total", None) is not None:
        context["order_total"] += Decimal(str(context["tax_total"]))

    context["stripe_key"] = settings.STRIPE_PUBLIC_KEY
    context["s_currency"] = settings.STRIPE_CURRENCY

    return context
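The tail arithmetic isolated, with hypothetical values; every total passes through Decimal(str(...)) to avoid float rounding:

from decimal import Decimal

order_total = Decimal("50.00")      # item_total
order_total += Decimal("5.00")      # + shipping_total
order_total -= Decimal("10.00")     # - discount_total
order_total += Decimal("4.00")      # + tax_total -> Decimal('49.00')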
Example #8
def test_analysing_interface_dependencies(monkeypatch, task_workbench,
                                          interface_dep_collector):
    """Test analysing the dependencies in an interface.

    """
    runtime = {'test'}
    interface = ('LinspaceLoopInterface', 'ecpy.LoopTask')
    plugin = task_workbench.get_plugin('ecpy.tasks')
    monkeypatch.setattr(plugin.get_interface_infos(interface), 'dependencies',
                        runtime)

    dep = set()
    errors = dict()
    run = interface_dep_collector.analyse(task_workbench,
                                          {'interface_id': str(interface)},
                                          getitem, dep, errors)

    assert run == runtime
    assert interface in dep
    assert not errors

    dep.clear()
    run = interface_dep_collector.analyse(task_workbench,
                                          {'interface_id':
                                              ('__dummy__', 'LoopTask')},
                                          getitem, dep, errors)
    assert not run
    assert not dep
    assert str(('__dummy__', 'LoopTask')) in errors
Example #9
def quote_from_bytes(bs, safe='/'):
    """Like quote(), but accepts a bytes object rather than a str, and does
    not perform string-to-bytes encoding.  It always returns an ASCII string.
    quote_from_bytes(b'abc def\x3f') -> 'abc%20def%3f'
    """
    if not isinstance(bs, (bytes, bytearray)):
        raise TypeError("quote_from_bytes() expected bytes")
    if not bs:
        return str('')
    ### For Python-Future:
    bs = bytes(bs)
    ###
    if isinstance(safe, str):
        # Normalize 'safe' by converting to bytes and removing non-ASCII chars
        safe = str(safe).encode('ascii', 'ignore')
    else:
        ### For Python-Future:
        safe = bytes(safe)
        ###
        safe = bytes([c for c in safe if c < 128])
    if not bs.rstrip(_ALWAYS_SAFE_BYTES + safe):
        return bs.decode()
    try:
        quoter = _safe_quoters[safe]
    except KeyError:
        _safe_quoters[safe] = quoter = Quoter(safe).__getitem__
    return str('').join([quoter(char) for char in bs])
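Bytes in, ASCII str out; '/' is in the default safe set:

quote_from_bytes(b'a b')          # -> 'a%20b'
quote_from_bytes(b'/etc/passwd')  # all bytes safe -> '/etc/passwd'
quote_from_bytes(b'')             # -> ''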
Example #10
 def __str__(self):
     ret = (
         "Channel '{id}', Location '{location}' {description}\n"
         "\tTimerange: {start_date} - {end_date}\n"
         "\tLatitude: {latitude:.2f}, Longitude: {longitude:.2f}, "
         "Elevation: {elevation:.1f} m, Local Depth: {depth:.1f} m\n"
         "{azimuth}"
         "{dip}"
         "{channel_types}"
         "\tSampling Rate: {sampling_rate:.2f} Hz\n"
         "\tSensor: {sensor}\n"
         "{response}")\
         .format(
             id=self.code, location=self.location_code,
             description="(%s)" % self.description
             if self.description else "",
             start_date=str(self.start_date),
             end_date=str(self.end_date) if self.end_date else "--",
             latitude=self.latitude, longitude=self.longitude,
             elevation=self.elevation, depth=self.depth,
             azimuth="\tAzimuth: %.2f degrees from north, clockwise\n" %
             self.azimuth if self.azimuth is not None else "",
             dip="\tDip: %.2f degrees down from horizontal\n" %
             self.dip if self.dip is not None else "",
             channel_types="\tChannel types: %s\n" % ", ".join(self.types)
                 if self.types else "",
             sampling_rate=self.sample_rate, sensor=self.sensor.type,
             response="\tResponse information available"
                 if self.response else "")
     return ret
Example #11
  def testRDFURN(self):
    """Test RDFURN handling."""
    # Make a url object
    str_url = "aff4:/hunts/W:AAAAAAAA/Results"
    url = rdfvalue.RDFURN(str_url, age=1)
    self.assertEqual(url.age, 1)
    self.assertEqual(url.Path(), "/hunts/W:AAAAAAAA/Results")
    self.assertEqual(str(url), str_url)
    self.assertEqual(url.scheme, "aff4")

    # Test the Add() function
    url = url.Add("some", age=2).Add("path", age=3)
    self.assertEqual(url.age, 3)
    self.assertEqual(url.Path(), "/hunts/W:AAAAAAAA/Results/some/path")
    self.assertEqual(str(url), utils.Join(str_url, "some", "path"))

    # Test that we can handle urns with a '?' and do not interpret them as
    # a delimiter between url and parameter list.
    str_url = "aff4:/C.0000000000000000/fs/os/c/regex.*?]&[+{}--"
    url = rdfvalue.RDFURN(str_url, age=1)
    self.assertEqual(url.Path(), str_url[5:])

    # Some more special characters...
    for path in ["aff4:/test/?#asd", "aff4:/test/#asd", "aff4:/test/?#"]:
      self.assertEqual(path, str(rdfvalue.RDFURN(path)))
Example #12
def get_api_base():
    if not production:
        api_base = str("https://sandbox-api.openpay.mx")
    else:
        api_base = str("https://api.openpay.mx")

    return api_base
Example #13
    def migrate(self):
        """method that is called to migrate this migration"""

        check = self.migration_required()
        if check is False:
            print("%s has already been migrated, skip it!" % self)
            return None

        print("Migrating %s" % self)

        self.check_migration()  # check the configuration of the Migration
        connection = self.open_db_connection()

        cursor = connection.cursor()
        cursor.execute(self.query)
        fields = [row[0] for row in cursor.description]

        if check is None:
            # update existing migrations
            self.process_cursor_for_update(connection, cursor, fields)
            self.update_tidestamp()

        else:
            # do the normal migration method
            self.process_cursor(connection, cursor, fields)

            if self.tidestamp_value:
                AppliedMigration.objects.create(classname=str(self), tidestamp=self.tidestamp_value)
            else:
                AppliedMigration.objects.create(classname=str(self))
Example #14
  def testBasicParsingOldFormat(self):
    """Test we can parse a standard file."""
    history_file = os.path.join(self.base_path, "parser_test", "History")
    history = chrome_history.ChromeParser(open(history_file, "rb"))
    entries = [x for x in history.Parse()]

    try:
      dt1 = datetime.datetime(1970, 1, 1)
      dt1 += datetime.timedelta(microseconds=entries[0][0])
    except (TypeError, ValueError):
      dt1 = entries[0][0]

    try:
      dt2 = datetime.datetime(1970, 1, 1)
      dt2 += datetime.timedelta(microseconds=entries[-1][0])
    except (TypeError, ValueError):
      dt2 = entries[-1][0]

    # Check that our results are properly time ordered
    time_results = [x[0] for x in entries]
    self.assertEqual(time_results, sorted(time_results))

    self.assertEqual(str(dt1), "2011-04-07 12:03:11")
    self.assertEqual(entries[0][2], "http://start.ubuntu.com/10.04/Google/")

    self.assertEqual(str(dt2), "2011-05-23 08:37:27.061516")
    self.assertStartsWith(
        entries[-1][2], "https://chrome.google.com/webs"
        "tore/detail/mfjkgbjaikamkkojmak"
        "jclmkianficch")

    self.assertLen(entries, 71)
Example #15
  def __init__(self, uri=None, page_size=None, shortcuts=None,
    arrows=None, snippets=None, use_unicode=None, *args, **kwargs):
    """Constructs a new EmailSettingsGeneral object with the given arguments.

    Args:
      uri: string (optional) The uri of this object for HTTP requests.
      page_size: int (optional) The number of conversations to be shown per page.
      shortcuts: Boolean (optional) Whether to enable keyboard shortcuts.
      arrows: Boolean (optional) Whether to display arrow-shaped personal
              indicators next to email sent specifically to the user.
      snippets: Boolean (optional) Whether to display snippets of the messages
                in the inbox and when searching.
      use_unicode: Boolean (optional) Whether to use UTF-8 (unicode) encoding
                   for all outgoing messages.
      args: The other parameters to pass to gdata.entry.GDEntry constructor.
      kwargs: The other parameters to pass to gdata.entry.GDEntry constructor.
    """
    super(EmailSettingsGeneral, self).__init__(*args, **kwargs)
    if uri:
      self.uri = uri
    if page_size is not None:
      self.page_size = str(page_size)
    if shortcuts is not None:
      self.shortcuts = str(shortcuts)
    if arrows is not None:
      self.arrows = str(arrows)
    if snippets is not None:
      self.snippets = str(snippets)
    if use_unicode is not None:
      self.use_unicode = str(use_unicode)
Example #16
    def save_form_data(self, instance, data):
        """
        The ``KeywordsWidget`` field will return data as a string of
        comma separated IDs for the ``Keyword`` model - convert these
        into actual ``AssignedKeyword`` instances. Also delete
        ``Keyword`` instances if their last related ``AssignedKeyword``
        instance is being removed.
        """
        from mezzanine.generic.models import AssignedKeyword, Keyword

        related_manager = getattr(instance, self.name)
        # Get a list of Keyword IDs being removed.
        old_ids = [str(a.keyword_id) for a in related_manager.all()]
        new_ids = data.split(",")
        removed_ids = set(old_ids) - set(new_ids)
        # Remove current AssignedKeyword instances.
        related_manager.all().delete()
        # Convert the data into AssignedKeyword instances.
        if data:
            data = [AssignedKeyword(keyword_id=i) for i in new_ids]
        # Remove Keyword instances that no longer have a
        # related AssignedKeyword instance.
        existing = AssignedKeyword.objects.filter(keyword__id__in=removed_ids)
        existing_ids = set([str(a.keyword_id) for a in existing])
        unused_ids = removed_ids - existing_ids
        Keyword.objects.filter(id__in=unused_ids).delete()
        super(KeywordsField, self).save_form_data(instance, data)
Example #17
  def testBasicParsing(self):
    """Test we can parse a standard file."""
    history_file = os.path.join(self.base_path, "parser_test", "History2")
    history = chrome_history.ChromeParser(open(history_file, "rb"))
    entries = [x for x in history.Parse()]

    try:
      dt1 = datetime.datetime(1970, 1, 1)
      dt1 += datetime.timedelta(microseconds=entries[0][0])
    except (TypeError, ValueError):
      dt1 = entries[0][0]

    try:
      dt2 = datetime.datetime(1970, 1, 1)
      dt2 += datetime.timedelta(microseconds=entries[-1][0])
    except (TypeError, ValueError):
      dt2 = entries[-1][0]

    # Check that our results are properly time ordered
    time_results = [x[0] for x in entries]
    self.assertEqual(time_results, sorted(time_results))

    self.assertEqual(str(dt1), "2013-05-03 15:11:26.556635")
    self.assertStartsWith(entries[0][2],
                          "https://www.google.ch/search?q=why+you+shouldn")

    self.assertEqual(str(dt2), "2013-05-03 15:11:39.763984")
    self.assertStartsWith(entries[-1][2], "http://www.test.ch/")

    self.assertLen(entries, 4)
Example #18
 def start(self, n):
     self.context["cores"] = self.cores
     if self.mem:
         if self.memtype == "rss":
             self.context["mem"] = "#$ -l rss=%sM" % (
                 int(float(self.mem) * 1024 / self.cores))
         else:
             self.context["mem"] = "#$ -l mem_free=%sM" % (
                 int(float(self.mem) * 1024))
     else:
         self.context["mem"] = ""
     if self.queue:
         self.context["queue"] = "#$ -q %s" % self.queue
     else:
         self.context["queue"] = ""
     self.context["tag"] = self.tag if self.tag else "bcbio"
     self.context["pename"] = str(self.pename)
     self.context["resources"] = "\n".join([
         _prep_sge_resource(r)
         for r in str(self.resources).split(";")
         if r.strip()
     ])
     self.context["exports"] = _local_environment_exports()
     return super(BcbioSGEEngineSetLauncher, self).start(n)
Example #19
def thumbnails(html):
    """
    Given a HTML string, converts paths in img tags to thumbnail
    paths, using Mezzanine's ``thumbnail`` template tag. Used as
    one of the default values in the ``RICHTEXT_FILTERS`` setting.
    """
    from django.conf import settings
    from bs4 import BeautifulSoup
    from mezzanine.core.templatetags.mezzanine_tags import thumbnail

    # If MEDIA_URL isn't in the HTML string, there aren't any
    # images to replace, so bail early.
    if settings.MEDIA_URL.lower() not in html.lower():
        return html

    dom = BeautifulSoup(html, "html.parser")
    for img in dom.findAll("img"):
        src = img.get("src", "")
        src_in_media = src.lower().startswith(settings.MEDIA_URL.lower())
        width = img.get("width")
        height = img.get("height")
        if src_in_media and str(width).isdigit() and str(height).isdigit():
            img["src"] = settings.MEDIA_URL + thumbnail(src, width, height)
    # BeautifulSoup adds closing </br> tags, which the browser interprets as extra br tags.
    return str(dom).replace("</br>", "")
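A sketch of the filter in use, assuming MEDIA_URL is '/media/' (path hypothetical):

html = '<img src="/media/uploads/photo.jpg" width="100" height="80">'
html = thumbnails(html)  # src now points at a generated 100x80 thumbnail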
Example #20
File: flow.py Project: google/grr
  def _HandleRelational(self, args):
    requests_and_responses = data_store.REL_DB.ReadAllFlowRequestsAndResponses(
        str(args.client_id), str(args.flow_id))

    result = ApiListFlowRequestsResult()
    stop = None
    if args.count:
      stop = args.offset + args.count

    for request, response_dict in itertools.islice(requests_and_responses,
                                                   args.offset, stop):
      client_urn = args.client_id.ToClientURN()
      request_state = rdf_flow_runner.RequestState(
          client_id=client_urn,
          id=request.request_id,
          next_state=request.next_state,
          session_id=client_urn.Add("flows").Add(str(request.flow_id)))
      api_request = ApiFlowRequest(
          request_id=str(request.request_id), request_state=request_state)

      if response_dict:
        responses = [
            response_dict[i].AsLegacyGrrMessage() for i in sorted(response_dict)
        ]
        for r in responses:
          r.ClearPayload()

        api_request.responses = responses

      result.items.append(api_request)

    return result
Example #21
File: flow.py Project: google/grr
  def Handle(self, args, token=None):
    if data_store.RelationalDBEnabled():
      count = args.count or db.MAX_COUNT

      logs = data_store.REL_DB.ReadFlowLogEntries(
          str(args.client_id), str(args.flow_id), args.offset, count,
          args.filter)
      total_count = data_store.REL_DB.CountFlowLogEntries(
          str(args.client_id), str(args.flow_id))
      return ApiListFlowLogsResult(
          items=[
              ApiFlowLog().InitFromFlowLogEntry(log, str(args.flow_id))
              for log in logs
          ],
          total_count=total_count)
    else:
      flow_urn = args.flow_id.ResolveClientFlowURN(args.client_id, token=token)
      logs_collection = flow.GRRFlow.LogCollectionForFID(flow_urn)

      result = api_call_handler_utils.FilterCollection(logs_collection,
                                                       args.offset, args.count,
                                                       args.filter)

      return ApiListFlowLogsResult(
          items=[ApiFlowLog().InitFromFlowLog(x) for x in result],
          total_count=len(logs_collection))
Example #22
def str_value(v, encoding='utf-8', bool_int=True, none='NULL'):
    import datetime
    import decimal
    
    if callable(v):
        v = v()
    if isinstance(v, datetime.datetime):
        return v.strftime('%Y-%m-%d %H:%M:%S')
    elif isinstance(v, datetime.date):
        return v.strftime('%Y-%m-%d')
    elif isinstance(v, datetime.time):
        return v.strftime('%H:%M:%S')
    elif isinstance(v, decimal.Decimal):
        return str(v)
    elif isinstance(v, str):
        return v.encode(encoding)
    elif v is None:
        return none
    elif isinstance(v, bool):
        if bool_int:
            if v:
                return '1'
            else:
                return '0'
        else:
            return str(v)
    else:
        return str(v)
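A sketch of the conversions performed:

import datetime
import decimal

str_value(datetime.datetime(2020, 1, 2, 3, 4, 5))  # '2020-01-02 03:04:05'
str_value(datetime.date(2020, 1, 2))               # '2020-01-02'
str_value(decimal.Decimal('1.50'))                 # '1.50'
str_value(None)                                    # 'NULL'
str_value(True)                                    # '1'
str_value(False, bool_int=False)                   # 'False'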
Example #23
 def test_initUTCDateTime(self):
     dt = UTCDateTime(year=2008, month=1, day=1)
     self.assertEqual(str(dt), "2008-01-01T00:00:00.000000Z")
     dt = UTCDateTime(year=2008, julday=1, hour=12, microsecond=5000)
     self.assertEqual(str(dt), "2008-01-01T12:00:00.005000Z")
     # without parameters returns current date time
     dt = UTCDateTime()
Example #24
def _unpack_value(value, prefix='', whitelist=None, blacklist=None):
    """
    Unpack values from a data structure and convert to string. Call
    the corresponding functions for dict or iterables or use simple
    string conversion for scalar variables.

    Parameters
    ----------
    value : dict, iterable, scalar variable
        Value to be unpacked.
    prefix : str, optional
        Prefix to prepend to the resulting string. Defaults to empty
        string.
    """

    try:
        return _generate_string_from_dict(value,
                                          blacklist=blacklist,
                                          whitelist=whitelist,
                                          prefix=prefix + 'd')
    except AttributeError:
        # not a dict
        try:
            return prefix + _generate_string_from_iterable(value, prefix='i')
        except TypeError:
            # not an iterable
            if isinstance(value, float):
                return prefix + str(_save_convert_float_to_int(value))
            else:
                return prefix + str(value)
Example #25
def test_collect_interface_dependencies(task_workbench):
    """Test collecting the dependencies found in an interface.

    """
    runtime = ['test']
    interface = ('LinspaceLoopInterface', 'LoopTask')
    plugin = task_workbench.get_plugin('ecpy.tasks')
    plugin.get_interface_infos(interface).dependencies = runtime

    dep = defaultdict(dict)
    errors = defaultdict(dict)
    run = collect_interface(task_workbench,
                            {'interface_class': str(interface)},
                            getitem, dep, errors)

    assert run == runtime
    assert INTERFACE_DEP_TYPE in dep
    assert interface in dep[INTERFACE_DEP_TYPE]
    assert not errors

    dep.clear()
    run = collect_interface(task_workbench,
                            {'interface_class': ('__dummy__', 'LoopTask')},
                            getitem, dep, errors)
    assert not run
    assert not dep
    assert INTERFACE_DEP_TYPE in errors
    assert str(('__dummy__', 'LoopTask')) in errors[INTERFACE_DEP_TYPE]
Example #26
 def __repr__(self):
     if PY2 and isinstance(self.value, unicode):
         val = str(self.value)    # make it a newstr to remove the u prefix
     else:
         val = self.value
     return '<%s: %s=%s>' % (self.__class__.__name__,
                             str(self.key), repr(val))
Example #27
    def OutputString(self, attrs=None):
        # Build up our result
        #
        result = []
        append = result.append

        # First, the key=value pair
        append("%s=%s" % (self.key, self.coded_value))

        # Now add any defined attributes
        if attrs is None:
            attrs = self._reserved
        items = sorted(self.items())
        for key, value in items:
            if value == "":
                continue
            if key not in attrs:
                continue
            if key == "expires" and isinstance(value, int):
                append("%s=%s" % (self._reserved[key], _getdate(value)))
            elif key == "max-age" and isinstance(value, int):
                append("%s=%d" % (self._reserved[key], value))
            elif key == "secure":
                append(str(self._reserved[key]))
            elif key == "httponly":
                append(str(self._reserved[key]))
            else:
                append("%s=%s" % (self._reserved[key], value))

        # Return the result
        return _semispacejoin(result)
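This mirrors the stdlib http.cookies Morsel; a sketch against that API (attribute order in the output is alphabetical, as in the loop above):

from http.cookies import SimpleCookie

c = SimpleCookie()
c['session'] = 'abc123'
c['session']['max-age'] = 3600
c['session']['httponly'] = True
print(c['session'].OutputString())
# session=abc123; HttpOnly; Max-Age=3600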
Example #28
  def UpdateCell(self, row, col, inputValue, key, wksht_id='default'):
    """Updates an existing cell.

    Args:
      row: int The row the cell to be edited is in
      col: int The column the cell to be edited is in
      inputValue: str The new value of the cell
      key: str The key of the spreadsheet in which this cell resides.
      wksht_id: str The ID of the worksheet which holds this cell.

    Returns:
      The updated cell entry
    """
    row = str(row)
    col = str(col)
    # make the new cell
    new_cell = gdata.spreadsheet.Cell(row=row, col=col, inputValue=inputValue)
    # get the edit uri and PUT
    cell = 'R%sC%s' % (row, col)
    entry = self.GetCellsFeed(key, wksht_id, cell)
    for a_link in entry.link:
      if a_link.rel == 'edit':
        entry.cell = new_cell
        return self.Put(entry, a_link.href,
            converter=gdata.spreadsheet.SpreadsheetsCellFromString)
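A hypothetical call matching the docstring (key and worksheet ID are placeholders):

entry = client.UpdateCell(row=2, col=3, inputValue='42',
                          key='your_spreadsheet_key', wksht_id='default')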
Example #29
def _order_totals(context):
    """
    Add ``item_total``, ``shipping_total``, ``discount_total``, ``tax_total``,
    and ``order_total`` to the template context. Use the order object for
    email receipts, or the cart object for checkout.
    """
    if "order" in context:
        for f in ("item_total", "shipping_total", "discount_total",
                  "tax_total"):
            context[f] = getattr(context["order"], f)
    else:
        context["item_total"] = context["request"].cart.total_price()
        if context["item_total"] == 0:
            # Ignore session if cart has no items, as cart may have
            # expired sooner than the session.
            context["tax_total"] = context["discount_total"] = \
                context["shipping_total"] = 0
        else:
            for f in ("shipping_type", "shipping_total", "discount_total",
                      "tax_type", "tax_total"):
                context[f] = context["request"].session.get(f, None)
    context["order_total"] = context.get("item_total", None)
    if context.get("shipping_total", None) is not None:
        context["order_total"] += Decimal(str(context["shipping_total"]))
    if context.get("discount_total", None) is not None:
        context["order_total"] -= Decimal(str(context["discount_total"]))
    if context.get("tax_total", None) is not None:
        context["order_total"] += Decimal(str(context["tax_total"]))
    return context
Example #30
def hexdump(data, linesize):
    r"""
    Turns bytes into a unicode string of the format:

    ::

        >>> print(hexdump(b'0' * 100, 16))
        hexundump(\"\"\"
        0000   30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30   0000000000000000
        0010   30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30   0000000000000000
        0020   30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30   0000000000000000
        0030   30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30   0000000000000000
        0040   30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30   0000000000000000
        0050   30 30 30 30 30 30 30 30 30 30 30 30 30 30 30 30   0000000000000000
        0060   30 30 30 30                                       0000
        \"\"\")
    """
    if len(data) < 16**4:
        fmt = "%%04X   %%-%ds   %%s" % (3*linesize-1,)
    elif len(data) < 16**8:
        fmt = "%%08X   %%-%ds   %%s" % (3*linesize-1,)
    else:
        raise ValueError("hexdump cannot process more than 16**8 or 4294967296 bytes")
    prettylines = []
    for i in range(0, len(data), linesize):
        line = data[i:i+linesize]
        hextext = " ".join(HEXPRINT[b] for b in iterateints(line))
        rawtext = "".join(PRINTABLE[b] for b in iterateints(line))
        prettylines.append(fmt % (i, str(hextext), str(rawtext)))
    return "\n".join(prettylines)
Example #31
 def test_string(self):
     d = b'\x01\x02\x03abc123\xff\xfe'
     t = xmlrpclib.Binary(d)
     self.assertEqual(str(t), str(d, "latin-1"))
Example #32
def plot_optimization(record, record_g0, num=7, fname=None, title=None,
                      figsize=None, **kwargs):
    fig = plt.figure(figsize=figsize)
    if num > 1:
        n = (num + 1) // 2
        gs = gridspec.GridSpec(n, n)
        ax = plt.subplot(gs[1:, 0:-1])
        share = None
    else:
        ax = fig.add_subplot(111)
    if title:
        ax.annotate(title, (0, 1), (5, -5), 'axes fraction', 'offset points',
                    ha='left', va='top')
    err, g0 = zip(*record_g0)
    if not np.all(np.isinf(err)):
        ax.loglog(g0, err, 'xk')
        # best value is plotted blue
        ax.loglog(g0[-1], err[-1], 'xb', mew=2)
    # infinite values are plotted with red crosses
    if np.inf in err:
        g0_inf = [g0_ for (err_, g0_) in record_g0 if err_ == np.inf]
        err_inf = np.mean(ax.get_ylim())
        ax.loglog(g0_inf, err_inf * np.ones(len(g0_inf)), 'xr')
        for i in range(len(record)):
            if record[i][0] == np.inf:
                record[i] = (err_inf,) + record[i][1:]
    if num > 1:
        for i, rec in enumerate(record):
            err, g0, b, W, _, _ = rec
            if i < n:
                gsp = gs[0, i]
                l = str(i + 1)
            elif i < num - 1:
                gsp = gs[i - n + 1, -1]
                l = str(i + 1)
            else:
                gsp = gs[n - 1, -1]
                l = 'best'
            ax2 = plt.subplot(gsp, sharex=share, sharey=share)
            plot_lstsq(rec, ax=ax2)
            ax2.annotate(l, (0, 1), (5, -5), 'axes fraction',
                         'offset points', ha='left', va='top')
            l2 = 'g$_0$=%.1e\nb=%.1e' % (g0, b)
            l2 = l2 + '\nW%s=%.1e' % ('$_1$' * (len(W) > 1), W[0])
            ax2.annotate(l2, (1, 1), (-5, -5), 'axes fraction',
                         'offset points', ha='right', va='top',
                         size='xx-small')
            if l != 'best':
                ax.annotate(l, (g0, err), (5, 5), 'data', 'offset points',
                            ha='left', va='bottom')
            if i == 0:
                share = ax2
                yl = (r'$\ln \frac{E_{\mathrm{obs}\,ij}}{G_{ij}B_jR_i}$')
                if len(W) == 1:
                    yl = (r'$\ln \frac{E_{\mathrm{obs}\,i}}{G_iR_i}$')
                ax2.set_ylabel(yl)
                plt.setp(ax2.get_xticklabels(), visible=False)
            elif l == 'best':
                ax2.set_xlabel(r'time ($\mathrm{s}$)')
                plt.setp(ax2.get_yticklabels(), visible=False)
            else:
                plt.setp(ax2.get_xticklabels(), visible=False)
                plt.setp(ax2.get_yticklabels(), visible=False)
        ax2.locator_params(axis='y', nbins=4)
        ax2.locator_params(axis='x', nbins=3)
    ax.set_xlabel(r'g$_0$ ($\mathrm{m}^{-1}$)')
    # yl = (r'error $\mathrm{rms}\left(\ln\frac{E_{\mathrm{obs}, ij}}'
    #      r'{E_{\mathrm{mod}, ij}}\right)$')
    ax.set_ylabel(r'misfit $\epsilon$')
    _savefig(fig, fname=fname, **kwargs)
Example #33
 def __str__(self):
     return str(self.label)
Example #34
 def __str__(self):
     return str(self.title)
Example #35
    def testKnowledgeBaseMultipleProvidesNoDict(self):
        with self.assertRaises(RuntimeError) as context:
            self._RunKBIFlow(["TooManyProvides"])

        self.assertIn("multiple provides clauses", str(context.exception))
Example #36
    def testKnowledgeBaseNoProvides(self):
        with self.assertRaises(RuntimeError) as context:
            self._RunKBIFlow(["NoProvides"])

        self.assertIn("does not have a provide", str(context.exception))
Example #37
def stylable_items_for_result(cl, result, form):
    """
    Return an iterator which returns all columns to display in the list.
    This method is based on items_for_result(), yet completely refactored.
    """
    first = True
    pk = cl.lookup_opts.pk.attname

    # Read any custom properties
    list_column_classes = getattr(cl.model_admin, 'list_column_classes', {})

    # figure out which field to indent
    mptt_indent_field = _get_mptt_indent_field(cl, result)

    # Parse all fields to display
    for field_name in cl.list_display:
        row_attr = ''

        # This is all standard stuff, refactored to separate methods.
        result_repr, row_classes = stylable_column_repr(cl, result, field_name)
        if force_text(result_repr) == '':
            result_repr = mark_safe('&nbsp;')

        # Custom stuff, select row classes
        if field_name == mptt_indent_field:
            level = getattr(result, result._mptt_meta.level_attr)
            row_attr += ' style="padding-left:%spx"' % (
                5 + MPTT_ADMIN_LEVEL_INDENT * level)

        column_class = list_column_classes.get(field_name)
        if column_class:
            row_classes.append(column_class)

        if row_classes:
            row_attr += ' class="%s"' % ' '.join(row_classes)

        # Add the link tag to the first field, or use list_display_links if it's defined.
        if (first and not cl.list_display_links
            ) or field_name in cl.list_display_links:
            table_tag = ('th' if first else 'td')
            first = False
            url = cl.url_for_result(result)

            link_attr = ''
            if cl.is_popup:
                # Convert the pk to something that can be used in Javascript.
                # Problem cases are long ints (23L) and non-ASCII strings.
                if cl.to_field:
                    attr = str(cl.to_field)
                else:
                    attr = pk
                value = result.serializable_value(attr)
                result_id = repr(force_text(value))[1:]
                link_attr += ' onclick="opener.dismissRelatedLookupPopup(window, %s); return false;"' % result_id

            yield mark_safe(u'<%s%s><a href="%s"%s>%s</a></%s>' %
                            (table_tag, row_attr, url, link_attr,
                             conditional_escape(result_repr), table_tag))
        else:
            # By default the fields come from ModelAdmin.list_editable,
            # but if we pull the fields out of the form instead,
            # custom ModelAdmin instances can provide fields on a per request basis
            if form and field_name in form.fields:
                bf = form[field_name]
                result_repr = mark_safe(force_text(bf.errors) + force_text(bf))
            else:
                result_repr = conditional_escape(result_repr)

            yield mark_safe(u'<td%s>%s</td>' % (row_attr, result_repr))

    if form:
        yield mark_safe(u'<td>%s</td>' %
                        force_text(form[cl.model._meta.pk.name]))
Example #38
 def test_datetime_datetime(self):
     d = datetime.datetime(2007,1,2,3,4,5)
     t = xmlrpclib.DateTime(d)
     self.assertEqual(str(t), '20070102T03:04:05')
Example #39
 def test_time_tuple(self):
     d = (2007,6,9,10,38,50,5,160,0)
     t = xmlrpclib.DateTime(d)
     self.assertEqual(str(t), '20070609T10:38:50')
Example #40
 def test_default(self):
     t = xmlrpclib.Binary()
     self.assertEqual(str(t), '')
Example #41
def thumbnail(image_url,
              width,
              height,
              upscale=True,
              quality=95,
              left=.5,
              top=.5,
              padding=False,
              padding_color="#fff",
              rotate=False):
    """
    Given the URL to an image, resizes the image using the given width
    and height on the first time it is requested, and returns the URL
    to the new resized image. If width or height are zero then original
    ratio is maintained. When ``upscale`` is False, images smaller than
    the given size will not be grown to fill that size. The given width
    and height thus act as maximum dimensions.
    """

    if not image_url:
        return ""
    try:
        from PIL import Image, ImageFile, ImageOps
    except ImportError:
        return ""

    image_url = unquote(str(image_url)).split("?")[0]
    if image_url.startswith(settings.MEDIA_URL):
        image_url = image_url.replace(settings.MEDIA_URL, "", 1)
    image_dir, image_name = os.path.split(image_url)
    image_prefix, image_ext = os.path.splitext(image_name)
    filetype = {".png": "PNG", ".gif": "GIF"}.get(image_ext, "JPEG")
    thumb_name = "%s-%sx%s" % (image_prefix, width, height)
    if not upscale:
        thumb_name += "-no-upscale"
    if left != .5 or top != .5:
        left = min(1, max(0, left))
        top = min(1, max(0, top))
        thumb_name = "%s-%sx%s" % (thumb_name, left, top)
    thumb_name += "-padded-%s" % padding_color if padding else ""
    thumb_name = "%s%s" % (thumb_name, image_ext)

    # `image_name` is used here for the directory path, as each image
    # requires its own sub-directory using its own name - this is so
    # we can consistently delete all thumbnails for an individual
    # image, which is something we do in filebrowser when a new image
    # is written, allowing us to purge any previously generated
    # thumbnails that may match a new image name.
    thumb_dir = os.path.join(settings.MEDIA_ROOT, image_dir,
                             settings.THUMBNAILS_DIR_NAME, image_name)
    if not os.path.exists(thumb_dir):
        try:
            os.makedirs(thumb_dir)
        except OSError:
            pass

    thumb_path = os.path.join(thumb_dir, thumb_name)
    thumb_url = "%s/%s/%s" % (settings.THUMBNAILS_DIR_NAME,
                              quote(image_name.encode("utf-8")),
                              quote(thumb_name.encode("utf-8")))
    image_url_path = os.path.dirname(image_url)
    if image_url_path:
        thumb_url = "%s/%s" % (image_url_path, thumb_url)

    try:
        thumb_exists = os.path.exists(thumb_path)
    except UnicodeEncodeError:
        # The image was saved to a filesystem with utf-8 support, but
        # the locale has since changed and the filesystem no longer
        # supports utf-8.
        from mezzanine.core.exceptions import FileSystemEncodingChanged
        raise FileSystemEncodingChanged()
    if thumb_exists:
        # Thumbnail exists, don't generate it.
        return thumb_url
    elif not default_storage.exists(image_url):
        # Requested image does not exist, just return its URL.
        return image_url

    f = default_storage.open(image_url)
    try:
        image = Image.open(f)
    except:
        # Invalid image format.
        return image_url

    image_info = image.info
    to_width = int(width)
    to_height = int(height)
    from_width = image.size[0]
    from_height = image.size[1]

    if rotate and from_width < from_height:
        t2 = to_width
        to_width = to_height
        to_height = t2

    if not upscale:
        to_width = min(to_width, from_width)
        to_height = min(to_height, from_height)

    # Set dimensions.
    if to_width == 0:
        to_width = from_width * to_height // from_height
    elif to_height == 0:
        to_height = from_height * to_width // from_width
    if image.mode not in ("P", "L", "RGBA"):
        try:
            image = image.convert("RGBA")
        except:
            return image_url
    # Required for progressive jpgs.
    ImageFile.MAXBLOCK = 2 * (max(image.size)**2)

    # Padding.
    if padding and to_width and to_height:
        from_ratio = float(from_width) / from_height
        to_ratio = float(to_width) / to_height
        pad_size = None
        if to_ratio < from_ratio:
            pad_height = int(to_height * (float(from_width) / to_width))
            pad_size = (from_width, pad_height)
            pad_top = (pad_height - from_height) // 2
            pad_left = 0
        elif to_ratio > from_ratio:
            pad_width = int(to_width * (float(from_height) / to_height))
            pad_size = (pad_width, from_height)
            pad_top = 0
            pad_left = (pad_width - from_width) // 2
        if pad_size is not None:
            pad_container = Image.new("RGBA", pad_size, padding_color)
            pad_container.paste(image, (pad_left, pad_top))
            image = pad_container

    # Create the thumbnail.
    to_size = (to_width, to_height)
    to_pos = (left, top)
    try:
        image = ImageOps.fit(image, to_size, Image.ANTIALIAS, 0, to_pos)
        image = image.save(thumb_path, filetype, quality=quality, **image_info)
        # Push a remote copy of the thumbnail if MEDIA_URL is
        # absolute.
        if "://" in settings.MEDIA_URL:
            with open(thumb_path, "rb") as f:
                default_storage.save(thumb_url, File(f))
    except Exception:
        # If an error occurred, a corrupted image may have been saved,
        # so remove it, otherwise the check for it existing will just
        # return the corrupted image next time it's requested.
        try:
            os.remove(thumb_path)
        except Exception:
            pass
        return image_url
    return thumb_url
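Sketch of typical calls (paths hypothetical); a zero width or height preserves the aspect ratio:

thumbnail("uploads/photo.jpg", 100, 80)   # fit into 100x80
thumbnail("uploads/photo.jpg", 100, 0)    # height follows aspect ratio
thumbnail("uploads/photo.jpg", 100, 80, padding=True, padding_color="#000")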
Example #42
 def test_time_struct(self):
     d = time.localtime(1181399930.036952)
     t = xmlrpclib.DateTime(d)
     self.assertEqual(str(t), time.strftime("%Y%m%dT%H:%M:%S", d))
Example #43
def anon_url(*url):
    """
    Return a URL string consisting of the Anonymous redirect URL and an arbitrary number of values appended.
    """
    return '' if None in url else '%s%s' % (plexpy.CONFIG.ANON_REDIRECT, ''.join(str(s) for s in url))
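Sketch, assuming plexpy.CONFIG.ANON_REDIRECT is set to e.g. 'https://www.nullrefer.com/?':

anon_url('http://example.com/page?id=', 42)
# -> 'https://www.nullrefer.com/?http://example.com/page?id=42'
anon_url('http://example.com/', None)  # any None short-circuits -> ''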
Example #44
 def test_repr(self):
     f = xmlrpclib.Fault(42, 'Test Fault')
     self.assertEqual(repr(f), "<Fault 42: 'Test Fault'>")
     self.assertEqual(repr(f), str(f))
Example #45
    def _get_recently_added(self, media_type=None):
        from plexpy.notification_handler import format_group_index

        pms_connect = pmsconnect.PmsConnect()

        recently_added = []
        done = False
        start = 0

        while not done:
            recent_items = pms_connect.get_recently_added_details(start=str(start), count='10', media_type=media_type)
            filtered_items = [i for i in recent_items['recently_added']
                              if self.start_time < helpers.cast_to_int(i['added_at'])]
            if len(filtered_items) < 10:
                done = True
            else:
                start += 10

            recently_added.extend(filtered_items)

        if media_type in ('movie', 'other_video'):
            movie_list = []
            for item in recently_added:
                # Filter included libraries
                if item['section_id'] not in self.config['incl_libraries']:
                    continue

                if self.start_time < helpers.cast_to_int(item['added_at']) < self.end_time:
                    movie_list.append(item)

            recently_added = movie_list

        if media_type == 'show':
            shows_list = []
            show_rating_keys = []
            for item in recently_added:
                # Filter included libraries
                if item['section_id'] not in self.config['incl_libraries']:
                    continue

                if item['media_type'] == 'show':
                    show_rating_key = item['rating_key']
                elif item['media_type'] == 'season':
                    show_rating_key = item['parent_rating_key']
                elif item['media_type'] == 'episode':
                    show_rating_key = item['grandparent_rating_key']

                if show_rating_key in show_rating_keys:
                    continue

                show_metadata = pms_connect.get_metadata_details(show_rating_key, media_info=False)
                children = pms_connect.get_item_children(show_rating_key, media_type=media_type, get_grandchildren=True)
                filtered_children = [i for i in children['children_list']
                                     if self.start_time < helpers.cast_to_int(i['added_at']) < self.end_time]
                filtered_children.sort(key=lambda x: helpers.cast_to_int(x['parent_media_index']))

                if not filtered_children:
                    continue

                seasons = []
                for (index, title), children in groupby(filtered_children,
                                                        key=lambda x: (x['parent_media_index'], x['parent_title'])):
                    episodes = list(children)
                    num, num00 = format_group_index([helpers.cast_to_int(d['media_index']) for d in episodes])

                    seasons.append({'media_index': index,
                                    'title': title,
                                    'episode_range': num00,
                                    'episode_count': len(episodes),
                                    'episode': episodes})

                num, num00 = format_group_index([helpers.cast_to_int(d['media_index']) for d in seasons])

                show_metadata['season_range'] = num00
                show_metadata['season_count'] = len(seasons)
                show_metadata['season'] = seasons

                shows_list.append(show_metadata)
                show_rating_keys.append(show_rating_key)

            recently_added = shows_list

        if media_type == 'artist':
            artists_list = []
            artist_rating_keys = []
            for item in recently_added:
                # Filter included libraries
                if item['section_id'] not in self.config['incl_libraries']:
                    continue

                if item['media_type'] == 'artist':
                    artist_rating_key = item['rating_key']
                elif item['media_type'] == 'album':
                    artist_rating_key = item['parent_rating_key']
                elif item['media_type'] == 'track':
                    artist_rating_key = item['grandparent_rating_key']

                if artist_rating_key in artist_rating_keys:
                    continue

                artist_metadata = pms_connect.get_metadata_details(artist_rating_key, media_info=False)
                children = pms_connect.get_item_children(artist_rating_key, media_type=media_type)
                filtered_children = [i for i in children['children_list']
                                     if self.start_time < helpers.cast_to_int(i['added_at']) < self.end_time]
                filtered_children.sort(key=lambda x: x['added_at'])

                if not filtered_children:
                    continue

                albums = []
                for a in filtered_children:
                    album_metadata = pms_connect.get_metadata_details(a['rating_key'], media_info=False)
                    album_metadata['track_count'] = helpers.cast_to_int(album_metadata['children_count'])
                    albums.append(album_metadata)

                artist_metadata['album_count'] = len(albums)
                artist_metadata['album'] = albums

                artists_list.append(artist_metadata)
                artist_rating_keys.append(artist_rating_key)

            recently_added = artists_list

        return recently_added
Example #46
def convert_to_unicode(input_string):
    if isinstance(input_string, str):
        return input_string
    else:
        return str(input_string, encoding='utf8', errors='replace')
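Both branches at a glance:

convert_to_unicode('already text')  # returned unchanged
convert_to_unicode(b'caf\xc3\xa9')  # UTF-8 bytes -> 'café'
convert_to_unicode(b'\xff')         # invalid byte -> '\ufffd' (replaced)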
Example #47
    def test_pack(self):
        writekey = b"\x01" * 16
        fingerprint = b"\x02" * 32

        n = uri.WriteableSSKFileURI(writekey, fingerprint)
        u1 = uri.DirectoryURI(n)
        self.failIf(u1.is_readonly())
        self.failUnless(u1.is_mutable())
        self.failUnless(IURI.providedBy(u1))
        self.failIf(IFileURI.providedBy(u1))
        self.failUnless(IDirnodeURI.providedBy(u1))
        self.failUnless("DirectoryURI" in str(u1))
        u1_filenode = u1.get_filenode_cap()
        self.failUnless(u1_filenode.is_mutable())
        self.failIf(u1_filenode.is_readonly())

        u2 = uri.from_string(u1.to_string())
        self.failUnlessReallyEqual(u1.to_string(), u2.to_string())
        self.failIf(u2.is_readonly())
        self.failUnless(u2.is_mutable())
        self.failUnless(IURI.providedBy(u2))
        self.failIf(IFileURI.providedBy(u2))
        self.failUnless(IDirnodeURI.providedBy(u2))

        u2i = uri.from_string(u1.to_string(), deep_immutable=True)
        self.failUnless(isinstance(u2i, uri.UnknownURI))

        u3 = u2.get_readonly()
        self.failUnless(u3.is_readonly())
        self.failUnless(u3.is_mutable())
        self.failUnless(IURI.providedBy(u3))
        self.failIf(IFileURI.providedBy(u3))
        self.failUnless(IDirnodeURI.providedBy(u3))

        u3i = uri.from_string(u2.to_string(), deep_immutable=True)
        self.failUnless(isinstance(u3i, uri.UnknownURI))

        u3n = u3._filenode_uri
        self.failUnless(u3n.is_readonly())
        self.failUnless(u3n.is_mutable())
        u3_filenode = u3.get_filenode_cap()
        self.failUnless(u3_filenode.is_mutable())
        self.failUnless(u3_filenode.is_readonly())

        u3a = uri.from_string(u3.to_string())
        self.failUnlessIdentical(u3a, u3a.get_readonly())

        u4 = uri.ReadonlyDirectoryURI(u2._filenode_uri.get_readonly())
        self.failUnlessReallyEqual(u4.to_string(), u3.to_string())
        self.failUnless(u4.is_readonly())
        self.failUnless(u4.is_mutable())
        self.failUnless(IURI.providedBy(u4))
        self.failIf(IFileURI.providedBy(u4))
        self.failUnless(IDirnodeURI.providedBy(u4))

        u4_verifier = u4.get_verify_cap()
        u4_verifier_filenode = u4_verifier.get_filenode_cap()
        self.failUnless(isinstance(u4_verifier_filenode, uri.SSKVerifierURI))

        verifiers = [u1.get_verify_cap(), u2.get_verify_cap(),
                     u3.get_verify_cap(), u4.get_verify_cap(),
                     uri.DirectoryURIVerifier(n.get_verify_cap()),
                     ]
        for v in verifiers:
            self.failUnless(IVerifierURI.providedBy(v))
            self.failUnlessReallyEqual(v._filenode_uri,
                                 u1.get_verify_cap()._filenode_uri)
Example #48
    def retrieve_data(self):
        from plexpy.notification_handler import get_img_info, set_hash_image_info

        if not self.config['incl_libraries']:
            logger.warn("Tautulli Newsletters :: Failed to retrieve %s newsletter data: no libraries selected." % self.NAME)

        media_types = set()
        for s in self._get_sections():
            if str(s['section_id']) in self.config['incl_libraries']:
                if s['section_type'] == 'movie' and s['agent'] == 'com.plexapp.agents.none':
                    media_types.add('other_video')
                else:
                    media_types.add(s['section_type'])

        recently_added = {}
        for media_type in media_types:
            if media_type not in recently_added:
                recently_added[media_type] = self._get_recently_added(media_type)

        movies = recently_added.get('movie', [])
        shows = recently_added.get('show', [])
        artists = recently_added.get('artist', [])
        albums = [a for artist in artists for a in artist['album']]
        other_video = recently_added.get('other_video', [])

        if self.is_preview or helpers.get_img_service(include_self=True) == 'self-hosted':
            for item in movies + shows + albums + other_video:
                if item['media_type'] == 'album':
                    height = 150
                    fallback = 'cover'
                else:
                    height = 225
                    fallback = 'poster'

                item['thumb_hash'] = set_hash_image_info(
                    img=item['thumb'], width=150, height=height, fallback=fallback)

                if item['art']:
                    item['art_hash'] = set_hash_image_info(
                        img=item['art'], width=500, height=280,
                        opacity=25, background='282828', blur=3, fallback='art')
                else:
                    item['art_hash'] = ''

                item['thumb_url'] = ''
                item['art_url'] = ''
                item['poster_url'] = item['thumb_url']  # Keep for backwards compatibility

        elif helpers.get_img_service():
            # Upload posters and art to image hosting service
            for item in movies + shows + albums + other_video:
                if item['media_type'] == 'album':
                    height = 150
                    fallback = 'cover'
                else:
                    height = 225
                    fallback = 'poster'

                img_info = get_img_info(
                    img=item['thumb'], rating_key=item['rating_key'], title=item['title'],
                    width=150, height=height, fallback=fallback)

                item['thumb_url'] = img_info.get('img_url') or common.ONLINE_POSTER_THUMB

                img_info = get_img_info(
                    img=item['art'], rating_key=item['rating_key'], title=item['title'],
                    width=500, height=280, opacity=25, background='282828', blur=3, fallback='art')

                item['art_url'] = img_info.get('img_url')

                item['thumb_hash'] = ''
                item['art_hash'] = ''
                item['poster_url'] = item['thumb_url']  # Keep for backwards compatibility

        else:
            for item in movies + shows + albums + other_video:
                item['thumb_hash'] = ''
                item['art_hash'] = ''
                item['thumb_url'] = ''
                item['art_url'] = ''
                item['poster_url'] = item['thumb_url']  # Keep for backwards compatibility

        self.data['recently_added'] = recently_added

        return self.data
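
A minimal sketch (not part of Tautulli) restating the artwork geometry shared
by the three branches above, using only the values visible in the code:

def _thumb_params(media_type):
    # Albums use square 150x150 cover art; every other media type uses a
    # 150x225 poster (2:3 ratio), matching the branches above.
    if media_type == 'album':
        return 150, 150, 'cover'
    return 150, 225, 'poster'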
Exemplo n.º 49
 def in_menu_template(self, template_name):
     if self.in_menus is not None:
         for i, l, t in settings.PAGE_MENU_TEMPLATES:
             if str(i) not in self.in_menus and t == template_name:
                 return False
     return True
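
For context, ``settings.PAGE_MENU_TEMPLATES`` is iterated as
``(id, label, template)`` triples and ``in_menus`` holds the selected menu
ids as strings. A minimal sketch with illustrative values (not Mezzanine's
actual defaults):

PAGE_MENU_TEMPLATES = (
    (1, "Top navigation bar", "pages/menus/dropdown.html"),
    (2, "Left-hand tree", "pages/menus/tree.html"),
)

# With page.in_menus == ["1"], page.in_menu_template("pages/menus/tree.html")
# returns False: menu id 2 is not selected for that template.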
Exemplo n.º 50
def set_newsletter_config(newsletter_id=None, agent_id=None, **kwargs):
    if str(agent_id).isdigit():
        agent_id = int(agent_id)
    else:
        logger.error("Tautulli Newsletters :: Unable to set existing newsletter: invalid agent_id %s."
                     % agent_id)
        return False

    agent = next((a for a in available_newsletter_agents() if a['id'] == agent_id), None)

    if not agent:
        logger.error("Tautulli Newsletters :: Unable to retrieve existing newsletter agent: invalid agent_id %s."
                     % agent_id)
        return False

    config_prefix = 'newsletter_config_'
    email_config_prefix = 'newsletter_email_'

    newsletter_config = {k[len(config_prefix):]: kwargs.pop(k)
                         for k in list(kwargs.keys()) if k.startswith(config_prefix)}
    email_config = {k[len(email_config_prefix):]: kwargs.pop(k)
                    for k in list(kwargs.keys()) if k.startswith(email_config_prefix)}

    for cfg, val in email_config.items():
        # Check for password config keys and a blank password from the HTML form
        if 'password' in cfg and val == '    ':
            # Get the previous password so we don't overwrite it with a blank value
            old_newsletter_config = get_newsletter_config(newsletter_id=newsletter_id)
            email_config[cfg] = old_newsletter_config['email_config'][cfg]

    subject = kwargs.pop('subject')
    body = kwargs.pop('body')
    message = kwargs.pop('message')

    agent_class = get_agent_class(agent_id=agent['id'],
                                  config=newsletter_config, email_config=email_config,
                                  subject=subject, body=body, message=message)

    keys = {'id': newsletter_id}
    values = {'agent_id': agent['id'],
              'agent_name': agent['name'],
              'agent_label': agent['label'],
              'id_name': kwargs.get('id_name', ''),
              'friendly_name': kwargs.get('friendly_name', ''),
              'newsletter_config': json.dumps(agent_class.config),
              'email_config': json.dumps(agent_class.email_config),
              'subject': agent_class.subject,
              'body': agent_class.body,
              'message': agent_class.message,
              'cron': kwargs.get('cron'),
              'active': kwargs.get('active')
              }

    db = database.MonitorDatabase()
    try:
        db.upsert(table_name='newsletters', key_dict=keys, value_dict=values)
        logger.info("Tautulli Newsletters :: Updated newsletter agent: %s (newsletter_id %s)."
                    % (agent['label'], newsletter_id))
        newsletter_handler.schedule_newsletters(newsletter_id=newsletter_id)
        blacklist_logger()
        return True
    except Exception as e:
        logger.warn("Tautulli Newsletters :: Unable to update newsletter agent: %s." % e)
        return False
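
A hypothetical call illustrating the prefix handling above: keys starting
with ``newsletter_config_`` or ``newsletter_email_`` are stripped into the
two config dicts, while ``subject``, ``body`` and ``message`` are popped
without defaults and are therefore required. The agent id and key names
below are illustrative:

set_newsletter_config(
    newsletter_id=1,
    agent_id=0,                               # must be an integer-like id
    newsletter_config_incl_libraries=['1'],   # -> newsletter_config['incl_libraries']
    newsletter_email_notifier_id=1,           # -> email_config['notifier_id']
    subject='Recently Added',
    body='',
    message='',
    cron='0 0 * * 0',
    active=1)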
Exemplo n.º 51
    def get_payload(self, i=None, decode=False):
        """Return a reference to the payload.

        The payload will either be a list object or a string.  If you mutate
        the list object, you modify the message's payload in place.  Optional
        i returns that index into the payload.

        Optional decode is a flag indicating whether the payload should be
        decoded or not, according to the Content-Transfer-Encoding header
        (default is False).

        When True and the message is not a multipart, the payload will be
        decoded if this header's value is `quoted-printable' or `base64'.  If
        some other encoding is used, or the header is missing, or if the
        payload has bogus data (i.e. bogus base64 or uuencoded data), the
        payload is returned as-is.

        If the message is a multipart and the decode flag is True, then None
        is returned.
        """
        # Here is the logic table for this code, based on the email5.0.0 code:
        #   i     decode  is_multipart  result
        # ------  ------  ------------  ------------------------------
        #  None   True    True          None
        #   i     True    True          None
        #  None   False   True          _payload (a list)
        #   i     False   True          _payload element i (a Message)
        #   i     False   False         error (not a list)
        #   i     True    False         error (not a list)
        #  None   False   False         _payload
        #  None   True    False         _payload decoded (bytes)
        # Note that Barry planned to factor out the 'decode' case, but that
        # isn't so easy now that we handle the 8 bit data, which needs to be
        # converted in both the decode and non-decode path.
        if self.is_multipart():
            if decode:
                return None
            if i is None:
                return self._payload
            else:
                return self._payload[i]
        # For backward compatibility, use isinstance and this error message
        # instead of the more logical is_multipart test.
        if i is not None and not isinstance(self._payload, list):
            raise TypeError('Expected list, got %s' % type(self._payload))
        payload = self._payload
        # cte might be a Header, so for now stringify it.
        cte = str(self.get('content-transfer-encoding', '')).lower()
        # payload may be bytes here.
        if isinstance(payload, str):
            payload = str(payload)  # for Python-Future, so surrogateescape works
            if utils._has_surrogates(payload):
                bpayload = payload.encode('ascii', 'surrogateescape')
                if not decode:
                    try:
                        payload = bpayload.decode(
                            self.get_param('charset', 'ascii'), 'replace')
                    except LookupError:
                        payload = bpayload.decode('ascii', 'replace')
            elif decode:
                try:
                    bpayload = payload.encode('ascii')
                except UnicodeError:
                    # This won't happen for RFC compliant messages (messages
                    # containing only ASCII codepoints in the unicode input).
                    # If it does happen, turn the string into bytes in a way
                    # guaranteed not to fail.
                    bpayload = payload.encode('raw-unicode-escape')
        if not decode:
            return payload
        if cte == 'quoted-printable':
            return utils._qdecode(bpayload)
        elif cte == 'base64':
            # XXX: this is a bit of a hack; decode_b should probably be factored
            # out somewhere, but I haven't figured out where yet.
            value, defects = decode_b(b''.join(bpayload.splitlines()))
            for defect in defects:
                self.policy.handle_defect(self, defect)
            return value
        elif cte in ('x-uuencode', 'uuencode', 'uue', 'x-uue'):
            in_file = BytesIO(bpayload)
            out_file = BytesIO()
            try:
                uu.decode(in_file, out_file, quiet=True)
                return out_file.getvalue()
            except uu.Error:
                # Some decoding problem
                return bpayload
        if isinstance(payload, str):
            return bpayload
        return payload
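
A doctest-style illustration of the ``decode`` flag on a non-multipart
message (standard-library behaviour, which this backport mirrors):

>>> import email
>>> msg = email.message_from_string(
...     "Content-Transfer-Encoding: base64\n\naGVsbG8=\n")
>>> msg.get_payload()
'aGVsbG8=\n'
>>> msg.get_payload(decode=True)
b'hello'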
Exemplo n.º 52
    def test_pack(self):
        u = uri.WriteableSSKFileURI(self.writekey, self.fingerprint)
        self.failUnlessReallyEqual(u.writekey, self.writekey)
        self.failUnlessReallyEqual(u.fingerprint, self.fingerprint)
        self.failIf(u.is_readonly())
        self.failUnless(u.is_mutable())
        self.failUnless(IURI.providedBy(u))
        self.failUnless(IMutableFileURI.providedBy(u))
        self.failIf(IDirnodeURI.providedBy(u))
        self.failUnless("WriteableSSKFileURI" in str(u))

        u2 = uri.from_string(u.to_string())
        self.failUnlessReallyEqual(u2.writekey, self.writekey)
        self.failUnlessReallyEqual(u2.fingerprint, self.fingerprint)
        self.failIf(u2.is_readonly())
        self.failUnless(u2.is_mutable())
        self.failUnless(IURI.providedBy(u2))
        self.failUnless(IMutableFileURI.providedBy(u2))
        self.failIf(IDirnodeURI.providedBy(u2))

        u2i = uri.from_string(u.to_string(), deep_immutable=True)
        self.failUnless(isinstance(u2i, uri.UnknownURI), u2i)
        u2ro = uri.from_string(uri.ALLEGED_READONLY_PREFIX + u.to_string())
        self.failUnless(isinstance(u2ro, uri.UnknownURI), u2ro)
        u2imm = uri.from_string(uri.ALLEGED_IMMUTABLE_PREFIX + u.to_string())
        self.failUnless(isinstance(u2imm, uri.UnknownURI), u2imm)

        u3 = u2.get_readonly()
        readkey = hashutil.ssk_readkey_hash(self.writekey)
        self.failUnlessReallyEqual(u3.fingerprint, self.fingerprint)
        self.failUnlessReallyEqual(u3.readkey, readkey)
        self.failUnless(u3.is_readonly())
        self.failUnless(u3.is_mutable())
        self.failUnless(IURI.providedBy(u3))
        self.failUnless(IMutableFileURI.providedBy(u3))
        self.failIf(IDirnodeURI.providedBy(u3))

        u3i = uri.from_string(u3.to_string(), deep_immutable=True)
        self.failUnless(isinstance(u3i, uri.UnknownURI), u3i)
        u3ro = uri.from_string(uri.ALLEGED_READONLY_PREFIX + u3.to_string())
        self.failUnlessReallyEqual(u3.to_string(), u3ro.to_string())
        u3imm = uri.from_string(uri.ALLEGED_IMMUTABLE_PREFIX + u3.to_string())
        self.failUnless(isinstance(u3imm, uri.UnknownURI), u3imm)

        u4 = uri.ReadonlySSKFileURI(readkey, self.fingerprint)
        self.failUnlessReallyEqual(u4.fingerprint, self.fingerprint)
        self.failUnlessReallyEqual(u4.readkey, readkey)
        self.failUnless(u4.is_readonly())
        self.failUnless(u4.is_mutable())
        self.failUnless(IURI.providedBy(u4))
        self.failUnless(IMutableFileURI.providedBy(u4))
        self.failIf(IDirnodeURI.providedBy(u4))

        u4i = uri.from_string(u4.to_string(), deep_immutable=True)
        self.failUnless(isinstance(u4i, uri.UnknownURI), u4i)
        u4ro = uri.from_string(uri.ALLEGED_READONLY_PREFIX + u4.to_string())
        self.failUnlessReallyEqual(u4.to_string(), u4ro.to_string())
        u4imm = uri.from_string(uri.ALLEGED_IMMUTABLE_PREFIX + u4.to_string())
        self.failUnless(isinstance(u4imm, uri.UnknownURI), u4imm)

        u4a = uri.from_string(u4.to_string())
        self.failUnlessReallyEqual(u4a, u4)
        self.failUnless("ReadonlySSKFileURI" in str(u4a))
        self.failUnlessIdentical(u4a.get_readonly(), u4a)

        u5 = u4.get_verify_cap()
        self.failUnless(IVerifierURI.providedBy(u5))
        self.failUnlessReallyEqual(u5.get_storage_index(), u.get_storage_index())
        u7 = u.get_verify_cap()
        self.failUnless(IVerifierURI.providedBy(u7))
        self.failUnlessReallyEqual(u7.get_storage_index(), u.get_storage_index())
Exemplo n.º 53
 def is_onlyws(self):
     return self._initial_size == 0 and (not self or str(self).isspace())
Exemplo n.º 54
    def testRDFStruct(self):
        tested = TestStruct()

        # Can't set integers for string attributes.
        self.assertRaises(type_info.TypeValueError, setattr, tested, "foobar",
                          1)

        # This is a string so a string assignment is good:
        tested.foobar = "Hello"
        self.assertEqual(tested.foobar, "Hello")

        # This field must be another TestStruct instance.
        self.assertRaises(ValueError, setattr, tested, "nested", "foo")

        # It's OK to assign a compatible semantic protobuf.
        tested.nested = TestStruct(foobar="nested_foo")

        # Not OK to use the wrong semantic type.
        self.assertRaises(ValueError, setattr, tested, "nested",
                          PartialTest1(int=1))

        # Not OK to assign a serialized string - even if it is for the right type -
        # since there is no type checking.
        serialized = TestStruct(foobar="nested_foo").SerializeToString()
        self.assertRaises(ValueError, setattr, tested, "nested", serialized)

        # Nested accessors.
        self.assertEqual(tested.nested.foobar, "nested_foo")

        # Test repeated elements:

        # Empty list is ok:
        tested.repeated = []
        self.assertEqual(tested.repeated, [])

        tested.repeated = ["string"]
        self.assertEqual(tested.repeated, ["string"])

        self.assertRaises(type_info.TypeValueError, setattr, tested,
                          "repeated", [1, 2, 3])

        # Coercing on assignment. This field is an RDFURN:
        tested.urn = "www.example.com"
        self.assertIsInstance(tested.urn, rdfvalue.RDFURN)

        self.assertEqual(tested.urn, rdfvalue.RDFURN("www.example.com"))

        # Test enums.
        self.assertEqual(tested.type, 3)
        self.assertEqual(tested.type.name, "THIRD")

        tested.type = "FIRST"
        self.assertEqual(tested.type, 1)

        # Check that string assignments are case-insensitive.
        tested.type = "second"
        self.assertEqual(tested.type, 2)
        tested.type = "ThIrD"
        self.assertEqual(tested.type, 3)

        # Invalid enum values are rejected.
        self.assertRaises(type_info.TypeValueError, setattr, tested, "type",
                          "Foo")

        # Strings of digits should be accepted.
        tested.type = "2"
        self.assertEqual(tested.type, 2)
        # unicode strings should be treated the same way.
        tested.type = u"2"
        self.assertEqual(tested.type, 2)
        # Out of range values are permitted and preserved through serialization.
        tested.type = 4
        self.assertEqual(tested.type, 4)
        serialized_type = str(tested.type).encode("utf-8")
        tested.type = 1
        tested.type = serialized_type
        self.assertEqual(tested.type, 4)
Exemplo n.º 55
    def _make_checker_results(self, smap):
        self._monitor.raise_if_cancelled()
        healthy = True
        report = []
        summary = []
        vmap = smap.make_versionmap()
        recoverable = smap.recoverable_versions()
        unrecoverable = smap.unrecoverable_versions()

        if recoverable:
            report.append("Recoverable Versions: " + "/".join([
                "%d*%s" % (len(vmap[v]), smap.summarize_version(v))
                for v in recoverable
            ]))
        if unrecoverable:
            report.append("Unrecoverable Versions: " + "/".join([
                "%d*%s" % (len(vmap[v]), smap.summarize_version(v))
                for v in unrecoverable
            ]))
        if unrecoverable:
            healthy = False
            summary.append("some versions are unrecoverable")
            report.append("Unhealthy: some versions are unrecoverable")
        if len(recoverable) == 0:
            healthy = False
            summary.append("no versions are recoverable")
            report.append("Unhealthy: no versions are recoverable")
        if len(recoverable) > 1:
            healthy = False
            summary.append("multiple versions are recoverable")
            report.append("Unhealthy: there are multiple recoverable versions")

        if recoverable:
            best_version = smap.best_recoverable_version()
            report.append("Best Recoverable Version: " +
                          smap.summarize_version(best_version))
            counters = self._count_shares(smap, best_version)
            s = counters["count-shares-good"]
            k = counters["count-shares-needed"]
            N = counters["count-shares-expected"]
            if s < N:
                healthy = False
                report.append("Unhealthy: best version has only %d shares "
                              "(encoding is %d-of-%d)" % (s, k, N))
                summary.append("%d shares (enc %d-of-%d)" % (s, k, N))
        elif unrecoverable:
            healthy = False
            # find a k and N from somewhere
            first = list(unrecoverable)[0]
            # not exactly the best version, but that doesn't matter too much
            counters = self._count_shares(smap, first)
        else:
            # couldn't find anything at all
            counters = {
                "count-shares-good": 0,
                "count-shares-needed": 3,  # arbitrary defaults
                "count-shares-expected": 10,
                "count-good-share-hosts": 0,
                "count-wrong-shares": 0,
            }

        corrupt_share_locators = []
        problems = []
        if self.bad_shares:
            report.append("Corrupt Shares:")
            summary.append("Corrupt Shares:")
        for (server, shnum, f) in sorted(self.bad_shares, key=id):
            serverid = server.get_serverid()
            locator = (server, self._storage_index, shnum)
            corrupt_share_locators.append(locator)
            s = "%s-sh%d" % (server.get_name(), shnum)
            if f.check(CorruptShareError):
                ft = f.value.reason
            else:
                ft = str(f)
            report.append(" %s: %s" % (s, ft))
            summary.append(s)
            p = (serverid, self._storage_index, shnum, f)
            problems.append(p)
            msg = ("CorruptShareError during mutable verify, "
                   "serverid=%(serverid)s, si=%(si)s, shnum=%(shnum)d, "
                   "where=%(where)s")
            log.msg(format=msg,
                    serverid=server.get_name(),
                    si=base32.b2a(self._storage_index),
                    shnum=shnum,
                    where=ft,
                    level=log.WEIRD,
                    umid="EkK8QA")

        sharemap = dictutil.DictOfSets()
        for verinfo in vmap:
            for (shnum, server, timestamp) in vmap[verinfo]:
                shareid = "%s-sh%d" % (smap.summarize_version(verinfo), shnum)
                sharemap.add(shareid, server)
        if healthy:
            summary = "Healthy"
        else:
            summary = "Unhealthy: " + " ".join(summary)

        count_happiness = servers_of_happiness(sharemap)

        cr = CheckResults(
            from_string(self._node.get_uri()),
            self._storage_index,
            healthy=healthy,
            recoverable=bool(recoverable),
            count_happiness=count_happiness,
            count_shares_needed=counters["count-shares-needed"],
            count_shares_expected=counters["count-shares-expected"],
            count_shares_good=counters["count-shares-good"],
            count_good_share_hosts=counters["count-good-share-hosts"],
            count_recoverable_versions=len(recoverable),
            count_unrecoverable_versions=len(unrecoverable),
            servers_responding=list(smap.get_reachable_servers()),
            sharemap=sharemap,
            count_wrong_shares=counters["count-wrong-shares"],
            list_corrupt_shares=corrupt_share_locators,
            count_corrupt_shares=len(corrupt_share_locators),
            list_incompatible_shares=[],
            count_incompatible_shares=0,
            summary=summary,
            report=report,
            share_problems=problems,
            servermap=smap.copy())
        return cr
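
A condensed sketch (a simplification, not part of Tahoe-LAFS) of when the
checker above reports a mutable file as healthy:

def _is_healthy(recoverable, unrecoverable, shares_good, shares_expected):
    # Healthy means exactly one recoverable version, no unrecoverable
    # versions, and the best version has all of its expected shares.
    return (len(recoverable) == 1 and not unrecoverable
            and shares_good >= shares_expected)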
Exemplo n.º 56
def decode_header(header):
    """Decode a message header value without converting charset.

    Returns a list of (string, charset) pairs containing each of the decoded
    parts of the header.  Charset is None for non-encoded parts of the header,
    otherwise a lower-case string containing the name of the character set
    specified in the encoded string.

    header may be a string that may or may not contain RFC2047 encoded words,
    or it may be a Header object.

    An email.errors.HeaderParseError may be raised when certain decoding error
    occurs (e.g. a base64 decoding exception).
    """
    # If it is a Header object, we can just return the encoded chunks.
    if hasattr(header, '_chunks'):
        return [(_charset._encode(string, str(charset)), str(charset))
                for string, charset in header._chunks]
    # If no encoding, just return the header with no charset.
    if not ecre.search(header):
        return [(header, None)]
    # First step is to parse all the encoded parts into triplets of the form
    # (encoded_string, encoding, charset).  For unencoded strings, the last
    # two parts will be None.
    words = []
    for line in header.splitlines():
        parts = ecre.split(line)
        first = True
        while parts:
            unencoded = parts.pop(0)
            if first:
                unencoded = unencoded.lstrip()
                first = False
            if unencoded:
                words.append((unencoded, None, None))
            if parts:
                charset = parts.pop(0).lower()
                encoding = parts.pop(0).lower()
                encoded = parts.pop(0)
                words.append((encoded, encoding, charset))
    # Now loop over words and remove words that consist of whitespace
    # between two encoded strings.
    droplist = []
    for n, w in enumerate(words):
        if n > 1 and w[1] and words[n - 2][1] and words[n - 1][0].isspace():
            droplist.append(n - 1)
    for d in reversed(droplist):
        del words[d]

    # The next step is to decode each encoded word by applying the reverse
    # base64 or quopri transformation.  decoded_words is now a list of the
    # form (decoded_word, charset).
    decoded_words = []
    for encoded_string, encoding, charset in words:
        if encoding is None:
            # This is an unencoded word.
            decoded_words.append((encoded_string, charset))
        elif encoding == 'q':
            word = header_decode(encoded_string)
            decoded_words.append((word, charset))
        elif encoding == 'b':
            paderr = len(encoded_string) % 4  # Postel's law: add missing padding
            if paderr:
                encoded_string += '==='[:4 - paderr]
            try:
                word = base64mime.decode(encoded_string)
            except binascii.Error:
                raise HeaderParseError('Base64 decoding error')
            else:
                decoded_words.append((word, charset))
        else:
            raise AssertionError('Unexpected encoding: ' + encoding)
    # Now convert all words to bytes and collapse consecutive runs of
    # similarly encoded words.
    collapsed = []
    last_word = last_charset = None
    for word, charset in decoded_words:
        if isinstance(word, str):
            word = bytes(word, 'raw-unicode-escape')
        if last_word is None:
            last_word = word
            last_charset = charset
        elif charset != last_charset:
            collapsed.append((last_word, last_charset))
            last_word = word
            last_charset = charset
        elif last_charset is None:
            last_word += BSPACE + word
        else:
            last_word += word
    collapsed.append((last_word, last_charset))
    return collapsed
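
A doctest-style illustration, matching the modern standard library: encoded
words come back as bytes paired with their charset, while a header without
encoded words is returned unchanged with a charset of None.

>>> decode_header('=?iso-8859-1?q?p=F6stal?=')
[(b'p\xf6stal', 'iso-8859-1')]
>>> decode_header('plain text')
[('plain text', None)]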
Exemplo n.º 57
    def testCSVPluginWithValuesOfSameType(self):
        responses = []
        for i in range(10):
            responses.append(
                rdf_client_fs.StatEntry(
                    pathspec=rdf_paths.PathSpec(path="/foo/bar/%d" % i,
                                                pathtype="OS"),
                    st_mode=33184,  # octal = 100640 => u=rw,g=r,o= => -rw-r-----
                    st_ino=1063090,
                    st_dev=64512,
                    st_nlink=1 + i,
                    st_uid=139592,
                    st_gid=5000,
                    st_size=0,
                    st_atime=1336469177,
                    st_mtime=1336129892,
                    st_ctime=1336129892))

        zip_fd, prefix = self.ProcessValuesToZip(
            {rdf_client_fs.StatEntry: responses})
        self.assertEqual(
            set(zip_fd.namelist()),
            set([
                "%s/MANIFEST" % prefix,
                "%s/ExportedFile/from_StatEntry.csv" % prefix
            ]))

        parsed_manifest = yaml.safe_load(zip_fd.read("%s/MANIFEST" % prefix))
        self.assertEqual(parsed_manifest,
                         {"export_stats": {
                             "StatEntry": {
                                 "ExportedFile": 10
                             }
                         }})

        with zip_fd.open("%s/ExportedFile/from_StatEntry.csv" %
                         prefix) as filedesc:
            content = filedesc.read().decode("utf-8")

        parsed_output = list(csv.DictReader(io.StringIO(content)))
        self.assertLen(parsed_output, 10)
        for i in range(10):
            # Make sure metadata is filled in.
            self.assertEqual(parsed_output[i]["metadata.client_urn"],
                             "aff4:/%s" % self.client_id)
            self.assertEqual(parsed_output[i]["metadata.hostname"],
                             "Host-0.example.com")
            self.assertEqual(parsed_output[i]["metadata.mac_address"],
                             "aabbccddee00\nbbccddeeff00")
            self.assertEqual(parsed_output[i]["metadata.source_urn"],
                             self.results_urn)
            self.assertEqual(
                parsed_output[i]["metadata.hardware_info.bios_version"],
                "Bios-Version-0")

            self.assertEqual(parsed_output[i]["urn"],
                             "aff4:/%s/fs/os/foo/bar/%d" % (self.client_id, i))
            self.assertEqual(parsed_output[i]["st_mode"], "-rw-r-----")
            self.assertEqual(parsed_output[i]["st_ino"], "1063090")
            self.assertEqual(parsed_output[i]["st_dev"], "64512")
            self.assertEqual(parsed_output[i]["st_nlink"], str(1 + i))
            self.assertEqual(parsed_output[i]["st_uid"], "139592")
            self.assertEqual(parsed_output[i]["st_gid"], "5000")
            self.assertEqual(parsed_output[i]["st_size"], "0")
            self.assertEqual(parsed_output[i]["st_atime"],
                             "2012-05-08 09:26:17")
            self.assertEqual(parsed_output[i]["st_mtime"],
                             "2012-05-04 11:11:32")
            self.assertEqual(parsed_output[i]["st_ctime"],
                             "2012-05-04 11:11:32")
            self.assertEqual(parsed_output[i]["st_blksize"], "0")
            self.assertEqual(parsed_output[i]["st_rdev"], "0")
            self.assertEqual(parsed_output[i]["symlink"], "")
Exemplo n.º 58
 def __eq__(self, other):
     # other may be a Header or a string.  Both are fine so coerce
     # ourselves to a unicode (of the unencoded header value), swap the
     # args and do another comparison.
     return other == str(self)
Exemplo n.º 59
 def __init__(self,
              form,
              request,
              formentry_model=FormEntry,
              fieldentry_model=FieldEntry,
              *args,
              **kwargs):
     """
     Iterate through the fields of the ``forms.models.Form`` instance and
     create the form fields required to control including the field in
     the export (with a checkbox) or filtering the field, which differs
     across field types. Use a list of checkboxes when a fixed set of
     choices can be chosen from, a pair of date fields for date ranges,
     and for all other types provide a textbox for text search.
     """
     self.form = form
     self.request = request
     self.formentry_model = formentry_model
     self.fieldentry_model = fieldentry_model
     self.form_fields = form.fields.all()
     self.entry_time_name = str(
         self.formentry_model._meta.get_field("entry_time").verbose_name)
     super(EntriesForm, self).__init__(*args, **kwargs)
     for field in self.form_fields:
         field_key = "field_%s" % field.id
         # Checkbox for including in export.
         self.fields["%s_export" % field_key] = forms.BooleanField(
             label=field.label, initial=True, required=False)
         if field.is_a(*fields.CHOICES):
             # A fixed set of choices to filter by.
             if field.is_a(fields.CHECKBOX):
                 choices = ((True, _("Checked")), (False, _("Not checked")))
             else:
                 choices = field.get_choices()
             contains_field = forms.MultipleChoiceField(
                 label=" ",
                 choices=choices,
                 widget=forms.CheckboxSelectMultiple(),
                 required=False)
             self.fields["%s_filter" % field_key] = choice_filter_field
             self.fields["%s_contains" % field_key] = contains_field
         elif field.is_a(*fields.MULTIPLE):
             # A fixed set of choices to filter by, with multiple
             # possible values in the entry field.
             contains_field = forms.MultipleChoiceField(
                 label=" ",
                 choices=field.get_choices(),
                 widget=forms.CheckboxSelectMultiple(),
                 required=False)
             self.fields["%s_filter" % field_key] = multiple_filter_field
             self.fields["%s_contains" % field_key] = contains_field
         elif field.is_a(*fields.DATES):
             # A date range to filter by.
             self.fields["%s_filter" % field_key] = date_filter_field
             self.fields["%s_from" % field_key] = forms.DateField(
                 label=" ", widget=SelectDateWidget(), required=False)
             self.fields["%s_to" % field_key] = forms.DateField(
                 label=_("and"), widget=SelectDateWidget(), required=False)
         else:
             # Text box for search term to filter by.
             contains_field = forms.CharField(label=" ", required=False)
             self.fields["%s_filter" % field_key] = text_filter_field
             self.fields["%s_contains" % field_key] = contains_field
     # Add ``FormEntry.entry_time`` as a field.
     field_key = "field_0"
     label = self.formentry_model._meta.get_field("entry_time").verbose_name
     self.fields["%s_export" % field_key] = forms.BooleanField(
         initial=True, label=label, required=False)
     self.fields["%s_filter" % field_key] = date_filter_field
     self.fields["%s_from" % field_key] = forms.DateField(
         label=" ", widget=SelectDateWidget(), required=False)
     self.fields["%s_to" % field_key] = forms.DateField(
         label=_("and"), widget=SelectDateWidget(), required=False)
Exemplo n.º 60
from distutils import sysconfig
from obspy import UTCDateTime
import ctypes as C
import doctest
import numpy as np
import os
import platform
import warnings

# Import shared libgse2
# create library names
lib_names = [
    # python3.3 platform specific library name
    'libgse2_%s_%s_py%s.cpython-%sm' %
    (platform.system(), platform.architecture()[0], ''.join([
        str(i) for i in platform.python_version_tuple()[:2]
    ]), ''.join([str(i) for i in platform.python_version_tuple()[:2]])),
    # platform specific library name
    'libgse2_%s_%s_py%s' %
    (platform.system(), platform.architecture()[0], ''.join(
        [str(i) for i in platform.python_version_tuple()[:2]])),
    # fallback for pre-packaged libraries
    'libgse2'
]
# get default file extension for shared objects
lib_extension, = sysconfig.get_config_vars('SO')
# initialize library
for lib_name in lib_names:
    try:
        clibgse2 = C.CDLL(
            os.path.join(os.path.dirname(__file__), os.pardir, 'lib',