Example #1
def key_comparator(k1, k2):
  """A comparator for Datastore keys.

  Comparison is only valid for keys in the same partition. The comparison here
  is between the list of paths for each key.
  """

  if k1.partition_id != k2.partition_id:
    raise ValueError('Cannot compare keys with different partition ids.')

  k2_iter = iter(k2.path)

  for k1_path in k1.path:
    k2_path = next(k2_iter, None)
    if not k2_path:
      return 1

    result = compare_path(k1_path, k2_path)

    if result != 0:
      return result

  k2_path = next(k2_iter, None)
  if k2_path:
    return -1
  return 0
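
A minimal usage sketch, assuming keys are simple objects carrying a partition_id and a list of comparable path elements; Key and compare_path below are hypothetical stand-ins for the real Datastore types and helper:

from collections import namedtuple

Key = namedtuple('Key', ['partition_id', 'path'])

def compare_path(p1, p2):
    # cmp-style comparison of two path elements
    return (p1 > p2) - (p1 < p2)

a = Key('part', [('Parent', 1)])
b = Key('part', [('Parent', 1), ('Child', 2)])

assert key_comparator(a, b) == -1  # a is an ancestor (prefix) of b, so it sorts first
assert key_comparator(b, a) == 1
assert key_comparator(a, a) == 0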
Example #2
    def __next__(self):
        """
        Return the next Statement in the answer, if there is one.

        Otherwise raise StopIteration exception.

        :return: The next statement.
        :raises StopIteration: If there are no more statements.
        """
        if self.nonDuplicateSet is not None:
            try:
                savedNonDuplicateSet = self.nonDuplicateSet
                self.nonDuplicateSet = None
                while True:
                    stmt = next(self)
                    if stmt not in savedNonDuplicateSet:
                        savedNonDuplicateSet.add(stmt)
                        return stmt
            finally:
                self.nonDuplicateSet = savedNonDuplicateSet
#        elif self.limit and self.cursor >= self.limit:
#            raise StopIteration
        elif self.cursor < len(self.string_tuples):
            stringTuple = self.string_tuples[self.cursor]
            if self.triple_ids:
                stringTuple = RepositoryResult.normalize_quint(stringTuple)
            self.cursor += 1
            if self.subjectFilter and stringTuple[0] != self.subjectFilter:
                return next(self)
            return self._createStatement(stringTuple)
        else:
            raise StopIteration
Example #3
 def test_non_iterator(self):
     """
     The default behaviour of next(o) for a newobject o should be to raise a
     TypeError, as with the corresponding builtin object.
     """
     o = object()
     with self.assertRaises(TypeError):
         next(o)
Example #4
def test_bond_sample_state_raises_more_than_two_spanning_sides(
    percolation_graph
):
    if percolation_graph['spanning_cluster']:
        percolation_graph['spanning_sides'] = [0, 1, 2]
        with pytest.raises(ValueError):
            next(percolate.hpc.bond_sample_states(
                seed=0, **percolation_graph
            ))
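
These tests wrap the call in next() presumably because calling a generator function only creates the generator object; the argument validation in its body does not run until the first value is requested. A minimal illustration with a hypothetical validate-then-yield generator:

def sample(x):
    if x < 0:
        raise ValueError('x must be non-negative')
    yield x

gen = sample(-1)   # no error raised yet
try:
    next(gen)      # validation runs on the first next()
except ValueError:
    print('raised on the first next(), as the tests above rely on')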
Example #5
    def lex_label(self):
        self.eat_spaces()
        char = self.streamer.peek()
        if char in ('"', "'"):
            next(self.streamer)  # throw away opening quote
            self._match_delimited(char)
        else:
            despacer = {' ': '_'}
            self._match_run(str.isalnum, accepted_chars='-_|.',
                            denied_chars=':,;', replacements=despacer)
        label = self.token_buffer.decode()
        if label == '':
            label = None
        self.emit(Token(self.tokens.LABEL, label))

        return self.lex_length
Example #6
def test_bond_microcanonical_statistics(percolation_graph, seed):
    result = percolate.hpc.bond_microcanonical_statistics(
        seed=seed, **percolation_graph
    )
    sample_states = percolate.hpc.bond_sample_states(
        seed=seed, **percolation_graph
    )
    for n in range(percolation_graph['num_edges'] + 1):
        result_row = result[n]
        sample_state = next(sample_states)

        def _assert_equal(key):
            assert result_row[key] == sample_state[key][0]

        assert result_row['n'] == n
        assert sample_state['n'][0] == n

        if n > 0:
            # first edge is undefined!
            _assert_equal('edge')

        if percolation_graph['spanning_cluster']:
            _assert_equal('has_spanning_cluster')
        _assert_equal('max_cluster_size')
        np.testing.assert_array_equal(
            result_row['moments'],
            sample_state['moments'][0],
        )
Example #7
def writeUniqueResults(clustered_dupes, input_file, output_file):

    # Write our original data back out to a CSV with a new column called 
    # 'Cluster ID' which indicates which records refer to each other.

    logging.info('saving unique results to: %s' % output_file)

    cluster_membership = {}
    for (cluster_id, cluster) in enumerate(clustered_dupes):
        for record_id in cluster:
            cluster_membership[record_id] = cluster_id

    unique_record_id = cluster_id + 1

    writer = csv.writer(output_file)

    reader = csv.reader(StringIO(input_file))

    heading_row = next(reader)
    heading_row.insert(0, u'Cluster ID')
    writer.writerow(heading_row)

    seen_clusters = set()
    for row_id, row in enumerate(reader):
        if row_id in cluster_membership:
            cluster_id = cluster_membership[row_id]
            if cluster_id not in seen_clusters:
                row.insert(0, cluster_id)
                writer.writerow(row)
                seen_clusters.add(cluster_id)
        else:
            cluster_id = unique_record_id
            unique_record_id += 1
            row.insert(0, cluster_id)
            writer.writerow(row)
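
The heading_row = next(reader) call above is the usual way to consume a CSV header before iterating over the data rows. A small self-contained sketch of the same idiom (hypothetical columns):

import csv
import io

reader = csv.reader(io.StringIO('id,name\n1,Ada\n2,Grace\n'))
header = next(reader)
assert header == ['id', 'name']
assert list(reader) == [['1', 'Ada'], ['2', 'Grace']]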
Example #8
File: klib.py Project: LUMC/kPAL
    def save(self, handle, name=None):
        """
        Save the *k*-mer counts to a file.

        :arg h5py.File handle: Open writeable *k*-mer profile file handle.
        :arg name: Profile name in the file. If not provided, the current
          profile name is used, or the first available number from 1
          consecutively if the profile has no name.
        :type name: str

        :return: Profile name in the file.
        :rtype: str
        """
        if name and ('/' in name or '.' in name):
            raise ValueError('Profile name may not contain / or . characters.')

        name = name or self.name or next(str(n) for n in itertools.count(1)
                                         if str(n) not in handle['profiles'])

        profile = handle.create_dataset('profiles/' + name, data=self.counts,
                                        dtype='int64', compression='gzip')
        profile.attrs['length'] = self.length
        profile.attrs['total'] = self.total
        profile.attrs['non_zero'] = self.non_zero
        profile.attrs['mean'] = self.mean
        profile.attrs['median'] = self.median
        profile.attrs['std'] = self.std
        handle.flush()

        return name
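
The name-selection expression above picks the first positive integer (as a string) that is not already a key under 'profiles'. The same idiom, sketched with a plain dict standing in for the h5py group:

import itertools

existing = {'1': None, '2': None, '5': None}
name = next(str(n) for n in itertools.count(1) if str(n) not in existing)
assert name == '3'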
Example #9
    def test_implements_py2_iterator(self):

        class Upper(object):
            def __init__(self, iterable):
                self._iter = iter(iterable)
            def __next__(self):                 # note the Py3 interface
                return next(self._iter).upper()
            def __iter__(self):
                return self

        self.assertEqual(list(Upper('hello')), list('HELLO'))

        # Try combining it with the next() function:

        class MyIter(object):
            def __next__(self):
                return 'Next!'
            def __iter__(self):
                return self

        itr = MyIter()
        self.assertEqual(next(itr), 'Next!')

        itr2 = MyIter()
        for i, item in enumerate(itr2):
            if i >= 10:
                break
            self.assertEqual(item, 'Next!')
Example #10
def demonstrate(device_name):
    """
    Apply function 'static_route_create' to the specified device.
    
    Choose a destination that does not already exist. 
    Choose a next-hop on the same sub-network as an existing interface.
    If the next-hop is unknown then use any valid ip-address.
    """
    
    print('Select a destination network for which a static route does not exist.')
    destination_network_iterator = destination_network_generator()
    while True: 
        destination_network = next(destination_network_iterator)
        print('static_route_exists(%s, %s)' % (device_name, destination_network))
        exists = static_route_exists(device_name, destination_network)
        print(exists)
        print()
        if not exists:
            break
    
    print('Determine which interface is on the management plane (to avoid it).')     
    print('device_control(%s)' % device_name)
    device_mgmt = device_control(device_name)
    print_table(device_mgmt)
    print()

    print('Determine ip-address and network-mask of every interface.')     
    print('interface_configuration_tuple(%s)' % device_name)
    interface_config_list = interface_configuration_tuple(device_name)
    print_table(interface_config_list)
    print()
    
    for interface_config in interface_config_list:
        if interface_config.address is None:
            # Skip network interface with unassigned IP address.             
            continue
        if interface_config.address == device_mgmt.address:
            # Do not configure static routes on the 'management plane'.             
            continue
        if 'Loopback' in interface_config.name:
            # Do not configure static routes on the 'control plane'.             
            continue
        
        print('Determine next-hop for a network interface.')
        interface_network = interface_config.ip_network
        next_hop_address = match(device_name, interface_network)
        if next_hop_address is None:
            print('Next-hop for %s/%s %s/%s is outside the known topology.' % (device_name, interface_config.name, interface_config.address, interface_config.netmask))
            next_hop_address = interface_network.network_address
            if next_hop_address == interface_config.ip_interface:
                next_hop_address += 1
            print('Assume that next-hop for %s/%s %s/%s is %s.' % (device_name, interface_config.name, interface_config.address, interface_config.netmask, next_hop_address))
        else:
            print('Next-hop for %s/%s %s/%s is %s.' % (device_name, interface_config.name, interface_config.address, interface_config.netmask, next_hop_address))
        print()

        print('static_route_create(%s, %s, %s)' % (device_name, destination_network, next_hop_address))
        static_route_create(device_name, destination_network, next_hop_address)
        return True
    return False
Example #11
def cleanpy(dirname, recursive=True):
    walk_it = os.walk(dirname)
    if not recursive:
        # wrap the single (root, dirs, files) triple in a list so the loop
        # below still iterates over (dirpath, dirnames, filenames) triples
        walk_it = [next(walk_it)]
    for dirpath, dirnames, filenames in walk_it:
        _cleanpy2(dirpath, filenames)
        _cleanpy3(dirpath, dirnames)
Example #12
    def _get_label(self, tokens):
        """ Get the node data attributes 'label' and 'length'. Assumes these
        will be the next tokens in the stream. Throws ParseError if they are
        not. """
        label = next(self.lexer)
        if label.typ not in (tokens.LABEL, tokens.SUPPORT):
            raise ParseError(
                'Expected a label or a support value, found {0}'.format(
                    label))

        length = next(self.lexer)
        if length.typ != tokens.LENGTH:
            raise ParseError('Expected a length, found {0}'.format(
                length))

        return (label.val if label.typ == tokens.LABEL else None)
Example #13
 def __setSchemaCombo(self, uriDb):
     """
     To fill the schema combo list
     :param uriDb: selected database uri
     """
     connector = DBConnector(uriDb, self.__iface)
     db = connector.setConnection()
     if db:
         Signal.safelyDisconnect(self.__schemaCombo.currentIndexChanged, self.__schemaComboChanged)
         self.__resetCombo(self.__schemaCombo)
         self.__schemaCombo.addItem("")
         self.__schemas = []
         query = db.exec_("""SELECT DISTINCT table_schema FROM information_schema.tables WHERE table_schema NOT IN
             ('pg_catalog', 'information_schema', 'topology') AND table_type = 'BASE TABLE' AND table_name NOT IN
             (SELECT f_table_name FROM geometry_columns)""")
         if query.lastError().isValid():
             self.__iface.messageBar().pushMessage(query.lastError().text(), level=QgsMessageBar.CRITICAL, duration=0)
         else:
             while next(query):
                 self.__schemas.append(query.value(0))
             db.close()
             for schema in self.__schemas:
                 self.__schemaCombo.addItem(schema)
             self.__schemaCombo.currentIndexChanged.connect(self.__schemaComboChanged)
             if self.__schemaDb is not None:
                 if self.__schemaDb in self.__schemas:
                     self.__schemaCombo.setCurrentIndex(self.__schemas.index(self.__schemaDb) + 1)
Example #14
 def __setTableCombo(self, uriDb, schema):
     """
     To fill the table combo list
     :param uriDb: selected database uri
     :param schema: selected database schema
     """
     connector = DBConnector(uriDb, self.__iface)
     db = connector.setConnection()
     if db:
         Signal.safelyDisconnect(self.__tableCombo.currentIndexChanged, self.__tableComboChanged)
         self.__resetCombo(self.__tableCombo)
         self.__tableCombo.addItem("")
         self.__tables = []
         query = db.exec_("""SELECT table_name FROM information_schema.tables WHERE table_schema = '""" + schema +
                          """' ORDER BY table_name""")
         if query.lastError().isValid():
             self.__iface.messageBar().pushMessage(query.lastError().text(), level=QgsMessageBar.CRITICAL, duration=0)
         else:
             while next(query):
                 self.__tables.append(query.value(0))
             db.close()
             for table in self.__tables:
                 if self.__tableCombo.findText(table) == -1:
                     self.__tableCombo.addItem(table)
             self.__tableCombo.currentIndexChanged.connect(self.__tableComboChanged)
             if self.__configTable is not None:
                 if self.__configTable in self.__tables:
                     self.__tableCombo.setCurrentIndex(self.__tables.index(self.__configTable) + 1)
Example #15
    def _addTaxRangeR(self, node, noUpwardLevels=False):
        """recursive method to add TaxRange property tags."""
        if self.parser.is_ortholog_group(node) or self.parser.is_paralog_group(node) or OrthoXMLQuery.is_geneRef_node(node):
            species_covered = self.parser.get_species_below_node(node)
            current_level = self.tax.mrca(species_covered)

            try: # find the closest ancestral orthogroup that has a TaxRange property
                parent_orthogroup_generator = (n for n in node.iterancestors('{{{}}}orthologGroup'.format(OrthoXMLQuery.ns['ns0']))
                                               if n[0].tag == '{{{}}}property'.format(OrthoXMLQuery.ns['ns0']))
                parent_orthogroup = next(parent_orthogroup_generator)
            except: # couldn't find a parent with a TaxRange property; no extra annotation possible
                parent_orthogroup = None

            if parent_orthogroup is not None:
                parent_levels = {z.get('value')
                                 for z in OrthoXMLQuery.getTaxRangeNodes(parent_orthogroup, False)}
                most_recent_parent_level = self.tax.mostSpecific(parent_levels)

                # Ortholog Node - append missing tax range(s) as property tags under the current node
                if self.parser.is_ortholog_group(node):
                    for level in self.tax.iterParents(current_level, most_recent_parent_level):
                        node.append(self._createTaxRangeTag(level))

                # Paralog Node - insert ortholog node between self and parent; add missing tax range(s) to new parent
                elif self.parser.is_paralog_group(node):
                    if self.tax.levels_between(most_recent_parent_level, current_level) > 1:
                        self._insertOG(node.getparent(), node, current_level, most_recent_parent_level, include_self=False)

                # GeneRef Node - insert ortholog node between self and parent; add all tax range(s) to new parent
                else:
                    self._insertOG(node.getparent(), node, current_level, most_recent_parent_level, include_self=True)
                    return

            for child in node:
                self._addTaxRangeR(child, noUpwardLevels)
Example #16
File: klib.py Project: LUMC/kPAL
    def from_file_old_format(cls, handle, name=None):
        """
        Load the *k*-mer profile from a file in the old plaintext format.

        :arg handle: Open readable *k*-mer profile file handle (old format).
        :type handle: file-like object
        :arg str name: Profile name.

        :return: A *k*-mer profile.
        :rtype: Profile
        """
        # Ignore lines with length, total, nonzero.
        for _ in range(3):
            next(handle)

        counts = np.loadtxt(handle, dtype='int64')
        return cls(counts, name=name)
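
The for _ in range(3): next(handle) loop simply discards the first three header lines before handing the rest of the file to numpy.loadtxt. The same skip-lines-with-next pattern on an in-memory file (hypothetical contents):

import io

handle = io.StringIO('length 8\ntotal 6\nnonzero 3\n1\n2\n3\n')
for _ in range(3):
    next(handle)
assert [int(line) for line in handle] == [1, 2, 3]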
Example #17
 def get_question(self, code):
     if self._lazy:
         # Trigger full fetch
         self.name
     # filter() returns a lazy iterator in Python 3, so a truthiness check on
     # it is always true; use next() with a default to handle "no match".
     candidates = filter(lambda c: c.code == code, self.questions)
     return next(candidates, None)
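
next(iterator, default) returns the default instead of raising StopIteration when the iterator is exhausted, which is what makes the one-liner above safe when no question matches:

assert next(iter([]), None) is None
assert next((n for n in [1, 2, 3] if n > 2), None) == 3
assert next((n for n in [1, 2, 3] if n > 5), None) is None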
Example #18
def deserialize(string):
    def posInteger(chars):
        result = shift = 0

        # Set value to get into the loop the first time
        value = 0x80
        while value & 0x80:
            value = next(chars)
            result += ((value & 0x7f) << shift)
            shift += 7

        return result

    if isinstance(string, old_str):
        string = bytes(string)
    chars = iter(string)
    value = next(chars)

    if value == SerialConstants.SO_BYTEVECTOR:
        length = posInteger(chars)
        import array
        return array.array(b'b', [ord(next(chars)) for i in range(length)])

    if (value == SerialConstants.SO_VECTOR or
        value == SerialConstants.SO_LIST):
        length = posInteger(chars)
        return [deserialize(chars) for i in range(length)]

    if value == SerialConstants.SO_STRING:
        length = posInteger(chars)
        return unicode(ibytes(islice(chars, 0, length)), 'utf-8')

    if value == SerialConstants.SO_POS_INTEGER:
        return posInteger(chars)

    if value == SerialConstants.SO_NEG_INTEGER:
        return - posInteger(chars)

    if value == SerialConstants.SO_NULL:
        return None

    if value == SerialConstants.SO_END_OF_ITEMS:
        return None

    raise ValueError("bad code found by deserializer: %d" % value)
Example #19
 def __populate_text_field(self, herd, field_name, xml_name=''):
     if not xml_name:
         xml_name = field_name
     try:
         element = next(herd.iter(xml_name))
         text = gettext(element)
     except:
         raise IOError("Couldn't find '%s' label in xml" % xml_name)
     self.population[-1][field_name] = text
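
next(herd.iter(xml_name)) takes the first element with the given tag from an ElementTree iterator and raises StopIteration if there is none, which the try/except above turns into an IOError. A self-contained sketch with a hypothetical tag name:

import xml.etree.ElementTree as ET

herd = ET.fromstring('<herd><size>42</size></herd>')
element = next(herd.iter('size'))
assert element.text == '42'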
Example #20
File: io.py Project: greole/owls
def find_times(fold=None):
    """ Find time folders in given or current folder
        Returns sorted list of times as strings
    """
    search_folder = (fold if fold else os.getcwd())
    cur_dir = os.walk(search_folder)
    root, dirs, files = next(cur_dir)
    times = [time for time in dirs if is_time(time) is not False]
    times.sort()
    return times
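
next(cur_dir) on an os.walk() generator yields only the top-level (root, dirs, files) triple, a handy way to inspect the immediate contents of a directory without recursing:

import os

root, dirs, files = next(os.walk(os.getcwd()))
print('subdirectories of %s: %s' % (root, sorted(dirs)))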
Example #21
    def lex_subtree_start(self):
        self.eat_spaces()
        char = self.streamer.peek()

        if char == '(':
            self.emit(Token(self.tokens.SUBTREE, next(self.streamer)))
            return self.lex_subtree_start

        else:
            self.emit(Token(self.tokens.LEAF, None))
            return self.lex_label
Example #22
    def posInteger(chars):
        result = shift = 0

        # Set value to get into the loop the first time
        value = 0x80
        while value & 0x80:
            value = next(chars)
            result += ((value & 0x7f) << shift)
            shift += 7

        return result
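
posInteger decodes a little-endian base-128 (varint-style) integer: each byte contributes its low 7 bits, and the high bit signals whether another byte follows. For example, 300 is encoded as the two bytes 0xAC 0x02:

def pos_integer(chars):
    result = shift = 0
    value = 0x80                      # dummy value with the continuation bit set
    while value & 0x80:
        value = next(chars)
        result += (value & 0x7f) << shift
        shift += 7
    return result

assert pos_integer(iter([0xAC, 0x02])) == 300   # 0x2C + (0x02 << 7)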
Example #23
 def __next__(self):
     while True:
         try:
             return self._klass(self.api, next(self._iter))
         except StopIteration:
             next_url = self.jsondata.get("next", False)
             if not next_url:
                 raise
             self.jsondata = self.api.connection.make_get(next_url)
             self._get_iter()
     raise StopIteration
Example #24
    def _match(self, predicate, accepted_chars='', denied_chars='',
               replacements=None):
        """ Checks next character in stream. If predicate returns True, or char
        is in `accepted_chars`, advances the stream and returns 1. Else, or if
        the char is in `denied_chars`, doesn't advance the stream and returns 0.
        Replacements is an optional dictionary that can be used to replace the
        streamed character with an alternative (e.g. replace spaces with
        underscores). """

        replacements = (replacements or {})
        char = self.streamer.peek()
        char = replacements.get(char, char)

        if predicate(char) or char in accepted_chars:
            if len(char) == 1:
                self.buffer(char)
            next(self.streamer)  # advance stream
            return 1
        elif char in denied_chars:
            return 0
        else:
            return 0
Example #25
def demonstrate(device_name):
    """ 
    Apply function 'static_route_exists' to the specified device for each destination in the list.
    """
    destination_network_iterator = destination_network_generator()
    while True: 
        destination_network = next(destination_network_iterator)
        print('static_route_exists(%s, %s)' % (device_name, destination_network))
        exists = static_route_exists(device_name, destination_network)
        print(exists)
        if not exists:
            return True
        else:
            print()
Example #26
File: io.py Project: greole/owls
def read_boundary_names(fn):
    """ Todo use iterator method to avoid reading complete file """
    with open(fn, encoding="utf-8") as f:
        boundary_names = []
        lines = reversed(f.readlines())
        for line in lines:
            if "{" in line:
                follower = next(lines)
                if "boundaryField" in follower:
                    return boundary_names
                try:
                    boundary_names.append(follower)
                except:
                    pass
            else:
                pass
Example #27
File: io.py Project: greole/owls
def _get_datafiles_from_dir(path=False, fn_filter=False):
    """ Return file names of Foam files from cwd if no path
        is specified explicitly.
        If no filter list is given the complete list of files will be returned
        else only files matching that list
    """
    path = (path if path else os.getcwd() + "/")
    path = (path + "/" if not path.endswith("/") else path)
    cur_dir = os.walk(path)
    root, dirs, files = next(cur_dir)
    if fn_filter:
        l = [path + f for f in files if f in fn_filter]
    else:
        l = [path + f for f in files if not f.startswith('.')]
    l.sort()
    return l
Example #28
def max_elems(iterable, key=None):
    """Find the elements in 'iterable' corresponding to the maximum values w.r.t. 'key'."""
    iterator = iter(iterable)
    try:
        elem = next(iterator)
    except StopIteration:
        raise ValueError("argument iterable must be non-empty")
    max_elems = [elem]
    max_key = elem if key is None else key(elem)
    for elem in iterator:
        curr_key = elem if key is None else key(elem)
        if curr_key > max_key:
            max_elems = [elem]
            max_key = curr_key
        elif curr_key == max_key:
            max_elems.append(elem)
    return max_elems
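
Example usage of max_elems: all elements tied for the maximum are returned, not just the first one, which is what distinguishes it from the built-in max():

assert max_elems([3, 1, 3, 2]) == [3, 3]
assert max_elems(['apple', 'fig', 'kiwi'], key=len) == ['apple']
assert max_elems(['ab', 'cd', 'e'], key=len) == ['ab', 'cd']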
Example #29
    def _render_node(self, context, cl, node):
        bits = []
        context.push()

        # Render children to add to parent later
        for child in node.get_children():
            bits.append(self._render_node(context, cl, child))

        columns = self._get_column_repr(cl, node)  # list(tuple(name, html), ..)
        first_real_column = next(col for col in columns if col[0] != 'action_checkbox')

        context['columns'] = columns
        context['other_columns'] = [col for col in columns if col[0] not in ('action_checkbox', first_real_column[0])]
        context['first_column'] = first_real_column[1]
        context['named_columns'] = dict(columns)
        context['node'] = node
        context['change_url'] = cl.url_for_result(node)
        context['children'] = mark_safe(u''.join(bits))

        # Render
        rendered = self.template_nodes.render(context)
        context.pop()
        return rendered
Example #30
    def __checkIfExist(self):
        """
        To check if the data we want to import is already in the table
        """
        if self.__iter < len(self.__data):
            data = self.__data[self.__iter]

            # check if already in table
            query = self.__db.exec_(
                """SELECT ST_AsText(geometry3d) FROM %s.%s WHERE st_dwithin('%s', geometry3d, 0.03)"""
                % (data['schema_table'], data['name_table'], data['geom']))
            if query.lastError().isValid():
                self.__iface.messageBar().pushMessage(
                    query.lastError().text(), level=QgsMessageBar.CRITICAL, duration=0)
            else:
                in_base = False
                point = None
                while next(query):
                    point = query.value(0)
                    in_base = True
                if in_base:
                    self.__data[self.__iter]['point'] = point
                    self.__confDlg = ImportConfirmDialog()
                    self.__confDlg.setMessage(
                        QCoreApplication.translate("VDLTools", "There is already a ") + point +
                                                   QCoreApplication.translate("VDLTools", " in table ") + data['schema_table'] + """.""" +
                                                   data['name_table'] + ".\n" + QCoreApplication.translate("VDLTools", "Would you like to add it anyway ? "))
                    self.__confDlg.rejected.connect(self.__cancelAndNext)
                    self.__confDlg.accepted.connect(self.__confirmAndNext)
                    self.__confDlg.okButton().clicked.connect(self.__onConfirmOk)
                    self.__confDlg.cancelButton().clicked.connect(self.__onConfirmCancel)
                    self.__confDlg.show()
                else:
                    self.__confirmAndNext()
        else:
            self.__insert()
Example #31
def get_server_resources(return_presence=False,
                         return_server=False,
                         return_info=False,
                         **kwargs):
    if not return_presence and not return_info:
        logger.info("Tautulli PlexTV :: Requesting resources for server...")

    server = {
        'pms_name': plexpy.CONFIG.PMS_NAME,
        'pms_version': plexpy.CONFIG.PMS_VERSION,
        'pms_platform': plexpy.CONFIG.PMS_PLATFORM,
        'pms_ip': plexpy.CONFIG.PMS_IP,
        'pms_port': plexpy.CONFIG.PMS_PORT,
        'pms_ssl': plexpy.CONFIG.PMS_SSL,
        'pms_is_remote': plexpy.CONFIG.PMS_IS_REMOTE,
        'pms_is_cloud': plexpy.CONFIG.PMS_IS_CLOUD,
        'pms_url': plexpy.CONFIG.PMS_URL,
        'pms_url_manual': plexpy.CONFIG.PMS_URL_MANUAL,
        'pms_identifier': plexpy.CONFIG.PMS_IDENTIFIER,
        'pms_plexpass': plexpy.CONFIG.PMS_PLEXPASS
    }

    if return_info:
        return server

    if kwargs:
        server.update(kwargs)
        for k in [
                'pms_ssl', 'pms_is_remote', 'pms_is_cloud', 'pms_url_manual'
        ]:
            server[k] = int(server[k])

    if server['pms_url_manual'] and server['pms_ssl'] or server['pms_is_cloud']:
        scheme = 'https'
    else:
        scheme = 'http'

    fallback_url = '{scheme}://{hostname}:{port}'.format(
        scheme=scheme, hostname=server['pms_ip'], port=server['pms_port'])

    plex_tv = PlexTV()
    result = plex_tv.get_server_connections(
        pms_identifier=server['pms_identifier'],
        pms_ip=server['pms_ip'],
        pms_port=server['pms_port'],
        include_https=server['pms_ssl'])

    if result:
        connections = result.pop('connections', [])
        server.update(result)
        presence = server.pop('pms_presence', 0)
    else:
        connections = []
        presence = 0

    if return_presence:
        return presence

    plexpass = plex_tv.get_plexpass_status()
    server['pms_plexpass'] = int(plexpass)

    # Only need to retrieve PMS_URL if using SSL
    if not server['pms_url_manual'] and server['pms_ssl']:
        if connections:
            if server['pms_is_remote']:
                # Get all remote connections
                conns = [
                    c for c in connections if c['local'] == '0' and
                    ('plex.direct' in c['uri'] or 'plex.service' in c['uri'])
                ]
            else:
                # Get all local connections
                conns = [
                    c for c in connections if c['local'] == '1' and
                    ('plex.direct' in c['uri'] or 'plex.service' in c['uri'])
                ]

            if conns:
                # Get connection with matching address, otherwise return first connection
                conn = next(
                    (c for c in conns if c['address'] == server['pms_ip']
                     and c['port'] == str(server['pms_port'])), conns[0])
                server['pms_url'] = conn['uri']
                logger.info("Tautulli PlexTV :: Server URL retrieved.")

        # get_server_urls() failed or PMS_URL not found, fallback url doesn't use SSL
        if not server['pms_url']:
            server['pms_url'] = fallback_url
            logger.warn(
                "Tautulli PlexTV :: Unable to retrieve server URLs. Using user-defined value without SSL."
            )

        # Not using SSL, remote has no effect
    else:
        server['pms_url'] = fallback_url
        logger.info("Tautulli PlexTV :: Using user-defined URL.")

    if return_server:
        return server

    logger.info(
        "Tautulli PlexTV :: Selected server: %s (%s) (%s - Version %s)",
        server['pms_name'], server['pms_url'], server['pms_platform'],
        server['pms_version'])

    plexpy.CONFIG.process_kwargs(server)
    plexpy.CONFIG.write()
Example #32
    def get_synced_items(self,
                         machine_id=None,
                         client_id_filter=None,
                         user_id_filter=None,
                         rating_key_filter=None,
                         sync_id_filter=None):

        if not machine_id:
            machine_id = plexpy.CONFIG.PMS_IDENTIFIER

        if isinstance(rating_key_filter, list):
            rating_key_filter = [str(k) for k in rating_key_filter]
        elif rating_key_filter:
            rating_key_filter = [str(rating_key_filter)]

        if isinstance(user_id_filter, list):
            user_id_filter = [str(k) for k in user_id_filter]
        elif user_id_filter:
            user_id_filter = [str(user_id_filter)]

        sync_list = self.get_plextv_sync_lists(machine_id, output_format='xml')
        user_data = users.Users()

        synced_items = []

        try:
            xml_head = sync_list.getElementsByTagName('SyncList')
        except Exception as e:
            logger.warn(
                "Tautulli PlexTV :: Unable to parse XML for get_synced_items: %s."
                % e)
            return {}

        for a in xml_head:
            client_id = helpers.get_xml_attr(a, 'clientIdentifier')

            # Filter by client_id
            if client_id_filter and str(client_id_filter) != client_id:
                continue

            sync_list_id = helpers.get_xml_attr(a, 'id')
            sync_device = a.getElementsByTagName('Device')

            for device in sync_device:
                device_user_id = helpers.get_xml_attr(device, 'userID')
                try:
                    device_username = user_data.get_details(
                        user_id=device_user_id)['username']
                    device_friendly_name = user_data.get_details(
                        user_id=device_user_id)['friendly_name']
                except:
                    device_username = ''
                    device_friendly_name = ''
                device_name = helpers.get_xml_attr(device, 'name')
                device_product = helpers.get_xml_attr(device, 'product')
                device_product_version = helpers.get_xml_attr(
                    device, 'productVersion')
                device_platform = helpers.get_xml_attr(device, 'platform')
                device_platform_version = helpers.get_xml_attr(
                    device, 'platformVersion')
                device_type = helpers.get_xml_attr(device, 'device')
                device_model = helpers.get_xml_attr(device, 'model')
                device_last_seen = helpers.get_xml_attr(device, 'lastSeenAt')

            # Filter by user_id
            if user_id_filter and device_user_id not in user_id_filter:
                continue

            for synced in a.getElementsByTagName('SyncItems'):
                sync_item = synced.getElementsByTagName('SyncItem')
                for item in sync_item:

                    for location in item.getElementsByTagName('Location'):
                        clean_uri = helpers.get_xml_attr(location,
                                                         'uri').split('%2F')

                    rating_key = next((clean_uri[(idx + 1) % len(clean_uri)]
                                       for idx, item in enumerate(clean_uri)
                                       if item == 'metadata'), None)

                    # Filter by rating_key
                    if rating_key_filter and rating_key not in rating_key_filter:
                        continue

                    sync_id = helpers.get_xml_attr(item, 'id')

                    # Filter by sync_id
                    if sync_id_filter and str(sync_id_filter) != sync_id:
                        continue

                    sync_version = helpers.get_xml_attr(item, 'version')
                    sync_root_title = helpers.get_xml_attr(item, 'rootTitle')
                    sync_title = helpers.get_xml_attr(item, 'title')
                    sync_metadata_type = helpers.get_xml_attr(
                        item, 'metadataType')
                    sync_content_type = helpers.get_xml_attr(
                        item, 'contentType')

                    for status in item.getElementsByTagName('Status'):
                        status_failure_code = helpers.get_xml_attr(
                            status, 'failureCode')
                        status_failure = helpers.get_xml_attr(
                            status, 'failure')
                        status_state = helpers.get_xml_attr(status, 'state')
                        status_item_count = helpers.get_xml_attr(
                            status, 'itemsCount')
                        status_item_complete_count = helpers.get_xml_attr(
                            status, 'itemsCompleteCount')
                        status_item_downloaded_count = helpers.get_xml_attr(
                            status, 'itemsDownloadedCount')
                        status_item_ready_count = helpers.get_xml_attr(
                            status, 'itemsReadyCount')
                        status_item_successful_count = helpers.get_xml_attr(
                            status, 'itemsSuccessfulCount')
                        status_total_size = helpers.get_xml_attr(
                            status, 'totalSize')
                        status_item_download_percent_complete = helpers.get_percent(
                            status_item_downloaded_count, status_item_count)

                    for settings in item.getElementsByTagName('MediaSettings'):
                        settings_video_bitrate = helpers.get_xml_attr(
                            settings, 'maxVideoBitrate')
                        settings_video_quality = helpers.get_xml_attr(
                            settings, 'videoQuality')
                        settings_video_resolution = helpers.get_xml_attr(
                            settings, 'videoResolution')
                        settings_audio_boost = helpers.get_xml_attr(
                            settings, 'audioBoost')
                        settings_audio_bitrate = helpers.get_xml_attr(
                            settings, 'musicBitrate')
                        settings_photo_quality = helpers.get_xml_attr(
                            settings, 'photoQuality')
                        settings_photo_resolution = helpers.get_xml_attr(
                            settings, 'photoResolution')

                    sync_details = {
                        "device_name": device_name,
                        "platform": device_platform,
                        "user_id": device_user_id,
                        "user": device_friendly_name,
                        "username": device_username,
                        "root_title": sync_root_title,
                        "sync_title": sync_title,
                        "metadata_type": sync_metadata_type,
                        "content_type": sync_content_type,
                        "rating_key": rating_key,
                        "state": status_state,
                        "item_count": status_item_count,
                        "item_complete_count": status_item_complete_count,
                        "item_downloaded_count": status_item_downloaded_count,
                        "item_downloaded_percent_complete":
                        status_item_download_percent_complete,
                        "video_bitrate": settings_video_bitrate,
                        "audio_bitrate": settings_audio_bitrate,
                        "photo_quality": settings_photo_quality,
                        "video_quality": settings_video_quality,
                        "total_size": status_total_size,
                        "failure": status_failure,
                        "client_id": client_id,
                        "sync_id": sync_id
                    }

                    synced_items.append(sync_details)

        return session.filter_session_info(synced_items, filter_key='user_id')
Example #33
def realseq():
    # random.randint, not os.randint -- the os module has no randint function
    return next(seqnum_counter), str(random.randint(1, 100000))
Example #34
def test_microcanonical_averages_noninteger_runs(grid_3x3_graph):
    with pytest.raises(ValueError):
        next(percolate.microcanonical_averages(grid_3x3_graph, runs='many'))
Example #35
 def constructColName2IndexFromHeader(self):
     """
     """
     self.header = next(self)
     self.col_name2index = utils.getColName2IndexFromHeader(self.header)
     return self.col_name2index
Example #36
def check_github(scheduler=False, notify=False, use_cache=False):
    plexpy.COMMITS_BEHIND = 0

    if plexpy.CONFIG.GIT_TOKEN:
        headers = {'Authorization': 'token {}'.format(plexpy.CONFIG.GIT_TOKEN)}
    else:
        headers = {}

    version = github_cache('version', use_cache=use_cache)
    if not version:
        # Get the latest version available from github
        logger.info('Retrieving latest version information from GitHub')
        url = 'https://api.github.com/repos/%s/%s/commits/%s' % (
            plexpy.CONFIG.GIT_USER, plexpy.CONFIG.GIT_REPO,
            plexpy.CONFIG.GIT_BRANCH)
        version = request.request_json(url,
                                       headers=headers,
                                       timeout=20,
                                       validator=lambda x: type(x) == dict)
        github_cache('version', github_data=version)

    if version is None:
        logger.warn(
            'Could not get the latest version from GitHub. Are you running a local development version?'
        )
        return plexpy.CURRENT_VERSION

    plexpy.LATEST_VERSION = version['sha']
    logger.debug("Latest version is %s", plexpy.LATEST_VERSION)

    # See how many commits behind we are
    if not plexpy.CURRENT_VERSION:
        logger.info(
            'You are running an unknown version of Tautulli. Run the updater to identify your version'
        )
        return plexpy.LATEST_VERSION

    if plexpy.LATEST_VERSION == plexpy.CURRENT_VERSION:
        logger.info('Tautulli is up to date')
        return plexpy.LATEST_VERSION

    commits = github_cache('commits', use_cache=use_cache)
    if not commits:
        logger.info(
            'Comparing currently installed version with latest GitHub version')
        # Need to compare CURRENT << LATEST to get a list of commits
        url = 'https://api.github.com/repos/%s/%s/compare/%s...%s' % (
            plexpy.CONFIG.GIT_USER, plexpy.CONFIG.GIT_REPO,
            plexpy.CURRENT_VERSION, plexpy.LATEST_VERSION)
        commits = request.request_json(url,
                                       headers=headers,
                                       timeout=20,
                                       whitelist_status_code=404,
                                       validator=lambda x: type(x) == dict)
        github_cache('commits', github_data=commits)

    if commits is None:
        logger.warn('Could not get commits behind from GitHub.')
        return plexpy.LATEST_VERSION

    try:
        ahead_by = int(commits['ahead_by'])
        logger.debug("In total, %d commits behind", ahead_by)

        # Do not count [skip ci] commits for Docker or Snap on the nightly branch
        if (plexpy.DOCKER
                or plexpy.SNAP) and plexpy.CONFIG.GIT_BRANCH == 'nightly':
            for commit in reversed(commits['commits']):
                if '[skip ci]' not in commit['commit']['message']:
                    plexpy.LATEST_VERSION = commit['sha']
                    break
                ahead_by -= 1
            install = 'Docker container' if plexpy.DOCKER else 'Snap package'
            logger.debug("%s %d commits behind", install, ahead_by)

        plexpy.COMMITS_BEHIND = ahead_by
    except KeyError:
        logger.info(
            'Cannot compare versions. Are you running a local development version?'
        )
        plexpy.COMMITS_BEHIND = 0

    if plexpy.COMMITS_BEHIND > 0:
        logger.info('New version is available. You are %s commits behind' %
                    plexpy.COMMITS_BEHIND)

        releases = github_cache('releases', use_cache=use_cache)
        if not releases:
            url = 'https://api.github.com/repos/%s/%s/releases' % (
                plexpy.CONFIG.GIT_USER, plexpy.CONFIG.GIT_REPO)
            releases = request.request_json(
                url,
                timeout=20,
                whitelist_status_code=404,
                validator=lambda x: type(x) == list)
            github_cache('releases', github_data=releases)

        if releases is None:
            logger.warn('Could not get releases from GitHub.')
            return plexpy.LATEST_VERSION

        if plexpy.CONFIG.GIT_BRANCH == 'master':
            release = next((r for r in releases if not r['prerelease']),
                           releases[0])
        elif plexpy.CONFIG.GIT_BRANCH == 'beta':
            release = next(
                (r
                 for r in releases if not r['tag_name'].endswith('-nightly')),
                releases[0])
        elif plexpy.CONFIG.GIT_BRANCH == 'nightly':
            release = next((r for r in releases), releases[0])
        else:
            release = releases[0]

        plexpy.LATEST_RELEASE = release['tag_name']

        if plexpy.CONFIG.GIT_BRANCH in (
                'master', 'beta'
        ) and release['target_commitish'] == plexpy.CURRENT_VERSION:
            logger.info('Tautulli is up to date')
            return plexpy.CURRENT_VERSION

        if notify:
            plexpy.NOTIFY_QUEUE.put({
                'notify_action':
                'on_plexpyupdate',
                'plexpy_download_info':
                release,
                'plexpy_update_commit':
                plexpy.LATEST_VERSION,
                'plexpy_update_behind':
                plexpy.COMMITS_BEHIND
            })

        if plexpy.PYTHON2:
            logger.warn(
                'Tautulli is running using Python 2. Unable to run automatic update.'
            )

        elif scheduler and plexpy.CONFIG.PLEXPY_AUTO_UPDATE and \
                not plexpy.DOCKER and not plexpy.SNAP and not plexpy.FROZEN:
            logger.info('Running automatic update.')
            plexpy.shutdown(restart=True, update=True)

    elif plexpy.COMMITS_BEHIND == 0:
        logger.info('Tautulli is up to date')

    return plexpy.LATEST_VERSION
Example #37
from __future__ import print_function
from future.builtins import object
from future.builtins import next
from future import standard_library
import configparser  # Py3-style import

standard_library.install_aliases()


class Upper(object):
    def __init__(self, iterable):
        self._iter = iter(iterable)

    def __next__(self):  # Py3-style iterator interface
        return next(self._iter).upper()

    def __iter__(self):
        return self


itr = Upper('hello')
print(next(itr), end=' ')  # Py3-style print function
for letter in itr:
    print(letter, end=' ')
Example #38
def test_sample_state_one_sided_auxiliary_nodes(empty_graph):
    empty_graph.add_node(1, span=0)
    with pytest.raises(ValueError):
        next(percolate.sample_states(empty_graph, spanning_cluster=True))
Example #39
 def __next__(self):  # note the Py3 interface
     return next(self._iter).upper()
Example #40
def test_sample_state_not_implemented_model(empty_graph):
    with pytest.raises(ValueError):
        next(percolate.sample_states(empty_graph, model='site'))
Example #41
def test_sample_state_no_auxiliary_nodes(empty_graph):
    with pytest.raises(ValueError):
        next(percolate.sample_states(empty_graph, spanning_cluster=True))
Example #42
def test_microcanonical_averages_zero_alpha(grid_3x3_graph):
    with pytest.raises(ValueError):
        next(percolate.microcanonical_averages(grid_3x3_graph, alpha=0.0))
Example #43
def test_microcanonical_averages_nonfloat_alpha(grid_3x3_graph):
    with pytest.raises(ValueError):
        next(percolate.microcanonical_averages(grid_3x3_graph, alpha='huge'))
Example #44
def test_microcanonical_averages_nonpositive_runs(grid_3x3_graph):
    with pytest.raises(ValueError):
        next(percolate.microcanonical_averages(grid_3x3_graph, runs=0))
Example #45
    def header_encode_lines(self, string, maxlengths):
        """Header-encode a string by converting it first to bytes.

        This is similar to `header_encode()` except that the string is fit
        into maximum line lengths as given by the argument.

        :param string: A unicode string for the header.  It must be possible
            to encode this string to bytes using the character set's
            output codec.
        :param maxlengths: Maximum line length iterator.  Each element
            returned from this iterator will provide the next maximum line
            length.  This parameter is used as an argument to built-in next()
            and should never be exhausted.  The maximum line lengths should
            not count the RFC 2047 chrome.  These line lengths are only a
            hint; the splitter does the best it can.
        :return: Lines of encoded strings, each with RFC 2047 chrome.
        """
        # See which encoding we should use.
        codec = self.output_codec or "us-ascii"
        header_bytes = _encode(string, codec)
        encoder_module = self._get_encoder(header_bytes)
        encoder = partial(encoder_module.header_encode, charset=codec)
        # Calculate the number of characters that the RFC 2047 chrome will
        # contribute to each line.
        charset = self.get_output_charset()
        extra = len(charset) + RFC2047_CHROME_LEN
        # Now comes the hard part.  We must encode bytes but we can't split on
        # bytes because some character sets are variable length and each
        # encoded word must stand on its own.  So the problem is you have to
        # encode to bytes to figure out this word's length, but you must split
        # on characters.  This causes two problems: first, we don't know how
        # many octets a specific substring of unicode characters will get
        # encoded to, and second, we don't know how many ASCII characters
        # those octets will get encoded to.  Unless we try it.  Which seems
        # inefficient.  In the interest of being correct rather than fast (and
        # in the hope that there will be few encoded headers in any such
        # message), brute force it. :(
        lines = []
        current_line = []
        maxlen = next(maxlengths) - extra
        for character in string:
            current_line.append(character)
            this_line = EMPTYSTRING.join(current_line)
            length = encoder_module.header_length(_encode(this_line, charset))
            if length > maxlen:
                # This last character doesn't fit so pop it off.
                current_line.pop()
                # Does nothing fit on the first line?
                if not lines and not current_line:
                    lines.append(None)
                else:
                    separator = " " if lines else ""
                    joined_line = EMPTYSTRING.join(current_line)
                    header_bytes = _encode(joined_line, codec)
                    lines.append(encoder(header_bytes))
                current_line = [character]
                maxlen = next(maxlengths) - extra
        joined_line = EMPTYSTRING.join(current_line)
        header_bytes = _encode(joined_line, codec)
        lines.append(encoder(header_bytes))
        return lines
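
As the docstring notes, maxlengths is consumed with next() and must never be exhausted. A common way to satisfy that (a sketch, not part of the email API itself) is an endless iterator, for example a first-line length followed by a repeating continuation length:

import itertools

maxlengths = itertools.chain([66], itertools.repeat(76))
assert next(maxlengths) == 66
assert next(maxlengths) == 76
assert next(maxlengths) == 76   # never runs out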
Example #46
 def advance_i2(self):
     try:
         val = next(self.i2)
     except StopIteration:
         val = None
     self.f2 = val
Example #47
def sample_states(graph,
                  spanning_cluster=True,
                  model='bond',
                  copy_result=True):
    r'''
    Generate successive sample states of the percolation model

    This is a :ref:`generator function <python:tut-generators>` to successively
    add one edge at a time from the graph to the percolation model.
    At each iteration, it calculates and returns the cluster statistics.

    Parameters
    ----------
    graph : networkx.Graph
        The substrate graph on which percolation is to take place

    spanning_cluster : bool, optional
        Whether to detect a spanning cluster or not.
        Defaults to ``True``.

    model : str, optional
        The percolation model (either ``'bond'`` or ``'site'``).
        Defaults to ``'bond'``.

        .. note:: Other models than ``'bond'`` are not supported yet.

    copy_result : bool, optional
        Whether to return a copy or a reference to the result dictionary.
        Defaults to ``True``.

    Yields
    ------
    ret : dict
        Cluster statistics

    ret['n'] : int
        Number of occupied bonds

    ret['N'] : int
        Total number of sites

    ret['M'] : int
        Total number of bonds

    ret['has_spanning_cluster'] : bool
        ``True`` if there is a spanning cluster, ``False`` otherwise.
        Only exists if `spanning_cluster` argument is set to ``True``.

    ret['max_cluster_size'] : int
        Size of the largest cluster (absolute number of sites)

    ret['moments'] : 1-D :py:class:`numpy.ndarray` of int
        Array of size ``5``.
        The ``k``-th entry is the ``k``-th raw moment of the (absolute) cluster
        size distribution, with ``k`` ranging from ``0`` to ``4``.

    Raises
    ------
    ValueError
        If `model` does not equal ``'bond'``.

    ValueError
        If `spanning_cluster` is ``True``, but `graph` does not contain any
        auxiliary nodes to detect spanning clusters.

    See also
    --------

    microcanonical_averages : Evolves multiple sample states in parallel

    Notes
    -----
    Iterating through this generator is a single run of the Newman-Ziff
    algorithm. [2]_
    The first iteration yields the trivial state with :math:`n = 0` occupied
    bonds.

    Spanning cluster

        In order to detect a spanning cluster, `graph` needs to contain
        auxiliary nodes and edges, cf. Reference [2]_, Figure 6.
        The auxiliary nodes and edges have the ``'span'`` `attribute
        <http://networkx.github.io/documentation/latest/tutorial/tutorial.html#node-attributes>`_.
        The value is either ``0`` or ``1``, distinguishing the two sides of the
        graph to span.

    Raw moments of the cluster size distribution

        The :math:`k`-th raw moment of the (absolute) cluster size distribution
        is :math:`\sum_s' s^k N_s`, where :math:`s` is the cluster size and
        :math:`N_s` is the number of clusters of size :math:`s`. [3]_
        The primed sum :math:`\sum'` signifies that the largest cluster is
        excluded from the sum. [4]_

    References
    ----------
    .. [2] Newman, M. E. J. & Ziff, R. M. Fast monte carlo algorithm for site
        or bond percolation. Physical Review E 64, 016706+ (2001),
        `doi:10.1103/physreve.64.016706 <http://dx.doi.org/10.1103/physreve.64.016706>`_.

    .. [3] Stauffer, D. & Aharony, A. Introduction to Percolation Theory (Taylor &
       Francis, London, 1994), second edn.

    .. [4] Binder, K. & Heermann, D. W. Monte Carlo Simulation in Statistical
       Physics (Springer, Berlin, Heidelberg, 2010),
       `doi:10.1007/978-3-642-03163-2 <http://dx.doi.org/10.1007/978-3-642-03163-2>`_.
    '''

    if model != 'bond':
        raise ValueError('Only bond percolation supported.')

    if spanning_cluster:
        auxiliary_node_attributes = nx.get_node_attributes(graph, 'span')
        auxiliary_nodes = auxiliary_node_attributes.keys()
        if not list(auxiliary_nodes):
            raise ValueError(
                'Spanning cluster is to be detected, but no auxiliary nodes '
                'given.')

        spanning_sides = list(set(auxiliary_node_attributes.values()))
        if len(spanning_sides) != 2:
            raise ValueError(
                'Spanning cluster is to be detected, but auxiliary nodes '
                'of less or more than 2 types (sides) given.')

        auxiliary_edge_attributes = nx.get_edge_attributes(graph, 'span')

    # get subgraph on which percolation is to take place (strip off the
    # auxiliary nodes)
    if spanning_cluster:
        perc_graph = graph.subgraph([
            node for node in graph.nodes_iter()
            if 'span' not in graph.node[node]
        ])
    else:
        perc_graph = graph

    # get a list of edges for easy access in later iterations
    perc_edges = perc_graph.edges()

    # number of nodes N
    num_nodes = nx.number_of_nodes(perc_graph)

    # number of edges M
    num_edges = nx.number_of_edges(perc_graph)

    # initial iteration: no edges added yet (n == 0)
    ret = dict()

    ret['n'] = 0
    ret['N'] = num_nodes
    ret['M'] = num_edges
    ret['max_cluster_size'] = 1
    ret['moments'] = np.ones(5) * (num_nodes - 1)

    if spanning_cluster:
        ret['has_spanning_cluster'] = False

    if copy_result:
        yield copy.deepcopy(ret)
    else:
        yield ret

    # permute edges
    perm_edges = np.random.permutation(num_edges)

    # set up disjoint set (union-find) data structure
    ds = nx.utils.union_find.UnionFind()
    if spanning_cluster:
        ds_spanning = nx.utils.union_find.UnionFind()

        # merge all auxiliary nodes for each side
        side_roots = dict()
        for side in spanning_sides:
            nodes = [
                node
                for (node, node_side) in auxiliary_node_attributes.items()
                if node_side is side
            ]
            ds_spanning.union(*nodes)
            side_roots[side] = ds_spanning[nodes[0]]

        for (edge, edge_side) in auxiliary_edge_attributes.items():
            ds_spanning.union(side_roots[edge_side], *edge)

        side_roots = [
            ds_spanning[side_root] for side_root in side_roots.values()
        ]

    # get first node
    max_cluster_root = next(perc_graph.nodes_iter())

    # loop over all edges (n == 1..M)
    for n in range(num_edges):
        ret['n'] = n + 1

        # draw new edge from permutation
        edge_index = perm_edges[n]
        edge = perc_edges[edge_index]
        ret['edge'] = edge

        # find roots and weights
        roots = [ds[node] for node in edge]
        weights = [ds.weights[root] for root in roots]

        if roots[0] is not roots[1]:
            # not same cluster: union!
            ds.union(*roots)
            if spanning_cluster:
                ds_spanning.union(*roots)

                ret['has_spanning_cluster'] = (
                    ds_spanning[side_roots[0]] == ds_spanning[side_roots[1]])

            # find new root and weight
            root = ds[edge[0]]
            weight = ds.weights[root]

            # moments and maximum cluster size

            # deduct the previous sub-maximum clusters from moments
            for i in [0, 1]:
                if roots[i] is max_cluster_root:
                    continue
                ret['moments'] -= weights[i]**np.arange(5)

            if max_cluster_root in roots:
                # merged with maximum cluster
                max_cluster_root = root
                ret['max_cluster_size'] = weight
            else:
                # merged previously sub-maximum clusters
                if ret['max_cluster_size'] >= weight:
                    # previously largest cluster remains largest cluster
                    # add merged cluster to moments
                    ret['moments'] += weight**np.arange(5)
                else:
                    # merged cluster overtook previously largest cluster
                    # add previously largest cluster to moments
                    max_cluster_root = root
                    ret['moments'] += ret['max_cluster_size']**np.arange(5)
                    ret['max_cluster_size'] = weight

        if copy_result:
            yield copy.deepcopy(ret)
        else:
            yield ret
Esempio n. 48
0
    def cluster(self):
        linker = self.deduper
        data_1 = self.data_1 if self.data_1 else self._read_data(self.input_file_base)
        data_2 = self.data_2 if self.data_2 else self._read_data(self.input_file)
        output_file = self.output_file

        # threshold1 = linker.threshold(data_1, data_2, recall_weight=2)

        linked_records = linker.match(data_1, data_2, threshold=0.1)
        # linked_records = [data for data in linked_records]
        print(linked_records)
        # import pdb;pdb.set_trace()
        print('# duplicate sets', len(linked_records))

        # ## Writing Results
        
        # Write our original data back out to a CSV with a new column called 
        # 'Cluster ID' which indicates which records refer to each other.
        
        # import pdb; pdb.set_trace()
        cluster_membership = {}
        cluster_id = None
        for cluster_id, (cluster, score) in enumerate(linked_records):
            for record_id in cluster:
                cluster_membership[record_id] = (cluster_id, score)

        print(cluster_membership)
        
        if cluster_id is not None:
            unique_id = cluster_id + 1
        else:
            unique_id = 0
            
        
        with open(output_file, 'w') as f:
            writer = csv.writer(f)
            
            header_unwritten = True
        
            for fileno, filename in enumerate((self.input_file_base, self.input_file)) :
                with open(filename) as f_input :
                    reader = csv.reader(f_input)
        
                    if header_unwritten :
                        heading_row = next(reader)
                        heading_row.insert(0, 'source file')
                        heading_row.insert(0, 'Link Score')
                        heading_row.insert(0, 'Cluster ID')
                        writer.writerow(heading_row)
                        header_unwritten = False
                    else :
                        next(reader)
        
                    for row_id, row in enumerate(reader):
                        # print(str(filename)+ str(row_id))
                        # import pdb;pdb.set_trace()
                        cluster_details = cluster_membership.get(str(filename) + str(row[0]))
                        if cluster_details is None :
                            # cluster_id = unique_id
                            # unique_id += 1
                            # score = None
                            continue
                        else :
                            cluster_id, score = cluster_details
                        row.insert(0, fileno)
                        row.insert(0, score)
                        row.insert(0, cluster_id)
                        writer.writerow(row)
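
The loop after the match call builds a record-id to (cluster, score) lookup, and the writer loop later retrieves entries by str(filename) + str(row[0]). A toy version with invented record ids shows the shape this mapping expects.

# Hypothetical shape of linker.match(...) output: (record ids, score) pairs.
linked_records = [(('a.csv1', 'b.csv7'), 0.93), (('a.csv2', 'b.csv5'), 0.88)]

cluster_membership = {}
for cluster_id, (cluster, score) in enumerate(linked_records):
    for record_id in cluster:
        cluster_membership[record_id] = (cluster_id, score)

print(cluster_membership['b.csv5'])  # (1, 0.88)
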
Esempio n. 49
0
 def __next__(self):  # Py3-style iterator interface
     return next(self._iter).upper()
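
On its own this __next__ only makes sense inside a wrapper class; a self-contained sketch of such an upper-casing iterator (class name and attribute invented here) could look like this.

class UpperIter:
    """Wrap an iterator of strings and upper-case each item (illustrative only)."""
    def __init__(self, iterable):
        self._iter = iter(iterable)

    def __iter__(self):
        return self

    def __next__(self):  # Py3-style iterator interface
        return next(self._iter).upper()

print(list(UpperIter(['a', 'b', 'c'])))  # ['A', 'B', 'C']
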
Esempio n. 50
0
    def start(self):
        """
        To start the importation
        """
        if self.ownSettings is None:
            self.__iface.messageBar().pushMessage(QCoreApplication.translate("VDLTools", "No settings given !!"),
                                                  level=QgsMessageBar.CRITICAL, duration=0)
            return
        if self.ownSettings.importUriDb is None:
            self.__iface.messageBar().pushMessage(QCoreApplication.translate("VDLTools", "No import db given !!"),
                                                  level=QgsMessageBar.CRITICAL, duration=0)
            return
        if self.ownSettings.importSchemaDb is None:
            self.__iface.messageBar().pushMessage(QCoreApplication.translate("VDLTools", "No import db schema given !!"),
                                                  level=QgsMessageBar.CRITICAL, duration=0)
            return
        if self.ownSettings.importConfigTable is None:
            self.__iface.messageBar().pushMessage(QCoreApplication.translate("VDLTools", "No import config table given !!"),
                                                  level=QgsMessageBar.CRITICAL, duration=0)
            return
        self.__configTable = self.ownSettings.importConfigTable
        self.__schemaDb = self.ownSettings.importSchemaDb

        self.__connector = DBConnector(self.ownSettings.importUriDb, self.__iface)
        self.__db = self.__connector.setConnection()
        if self.__db is not None:
            query = self.__db.exec_("""SELECT DISTINCT sourcelayer_name FROM %s.%s WHERE sourcelayer_name IS NOT NULL"""
                                    % (self.__schemaDb, self.__configTable))
            if query.lastError().isValid():
                self.__iface.messageBar().pushMessage(
                    query.lastError().text(), level=QgsMessageBar.CRITICAL, duration=0)
                self.__cancel()
            else:
                while next(query):
                    if self.__sourceTable == "":
                        self.__sourceTable = query.value(0)
                    elif self.__sourceTable != query.value(0):
                        self.__iface.messageBar().pushMessage(
                            QCoreApplication.translate("VDLTools", "different sources in config table ?!?"),
                            level=QgsMessageBar.WARNING)
                for layer in self.__iface.mapCanvas().layers():
                    if layer is not None and layer.type() == QgsMapLayer.VectorLayer and \
                                    layer.providerType() == "postgres":
                        uri = QgsDataSourceURI(layer.source())
                        if self.__sourceTable == uri.schema() + "." + uri.table():
                            self.__selectedFeatures = []
                            for f in layer.selectedFeatures():
                                self.__selectedFeatures.append(f.attribute("ID"))
                            break

                #  select jobs
                query = self.__db.exec_(("""SELECT DISTINCT usr_session_name FROM %s WHERE """ % self.__sourceTable) +
                                        """usr_valid = FALSE AND usr_session_name IS NOT NULL""")
                if query.lastError().isValid():
                    self.__iface.messageBar().pushMessage(
                        query.lastError().text(), level=QgsMessageBar.CRITICAL, duration=0)
                    self.__cancel()
                else:
                    jobs = []
                    while next(query):
                        jobs.append(query.value(0))
                    if len(jobs) == 0 and (self.__selectedFeatures is None or len(self.__selectedFeatures) == 0):
                        self.__cancel()
                    else:
                        selected = True
                        if self.__selectedFeatures is None or len(self.__selectedFeatures) == 0:
                            selected = False
                        self.__jobsDlg = ImportJobsDialog(jobs, selected)
                        self.__jobsDlg.jobsRadio().clicked.connect(self.__onJobsRadio)
                        if self.__jobsDlg.pointsRadio() is not None:
                            self.__jobsDlg.pointsRadio().clicked.connect(self.__onPointsRadio)
                        self.__jobsDlg.rejected.connect(self.__cancel)
                        self.__jobsDlg.okButton().clicked.connect(self.__onOk)
                        self.__jobsDlg.cancelButton().clicked.connect(self.__onCancel)
                        self.__jobsDlg.show()
Esempio n. 51
0
def set_newsletter_config(newsletter_id=None, agent_id=None, **kwargs):
    if str(agent_id).isdigit():
        agent_id = int(agent_id)
    else:
        logger.error("Tautulli Newsletters :: Unable to set existing newsletter: invalid agent_id %s."
                     % agent_id)
        return False

    agent = next((a for a in available_newsletter_agents() if a['id'] == agent_id), None)

    if not agent:
        logger.error("Tautulli Newsletters :: Unable to retrieve existing newsletter agent: invalid agent_id %s."
                     % agent_id)
        return False

    config_prefix = 'newsletter_config_'
    email_config_prefix = 'newsletter_email_'

    newsletter_config = {k[len(config_prefix):]: kwargs.pop(k)
                         for k in list(kwargs.keys()) if k.startswith(config_prefix)}
    email_config = {k[len(email_config_prefix):]: kwargs.pop(k)
                    for k in list(kwargs.keys()) if k.startswith(email_config_prefix)}

    for cfg, val in email_config.items():
        # Check for password config keys and a blank password from the HTML form
        if 'password' in cfg and val == '    ':
            # Get the previous password so we don't overwrite it with a blank value
            old_newsletter_config = get_newsletter_config(newsletter_id=newsletter_id)
            email_config[cfg] = old_newsletter_config['email_config'][cfg]

    subject = kwargs.pop('subject')
    body = kwargs.pop('body')
    message = kwargs.pop('message')

    agent_class = get_agent_class(agent_id=agent['id'],
                                  config=newsletter_config, email_config=email_config,
                                  subject=subject, body=body, message=message)

    keys = {'id': newsletter_id}
    values = {'agent_id': agent['id'],
              'agent_name': agent['name'],
              'agent_label': agent['label'],
              'id_name': kwargs.get('id_name', ''),
              'friendly_name': kwargs.get('friendly_name', ''),
              'newsletter_config': json.dumps(agent_class.config),
              'email_config': json.dumps(agent_class.email_config),
              'subject': agent_class.subject,
              'body': agent_class.body,
              'message': agent_class.message,
              'cron': kwargs.get('cron'),
              'active': kwargs.get('active')
              }

    db = database.MonitorDatabase()
    try:
        db.upsert(table_name='newsletters', key_dict=keys, value_dict=values)
        logger.info("Tautulli Newsletters :: Updated newsletter agent: %s (newsletter_id %s)."
                    % (agent['label'], newsletter_id))
        newsletter_handler.schedule_newsletters(newsletter_id=newsletter_id)
        blacklist_logger()
        return True
    except Exception as e:
        logger.warn("Tautulli Newsletters :: Unable to update newsletter agent: %s." % e)
        return False
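
The agent lookup above uses next() with a generator expression and a default value, which returns the first match instead of raising StopIteration when nothing matches; stripped down, with an invented agent list:

available = [{'id': 0, 'label': 'recently_added'}, {'id': 1, 'label': 'custom'}]

agent = next((a for a in available if a['id'] == 1), None)
missing = next((a for a in available if a['id'] == 99), None)

print(agent)    # {'id': 1, 'label': 'custom'}
print(missing)  # None
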
Esempio n. 52
0
    def __insert(self):
        """
        To insert the data into the tables
        """
        not_added = []
        for data in self.__data:
            if data['add']:
                destLayer = ""
                request = """INSERT INTO %s.%s""" % (data['schema_table'], data['name_table'])
                columns = "(id,geometry3d"
                values = """(nextval('%s.%s_id_seq'::regclass),'%s'""" \
                         % (data['schema_table'], data['name_table'], data['geom'])

                #  select import data for insertion
                query = self.__db.exec_("""SELECT destinationlayer_name,destinationcolumn_name,static_value FROM """ +
                                        """%s.%s WHERE code = '%s' AND static_value IS NOT NULL"""
                                        % (self.__schemaDb, self.__configTable, str(data['code'])))
                if query.lastError().isValid():
                    self.__iface.messageBar().pushMessage(
                        query.lastError().text(), level=QgsMessageBar.CRITICAL, duration=0)
                else:
                    while next(query):
                        if destLayer == "":
                            destLayer = query.value(0)
                        elif destLayer != query.value(0):
                            self.__iface.messageBar().pushMessage(
                                QCoreApplication.translate("VDLTools",
                                                           "different destination layer in config table ?!?"),
                                level=QgsMessageBar.WARNING)
                        columns += "," + query.value(1)
                        values += "," + query.value(2)
                    columns += ")"
                    values += ")"
                    request += """ %s VALUES %s RETURNING id""" % (columns, values)
                    #  insert data
                    query2 = self.__db.exec_(request)
                    if query2.lastError().isValid():
                        self.__iface.messageBar().pushMessage(
                            query2.lastError().text(), level=QgsMessageBar.CRITICAL, duration=0)
                    else:
                        self.__num += 1
                        query2.first()
                        id_object = query2.value(0)
                        # update source table
                        query3 = self.__db.exec_(
                            ("""UPDATE %s SET usr_valid_date = '%s', usr_valid = TRUE, usr_fk_network_element = %s,"""
                             % (self.__sourceTable, str(datetime.date(datetime.now())), str(id_object))) +
                            (""" usr_fk_table = %s, usr_import_user = '******' WHERE id = %s"""
                            % (str(data['id_table']), self.__db.userName(), str(data['id_survey']))))
                        if query3.lastError().isValid():
                            self.__iface.messageBar().pushMessage(query3.lastError().text(),
                                                                  level=QgsMessageBar.CRITICAL, duration=0)
            else:
                not_added.append(data)
        if len(not_added) > 0:
            self.__measDlg = ImportMeasuresDialog(not_added)
            self.__measDlg.rejected.connect(self.__validAndNext)
            self.__measDlg.accepted.connect(self.__deleteAndNext)
            self.__measDlg.okButton().clicked.connect(self.__onDeleteOk)
            self.__measDlg.cancelButton().clicked.connect(self.__onDeleteCancel)
            self.__measDlg.show()
        else:
            self.__conclude()
Esempio n. 53
0
    def _addTaxRangeR(self, node, noUpwardLevels=False):
        """recursive method to add TaxRange property tags."""
        if self.parser.is_ortholog_group(node) or self.parser.is_paralog_group(node) \
                or OrthoXMLQuery.is_geneRef_node(node):
            species_covered, nr_genes = self.parser.get_species_below_node(
                node, return_gene_total_count=True)
            species_covered = set(
                self.tax.map_potential_internal_speciesname_to_leaf(s)
                for s in species_covered)
            current_level = self.tax.mrca(species_covered)
            og_tag = '{{{}}}orthologGroup'.format(OrthoXMLQuery.ns['ns0'])

            if self.parser.is_ortholog_group(node):
                comp_score = OrthoXMLQuery.getScoreNodes(
                    node, 'CompletenessScore')
                if len(comp_score) == 0:
                    node.append(
                        self._createCompletnessScoreTag(
                            current_level, species_covered))
                node.append(self._createNrMemberGeneTag(nr_genes))
                taxrange = OrthoXMLQuery.getTaxRangeNodes(node, False)
                taxid = OrthoXMLQuery.getTaxidNodes(node, False)
                if len(taxrange) > 0:
                    # check consistency between current_level and value stored in taxrange
                    if taxrange[0].get('value') != current_level:
                        raise Exception(
                            "Inconsistent TaxRange: {} vs current_level {}".
                            format(taxrange[0].get('value'), current_level))
                    if len(taxid) > 0:
                        if taxid[0].get('value') != self.taxrange_2_taxid[
                                current_level]:
                            raise Exception(
                                "Inconsitency between taxids: {} vs {}".format(
                                    taxid[0].get('value'),
                                    self.taxrange_2_taxid[current_level]))
                    else:
                        try:
                            node.append(self._create_taxid(current_level))
                        except KeyError:
                            pass
                else:
                    node.append(self._createTaxRangeTags(current_level))

            try:  # find the closest ancestral orthogroup if it has a TaxRange property
                parent_orthogroup = next(node.iterancestors(og_tag))
                parent_levels = {
                    z.get('value')
                    for z in OrthoXMLQuery.getTaxRangeNodes(
                        parent_orthogroup, False)
                }
            except StopIteration:  # couldn't find a parent with a TaxRange property; no extra annotation possible
                parent_levels = set([])

            if len(parent_levels) > 0:
                most_recent_parent_level = self.tax.mostSpecific(parent_levels)

                # Ortholog Node - append missing tax range(s) as property tags under the current node
                if self.parser.is_ortholog_group(node):
                    self._insertOGs_between(node.getparent(),
                                            node,
                                            current_level,
                                            most_recent_parent_level,
                                            nr_genes,
                                            species_covered,
                                            include_self=False)

                # Paralog Node - insert ortholog node between self and parent; add missing tax range(s) to new parent
                elif self.parser.is_paralog_group(node):
                    if self.tax.levels_between(most_recent_parent_level,
                                               current_level) > 1:
                        self._insertOGs_between(node.getparent(),
                                                node,
                                                current_level,
                                                most_recent_parent_level,
                                                nr_genes,
                                                species_covered,
                                                include_self=False)

                # GeneRef Node - insert ortholog node between self and parent; add all tax range(s) to new parent
                else:
                    self._insertOGs_between(node.getparent(),
                                            node,
                                            current_level,
                                            most_recent_parent_level,
                                            nr_genes,
                                            species_covered,
                                            include_self=True)
                    return

            for child in node:
                self._addTaxRangeR(child, noUpwardLevels)
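
The try/except around next(node.iterancestors(og_tag)) is the usual way to grab the nearest ancestor matching a tag and fall back when there is none; a minimal lxml sketch of the same pattern, with an invented document:

from lxml import etree

root = etree.fromstring('<a><b><c/></b></a>')
leaf = root.find('.//c')
try:
    nearest_a = next(leaf.iterancestors('a'))   # closest ancestor tagged 'a'
except StopIteration:                           # no such ancestor: fall back
    nearest_a = None

print(nearest_a.tag if nearest_a is not None else None)  # 'a'
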
Esempio n. 54
0
    cluster_d = [data_d[c] for c in id_set]
    canonical_rep = dedupe.canonicalize(cluster_d)
    for record_id, score in zip(id_set, scores):
        cluster_membership[record_id] = {
            "cluster id": cluster_id,
            "canonical representation": canonical_rep,
            "confidence": score
        }

singleton_id = cluster_id + 1

with open(output_file, 'w') as f_output, open(input_file) as f_input:
    writer = csv.writer(f_output)
    reader = csv.reader(f_input)

    heading_row = next(reader)
    heading_row.insert(0, 'confidence_score')
    heading_row.insert(0, 'Cluster ID')
    canonical_keys = canonical_rep.keys()
    for key in canonical_keys:
        heading_row.append('canonical_' + key)

    writer.writerow(heading_row)

    for row in reader:
        row_id = int(row[0])
        if row_id in cluster_membership:
            cluster_id = cluster_membership[row_id]["cluster id"]
            canonical_rep = cluster_membership[row_id][
                "canonical representation"]
            row.insert(0, cluster_membership[row_id]['confidence'])
Esempio n. 55
0
 def advance_i1(self):
     try:
         val = next(self.i1)
     except StopIteration:
         val = None
     self.f1 = val
Esempio n. 56
0
 def eat_spaces(self):
     while self.streamer.peek().isspace():
         next(self.streamer)
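
The advance_i1 fragment above catches StopIteration by hand to substitute None; when a sentinel value is acceptable, the two-argument form next(iterator, default) does the same in one call:

it = iter([1, 2])

print(next(it, None))  # 1
print(next(it, None))  # 2
print(next(it, None))  # None instead of raising StopIteration
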
Esempio n. 57
0
def deDuplication():

    input_file = 'csv_example_input.csv'
    output_file = 'csv_example_output.csv'
    settings_file = 'csv_example_learned_settings3'
    training_file = 'csv_example_training.json3'

    def preProcess(column):

        try:
            column = column.decode('utf-8')
        except AttributeError:
            pass
        column = unidecode(column)
        column = re.sub(' +', ' ', column)
        column = re.sub('\n', ' ', column)
        column = column.strip().strip('"').strip("'").lower().strip()

        if not column:
            column = None
        return column

    # Read in the data from CSV file:
    def readData(filename):

        data_d = {}
        with open(filename, encoding="utf-8") as f:
            reader = csv.DictReader(f)
            for row in reader:
                clean_row = [(k, preProcess(v)) for (k, v) in row.items()]
                row_id = row['id']
                data_d[row_id] = dict(clean_row)

        return data_d

    print('importing data ...')
    data_d = readData(input_file)

    if os.path.exists(settings_file):
        print('reading from', settings_file)
        with open(settings_file, 'rb') as f:
            deduper = dedupe.StaticDedupe(f)
    else:
        fields = [
            {
                'field': 'DisplayName_Processed',
                'type': 'String'
            },
            {
                'field': 'Email_Processed',
                'type': 'String'
            },
            {
                'field': 'Info',
                'type': 'String'
            },
        ]
        deduper = dedupe.Dedupe(fields)
        deduper.sample(data_d, 15000)

        if os.path.exists(training_file):
            print('reading labeled examples from ', training_file)
            with open(training_file, 'rb') as f:
                deduper.readTraining(f)

        print('starting active labeling...')

        dedupe.consoleLabel(deduper)

        deduper.train()

        with open(training_file, 'w') as tf:
            deduper.writeTraining(tf)

        with open(settings_file, 'wb') as sf:
            deduper.writeSettings(sf)

    threshold = deduper.threshold(data_d, recall_weight=1)

    print('clustering...')
    clustered_dupes = deduper.match(data_d, threshold)

    print('# duplicate sets', len(clustered_dupes))

    cluster_membership = {}
    cluster_id = 0
    for (cluster_id, cluster) in enumerate(clustered_dupes):
        id_set, scores = cluster
        cluster_d = [data_d[c] for c in id_set]
        canonical_rep = dedupe.canonicalize(cluster_d)
        for record_id, score in zip(id_set, scores):
            cluster_membership[record_id] = {
                "cluster id": cluster_id,
                "canonical representation": canonical_rep,
                "confidence": score
            }

    singleton_id = cluster_id + 1

    with open(output_file, 'w',
              encoding="utf-8") as f_output, open(input_file,
                                                  encoding="utf-8") as f_input:
        writer = csv.writer(f_output)
        reader = csv.reader(f_input)

        heading_row = next(reader)
        heading_row.insert(0, 'confidence_score')
        heading_row.insert(0, 'Cluster ID')
        canonical_keys = canonical_rep.keys()
        for key in canonical_keys:
            heading_row.append('canonical_' + key)

        writer.writerow(heading_row)

        for row in reader:
            row_id = row[0]
            if row_id in cluster_membership:
                cluster_id = cluster_membership[row_id]["cluster id"]
                canonical_rep = cluster_membership[row_id][
                    "canonical representation"]
                row.insert(0, cluster_membership[row_id]['confidence'])
                row.insert(0, cluster_id)
                for key in canonical_keys:
                    row.append(canonical_rep[key].encode('utf8'))
            else:
                row.insert(0, None)
                row.insert(0, singleton_id)
                singleton_id += 1
                for key in canonical_keys:
                    row.append(None)
            writer.writerow(row)
    return clustered_dupes
Esempio n. 58
0
def import_logs(folder, search, keys, time_key="^Time = "):
    """
        keys = {"ExectionTime": ["ExecTime", "ClockTime"]}

        return a DataFrame

              Loc, Time KeyName1 Keyname2
                1   0.1

                    0.2
                2
    """

    def find_start(log):
        """ Fast forward through file till 'Starting time loop' """
        for i, line in enumerate(log):
            if "Starting time loop" in line:
                return i


    def extract(line, keys):
        """
            returns key and values as list
                "ExecutionTime":[0,1]
        """
        import re
        for key, col_names in keys.items():
            if re.search(key, line):
                return col_names, list(
                    map(float, filter(None,
                        re.findall(r"[0-9\-]+[.]?[0-9]*[e]?[\-\+]?[0-9]*", line))))
        return None, None

    fold, dirs, files = next(os.walk(folder))
    logs = [fold + "/" + log for log in files if search in log]
    p_bar = ProgressBar(n_tot = len(logs))
    # Lets make sure that we find Timesteps in the log
    keys.update({time_key: ['Time']})

    for log_number, log_name in enumerate(logs):
        with open(log_name, encoding="utf-8") as log:
            f = log.readlines()
            start = find_start(f)
            dataDict = defaultdict(list)
            df=DataFrame()
            for line in f[start:-1]:
                 col_names, values = extract(line, keys)
                 if not col_names:
                    continue
                 if col_names[0] == 'Time':
                    # a new time step has begun
                    # flush datadict and concat to df
                    # Very slow but, so far the solution
                    # to keep subiterations attached to correct time
                    # FIXME: still needs handling of different length dictionaries
                    df = concat([df, DataFrame(dataDict)])
                    dataDict = defaultdict(list)
                 for i, col in enumerate(col_names):
                    dataDict[col].append(values[i])
        p_bar.next()
        try:
            df.index=range(len(df))
            df.index.names=['Id']
            df['Loc'] = log_number
            df.set_index('Time', append=True, inplace=True)
            df.set_index('Loc', append=True, inplace=True)
            df = df.reorder_levels(['Loc','Time','Id'])
            p_bar.done()
        except Exception as e:
            print(log_name)
            print("failed to process")
            print(e)
            return {}, None
    return {}, df #DataFrame()
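
A possible call, assuming an OpenFOAM-style case folder with solver logs named 'log.simpleFoam*'; the folder path and the regex keys below are invented for illustration and only sketch the intended usage.

keys = {
    "^ExecutionTime": ["ExecutionTime", "ClockTime"],
    "Solving for Ux": ["Ux_initial", "Ux_final", "Ux_iters"],
}
_, df = import_logs("cases/run1", search="log.simpleFoam", keys=keys)
if df is not None:
    print(df.head())
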
Esempio n. 59
0
    def get_plex_downloads(self):
        logger.debug("Tautulli PlexTV :: Retrieving current server version.")

        pms_connect = pmsconnect.PmsConnect()
        pms_connect.set_server_version()

        update_channel = pms_connect.get_server_update_channel()

        logger.debug("Tautulli PlexTV :: Plex update channel is %s." %
                     update_channel)
        plex_downloads = self.get_plextv_downloads(
            plexpass=(update_channel == 'beta'))

        try:
            available_downloads = json.loads(plex_downloads)
        except Exception as e:
            logger.warn(
                "Tautulli PlexTV :: Unable to load JSON for get_plex_updates.")
            return {}

        # Get the updates for the platform
        pms_platform = common.PMS_PLATFORM_NAME_OVERRIDES.get(
            plexpy.CONFIG.PMS_PLATFORM, plexpy.CONFIG.PMS_PLATFORM)
        platform_downloads = available_downloads.get('computer').get(pms_platform) or \
            available_downloads.get('nas').get(pms_platform)

        if not platform_downloads:
            logger.error(
                "Tautulli PlexTV :: Unable to retrieve Plex updates: Could not match server platform: %s."
                % pms_platform)
            return {}

        v_old = helpers.cast_to_int("".join(
            v.zfill(4)
            for v in plexpy.CONFIG.PMS_VERSION.split('-')[0].split('.')[:4]))
        v_new = helpers.cast_to_int("".join(
            v.zfill(4) for v in platform_downloads.get('version', '').split(
                '-')[0].split('.')[:4]))

        if not v_old:
            logger.error(
                "Tautulli PlexTV :: Unable to retrieve Plex updates: Invalid current server version: %s."
                % plexpy.CONFIG.PMS_VERSION)
            return {}
        if not v_new:
            logger.error(
                "Tautulli PlexTV :: Unable to retrieve Plex updates: Invalid new server version: %s."
                % platform_downloads.get('version'))
            return {}

        # Get proper download
        releases = platform_downloads.get('releases', [{}])
        release = next(
            (r for r in releases
             if r['distro'] == plexpy.CONFIG.PMS_UPDATE_DISTRO
             and r['build'] == plexpy.CONFIG.PMS_UPDATE_DISTRO_BUILD),
            releases[0])

        download_info = {
            'update_available': v_new > v_old,
            'platform': platform_downloads.get('name'),
            'release_date': platform_downloads.get('release_date'),
            'version': platform_downloads.get('version'),
            'requirements': platform_downloads.get('requirements'),
            'extra_info': platform_downloads.get('extra_info'),
            'changelog_added': platform_downloads.get('items_added'),
            'changelog_fixed': platform_downloads.get('items_fixed'),
            'label': release.get('label'),
            'distro': release.get('distro'),
            'distro_build': release.get('build'),
            'download_url': release.get('url'),
        }

        return download_info
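
The update check compares versions by zero-padding up to four dotted components and joining them into a single integer; in isolation the trick looks like this (version strings invented):

def version_to_int(version):
    """Mirror the zero-padded version comparison used above."""
    parts = version.split('-')[0].split('.')[:4]
    return int("".join(part.zfill(4) for part in parts))

old = version_to_int('1.29.2.6364-6d72b7cf6')
new = version_to_int('1.30.0.6486-629d58034')
print(new > old)  # True
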
Esempio n. 60
0
 def test_PollMixin_False_then_True(self):
     i = iter([False, True])
     d = self.pm.poll(check_f=lambda: next(i), pollinterval=0.1)
     return d
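
Outside the test harness, the same idea works with any polling loop: an iterator acts as a stateful predicate that answers differently on each call (a plain-Python sketch, not the PollMixin API).

import itertools

answers = itertools.chain([False, False], itertools.repeat(True))

def check():
    return next(answers)        # a different answer on each poll

polls = 0
while not check():
    polls += 1
print(polls)                    # 2
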