Example #1
    def get_best_node(self, service):
        """Returns the 'least loaded' node currently available, increments the
        active count on that node, and decrements the slots currently available
        """
        nodes = self._get_nodes_table(service)
        service = self._get_service_id(service)

        where = [
            nodes.c.service == service, nodes.c.available > 0,
            nodes.c.capacity > nodes.c.current_load, nodes.c.downed == 0
        ]

        query = select([nodes]).where(and_(*where))

        if self._is_sqlite:
            # sqlite doesn't have the 'log' function, and requires
            # coercion to a float for the sorting to work.
            query = query.order_by(nodes.c.current_load * 1.0 /
                                   nodes.c.capacity)
        else:
            # using log() increases floating-point precision on mysql
            # and thus makes the sorting more accurate.
            query = query.order_by(
                sqlfunc.log(nodes.c.current_load) /
                sqlfunc.log(nodes.c.capacity))
        query = query.limit(1)
        res = self._safe_execute(query)
        one = res.fetchone()
        if one is None:
            # unable to get a node
            res.close()
            raise BackendError('unable to get a node')

        nodeid = one.id
        node = str(one.node)
        res.close()

        # updating the table
        where = [nodes.c.service == service, nodes.c.node == node]
        where = and_(*where)
        fields = {
            'available': nodes.c.available - 1,
            'current_load': nodes.c.current_load + 1
        }
        query = update(nodes, where, fields)
        con = self._safe_execute(query, close=True)
        con.close()

        return nodeid, node
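
A minimal in-memory sketch may help read the query above; the node records below are made-up illustration data, not from the source project, and only demonstrate the filter plus the load-ratio ordering used in the SQLite branch.

# Hypothetical in-memory sketch of the selection rule above; the node
# records are illustration data, not from the source project.
nodes = [
    {"id": 1, "node": "https://node1", "capacity": 100,
     "current_load": 80, "available": 5, "downed": 0},
    {"id": 2, "node": "https://node2", "capacity": 100,
     "current_load": 30, "available": 20, "downed": 0},
]
candidates = [n for n in nodes
              if n["available"] > 0
              and n["capacity"] > n["current_load"]
              and n["downed"] == 0]
# Order by load ratio, exactly like the SQLite branch of the query.
best = min(candidates, key=lambda n: n["current_load"] * 1.0 / n["capacity"])
print("%s %s" % (best["id"], best["node"]))  # -> 2 https://node2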
Example #2
    def get_best_node(self, service):
        """Returns the 'least loaded' node currently available, increments the
        active count on that node, and decrements the slots currently available
        """
        nodes = self._get_nodes_table(service)
        service = self._get_service_id(service)

        where = [nodes.c.service == service,
                 nodes.c.available > 0,
                 nodes.c.capacity > nodes.c.current_load,
                 nodes.c.downed == 0]

        query = select([nodes]).where(and_(*where))

        if self._is_sqlite:
            # sqlite doesn't have the 'log' function, and requires
            # coercion to a float for the sorting to work.
            query = query.order_by(nodes.c.current_load * 1.0 /
                                   nodes.c.capacity)
        else:
            # using log() increases floating-point precision on mysql
            # and thus makes the sorting more accurate.
            query = query.order_by(sqlfunc.log(nodes.c.current_load) /
                                   sqlfunc.log(nodes.c.capacity))
        query = query.limit(1)
        res = self._safe_execute(query)
        one = res.fetchone()
        if one is None:
            # unable to get a node
            res.close()
            raise BackendError('unable to get a node')

        nodeid = one.id
        node = str(one.node)
        res.close()

        # updating the table
        where = [nodes.c.service == service, nodes.c.node == node]
        where = and_(*where)
        fields = {'available': nodes.c.available - 1,
                  'current_load': nodes.c.current_load + 1}
        query = update(nodes, where, fields)
        con = self._safe_execute(query, close=True)
        con.close()

        return nodeid, node
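
Examples #1 and #2 build the final UPDATE with the legacy positional form update(table, whereclause, values); newer SQLAlchemy releases favour the generative spelling. A rough equivalent, reusing nodes, service, node and and_ from the example above (a sketch only, not the project's code):

query = (nodes.update()
              .where(and_(nodes.c.service == service, nodes.c.node == node))
              .values(available=nodes.c.available - 1,
                      current_load=nodes.c.current_load + 1))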
Example #3
    def get_best_node(self, service):
        """Returns the 'least loaded' node currently available, increments the
        active count on that node, and decrements the slots currently available
        """
        nodes = self._get_nodes_table(service)
        service = self._get_service_id(service)

        # Pick the least-loaded node that has available slots.
        where = [nodes.c.service == service,
                 nodes.c.available > 0,
                 nodes.c.capacity > nodes.c.current_load,
                 nodes.c.downed == 0,
                 nodes.c.backoff == 0]

        query = select([nodes]).where(and_(*where))

        if self._is_sqlite:
            # sqlite doesn't have the 'log' function, and requires
            # coercion to a float for the sorting to work.
            query = query.order_by(nodes.c.current_load * 1.0 /
                                   nodes.c.capacity)
        else:
            # using log() increases floating-point precision on mysql
            # and thus makes the sorting more accurate.
            query = query.order_by(sqlfunc.log(nodes.c.current_load) /
                                   sqlfunc.log(nodes.c.capacity))
        query = query.limit(1)

        # We may have to re-try the query if we need to release more capacity.
        # This loop allows a maximum of five retries before bailing out.
        for _ in xrange(5):
            res = self._safe_execute(query)
            row = res.fetchone()
            res.close()
            if row is None:
                # Try to release additional capacity from any nodes
                # that are not fully occupied.
                where = and_(nodes.c.service == service,
                             nodes.c.available <= 0,
                             nodes.c.capacity > nodes.c.current_load,
                             nodes.c.downed == 0)
                fields = {
                    'available': sqlfunc.min(
                        nodes.c.capacity * self.capacity_release_rate,
                        nodes.c.capacity - nodes.c.current_load
                    ),
                }
                res = self._safe_execute(update(nodes, where, fields))
                res.close()
                if res.rowcount == 0:
                    break
            else:
                break

        # Did we succeed in finding a node?
        if row is None:
            raise BackendError('unable to get a node')

        nodeid = row.id
        node = str(row.node)

        # Update the node to reflect the new assignment.
        # This is a little racy with concurrent assignments, but no big deal.
        where = [nodes.c.service == service, nodes.c.node == node]
        where = and_(*where)
        fields = {'available': nodes.c.available - 1,
                  'current_load': nodes.c.current_load + 1}
        query = update(nodes, where, fields)
        con = self._safe_execute(query, close=True)
        con.close()

        return nodeid, node
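
The release step in the retry loop caps the freed slots at whatever headroom the node actually has: available becomes min(capacity * capacity_release_rate, capacity - current_load). A quick worked example with assumed numbers:

# Worked example of the release formula with assumed numbers (not from the
# source): a node at 95/100 load and a 10% release rate frees only 5 slots.
capacity, current_load, capacity_release_rate = 100, 95, 0.1
released = min(capacity * capacity_release_rate,   # 10.0 slots allowed by rate
               capacity - current_load)            # but only 5 slots of headroom
print(released)  # -> 5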
Example #4
    def get_best_node(self, service, email):
        """Returns the 'least loaded' node currently available, increments the
        active count on that node, and decrements the slots currently available
        """
        nodes = self._get_nodes_table(service)
        service = self._get_service_id(service)
        query = select([nodes])
        send_to_spanner = self.should_allocate_to_spanner(email)
        if send_to_spanner:
            query = query.where(nodes.c.id == self._spanner_node_id)
        else:
            # Pick the least-loaded node that has available slots.
            query = query.where(and_(
                nodes.c.service == service,
                nodes.c.available > 0,
                nodes.c.capacity > nodes.c.current_load,
                nodes.c.downed == 0,
                nodes.c.backoff == 0
            ))
            if self._is_sqlite:
                # sqlite doesn't have the 'log' function, and requires
                # coercion to a float for the sorting to work.
                query = query.order_by(
                    nodes.c.current_load * 1.0 /
                    nodes.c.capacity)
            else:
                # using log() increases floating-point precision on mysql
                # and thus makes the sorting more accurate.
                query = query.order_by(
                    sqlfunc.log(nodes.c.current_load) /
                    sqlfunc.log(nodes.c.capacity))
            query = query.limit(1)

        # We may have to re-try the query if we need to release more capacity.
        # This loop allows a maximum of five retries before bailing out.
        for _ in xrange(5):
            res = self._safe_execute(query)
            row = res.fetchone()
            res.close()
            if row is None:
                # Try to release additional capacity from any nodes
                # that are not fully occupied.
                where = and_(nodes.c.service == service,
                             nodes.c.available <= 0,
                             nodes.c.capacity > nodes.c.current_load,
                             nodes.c.downed == 0)
                fields = {
                    'available': self._sqlfunc_min(
                        nodes.c.capacity * self.capacity_release_rate,
                        nodes.c.capacity - nodes.c.current_load
                    ),
                }
                res = self._safe_execute(update(nodes, where, fields))
                res.close()
                if res.rowcount == 0:
                    break
            else:
                break

        # Did we succeed in finding a node?
        if row is None:
            raise BackendError('unable to get a node')

        nodeid = row.id
        node = str(row.node)

        # Update the node to reflect the new assignment.
        # This is a little racy with concurrent assignments, but no big
        # deal.
        where = [nodes.c.service == service, nodes.c.node == node]
        where = and_(*where)
        fields = {'current_load': nodes.c.current_load + 1}
        if not send_to_spanner:
            fields['available'] = self._sqlfunc_max(nodes.c.available - 1, 0)
        query = update(nodes, where, fields)
        con = self._safe_execute(query, close=True)
        con.close()

        return nodeid, node
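
Example #4 calls self._sqlfunc_min and self._sqlfunc_max, which are not shown in the snippet. Presumably they hide the dialect difference the comments hint at (SQLite's two-argument min()/max() versus LEAST()/GREATEST() on MySQL); a speculative sketch of such helpers, written here as plain functions rather than methods:

from sqlalchemy import func

# Speculative sketch of the dialect helpers used in Example #4; they are not
# shown in the snippet, so this is an assumption about their intent.
def sqlfunc_min(is_sqlite, *args):
    # SQLite's two-argument min() plays the role of LEAST() elsewhere.
    return func.min(*args) if is_sqlite else func.least(*args)

def sqlfunc_max(is_sqlite, *args):
    return func.max(*args) if is_sqlite else func.greatest(*args)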
Example #5
q = q.filter (DumpFileEntry.binm10 < 16.)
q = q.filter (SimulationEntry.contains (session, [tag for tag in namespace.tags]))

fig, axes = plt.subplots (len (namespace.caches), 1, sharex = True)
try:
    axes [0]
except TypeError:
    axes = [axes]

if namespace.checkcache:
    cache (session,
           q.add_entity (SimulationEntry).filter (DumpFileEntry.state == "presn").all (),
           {cache: getattr (kepler_utils.database.cache, cache) for cache in namespace.caches},
           states = [state for state in namespace.states])

for cache, axis in zip (namespace.caches, axes):
    colors = cycle (["blue", "green", "red"])

    for state in namespace.states:
        qs = q.filter (DumpFileEntry.state == state)
        cign = CorePlot (qs, axis)
    
        binkey = getattr (DumpFileEntry, namespace.binkey)
        if namespace.binlog:
            binkey = func.log (binkey)
        
        s, e = cign.plotScatter (getattr (DumpFileEntry, namespace.binkey), cache,
                                 binkey = binkey, errorbars = True,
                                 color = next (colors), label = state)

    axis.set_ylabel (cache)

if namespace.binlog:
    axis.set_xscale ("log")
axis.set_xlabel (namespace.binkey)
axis.legend (loc = "upper left")
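
The try/except TypeError near the top of Example #5 works around plt.subplots() returning a bare Axes object instead of an array when only one subplot is requested. Under standard matplotlib, squeeze=False achieves the same normalisation; a small sketch with a placeholder count:

import matplotlib.pyplot as plt

# squeeze=False always yields a 2-D array of Axes, even for a single cache
# (placeholder count below; in the example it would be len(namespace.caches)).
n_caches = 1
fig, axes = plt.subplots(n_caches, 1, sharex=True, squeeze=False)
axes = axes[:, 0]  # one axis per cache, as the loop in Example #5 expects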
Example #6

# CUSTOM SELECT FOR ACTIVITY CLIFFS THAT WILL BE MAPPED AGAINST A CLASS
A1 = metadata.tables['chembl.activities'].alias()
A2 = metadata.tables['chembl.activities'].alias()
FP1 = metadata.tables['chembl.compound_rdfps'].alias()
FP2 = metadata.tables['chembl.compound_rdfps'].alias()

join = (A1.join(A2, A2.c.assay_id == A1.c.assay_id)
          .join(FP1, FP1.c.molregno == A1.c.molregno)
          .join(FP2, FP2.c.molregno == A2.c.molregno))

delta_tanimoto = 1 - func.rdkit.tanimoto_sml(FP1.c.circular_fp,
                                             FP2.c.circular_fp)
delta_activity = func.abs(
    func.log(A1.c.standard_value) -
    func.log(A2.c.standard_value)).label('delta_activity')
sali = (delta_activity / delta_tanimoto).label('sali')

whereclause = and_(A1.c.activity_id != A2.c.activity_id,
                   A1.c.molregno < A2.c.molregno,
                   A1.c.standard_type == A2.c.standard_type,
                   A1.c.standard_value > 0, A2.c.standard_value > 0,
                   A1.c.standard_flag > 0, A2.c.standard_flag > 0,
                   A1.c.standard_units == 'nM', A2.c.standard_units == 'nM',
                   A1.c.relation == '=', A2.c.relation == '=', sali >= 1.5)

activity_cliffs = select([
    A1.c.assay_id,
    A1.c.activity_id.label('activity_bgn_id'),
    A2.c.activity_id.label('activity_end_id'),