Example #1
File: lookup.py Project: NeoTim/pymql
    def lookup_mids_of_guids(self, guid_list, varenv):
        # It's sort of the same as before. We have some guids;
        # see if any of them are the target of replaced_by links.
        # If they are, list the node's own mid first, then the mids
        # of the nodes it replaced.
        if not guid_list:
            return {}

        ask_list = set()
        result = {}
        rev = {}
        for g in guid_list:
            # convert the mid directly.
            m = mid.of_guid(g[1:])
            ask_list.add(g)
            result[g] = [m]
            rev[m] = g

        LOG.debug("mql.lookup.mids", "Looking up mids for guids")

        # we look forward, up replaced_by links, and from that node
        # to other replaced_by links,
        # and backwards from the root, for previous ones.

        # +-+  r.b.    +-+
        # |A| -------> |B|
        # +-+          +-+
        #               |
        # +-+           |
        # |C|-----------+
        # +-+
        #
        # in this diagram, we root at B.
        # We list B first but also A and C if present.

        query = [{
            "@guid": ask_list,
            "@pagesize": len(ask_list) + 1,
            "-replaced_by": [{
                "@guid": None,
                ":optional": True
            }]
        }]

        varenv["gr_log_code"] = "mids2guids"
        query_results = self.querier.read(query, varenv)
        varenv.pop("gr_log_code")
        # each result is going to (hopefully) either have a -replaced_by link
        # or a replaced_by one.
        for item in query_results:
            guid = item["@guid"]

            # there are replaced_by links pointing at me.
            if item["-replaced_by"]:
                # me first
                result[guid] = [mid.of_guid(guid[1:])]
                # then everyone else
                for r in item["-replaced_by"]:
                    result[guid].append(mid.of_guid(r["@guid"][1:]))

        return result
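A minimal sketch of the same accumulation, with made-up guids and a hypothetical stand-in for mid.of_guid: the node's own mid is listed first, followed by the mids of the nodes it replaced.

def fake_mid_of_guid(guid):
    # hypothetical stand-in; the real mid.of_guid converts a guid arithmetically
    return "/m/" + guid

# mock query result: node #B has reverse replaced_by links from #A and #C
query_results = [{"@guid": "#B", "-replaced_by": [{"@guid": "#A"}, {"@guid": "#C"}]}]

result = {"#B": [fake_mid_of_guid("B")]}  # default: just the node's own mid
for item in query_results:
    guid = item["@guid"]
    if item["-replaced_by"]:
        result[guid] = [fake_mid_of_guid(guid[1:])]  # me first
        for r in item["-replaced_by"]:
            result[guid].append(fake_mid_of_guid(r["@guid"][1:]))

print(result)  # {'#B': ['/m/B', '/m/A', '/m/C']}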
Example #2
File: namespace.py Project: NeoTim/pymql
    def update_namespaces(self, r):
        '''update this namespace cache based on the result of a graph query'''

        self.last_dateline = r.dateline

        for entry in r:
            (name, nsg, g) = (unquote(entry[0]), '#' + entry[1],
                              '#' + entry[2])
            assert nsg == self.guid
            self.store(name, g)
        LOG.debug('updated namespace %s' % self.guid, '%d entries' % len(r))
Example #3
File: connector.py Project: NeoTim/pymql
  def reset_cost(self):

    LOG.debug('resetting graphd costs')
    # these 3 counters remain for backward compatibility
    self.nrequests = 0
    # -1 because the first attempt is not really a 'retry'
    self.dbretries = -1
    self.qretries = -1

    # all cost info is tracked in this dict
    # this includes cost info returned by GQL
    self.totalcost = defaultdict(float)
Example #4
File: connector.py Project: NeoTim/pymql
  def add_graph_costs(self, costs, dbtime, tries):
    """feed costs from graphdb into self.totalcost."""

    request_cost = coststr_to_dict(costs)
    request_cost['mql_dbtime'] = dbtime
    request_cost['mql_dbtries'] = tries or 1
    request_cost['mql_dbreqs'] = 1
    LOG.debug('graphdrequest.cost %s', request_cost)
    for k, v in request_cost.iteritems():
      if k in ['mm', 'fm']:
        # These are high water marks. Don't sum them.
        self.totalcost[k] = max(v, self.totalcost.get(k))
      else:
        if k in self.totalcost:
          self.totalcost[k] += v
        else:
          self.totalcost[k] = v
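A self-contained sketch of the same accumulation rule, with made-up cost keys: 'mm' and 'fm' are high-water marks and take the maximum, everything else is summed.

from collections import defaultdict

totalcost = defaultdict(float)

def add_costs(request_cost):
    for k, v in request_cost.items():
        if k in ('mm', 'fm'):
            # high-water marks: keep the largest value seen so far
            totalcost[k] = max(v, totalcost.get(k, 0.0))
        else:
            totalcost[k] += v  # defaultdict(float) starts at 0.0

add_costs({'tu': 12.0, 'mm': 5.0})
add_costs({'tu': 3.0, 'mm': 4.0})
print(dict(totalcost))  # {'tu': 15.0, 'mm': 5.0}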
Example #5
File: lookup.py Project: NeoTim/pymql
    def lookup_guids_of_mids(self, mid_list, varenv):
        ask_list = set()
        result = {}
        rev = {}
        # arithmetically compute guids
        for m in mid_list:
            try:
                guid = "#" + mid.to_guid(m)
                ask_list.add(guid)
                # store the whole list here, down below we'll just
                # overwrite the things we got back.
                result[m] = guid  #self.internal_guid_to_id(guid)
                # i need to go back + forth.
                rev[guid] = m
            except (mid.InvalidMIDVersion, mid.InvalidMID) as e:
                result[m] = False
            except (mid.InvalidMunch) as e:
                raise MQLParseError(
                    None, "'%(mid)s' is not a properly formatted mid", mid=m)

        if not len(ask_list):
            return result

        # i'm not caching these.
        LOG.debug("mql.resolve.mids",
                  "Looking up guids for mids",
                  code=len(ask_list))
        # look for replaced_by links off the guids.
        # replaced_by links are unique; if they aren't, then this will signify some
        # end-of-the-world type event.
        query = [{"@guid": ask_list, "replaced_by": {"@guid": None}}]
        # read
        varenv["gr_log_code"] = "guids2mids"
        query_results = self.querier.read(query, varenv)
        varenv.pop("gr_log_code")
        # "now see what we found out..."
        for item in query_results:
            # [guid, replaced_by { guid }]
            guid = item["@guid"]
            rep_by = item["replaced_by"]["@guid"]
            m = rev[guid]
            result[m] = rep_by

        # pray.
        return result
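A minimal sketch of the forward/reverse bookkeeping, with made-up mids and a hypothetical stand-in for mid.to_guid: every mid gets its computed guid as a default answer, and the query results overwrite only the mids whose guids turned out to be replaced.

def fake_to_guid(m):
    # hypothetical stand-in; the real mid.to_guid decodes the mid arithmetically
    return m.split("/")[-1]

mid_list = ["/m/aaa", "/m/bbb"]
result, rev = {}, {}
for m in mid_list:
    guid = "#" + fake_to_guid(m)
    result[m] = guid  # default answer: the computed guid
    rev[guid] = m     # reverse map so query results can be matched back to mids

# mock query result: only #aaa was replaced by another node
query_results = [{"@guid": "#aaa", "replaced_by": {"@guid": "#zzz"}}]
for item in query_results:
    result[rev[item["@guid"]]] = item["replaced_by"]["@guid"]

print(result)  # {'/m/aaa': '#zzz', '/m/bbb': '#bbb'}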
Example #6
File: connector.py Project: NeoTim/pymql
  def write_varenv(self, qs, varenv):
    """Write to the graph the specified "query"."""

    if getattr(self, 'readonly', None):
      raise GraphConnectionError(
          'Tried to write to a read-only graph',
          http_code=500,
          app_code='/mqlwrite/backend/read_only')

    dateline_in = varenv.get('write_dateline', None)

    self.write_occurred = 1

    try:
      r = self._generate_and_transmit_query(qs, varenv, WriteMode)

    except MQLDatelineInvalidError:
      # see read_varenv comment on this
      LOG.info('mqlwrite.dateline.delete',
               'got an invalid dateline, deleting from varenv',
               varenv.get('write_dateline'))
      varenv['write_dateline'] = ''

      r = self._generate_and_transmit_query(qs, varenv, WriteMode)

    dateline_out = r.dateline

    # update our write_dateline in case we do subsequent reads
    # or writes. The new 'write_dateline' is returned to the
    # user for use with subsequent mqlreads or mqlwrites they do
    varenv['write_dateline'] = dateline_out
    varenv['last_write_time'] = time.time()
    log_graph_write(varenv, dateline_in, dateline_out)

    LOG.debug(
        'graph.write_dateline.set',
        '',
        last_write_time=varenv['last_write_time'],
        write_dateline=varenv['write_dateline'])

    # Record that a write has happened and following writes should set
    # the continuation flag.
    varenv['is_write_continuation'] = True

    return r
Example #7
File: connector.py Project: NeoTim/pymql
  def read_varenv(self, qs, varenv):
    """Read from the graph the specified "query"."""
    try:
      # the pymql user provides a 'write_dateline', which should be a valid
      # dateline returned to said user by a previous mqlwrite query
      dateline_in = varenv.get('write_dateline', None)

      r = self._generate_and_transmit_query(qs, varenv, ReadMode)

    except MQLDatelineInvalidError:
      # Drop the datelines out of the varenv,
      # re-generate the query and try again.
      # the main use case here is when sandbox is refreshed
      # and the instance id in the dateline changes. The user's dateline
      # (usually in a cookie) is now invalid until they do a write, or a touch
      LOG.info('mqlread.dateline.delete',
               'got an invalid dateline, deleting from varenv',
               varenv.get('write_dateline'))
      varenv['write_dateline'] = ''

      r = self._generate_and_transmit_query(qs, varenv, ReadMode)

    if not r and varenv.get('graph_noisy'):
      raise EmptyResult('query %s' % qs)

    dateline_out = r.dateline

    # 'dateline' is returned to the original caller of pymql read.
    # though, in practice, it is not passed on by frapi and
    # they really should only update their dateline after doing
    # a write.
    # we do *not* update the internal 'write_dateline' varenv, here.
    # in the case of reads, the idea being the user only needs to
    # demand the level of freshness of their last write, so
    # subsequent reads in this session will use the original
    # 'write_dateline' provided by the caller of pymql read/write.
    # the 'write_dateline' is updated in the event that a write
    # occurs in this session.
    varenv['dateline'] = dateline_out
    log_graph_read(varenv, dateline_in, dateline_out)

    LOG.debug('graph.dateline.set', '', dateline=varenv['dateline'])

    return r
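write_varenv (Example #6) and read_varenv (Example #7) share the same recovery pattern: run the query with the caller's dateline and, if the graph rejects it, clear 'write_dateline' and retry once. A minimal sketch of that pattern, with a stand-in exception class in place of MQLDatelineInvalidError and a hypothetical run_query callable:

class DatelineInvalidError(Exception):
    """Stand-in for MQLDatelineInvalidError."""

def run_with_dateline_retry(run_query, varenv):
    try:
        # first attempt uses whatever dateline the caller supplied
        return run_query(varenv)
    except DatelineInvalidError:
        # e.g. sandbox was refreshed and the instance id in the dateline changed;
        # drop the stale dateline and retry once
        varenv['write_dateline'] = ''
        return run_query(varenv)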
Example #8
File: grparse.py Project: NeoTim/pymql
    def parse_full_reply(self, replystr):
        """
        parse the given reply string from the graph into a bunch of
        nested lists of tokens. Results are in the form:
        [ 'ok', 'id=', '"me;..."', [[['010000..', '01...', ...]]]]
        """
        LOG.debug('graph.result', replystr)
        token_list = graphresult_re.findall(replystr)

        curlist = []

        stack = []
        push_state = stack.append
        pop_state = stack.pop

        for count, tok in enumerate(token_list):
            if tok == '(':
                push_state(curlist)
                curlist = []
            elif tok == ')':
                sublist = curlist
                curlist = pop_state()
                curlist.append(sublist)
            elif tok == '\n':
                raise MQLGraphError(
                    None,
                    'Not allowed a newline in parse_full_reply',
                    reply=replystr,
                    tokens=token_list)
            elif tok == ' ' or tok == '':
                pass
            else:
                curlist.append(tok)

        LOG.debug('graph.result.parsed', 'Parsed %d tokens' % len(token_list))
        if len(stack) != 0:
            raise MQLGraphError(None,
                                'got linefeed in the middle of a reply?',
                                reply=replystr,
                                tokens=token_list,
                                depth=len(stack))

        self.replyqueue.append(curlist)
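A self-contained sketch of the same stack-based nesting, using a hand-built token list in place of the graphresult_re.findall output:

# hand-built token list standing in for graphresult_re.findall(replystr)
token_list = ['ok', '(', '(', '"a"', '"b"', ')', '(', '"c"', ')', ')']

curlist, stack = [], []
for tok in token_list:
    if tok == '(':
        stack.append(curlist)  # remember the parent list
        curlist = []           # start a new nested list
    elif tok == ')':
        sublist = curlist
        curlist = stack.pop()  # go back to the parent
        curlist.append(sublist)
    else:
        curlist.append(tok)

print(curlist)  # ['ok', [['"a"', '"b"'], ['"c"']]]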
Example #9
    def __init__(self,
                 msg,
                 http_code=400,
                 app_code=DEF_ME_CODE,
                 inner_exc=None,
                 **kwds):
        self.msg = msg
        Exception.__init__(self, msg)

        if not is_valid_HTTP_code(http_code):
            http_code = 500
        self.http_status = get_HTTP_err(http_code)
        self.http_code = http_code

        # app_code and api code setup
        codes = app_code.split('/')
        if len(codes) < 3:
            codes = self.DEF_ME_CODE.split('/')
        self.comp_code = '%s/%s' % (self.DEF_PFX, codes[1])
        self.app_code = '%s' % '/'.join(codes[2:])
        self.messages = [self.gen_msgs(**kwds)]

        if not kwds.has_key('error'):
            # don't extract the current frame (__init__)
            stack = traceback.extract_stack()[:-1]
            kwds['traceback'] = '\r\n'.join(traceback.format_list(stack))

        # log inner exception or self
        exc = self
        if inner_exc:
            exc = inner_exc
        comp = app_code[1:].replace('/', '.')
        if exc == self:
            LOG.debug(comp, msg, **kwds)
        else:
            LOG.exception(msg, **kwds)
        self.kwds = kwds
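A small worked example of the app_code split; the DEF_PFX value below is illustrative only (the real one is a class attribute), and the code string is the one used in Example #6:

DEF_PFX = '/api/status/error'  # illustrative value; the real DEF_PFX lives on the class
app_code = '/mqlwrite/backend/read_only'

codes = app_code.split('/')                    # ['', 'mqlwrite', 'backend', 'read_only']
comp_code = '%s/%s' % (DEF_PFX, codes[1])      # '/api/status/error/mqlwrite'
short_code = '/'.join(codes[2:])               # 'backend/read_only'  (becomes self.app_code)
comp = app_code[1:].replace('/', '.')          # 'mqlwrite.backend.read_only'  (log component)

print(comp_code, short_code, comp)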
Example #10
File: lookup.py Project: NeoTim/pymql
    def lookup_ids(self, guid_list, varenv):
        """
        Given a list of guids returns an id for each one,
        using as few queries as possible.

        Returns a dictionary of guid->id.
        """

        ask_list = set()
        result = {}

        if not "asof" in varenv:
            # Step 1: maybe we already know.
            for guid in guid_list:
                if isinstance(guid, unicode):
                    guid = guid.encode("utf-8")

                if guid in self.guids:
                    LOG.debug("mql.lookup.id.cached",
                              "found %s in cache" % guid,
                              value=self.guids[guid])
                    result[guid] = self.guids[guid]
                elif guid not in ask_list:
                    ask_list.add(guid)

            cache = len(ask_list) < 10000

        else:
            for guid in guid_list:
                if isinstance(guid, unicode):
                    guid = guid.encode("utf-8")

                ask_list.add(guid)

            cache = False

        if not ask_list:
            return result

        LOG.debug("mql.lookup.ids", "Lookup ids", code=len(ask_list))

        self.preload(varenv)

        # Step 2: resolve the ask_list
        query = [{
            "@guid": ask_list,
            "@pagesize": len(ask_list) + 1,
            "best_hrid": [{
                ":typeguid": self.best_hrid_guid,
                ":value": None,
                ":optional": True,
            }],
            "-has_key": [{
                ":value": None,
                ":optional": True,
                ":comparator": "octet",
                ":pagesize": 1000,
                "@guid": None,
                "-has_key": [{
                    ":value": None,
                    ":optional": True,
                    ":comparator": "octet",
                    "@guid": None,
                    "-has_key": [{
                        ":value": None,
                        ":optional": True,
                        ":comparator": "octet",
                        "@guid": None,
                    }]
                }]
            }],
            "is_instance_of": {
                "@id": "/type/namespace",
                ":optional": True
            }
        }]

        varenv["gr_log_code"] = "guid2id"
        query_results = self.querier.read(query, varenv)
        varenv.pop("gr_log_code")

        LOG.debug("mql.lookup.id.results", "", results=query_results)

        # now see what we found out...
        # these should be cached.
        leftover_guids = []
        for item in query_results:
            res = self.search_id_result(item, varenv)
            if res:
                result[item["@guid"]] = res

                if cache:
                    self.guids[item["@guid"]] = res

        # every guid in guid_list has to be present in the result.
        for guid in guid_list:
            if guid not in result:
                LOG.debug("mql.lookup.id.notfound", "midifying %s" % guid)
                result[guid] = mid.of_guid(guid[1:])

        return result
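A reduced sketch of the overall flow, with hypothetical stand-ins for the cache, the graph resolver, and mid.of_guid: cached guids are answered immediately, newly resolved ids are cached, and any guid that still has no id falls back to its mid.

cache = {"#abc": "/type/namespace"}  # stands in for self.guids

def fake_resolve(guids):
    # pretend the graph resolved only #def to a human-readable id
    return {g: "/common/topic" for g in guids if g == "#def"}

def fake_mid_of_guid(guid):
    return "/m/" + guid

def lookup_ids(guid_list):
    result = {g: cache[g] for g in guid_list if g in cache}   # step 1: maybe we already know
    ask_list = [g for g in guid_list if g not in result]
    resolved = fake_resolve(ask_list)                         # step 2: resolve the ask_list
    result.update(resolved)
    cache.update(resolved)                                    # remember what the graph told us
    for g in guid_list:
        if g not in result:                                   # no id found: fall back to the mid
            result[g] = fake_mid_of_guid(g[1:])
    return result

print(lookup_ids(["#abc", "#def", "#xyz"]))
# {'#abc': '/type/namespace', '#def': '/common/topic', '#xyz': '/m/xyz'}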