Example #1
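All of the examples assume module-level context that this listing does not show: urllib (for urlencode), an ElementTree implementation for the XML parsing, and the QueryParams, Records and openURL names defined alongside these functions. A hedged sketch of the Python 2 era imports, for orientation only:

# assumed surrounding context, not shown on the original page:
import urllib                        # provides urllib.urlencode (Python 2)
from xml.etree import ElementTree    # or the older standalone elementtree package
# QueryParams, Records and openURL are defined in the same package as the
# functions below.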
def GetLinks(ids, query=None, url=QueryParams.linkBase, conn=None):
    if not conn:
        try:
            iter(ids)
        except TypeError:
            ids = [ids]
        if not query:
            query = QueryParams.details()
        query['id'] = ','.join(map(str, ids))
        # set 'cmd' before encoding the query, otherwise it never reaches the request
        query['cmd'] = 'neighbor'
        conn = openURL(url, urllib.urlencode(query))

    pubmed = ElementTree.parse(conn)
    linkset = pubmed.find('LinkSet/LinkSetDb')
    scores = []
    scoreNorm = 1.0
    for link in linkset.getiterator('Link'):
        id = link.findtext('Id')
        score = float(link.findtext('Score'))
        scores.append([id, score])
        # we'll normalize scores by the score for the first of the query ids:
        if id == ids[0]:
            scoreNorm = score
    for i in range(len(scores)):
        id, score = scores[i]
        scores[i] = id, score / scoreNorm
    return tuple(scores)
Example #2
def GetLinks(ids,query=None,url=QueryParams.linkBase,conn=None):
  if not conn:
    try:
      iter(ids)
    except TypeError:
      ids = [ids,]
    if not query:
      query = QueryParams.details()
    query['id'] = ','.join(map(str,ids))
    # set 'cmd' before encoding the query, otherwise it never reaches the request
    query['cmd'] = 'neighbor'
    conn = openURL(url,urllib.urlencode(query))

  pubmed = ElementTree.parse(conn)
  linkset = pubmed.find('LinkSet/LinkSetDb')
  scores = []
  scoreNorm = 1.0
  for link in linkset.getiterator('Link'):
    id = link.findtext('Id')
    score = float(link.findtext('Score'))
    scores.append([id,score])
    # we'll normalize scores by the score for the first of the query ids:
    if id == ids[0]:
      scoreNorm = score
  for i in range(len(scores)):
    id,score = scores[i]
    scores[i] = id,score/scoreNorm
  return tuple(scores)
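A brief usage sketch for GetLinks (not part of the original examples): assuming the function is importable from the module that defines it (pubmed_searches below is only a placeholder name), a call against the live NCBI ELink service could look like this; it returns (id, score) pairs normalized by the score of the first query id.

# usage sketch only; 'pubmed_searches' is a placeholder for the defining module
from pubmed_searches import GetLinks

scores = GetLinks(['11960484'])          # PubMed id reused from the doctests below
for pmid, score in scores:
    print '%s %.3f' % (pmid, score)      # Python 2 print, matching the examples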
Example #3
def GetRecords(ids, query=None, url=QueryParams.fetchBase, conn=None):
    """ gets a set of document summary records for the ids provided

  >>> ids = ['11960484']
  >>> recs = GetRecords(ids,conn=open(os.path.join(testDataDir,'records.xml'),'r'))
  >>> len(recs)
  1
  >>> rec = recs[0]
  >>> rec.PubMedId
  '11960484'
  >>> rec.Authors
  u'Penzotti JE, Lamb ML, Evensen E, Grootenhuis PD'
  >>> rec.Title
  u'A computational ensemble pharmacophore model for identifying substrates of P-glycoprotein.'
  >>> rec.Source
  u'J Med Chem'
  >>> rec.Volume
  '45'
  >>> rec.Pages
  '1737-40'
  >>> rec.PubYear
  '2002'
  >>> rec.Abstract[:10]
  u'P-glycopro'

  We've also got access to keywords:
  >>> str(rec.keywords[0])
  'Combinatorial Chemistry Techniques'
  >>> str(rec.keywords[3])
  'Indinavir / chemistry'

  and chemicals:
  >>> rec.chemicals[0]
  'P-Glycoprotein'
  >>> rec.chemicals[2]
  'Nicardipine <55985-32-5>'
  
  
  """
    if not conn:
        try:
            iter(ids)
        except TypeError:
            ids = [ids]
        if not query:
            query = QueryParams.details()
        query['id'] = ','.join(map(str, ids))
        conn = openURL(url, urllib.urlencode(query))

    pubmed = ElementTree.parse(conn)
    res = []
    for article in pubmed.getiterator('PubmedArticle'):
        rec = Records.JournalArticleRecord(article)
        if rec.PubMedId in ids:
            res.append(rec)
    return tuple(res)
Example #4
def GetRecords(ids,query=None,url=QueryParams.fetchBase,conn=None):
  """ gets a set of document summary records for the ids provided

  >>> ids = ['11960484']
  >>> recs = GetRecords(ids,conn=open(os.path.join(testDataDir,'records.xml'),'r'))
  >>> len(recs)
  1
  >>> rec = recs[0]
  >>> rec.PubMedId
  '11960484'
  >>> rec.Authors
  u'Penzotti JE, Lamb ML, Evensen E, Grootenhuis PD'
  >>> rec.Title
  u'A computational ensemble pharmacophore model for identifying substrates of P-glycoprotein.'
  >>> rec.Source
  u'J Med Chem'
  >>> rec.Volume
  '45'
  >>> rec.Pages
  '1737-40'
  >>> rec.PubYear
  '2002'
  >>> rec.Abstract[:10]
  u'P-glycopro'

  We've also got access to keywords:
  >>> str(rec.keywords[0])
  'Combinatorial Chemistry Techniques'
  >>> str(rec.keywords[3])
  'Indinavir / chemistry'

  and chemicals:
  >>> rec.chemicals[0]
  'P-Glycoprotein'
  >>> rec.chemicals[2]
  'Nicardipine <55985-32-5>'
  
  
  """
  if not conn:
    try:
      iter(ids)
    except TypeError:
      ids = [ids,]
    if not query:
      query = QueryParams.details()
    query['id'] = ','.join(map(str,ids))
    conn = openURL(url,urllib.urlencode(query))

  pubmed = ElementTree.parse(conn)
  res = []
  for article in pubmed.getiterator('PubmedArticle'):
    rec = Records.JournalArticleRecord(article)
    if rec.PubMedId in ids:
      res.append(rec)
  return tuple(res)
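Beyond the file-backed doctests above, a live EFetch call would follow the same pattern; pubmed_searches is again only a placeholder for the defining module, and network access to NCBI is required.

from pubmed_searches import GetRecords   # placeholder module name

recs = GetRecords(['11960484'])
for rec in recs:
    print rec.PubMedId, rec.Title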
Example #5
def GetSummaries(ids, query=None, url=QueryParams.summaryBase, conn=None):
    """ gets a set of document summary records for the ids provided

  >>> ids = ['11960484']
  >>> summs = GetSummaries(ids,conn=open(os.path.join(testDataDir,'summary.xml'),'r'))
  >>> len(summs)
  1
  >>> rec = summs[0]
  >>> isinstance(rec,Records.SummaryRecord)
  1
  >>> rec.PubMedId
  '11960484'
  >>> rec.Authors
  'Penzotti JE, Lamb ML, Evensen E, Grootenhuis PD'
  >>> rec.Title
  'A computational ensemble pharmacophore model for identifying substrates of P-glycoprotein.'
  >>> rec.Source
  'J Med Chem'
  >>> rec.Volume
  '45'
  >>> rec.Pages
  '1737-40'
  >>> rec.HasAbstract
  '1'

  """
    if not conn:
        try:
            iter(ids)
        except TypeError:
            ids = [ids]
        if not query:
            query = QueryParams.details()
        ids = map(str, ids)
        query['id'] = ','.join(ids)
        conn = openURL(url, urllib.urlencode(query))
    pubmed = ElementTree.parse(conn)
    res = []
    for summary in pubmed.getiterator('DocSum'):
        rec = Records.SummaryRecord(summary)
        if rec.PubMedId in ids:
            res.append(rec)
            ids.remove(rec.PubMedId)

    return tuple(res)
Example #6
def GetSummaries(ids,query=None,url=QueryParams.summaryBase,conn=None):
  """ gets a set of document summary records for the ids provided

  >>> ids = ['11960484']
  >>> summs = GetSummaries(ids,conn=open(os.path.join(testDataDir,'summary.xml'),'r'))
  >>> len(summs)
  1
  >>> rec = summs[0]
  >>> isinstance(rec,Records.SummaryRecord)
  1
  >>> rec.PubMedId
  '11960484'
  >>> rec.Authors
  'Penzotti JE, Lamb ML, Evensen E, Grootenhuis PD'
  >>> rec.Title
  'A computational ensemble pharmacophore model for identifying substrates of P-glycoprotein.'
  >>> rec.Source
  'J Med Chem'
  >>> rec.Volume
  '45'
  >>> rec.Pages
  '1737-40'
  >>> rec.HasAbstract
  '1'

  """
  if not conn:
    try:
      iter(ids)
    except TypeError:
      ids = [ids,]
    if not query:
      query = QueryParams.details()
    ids = map(str,ids)
    query['id'] = ','.join(ids)
    conn = openURL(url,urllib.urlencode(query))
  pubmed = ElementTree.parse(conn)
  res = []
  for summary in pubmed.getiterator('DocSum'):
    rec = Records.SummaryRecord(summary)
    if rec.PubMedId in ids:
      res.append(rec)
      ids.remove(rec.PubMedId)

  return tuple(res)
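The summary variant is used the same way; as before, pubmed_searches is a placeholder and the call goes to the NCBI ESummary service.

from pubmed_searches import GetSummaries  # placeholder module name

summs = GetSummaries(['11960484'])
for rec in summs:
    print rec.PubMedId, rec.Source, rec.Volume, rec.Pages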
Example #7
def CheckForLinks(ids,query=None,url=QueryParams.linkBase,conn=None):
  if not conn:
    try:
      iter(ids)
    except TypeError:
      ids = [ids,]
    if not query:
      query = QueryParams.details()
    query['id'] = ','.join(map(str,ids))
    # set 'cmd' before encoding the query, otherwise it never reaches the request
    query['cmd'] = 'ncheck'
    conn = openURL(url,urllib.urlencode(query))
  pubmed = ElementTree.parse(conn)

  checklist = pubmed.find('LinkSet/IdCheckList')
  recs = [Records.LinkRecord(x) for x in checklist.getiterator('Id')]

  res = {}
  for rec in recs:
    id = rec.PubMedId
    res[id] = rec.HasNeighbor
  return res
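CheckForLinks returns a dict keyed by PubMed id whose values are the HasNeighbor flags from the ELink 'ncheck' response; a usage sketch under the same placeholder-module assumption:

from pubmed_searches import CheckForLinks  # placeholder module name

res = CheckForLinks(['11960484'])
for pmid, hasNeighbor in res.items():
    print pmid, hasNeighbor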