import time
from urllib.parse import urljoin

# `post`, `dataset_is_terminal`, and `DEFAULT_SLEEP_TIME` are assumed to be
# defined elsewhere in this module.


def run_tool(tool_id,
             history_id,
             params,
             api_key,
             galaxy_url,
             wait=True,
             sleep_time=None,
             **kwargs):
    sleep_time = sleep_time or DEFAULT_SLEEP_TIME
    tools_url = urljoin(galaxy_url, 'api/tools')
    payload = {
        'tool_id': tool_id,
    }
    if history_id:
        payload['history_id'] = history_id
    payload['inputs'] = params
    rval = post(api_key, tools_url, payload)
    if wait:
        # Poll until every output dataset reaches a terminal state.
        outputs = list(rval['outputs'])
        while outputs:
            finished_datasets = []
            for i, dataset_dict in enumerate(outputs):
                if dataset_is_terminal(dataset_dict['id'],
                                       api_key=api_key,
                                       galaxy_url=galaxy_url):
                    finished_datasets.append(i)
            # Pop finished datasets in reverse order so the remaining
            # indices stay valid.
            for i in reversed(finished_datasets):
                outputs.pop(i)
            if outputs:
                time.sleep(sleep_time)

    return rval
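
A minimal usage sketch, assuming `post` and `dataset_is_terminal` come from the same module as `run_tool`; the tool id ('cat1', the Concatenate datasets tool), dataset id, history id, and URLs below are placeholders:

result = run_tool(
    tool_id='cat1',
    history_id='YOUR_HISTORY_ID',
    params={'input1': {'src': 'hda', 'id': 'YOUR_DATASET_ID'}},
    api_key='YOUR_API_KEY',
    galaxy_url='https://YOUR_GALAXY_URL/',
)
print([d['id'] for d in result['outputs']])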
Example #2
    def update(self):
        '''
        Update the Question and fetch its Answers.
        '''
        self.lastModified = str(datetime.datetime.now())

        qurl = 'http://www.zhihu.com/question/%d' % (self.qid)
        r = get(qurl)
        if r.status_code != 200:
            return False

        soup = BeautifulSoup(r.text, 'html.parser')
        # Title
        self.title = soup.find('h2', class_='zm-item-title').text.strip()
        # Question body
        self.detail = soup.find('div', id='zh-question-detail').div.text.strip()
        # Topic tags
        self.tags = [a.string.strip() for a in soup.find_all("a", class_='zm-item-tag')]
        # Follower count
        followersCountBlock = soup.find('div', class_='zg-gray-normal')
        if followersCountBlock is None or followersCountBlock.strong is None:
            # When "no one has followed this question yet", followersCountBlock.strong is None
            self.followersCount = 0
        else:
            self.followersCount = parseNum(followersCountBlock.strong.text)

        self.answers = []
        # Answer count
        answersCountBlock = soup.find('h3', id='zh-question-answer-num')
        if answersCountBlock is None:
            if soup.find('span', class_='count') is not None:
                answersCount = 1
            else:
                answersCount = 0
        else:
            answersCount = int(answersCountBlock['data-num'])

        # Answers, fetched 50 per page
        for block in soup.find_all('div', class_='zm-item-answer'):
            if block.find('div', class_='answer-status') is not None:
                continue  # skip answers flagged for revision
            self.answers.append(self._extractAnswer(block))
        if answersCount > 50:
            _xsrf = soup.find('input', attrs={'name': '_xsrf'})['value']
            otherHeaders = {'Referer': qurl}
            for i in range(1, math.ceil(answersCount/50)):  # more answers
                data = {"_xsrf": _xsrf, "method": 'next', 'params': '{"url_token": %d, "pagesize": 50, "offset": %d}' % (self.qid, i*50)}
                r = post('http://www.zhihu.com/node/QuestionAnswerListV2', otherHeaders, data)
                for block in r.json()['msg']:
                    div = BeautifulSoup(block, 'html.parser').div
                    if div.find('div', class_='answer-status') is not None:
                        continue  # skip answers flagged for revision
                    self.answers.append(self._extractAnswer(div))

        return True
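
A hedged usage sketch: only `update()` appears above, so the `Question` constructor signature and the question id below are assumptions for illustration:

q = Question(qid=12345678)  # hypothetical constructor and question id
if q.update():
    print(q.title)
    print('followers:', q.followersCount, 'answers:', len(q.answers))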
Example #3
def get_second_type(data_id, offset):
    """
    获取二级类别的地址
    """
    url = 'https://www.zhihu.com/node/TopicsPlazzaListV2'
    params = '{"topic_id": %d,"offset": %d,"hash_id": ""}'
    postData = {'method': 'next', 'params': params % (int(data_id), int(offset))}
    rst = common.post(url, isjson=True, formdata=postData)
    if rst:
        return rst['msg']
    else:
        return ''
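
A hedged example call; the topic id and offset are illustrative, and `common.post` is assumed to return the decoded JSON body, so the result is the list of HTML fragments from `rst['msg']`:

blocks = get_second_type(data_id=19776749, offset=0)  # hypothetical topic id
for fragment in blocks:
    print(fragment[:80])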
Example #5
import os
import sys

GALAXY_PATH = 'YOUR_GALAXY_PATH'
API_KEY = 'YOUR_API_KEY'
API_URL = 'YOUR_GALAXY_URL/api'
sys.path.insert(1, os.path.join(GALAXY_PATH, 'scripts/api'))
import common  # noqa: E402,I100,I202

# Select "W5 - Metagenomics" from published workflows

workflow_name = 'W5 - Metagenomics'
workflows = common.get(API_KEY, '%s/workflows?show_published=True' % API_URL)
w = [_ for _ in workflows if _['published'] and _['name'] == workflow_name]
assert len(w) == 1
w = w[0]

# Import the workflow to user space

data = {'workflow_id': w['id']}
iw = common.post(API_KEY, '%s/workflows/import' % API_URL, data)
iw_details = common.get(API_KEY, '%s/workflows/%s' % (API_URL, iw['id']))

# Select the "Orione SupMat" library

library_name = 'Orione SupMat'
libraries = common.get(API_KEY, '%s/libraries' % API_URL)
l = [_ for _ in libraries if _['name'] == library_name]
assert len(l) == 1
l = l[0]

# Select the "/Metagenomics/MetagenomicsDataset.fq" dataset

ds_name = '/Metagenomics/MetagenomicsDataset.fq'
contents = common.get(API_KEY, '%s/libraries/%s/contents' % (API_URL, l['id']))
ld = [_ for _ in contents if _['type'] == 'file' and _['name'] == ds_name]
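
The excerpt stops right after locating the dataset; following the script's own assert-and-unpack pattern, the continuation would presumably be:

assert len(ld) == 1
ld = ld[0]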
Example #6
import common

print(common.post())

# File demo: open a text file for writing
fo = open('1.txt', 'w')
print("file name:", fo.name)
print("is closed:", fo.closed)  # False while the file is still open

fo.write("hello python\n")
fo.close()
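
The same demo written with a context manager, which closes the file automatically, is the more idiomatic form; this is a sketch, not part of the original example:

with open('1.txt', 'w') as fo:
    print("file name:", fo.name)
    fo.write("hello python\n")
print("is closed:", fo.closed)  # True once the with-block exits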
Example #8
import os
import sys

GALAXY_PATH = 'YOUR_GALAXY_PATH'
API_KEY = 'YOUR_API_KEY'
API_URL = 'YOUR_GALAXY_URL/api'
sys.path.insert(1, os.path.join(GALAXY_PATH, 'scripts/api'))
import common  # noqa: E402,I100,I202

# Select "W5 - Metagenomics" from published workflows

workflow_name = 'W5 - Metagenomics'
workflows = common.get(API_KEY, f"{API_URL}/workflows?show_published=True")
w = [_ for _ in workflows if _['published'] and _['name'] == workflow_name]
assert len(w) == 1
w = w[0]

# Import the workflow to user space

data = {'workflow_id': w['id']}
iw = common.post(API_KEY, f"{API_URL}/workflows/import", data)
iw_details = common.get(API_KEY, f"{API_URL}/workflows/{iw['id']}")

# Select the "Orione SupMat" library

library_name = 'Orione SupMat'
libraries = common.get(API_KEY, f"{API_URL}/libraries")
l = [_ for _ in libraries if _['name'] == library_name]
assert len(l) == 1
l = l[0]

# Select the "/Metagenomics/MetagenomicsDataset.fq" dataset

ds_name = '/Metagenomics/MetagenomicsDataset.fq'
contents = common.get(API_KEY, f"{API_URL}/libraries/{l['id']}/contents")
ld = [_ for _ in contents if _['type'] == 'file' and _['name'] == ds_name]
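
A hedged sketch of what running the imported workflow on the selected dataset could look like with the legacy Galaxy workflow API (POST to /api/workflows with a ds_map); the single-input assumption and the history name are illustrative, not part of the original script:

assert len(ld) == 1
ld = ld[0]

input_step = next(iter(iw_details['inputs']))  # assume exactly one input step
wf_data = {
    'workflow_id': iw['id'],
    'history': f"{workflow_name} output",  # results go to a new history
    'ds_map': {input_step: {'src': 'ld', 'id': ld['id']}},
}
common.post(API_KEY, f"{API_URL}/workflows", wf_data)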