Example #1
 def _crawl(self, user_id, album_id):
   self._login()
   page = get('http://photo.renren.com/photo/'+user_id+'/album-'+album_id)
   first_photo_url = re.search('http://photo.renren.com/photo/'+user_id+
       '/photo-.*false', page).group()
   page = get(first_photo_url)
   file_url = re.findall('voiceUrl":"(.*?.mp3)"', page)
   return [url+'\n' for url in file_url]
Example #2
def history_order(socket):
    request = order_pb2.History_Order_Request()
    common.send(socket, common_pb2.HISTORY_ORDER, request)
    (protocol, data) = common.get(socket)
    response = order_pb2.History_Order_Response()
    response.ParseFromString(data)
    print(response)
Example #3
def main( options ):
    api_key = options.api
    base_tool_shed_url = options.tool_shed_url.rstrip( '/' )
    my_writable = options.my_writable
    one_per_request = options.one_per_request
    skip_file = options.skip_file
    if skip_file:
        encoded_ids_to_skip = read_skip_file( skip_file )
    else:
        encoded_ids_to_skip = []
    if string_as_bool( one_per_request ):
        url = '%s/api/repositories/repository_ids_for_setting_metadata?key=%s&my_writable=%s' % ( base_tool_shed_url, api_key, str( my_writable ) )
        repository_ids = get( url, api_key )
        for repository_id in repository_ids:
            if repository_id in encoded_ids_to_skip:
                print "--------"
                print "Skipping repository with id %s because it is in skip file %s" % ( str( repository_id ), str( skip_file ) )
                print "--------"
            else:
                data = dict( repository_id=repository_id )
                url = '%s/api/repositories/reset_metadata_on_repository' % base_tool_shed_url
                try:
                    submit( url, data, options.api )
                except Exception, e:
                    log.exception( ">>>>>>>>>>>>>>>Blew up on data: %s, exception: %s" % ( str( data ), str( e ) ) )
                    # An nginx timeout undoubtedly occurred.
                    sys.exit( 1 )
Example #4
def get_artical_url(url, referer):
    """
    通过翻页的方式获取下一个动态加载的文章url
    """
    header = common.Iheader
    header['referer'] = referer
    rst = common.get(url, isjson=True, header=header)
    if rst == '':
        return None
    # The data contains two kinds of entries, answers and articles, which need to be handled separately
    urls = list()
    artical_list = list()
    question_list = list()
    for data in rst['data']:
        if data['target']['type'] == 'answer':
            question_id = data['target']['question']['id']
            answser_id = data['target']['id']
            question_url = '%s/question/%d/answer/%d' % (
                common.domain_name, question_id, answser_id)
            question_list.append(question_url)
            print('get_artical_url question_url is: ', question_url)
        elif data['target']['type'] == 'article':
            artical_url = data['target']['url']
            artical_list.append(artical_url)
            print('get_artical_url artical_url is: ', artical_url)
        else:
            print('Detected an unexpected entry type....')
    next_url = rst['paging']['next']
    is_end = rst['paging']['is_end']
    return artical_list, question_list, next_url, is_end
Example #5
def main(options):
    api_key = options.api
    base_tool_shed_url = options.tool_shed_url.rstrip('/')
    my_writable = options.my_writable
    one_per_request = options.one_per_request
    skip_file = options.skip_file
    if skip_file:
        encoded_ids_to_skip = read_skip_file(skip_file)
    else:
        encoded_ids_to_skip = []
    if string_as_bool(one_per_request):
        url = '%s/api/repositories/repository_ids_for_setting_metadata?key=%s&my_writable=%s' % (
            base_tool_shed_url, api_key, str(my_writable))
        repository_ids = get(url, api_key)
        for repository_id in repository_ids:
            if repository_id in encoded_ids_to_skip:
                print "--------"
                print "Skipping repository with id %s because it is in skip file %s" % (
                    str(repository_id), str(skip_file))
                print "--------"
            else:
                data = dict(repository_id=repository_id)
                url = '%s/api/repositories/reset_metadata_on_repository' % base_tool_shed_url
                try:
                    submit(url, data, options.api)
                except Exception, e:
                    log.exception(
                        ">>>>>>>>>>>>>>>Blew up on data: %s, exception: %s" %
                        (str(data), str(e)))
                    # An nginx timeout undoubtedly occurred.
                    sys.exit(1)
Example #6
 def get_comment_info(self, question_id, offset=0, limit=5, referer=None):
     """
     获取评论信息
     """
     # print('question_id: ', question_id)
     url = ('https://www.zhihu.com/api/v4/questions/%s/answers?'
            'include=data[*].is_normal,admin_closed_comment,'
            'reward_info,is_collapsed,annotation_action,annotation'
            '_detail,collapse_reason,is_sticky,collapsed_by,'
            'suggest_edit,comment_count,can_comment,content,'
            'editable_content,voteup_count,reshipment_settings,'
            'comment_permission,created_time,updated_time,review_'
            'info,relevant_info,question,excerpt,relationship.'
            'is_authorized,is_author,voting,is_thanked,is_nothelp;'
            'data[*].mark_infos[*].url;data[*].author.follower_'
            'count,badge[*].topics&offset=%s&limit=%s&sort_by='
            'default') % (str(question_id), str(offset), str(limit))
     header = None
     if referer:
         header = common.Iheader
         header['referer'] = referer
         header['x-requested-with'] = 'fetch'
         header['x-udid'] = 'AGDmMwbDMQ6PTgvzf0j8efogt4vh5K_aSXk='
     # print(url)
     return common.get(url, True, header)
Example #7
def logout(socket):
    request = washer_pb2.Logout_Request()
    common.send(socket, common_pb2.LOGOUT, request)
    (_, body) = common.get(socket)
    if body:
        response = washer_pb2.Logout_Response()
        response.ParseFromString(body)
        print(response)
Example #8
def order_detail(socket):
    request = order_pb2.Order_Detail_Request()
    request.order_id = "58bfabef7f439e2303a20426"
    common.send(socket, common_pb2.ORDER_DETAIL, request)
    (protocol, data) = common.get(socket)
    response = order_pb2.Order_Detail_Response()
    response.ParseFromString(data)
    print(response)
Example #9
File: profile.py Project: wangycthu/srt
 def _crawl(self, token_list, user_id, until_time=None):
   token = token_list[-1]
   response_profile = json.loads(get(url + 'profile/get', [
     ('access_token', token),
     ('userId', user_id),
   ]))
   if 'response' not in response_profile:
     return None
   response_user = json.loads(get(url + 'user/get', [
     ('access_token', token),
     ('userId', user_id),
   ]))
   if 'response' not in response_user:
     return None
   response = response_profile['response']
   response.update(response_user['response'])
   return response
Example #10
def get_histories():
    """
    Return a list of dictionaries that describe the current user's histories.
    """
    # notice how similar this is to users.get_users - in fact only the name and doc has changed
    apikey = setup.get_apikey()
    full_url = setup.get_base_url() + RESOURCE_URL
    return common.get( apikey, full_url )
Example #11
def cancel_order(socket):
    request = order_pb2.Cancel_Order_Request()
    request.order_id = "58bfad717f439e23288ac4e2"
    common.send(socket, common_pb2.CANCEL_ORDER, request)
    (protocol, data) = common.get(socket)
    response = order_pb2.Cancel_Order_Response()
    response.ParseFromString(data)
    print(response)
Example #12
def stop_work(socket):
    request = washer_pb2.Stop_Work_Request()
    common.send(socket, common_pb2.STOP_WORK, request)
    body = common.get(socket)
    if body:
        (protocol, data) = body
        response = washer_pb2.Stop_Work_Response()
        response.ParseFromString(data)
        print(response)
Example #13
 def __init__(self, naptan):
     self.naptan = naptan
     data = get('stop', naptan).nodes
     # Latitude and longitude are swapped in the API
     self.location = numpy.array(
         [float(str(data['lat'])),
          float(str(data['long']))])
     self.direction = Direction(str(data['OnStreet_CompassPoint']))
     Stop.stops[naptan] = self
Example #14
 def _crawl(self, token_list, user_id, until_time=None):
     token = token_list[-1]
     response_profile = json.loads(
         get(url + 'profile/get', [
             ('access_token', token),
             ('userId', user_id),
         ]))
     if 'response' not in response_profile:
         return None
     response_user = json.loads(
         get(url + 'user/get', [
             ('access_token', token),
             ('userId', user_id),
         ]))
     if 'response' not in response_user:
         return None
     response = response_profile['response']
     response.update(response_user['response'])
     return response
Example #15
def order_feedback(socket):
    print("protocol: order_feedback")
    request = order_pb2.Order_Feedback_Request()
    request.order_id = "58bfabef7f439e2303a20426"
    request.score = 5
    common.send(socket, common_pb2.ORDER_FEEDBACK, request)
    (protocol, data) = common.get(socket)
    response = order_pb2.Order_Feedback_Response()
    response.ParseFromString(data)
    print(response)
Example #16
    def update(self):
        '''
        Update the Question and fetch its Answers
        '''
        self.lastModified = str(datetime.datetime.now())

        qurl = 'http://www.zhihu.com/question/%d' % (self.qid)
        r = get(qurl)
        if r.status_code != 200:
            return False

        soup = BeautifulSoup(r.text)
        # Title
        self.title = soup.find('h2', class_='zm-item-title').text.strip()
        # Content
        self.detail = soup.find('div', id='zh-question-detail').div.text.strip()
        # Topic tags it belongs to
        self.tags = [a.string.strip() for a in soup.find_all("a", class_='zm-item-tag')]
        # Number of followers
        followersCountBlock = soup.find('div', class_='zg-gray-normal')
        if followersCountBlock is None or followersCountBlock.strong is None:
            # When "no one has followed this question yet", followersCountBlock.strong is None
            self.followersCount = 0
        else:
            self.followersCount = parseNum(followersCountBlock.strong.text)

        self.answers = []
        # Number of answers
        answersCountBlock = soup.find('h3', id='zh-question-answer-num')
        if answersCountBlock is None:
            if soup.find('span', class_='count') is not None:
                answersCount = 1
            else:
                answersCount = 0
        else:
            answersCount = int(answersCountBlock['data-num'])

        # Answer section, 50 per page
        for block in soup.find_all('div', class_='zm-item-answer'):
            if block.find('div', class_='answer-status') is not None:
                continue  # ignore answers suggested for revision
            self.answers.append(self._extractAnswer(block))
        if answersCount > 50:
            _xsrf = soup.find('input', attrs={'name': '_xsrf'})['value']
            otherHeaders = {'Referer': qurl}
            for i in range(1, math.ceil(answersCount/50)):  # more answers
                data = {"_xsrf": _xsrf, "method": 'next', 'params': '{"url_token": %d, "pagesize": 50, "offset": %d}' % (self.qid, i*50)}
                r = post('http://www.zhihu.com/node/QuestionAnswerListV2', otherHeaders, data)
                for block in r.json()['msg']:
                    div = BeautifulSoup(block).div
                    if div.find('div', class_='answer-status') is not None:
                        continue  # ignore answers suggested for revision
                    self.answers.append(self._extractAnswer(div))

        return True
Example #17
def get_near_washer(socket):
    pb = washer_pb2.Near_Washer_Request()
    pb.city_code = 179
    pb.longitude = 120.21937542
    pb.latitude = 30.25924446
    common.send(socket, washer_pb2.NEAR_WASHER, pb)
    body = common.get(socket)
    if body:
        resp = washer_pb2.Near_Washer_Response()
        resp.ParseFromString(body)
        print(resp)
Example #18
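# Fetches a configuration blob by type and tag from the Ambari clusters API,
# checks for a 200 response, and returns the first config item from the parsed JSON.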
def get_cluster_config(ambari_url, user, password, cluster_name, config_type,
                       config_tag, connection_timeout):
    r = get(
        ambari_url, user, password,
        '/api/v1/clusters/{0}/configurations?type={1}&tag={2}'.format(
            cluster_name, config_type, config_tag), connection_timeout)
    assert_return_code(r, 200, 'cluster configurations')
    config = json.loads(r.content)
    return parse_config(
        r, config, lambda config: config['items'][0]['properties'] is not None,
        lambda config: config['items'][0])
Example #19
def request_authcode(socket):
    requestAuthcode = washer_pb2.Request_Authcode_Request()
    requestAuthcode.phone = WASHER_PHONE
    requestAuthcode.signature = 'signature'
    common.send(socket, common_pb2.REQUEST_AUTHCODE, requestAuthcode)
    (protocol, body) = common.get(socket)
    if body:
        raResponse = washer_pb2.Request_Authcode_Response()
        raResponse.ParseFromString(body)
        print('request_authcode response:{!r}'.format(raResponse))
        return raResponse.authcode
    return False
Example #20
def users_list_old():
  try:
    users = api.get(key, users_url)
    print type(users) 
    data = {}
    for n, i in enumerate(users):
      for k,v in i.items():
        data[k] = v
      print data 
  except urllib2.URLError, e:
    print str(e)
    sys.exit( 1 )
Example #21
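# Simulates a washer client: log in, announce the start of work, then react to
# pushed messages. A START_WORK reply triggers a location refresh, while an
# ALLOCATE_ORDER push is answered with processing and finish-order requests.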
def start_work(socket):
    result = login(socket)
    if result is None:
        print('login first')
        return
    request = washer_pb2.Start_Work_Request()
    request.city_code = 179
    request.longitude = 120.025806
    request.latitude = 30.246185
    common.send(socket, common_pb2.START_WORK, request)
    print("start work, protocol:{}".format(common_pb2.START_WORK))
    while True:
        body = common.get(socket)
        if not body:
            break
        else:
            (protocol, data) = body
            if protocol == common_pb2.START_WORK:
                print("protocol: start_work")
                response = washer_pb2.Start_Work_Response()
                response.ParseFromString(data)
                print(response)

                print("protocol: fresh_location")
                request = washer_pb2.Fresh_Location_Request()
                request.city_code = 179
                request.longitude = 120.025806
                request.latitude = 30.246185
                common.send(socket, common_pb2.FRESH_LOCATION, request)
                break

            elif protocol == common_pb2.ALLOCATE_ORDER:
                print("protocol: allocate_order")
                response = order_pb2.Allocate_Order_Push()
                response.ParseFromString(data)
                print(response)

                print("protocol: processing_order")
                request = order_pb2.Processing_Order_Request()
                request.order_id = response.order_id
                common.send(socket, common_pb2.PROCESSING_ORDER, request)

                print("protocol: finish_order")
                request = order_pb2.Finish_Order_Request()
                request.order_id = response.order_id
                common.send(socket, common_pb2.FINISH_ORDER, request)

            elif protocol == common_pb2.FINISH_ORDER:
                print("protocol: finish_order")
                response = order_pb2.Finish_Order_Response()
                response.ParseFromString(data)
                print(response)
Example #22
def fresh_location(socket):
    pb = washer_pb2.Fresh_Location_Request()
    pb.longitude = 120.21937542
    pb.latitude = 30.25924446
    pb.city_code = 179

    common.send(socket, common_pb2.FRESH_LOCATION, pb)
    (protocol, body) = common.get(socket)
    if body:
        resp = washer_pb2.Fresh_Location_Response()
        resp.ParseFromString(body)
        print('fresh_location result:')
        print(resp.error_code)
Example #23
def get_config_types(ambari_url, user, password, cluster_name,
                     connection_timeout):
    r = get(
        ambari_url, user, password,
        '/api/v1/clusters/{0}?fields=Clusters/desired_configs'.format(
            cluster_name), connection_timeout)
    assert_return_code(r, 200, 'cluster config types')
    config = json.loads(r.content)
    config_types = parse_config(
        r, config,
        lambda conf: conf['Clusters']['desired_configs'] is not None,
        lambda conf: conf['Clusters']['desired_configs'])
    return config_types
Example #24
def get_second_info(url, referer):
    """
    访问二级类别的详细页面,获取初次加载的文章url以及
    后续文章url的请求地址
    """
    print('get_second_info url is: ', url)
    header = common.Iheader
    header['referer'] = referer
    header['Connection'] = 'keep-alive'
    header['Host'] = 'www.zhihu.com'
    header['Upgrade-Insecure-Requests'] = '1'
    html = common.get(url, header=header)
    return html
Example #25
def main():
    ENVIRONMENTS.extend(common.get('/environments').json())
    test_table()
    prepare_users()

    with ProcessPoolExecutor(
            max_workers=os.cpu_count(),
            mp_context=mp.get_context('fork'),
    ) as executor:
        try:
            common.login()
            run_tests(executor)
            executor.shutdown(wait=True)
        finally:
            common.logout()
Example #26
def create_sequencer_configuration(key,
                                   base_url,
                                   request_form_filename,
                                   sample_form_filename,
                                   request_type_filename,
                                   email_addresses,
                                   return_formatted=True):
    #create request_form
    data = {}
    data['xml_text'] = open(request_form_filename).read()
    request_form = submit(key,
                          "%sforms" % base_url,
                          data,
                          return_formatted=False)[0]
    #create sample_form
    data = {}
    data['xml_text'] = open(sample_form_filename).read()
    sample_form = submit(key,
                         "%sforms" % base_url,
                         data,
                         return_formatted=False)[0]
    #get user ids
    user_ids = [
        user['id'] for user in get(key, "%susers" % base_url)
        if user['email'] in email_addresses
    ]
    #create role, assign to user
    data = {}
    data['name'] = "request_type_role_%s_%s_%s name" % (
        request_form['id'], sample_form['id'], '_'.join(email_addresses))
    data['description'] = "request_type_role_%s_%s_%s description" % (
        request_form['id'], sample_form['id'], '_'.join(email_addresses))
    data['user_ids'] = user_ids
    role_ids = [
        role['id'] for role in submit(
            key, "%sroles" % base_url, data, return_formatted=False)
    ]
    #create request_type
    data = {}
    data['request_form_id'] = request_form['id']
    data['sample_form_id'] = sample_form['id']
    data['role_ids'] = role_ids
    data['xml_text'] = open(request_type_filename).read()
    return submit(key,
                  "%srequest_types" % base_url,
                  data,
                  return_formatted=return_formatted
                  )  #create and print out results for request type
Example #27
def login(socket):
    washer = washer_pb2.Login_Request()
    washer.phone = WASHER_PHONE
    washer.password = '******'
    washer.signature = 'signature'
    #washer.city_code = 179
    #washer.longitude = 120.025806
    #washer.latitude  = 30.246185
    common.send(socket, common_pb2.LOGIN, washer)
    (protocol, body) = common.get(socket)
    if body:
        washerResponse = washer_pb2.Login_Response()
        washerResponse.ParseFromString(body)
        print(washerResponse)
        return True
    print('login failure')
Example #28
def users_list():
  try:
    users = api.get(key, users_url)
    data = {}
    for user in users:
      if (user['email'] == "$ARGV[0]@uab.edu"):
        blazerid = user['email'].partition("@")[0].lower()
        userid = user['id']
        response = create_lib(blazerid)
        print type(response)
        print len(response)
        print response
        set_lib_perms(response['id'], userid)
  except urllib2.URLError, e:
    print str(e)
    sys.exit( 1 )
Example #29
 def update_data(self, user_id):
   print 'get imgs of user ' + user_id
   album_list = self._get_album(user_id)
   for album in album_list:
     print 'get imgs in album ' + album + ' of user ' + user_id
     img_list = self._get_img_list(user_id, album)
     if not img_list:
       continue
     path = img_data_path + user_id + '/' + album + '/'
     if not os.path.exists(path):
       os.makedirs(path)
     for img_id, img_url in img_list.iteritems():
       filename = path + img_id + '.jpg'
       if os.path.exists(filename):
         continue
       with open(filename, 'wb') as ofile:
         ofile.write(get(img_url, {}))
Example #30
 def update_data(self, user_id):
   print 'get voices of user ' + user_id
   album_list = self._get_album(user_id)
   for album in album_list:
     print 'get voices in album ' + album + ' of user ' + user_id
     voice_list = self._get_voice_list(user_id, album)
     if not voice_list:
       continue
     path = voice_data_path + user_id + '/' + album + '/'
     if not os.path.exists(path):
       os.makedirs(path)
     for voice_url in voice_list:
       filename = path + voice_url[voice_url.rfind('/'):] + '.mp3'
       if os.path.exists(filename):
         continue
       with open(filename, 'wb') as ofile:
         ofile.write(get(voice_url, {}))
Example #31
def get_stack_versions(protocol,
                       host,
                       port,
                       context_path,
                       username,
                       password,
                       cluster_name,
                       connection_timeout=10):
    ambari_url = get_ambari_url(protocol, host, port, context_path)
    path = '/api/v1/clusters/{0}/stack_versions/1'.format(cluster_name)
    resp = get(ambari_url, username, password, path, connection_timeout)
    assert_return_code(resp, 200, 'cluster stack versions')
    config = json.loads(resp.content)
    stack_version = parse_config(
        resp, config, lambda conf: conf['ClusterStackVersions'] is not None,
        lambda conf: conf['ClusterStackVersions'])
    return stack_version['stack'], stack_version['version']
Example #32
    def get_comment_info(self, artical_id, offset=0, limit=20, referer=None):
        """
        获取评论信息
        """
        host = 'https://www.zhihu.com/api/v4/articles/'
        url = (host + artical_id + '/comments?include='
               'data%5B*%5D.author%2Ccollapsed%2Creply_to_author'
               '%2Cdisliked%2Ccontent%2Cvoting%2Cvote_count'
               '%2Cis_parent_author%2Cis_author%2Calgorithm_right'
               '&order=normal&limit=' + str(limit) + '&offset=' + str(offset) +
               '&status=open')
        header = None
        if referer:
            header = common.Iheader
            header['referer'] = referer
            header['origin'] = 'https://zhuanlan.zhihu.com'

        return common.get(url, True, header)
Example #33
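# Pages through the '<self.name>/list' endpoint, falling back to an earlier
# access token whenever the current one returns error 403, and stops collecting
# once items older than until_time are reached.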
 def _crawl(self, token_list, user_id, until_time):
   response = []
   page = 1
   token = token_list[-1]
   if until_time is not None:
     conv_until_time = conv(until_time)
   while True:
     new_response = json.loads(get(url + self.name + '/list', [
       ('access_token', token),
       ('ownerId', user_id),
       ('pageSize', str(self.page_size)),
       ('pageNumber', str(page)),
     ]))
     if 'error' in new_response and new_response['error'] == 403:
       token_list.pop()
       if not token_list:
          print 'Error - none of the tokens can be used now.'
         return None
       token = token_list[-1]
       continue
     if 'response' not in new_response:
       break
     new_response = new_response['response']
     if not new_response:
       break
     if until_time is not None:
       try:
         if conv(new_response[-1]['createTime']) <= conv_until_time:
           for item in new_response:
             if item['createTime'] <= conv_until_time:
               break
           response.append(item)
           return response
       except Exception, e:
         pass
       try:
         if conv2(new_response[-1]['createTime']) <= conv_until_time:
           for item in new_response:
             if item['createTime'] <= conv_until_time:
               break
           response.append(item)
           return response
       except Exception, e:
         pass
Example #34
def register(socket):
    authcode = verify_authcode(socket)
    if not authcode:
        return
    pb = washer_pb2.Register_Request()
    pb.phone = WASHER_PHONE
    pb.authcode = authcode
    pb.password = '******'
    pb.password2 = 'iwasher'
    pb.nick = 'iwasher'
    pb.signature = 'signature'
    pb.type = common_pb2.PERSONAL
    common.send(socket, common_pb2.REGISTER, pb)
    (protocol, body) = common.get(socket)
    if body:
        res = washer_pb2.Register_Response()
        res.ParseFromString(body)
        print(res)
        print('finish register')
Example #35
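# Requests an auth code, then sends it back over the VERIFY_AUTHCODE protocol;
# returns the code on success so register() can reuse it, or False on failure.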
def verify_authcode(socket):
    authcode = request_authcode(socket)
    if not authcode:
        print('get request authcode failure..')
        return
    verifyAuthcode = washer_pb2.Verify_Authcode_Request()
    verifyAuthcode.phone = WASHER_PHONE
    verifyAuthcode.authcode = authcode
    verifyAuthcode.signature = 'signature'
    common.send(socket, common_pb2.VERIFY_AUTHCODE, verifyAuthcode)
    (protocol, body) = common.get(socket)
    if body:
        va = washer_pb2.Verify_Authcode_Response()
        va.ParseFromString(body)
        if va.error_code == common_pb2.SUCCESS:
            print("verify authcode success:%s" % va.error_code)
            return authcode
    print('verify authcode failure')
    return False
Example #36
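# Tries each node in turn until a get() succeeds, then unpickles the stored
# message and its nested payload.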
def read_data():
    nodes = get_node_list()
    value = ''
    has_read = False
    i = 0
    while not has_read:
        try:
            value = get(nodes[i][1])
            has_read = True
        except Exception as e:
            i += 1
            if i >= len(nodes):
                value = e
                break
    data = ''
    if value is not None:
        msg = pickle.loads(value)
        if msg is not None and msg[1] is not None:
            data = pickle.loads(msg[1])
    return data
Example #37
def main(options):
    api_key = options.api
    from_tool_shed = options.from_tool_shed.rstrip('/')
    to_tool_shed = options.to_tool_shed.rstrip('/')
    # Get the categories from the specified Tool Shed.
    url = '%s/api/categories' % from_tool_shed
    category_dicts = get(url)
    create_response_dicts = []
    for category_dict in category_dicts:
        name = category_dict.get('name', None)
        description = category_dict.get('description', None)
        if name is not None and description is not None:
            data = dict(name=name,
                        description=description)
            url = '%s/api/categories' % to_tool_shed
            try:
                response = submit(url, data, api_key)
            except Exception as e:
                response = str(e)
                print("Error attempting to create category using URL: ", url, " exception: ", e)
            create_response_dict = dict(response=response)
            create_response_dicts.append(create_response_dict)
Example #38
def create_sequencer_configuration( key, base_url, request_form_filename, sample_form_filename, request_type_filename, email_addresses, return_formatted=True ):
    # create request_form
    data = {}
    data[ 'xml_text' ] = open( request_form_filename ).read()
    request_form = submit( key, "%sforms" % base_url, data, return_formatted=False )[0]
    # create sample_form
    data = {}
    data[ 'xml_text' ] = open( sample_form_filename ).read()
    sample_form = submit( key, "%sforms" % base_url, data, return_formatted=False )[0]
    # get user ids
    user_ids = [ user['id'] for user in get( key, "%susers" % base_url ) if user['email'] in email_addresses ]
    # create role, assign to user
    data = {}
    data[ 'name' ] = "request_type_role_%s_%s_%s name" % ( request_form['id'], sample_form['id'], '_'.join( email_addresses ) )
    data[ 'description' ] = "request_type_role_%s_%s_%s description" % ( request_form['id'], sample_form['id'], '_'.join( email_addresses ) )
    data[ 'user_ids' ] = user_ids
    role_ids = [ role[ 'id' ] for role in submit( key, "%sroles" % base_url, data, return_formatted=False ) ]
    # create request_type
    data = {}
    data[ 'request_form_id' ] = request_form[ 'id' ]
    data[ 'sample_form_id' ] = sample_form[ 'id' ]
    data[ 'role_ids' ] = role_ids
    data[ 'xml_text' ] = open( request_type_filename ).read()
    return submit( key, "%srequest_types" % base_url, data, return_formatted=return_formatted )  # create and print out results for request type
Example #39
def main( options ):
    api_key = options.api
    from_tool_shed = options.from_tool_shed.rstrip( '/' )
    to_tool_shed = options.to_tool_shed.rstrip( '/' )
    # Get the users from the specified Tool Shed.
    url = '%s/api/users' % from_tool_shed
    user_dicts = get( url )
    create_response_dicts = []
    for user_dict in user_dicts:
        username = user_dict.get( 'username', None )
        if username is not None:
            email = '*****@*****.**' % username
            password = '******'
            data = dict( email=email,
                         password=password,
                         username=username )
            url = '%s/api/users' % to_tool_shed
            try:
                response = submit( url, data, api_key )
            except Exception, e:
                response = str( e )
                print "Error attempting to create user using URL: ", url, " exception: ", str( e )
            create_response_dict = dict( response=response )
            create_response_dicts.append( create_response_dict )
Example #40
import sys, time
from common import get
from numpy import array, mean, ptp
from enthought.mayavi.mlab import points3d, show
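# Builds a (lat, long, departure-time) point for every departure of the given
# service at each of its stops; departures before 03:00 are shifted forward by a day.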

if len(sys.argv) != 2:
    print "Usage:", sys.argv[0], "<service number>"
    sys.exit(1)

service = sys.argv[1]

stops = get('servicestops', service).nodes['serviceStop']

points = []

for stop in stops:
    lat = float(str(stop.lat))
    long = float(str(stop.long))
    departures = get('stopdepartures', str(stop.naptan))
    if departures.nodes.has_key('stopDeparture'):
        for departure in departures.nodes['stopDeparture']:
            if str(departure.service) == service:
                t_string = str(departure.departure_time)
                t_struct = time.strptime(t_string, "%Y-%m-%dT%H:%M")
                t_float = time.mktime(t_struct)
                if t_struct.tm_hour < 3:
                    t_float += 24*60*60
                points.append([lat, long, t_float])

def normalise(data):
    return (data - mean(data)) / (ptp(data)/2)
Example #41
#!/usr/bin/env python

from common import get

notifications = get('/repos/grpc/grpc/notifications').json()
for notification in notifications:
  if not notification['unread']:
    continue
  if notification['reason'] == 'mention':
    issue_number = notification['subject']['url'].split('/')[-1]
    print 'https://github.com/grpc/grpc/issues/{}'.format(issue_number)
Example #42
def mark_video_as_viewed(episodeId, lenght, tp):
    xbmc.log('Mark viewed:%s' % (episodeId), 4)
    common.get('?usecase=UpdateViewTime&t=%s&eid=%s' % (str(lenght), str(episodeId)), tp)
Example #43
def get_all_episodes_xml(id, tp):
    return common.get("Series/" + id + "/XML/", tp)
Example #44
    return dataset_state in [ 'ok', 'error' ]

if __name__ == '__main__':
    parser = optparse.OptionParser()
    parser.add_option( '-k', '--key', dest='api_key', action='store', type="string", default=None, help='API Key.' )
    parser.add_option( '-u', '--url', dest='base_url', action='store', type="string", default='http://localhost:8080', help='Base URL of Galaxy Server' )
    parser.add_option( '-d', '--dbkey', dest='dbkeys', action='append', type="string", default=[], help='List of dbkeys to download and Index' )
    parser.add_option( '-s', '--sleep_time', dest='sleep_time', action='store', type="int", default=DEFAULT_SLEEP_TIME, help='How long to sleep between check loops' )
    (options, args) = parser.parse_args()

    # check options
    assert options.api_key is not None, ValueError( 'You must specify an API key.' )
    assert options.dbkeys, ValueError( 'You must specify at least one dbkey to use.' )

    # check user is admin
    configuration_options = get( options.api_key, urljoin( options.base_url, 'api/configuration' ) )
    if 'library_import_dir' not in configuration_options:  # hack to check if is admin user
        print("Warning: Data Managers are only available to admin users. The API Key provided does not appear to belong to an admin user. Will attempt to run anyway.")

    # Fetch Genomes
    dbkeys = {}
    for dbkey in options.dbkeys:
        if dbkey not in dbkeys:
            dbkeys[ dbkey ] = run_tool( FETCH_GENOME_TOOL_ID, None, { 'dbkey': dbkey, 'reference_source|reference_source_selector': 'ucsc', 'reference_source|requested_dbkey': dbkey }, options.api_key, options.base_url, wait=False )
        else:
            "dbkey (%s) was specified more than once, skipping additional specification." % ( dbkey )

    print('Genomes Queued for downloading.')

    # Start indexers
    indexing_tools = []
Example #45
    sys.exit('API_KEY not set, see the README.txt file')

# Clone the galaxy git repository and replace
# YOUR_GALAXY_PATH with the clone's local path in the following code, e.g.:
#   cd /tmp
#   git clone https://github.com/galaxyproject/galaxy
#   GALAXY_PATH = '/tmp/galaxy'

GALAXY_PATH = 'YOUR_GALAXY_PATH'
sys.path.insert(1, os.path.join(GALAXY_PATH, 'scripts/api'))
import common  # noqa: E402,I100,I202

# Select "W5 - Metagenomics" from published workflows

workflow_name = 'W5 - Metagenomics'
workflows = common.get(API_KEY, '%s/workflows?show_published=True' % API_URL)
w = [_ for _ in workflows if _['published'] and _['name'] == workflow_name]
assert len(w) == 1
w = w[0]

# Import the workflow to user space

data = {'workflow_id': w['id']}
iw = common.post(API_KEY, '%s/workflows/import' % API_URL, data)
iw_details = common.get(API_KEY, '%s/workflows/%s' % (API_URL, iw['id']))

# Select the "Orione SupMat" library

library_name = 'Orione SupMat'
libraries = common.get(API_KEY, '%s/libraries' % API_URL)
l = [_ for _ in libraries if _['name'] == library_name]
Example #46
def get_new_series_xml(tp):
    xbmc.log("get new series list", 3)
    return common.get("List/new/XML/", tp)
Example #47
def get_my_new_series_xml(tp):
    return common.get("List/mynew/XML/", tp)
Example #48
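# Fetches the episode's XML metadata, starts playback in XBMC, attaches the
# matching subtitles once playback is running, and finally marks the episode
# as viewed; error responses pop a localized message instead.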
def show_episode(pv):
    s = common.get("EpisodeLink/" + pv['id'] + "/XML/", pv['tp'])
    if s is None:
        common.show_message(common.lang(30003), common.lang(30004))
        return False

    d = xml.dom.minidom.parseString(s)
    i = d.getElementsByTagName('item')
    if i and len(i) > 0:
        snum = int(common.getVal(i[0], 'snum'))
        enum = int(common.getVal(i[0], 'enum'))
        vnum = common.getVal(i[0], 'vnum')
        lenght = common.getVal(i[0], 'tl')
        title = common.getVal(i[0], 'title')
        etitle = common.getVal(i[0], 'etitle')

        smark = common.getVal(i[0], 'smark')
        server = common.getVal(i[0], 'server')
        series = common.getVal(i[0], 'series')

        if pv['tp'] == 'uaj':
            suburl = "http://" + server + "/content/" + series + "/"
            scurl = suburl + smark + ".jpg"
            videourl = common.getVal(i[0], 'vurl')
            sub_f = 0
        else:
            scurl = common.getVal(i[0], 'scurl')
            suburl = common.getVal(i[0], 'suburl')
            videourl = common.getVal(i[0], 'videourl')
            sub_f = int(common.getVal(i[0], 'sub_f'))

        sub_en = int(common.getVal(i[0], 'sub_en'))
        sub_ru = int(common.getVal(i[0], 'sub_ru'))
        tp = int(common.getVal(i[0], 'tp'))

        ftitle = common.fTitle(snum, vnum, title, etitle)

        item = xbmcgui.ListItem(ftitle, iconImage=scurl, thumbnailImage=scurl)
        item.setInfo(type='video', infoLabels={
            'id': "hdout_tv_episode_" + pv['tp'] + "_" + pv['id'],
            'title': ftitle,
            'season': snum,
            'episode': enum})
        player = xbmc.Player()
        player.play(videourl, item)
        wait_for_playng_start(player, 40)
        if wait_for_playng(player, videourl, 5):
            # xbmc.sleep(3000)

            if pv['tp'] == 'uaj':
                sub = int(common.config.getSetting('subuaj'))
                if sub == 1 and sub_ru == 1:
                    common.append_subtitle(smark, "1", suburl)
                elif sub == 2 and sub_en == 1:
                    common.append_subtitle(smark, "2", suburl)
            else:
                sub = int(common.config.getSetting('subhd'))
                if sub == 1 and sub_ru == 1:
                    common.append_subtitle(smark, "ru", suburl)
                elif sub == 2 and sub_en == 1:
                    common.append_subtitle(smark, "en", suburl)
                elif sub_f == 1:
                    common.append_subtitle(smark, "f", suburl)
            if wait_for_playng(player, videourl, 10):
                mark_video_as_viewed(pv['id'], lenght, pv['tp'])
    else:
        e = d.getElementsByTagName('error')
        if e and len(e) > 0:
            et = common.getVal(e[0], "type")
            if type == "notfound":
                common.show_message(common.lang(30003), common.lang(30006))
                return False
            elif type == "nomoney":
                common.show_message(common.lang(30003), common.lang(30007))
                return False
            else:
                common.show_message(common.lang(30003), common.lang(30008))
                return False
        else:
            common.show_message(common.lang(30003), common.lang(30008))
            return False
    xbmc.log("End show episode", 4)
    return True
Example #49
def get_dataset_state( hda_id, api_key, galaxy_url ):
    datasets_url = urljoin( galaxy_url, 'api/datasets/%s' % hda_id )
    dataset_info = get( api_key, datasets_url )
    return dataset_info['state']
Example #50
def get_all_series_xml(tp):
    return common.get("List/all/XML/", tp)