Example #1
def history_etf(code, datefrom='', dateto='', timestamp=''):
    host = 'https://stock.finance.sina.com.cn/fundInfo/api/openapi.php/CaihuiFundInfoService.getNav?'
    url = host + 'symbol=' + code + '&datefrom=' + datefrom + '&dateto=' + dateto + '&_=' + timestamp

    def check_code(jdata):
        code = -1
        if ('result' in jdata) and ('status' in jdata['result']) and (
                'code' in jdata['result']['status']):
            code = jdata['result']['status']['code']
        return code

    jdata = util.http_get(url + '&page=1')
    code = check_code(jdata)
    if code != 0:
        raise Exception('request failed, unexpected status code in JSON: ' + str(code))
    total_num = int(jdata['result']['data']['total_num'])
    max_page = (total_num + 19) // 20  # ceiling division: 20 records per page
    dlist = jdata['result']['data']['data']
    dlist.reverse()
    for page in range(2, max_page + 1):
        print('page:', page, '/', max_page)
        jdata = util.http_get(url + '&page=' + str(page))
        code = check_code(jdata)
        if code != 0:
            raise Exception('request failed, unexpected status code in JSON: ' + str(code))
        if ('result' in jdata) and ('data' in jdata['result']) and (
                'data' in jdata['result']['data']):
            pagedata = jdata['result']['data']['data']
            pagedata.reverse()
            dlist = pagedata + dlist
        if page % 5 == 0:
            time.sleep(1)
    return dlist
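A minimal usage sketch, assuming util.http_get returns the parsed JSON body; the fund symbol, date range, and millisecond timestamp below are illustrative placeholders:

import time

# hypothetical call: fetch one year of NAV records for an example symbol
navs = history_etf('510300', datefrom='2020-01-01', dateto='2020-12-31',
                   timestamp=str(int(time.time() * 1000)))
print(len(navs), navs[0])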
Example #2
    def handle(self, handler_input):
        # type: (HandlerInput) -> Response
        response_builder = handler_input.response_builder
        attr = handler_input.attributes_manager.session_attributes
        filled_slots = handler_input.request_envelope.request.intent.slots
        slot_values = util.get_slot_values(filled_slots)

        conn_devs = attr["connected_devices"]
        device = slot_values["Device"]["resolved"]
        resource = slot_values["Resource"]["resolved"]
        # assumes the requested device is among the connected ones;
        # `dev` stays None (and the lookups below fail) if it is not
        dev = next((dev for dev in conn_devs if dev['deviceID'] == device),
                   None)
        end_point = dev["end_point"]
        units = dev["units"]

        try:
            measure = util.http_get(end_point + '/get_measure/' +
                                    resource)[resource]
            if measure:
                speech = data.GET_MEASURE.format(resource, device, measure,
                                                 units[resource])
            else:
                speech = data.RESOURCE_NOT_FOUND.format(device, resource)
        except Exception:
            speech = data.CONNECTION_ERR.format("measure")

        response_builder.speak(speech)
        response_builder.ask(data.REPROMPT)
        return response_builder.response
Example #3
def szse_json():
    url = 'http://www.szse.cn/api/report/index/overview/onepersistenthour/szse'
    jsdata = util.http_get(url)
    if ('code' in jsdata) and (jsdata['code'] == 0):
        return jsdata
    raise Exception(jsdata['message'])
Example #4
    def __get_socket_server_url(self, roomid):
        cid_info_xml = str(util.http_get(self.__CID_INFO_URL + roomid))
        start = cid_info_xml.find("<server>")
        if start == -1:
            return None
        start += len("<server>")
        end = cid_info_xml.find("</server>", start)
        if end == -1:
            return None
        return cid_info_xml[start:end]
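String scanning works while the response stays well formed, but a real XML parser is less fragile. A sketch with the standard library, assuming the payload parses cleanly (get_server_from_xml is a hypothetical helper, not part of the original class):

import xml.etree.ElementTree as ET

def get_server_from_xml(cid_info_xml):
    # return the text of the first <server> element, or None if absent/malformed
    try:
        root = ET.fromstring(cid_info_xml)
        node = root.find('.//server')
        return node.text if node is not None else None
    except ET.ParseError:
        return None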
Example #5
def get_allergens_and_ingredients(link):
    html = http_get(link)
    html = html.find_all(face="arial", size="2")[-2:]

    # edge case where no nutrition info is available
    if len(html) <= 1:
        return "n/a", "n/a"

    allergens = html[0].contents[1].string if len(html[0].contents) > 1 else ""
    ingredients = html[1].contents[1].string if len(html[1].contents) > 1 else ""

    return allergens, ingredients
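Here http_get evidently returns a parsed BeautifulSoup tree, since the result is queried with find_all. A hedged usage sketch with a placeholder URL:

# hypothetical link; the real menu-item URLs come from the crawler
allergens, ingredients = get_allergens_and_ingredients(
    'https://example.edu/menu/item?id=123')
print('Allergens:', allergens)
print('Ingredients:', ingredients)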
Example #6
    def dropEvent(self, event):
        url = event.mimeData().urls()[0]

        if url.scheme() == "file":
            self.from_file(url.path())
            return

        try:
            self.cover_data = util.http_get(url.toString())
            self._set_cover()
        except Exception as e:
            QMessageBox.critical(self, "Error",
                                 f"Error downloading {url}: {e}.")
Example #7
def get_server_addr():
    proxy_url = conf.api['bvc_proxy']
    addr = None
    for i in range(10):
        try:
            res_proxy = json.loads(util.http_get(proxy_url))
            server_json = random.choice(res_proxy['result']['ServerInfo'])
            addr = server_json['Server']['service_addr'][0]
            break
        except Exception as e:
            util.print_err(e)
            util.print_err("getting proxy url...")
            time.sleep(1)
    if addr is None:
        raise Exception("could not fetch a server address from %s" % proxy_url)
    ip = addr.split(':')[0]
    port = 40077
    url = "http://%s:%s/1" % (ip, port)
    return url
Example #8
def crawl():
    # make web request
    soup = http_get(BASE_URL + MENU_URL)
    # locate html data
    html = soup.body.contents[-2].table.tbody.contents[3].td.table.contents

    # stores food that has already been added to the table
    food_cache = {}

    # extract data
    for MEAL in MEALS:
        meal_index = MEALS[MEAL]
        meal_data = html[meal_index]

        for DINING_COMMON in DINING_COMMONS:
            dc_index = DINING_COMMONS[DINING_COMMON]
            if len(meal_data.contents) <= dc_index:
                break
            meal_dc_data = meal_data.contents[dc_index]

            for entry in meal_dc_data.find_all('a'):
                meal_name = entry.contents[0].string
                meal_name, gluten_free = truncate_meal_name(meal_name)

                # skip the "Nutritive Analysis" link
                if 'nutritive analysis' in meal_name.lower():
                    continue

                # create database models object
                if meal_name in food_cache:
                    food_obj = food_cache[meal_name]
                else:  # food is not located in local cache
                    # check if food is in database
                    food_obj = Food.query.filter_by(name=meal_name).first()
                    # not found in database, crawl page
                    if food_obj is None:
                        food_obj = extract_food_info(entry)
                        db.session.add(food_obj)

                    # add food to the cache
                    food_cache[meal_name] = food_obj

                menu_obj = Menu(date=get_date(),
                                location=LOCATION_TO_ENUM[DINING_COMMON],
                                meal=MEAL_TO_ENUM[MEAL],
                                food=food_obj)

                db.session.add(menu_obj)
    db.session.commit()
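Design note: the in-memory food_cache ensures that a dish appearing in several meals or dining commons during one crawl creates a single Food row (and a single db.session.add), while a Menu row is still added for every occurrence; the whole batch is committed once at the end.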
Example #9
    def handle(self, handler_input):
        # type: (HandlerInput) -> Response
        logger.info("In LaunchRequestHandler")

        connected_devices = util.get_connected_devices()
        conn_devicesID = [dev["deviceID"] for dev in connected_devices]

        broker = util.http_get(data.CATALOG_URL + data.USER_ID + '/broker')

        attr = handler_input.attributes_manager.session_attributes
        attr["connected_devices"] = connected_devices
        attr["broker"] = broker
        handler_input.attributes_manager.session_attributes = attr

        handler_input.response_builder.speak(data.WELCOME_MESSAGE).ask(
            data.HELP_MESSAGE).add_directive(
                util.refresh_devices_slot(conn_devicesID))
        return handler_input.response_builder.response
Example #10
def fetch(symbols, cmd, params=None):
    if cmd not in cmd_map:
        raise Exception('unknown command, see cmd_map for the supported ones')
    info = cmd_map[cmd]
    url = info[0]
    new_param = info[1].copy()
    need_token = info[2]
    new_param.update(params or {})
    url += symbols
    for k, v in new_param.items():
        url += '&' + k + '=' + str(v)
    headers = {'Host': 'stock.xueqiu.com',
               'Accept': 'application/json',
               'User-Agent': 'Xueqiu iPhone 11.8',
               'Accept-Language': 'zh-Hans-CN;q=1, ja-JP;q=0.9',
               'Accept-Encoding': 'br, gzip, deflate',
               'Connection': 'keep-alive'}
    if need_token is True:
        headers['Cookie'] = get_token()
    print(url)
    return util.http_get(url, headers)
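A hedged usage sketch; the valid command names depend entirely on the surrounding cmd_map, so 'quote' and the symbol below are placeholders:

# hypothetical: look up a quote for one symbol, overriding one default parameter
jdata = fetch('SH600036', 'quote', params={'extend': 'detail'})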
Example #11
def bml_map(line, download_dir_name, merged_dir_name):
    fs = line.strip().split('\t')
    pics = fs[0:3]
    title = fs[4]
    brand = fs[5]
    audio_url = fs[7]
    ideaid = fs[-1]
    m2 = hashlib.md5()
    output_dir = download_dir_name
    url_names = []
    pic_file_names = []
    pic_local_paths = []
    q_all = []
    ready_pics = 0
    for index, p in enumerate(pics):
        # derive a stable local file name from the (salted) image URL
        m2.update((p + 'sunfuhao').encode('utf-8'))
        file_name = m2.hexdigest()
        fn_suf = file_name + '.jpg'
        output_path = os.path.join(output_dir, fn_suf)
        url_names.append(p)
        pic_file_names.append(fn_suf)
        pic_local_paths.append(output_path)

        # download, toggling the proxy after each failure
        if os.path.exists(output_path):
            util.print_err("duplicated image %s" % file_name)
            ready_pics += 1
            continue
        img_data = None
        use_proxy = False
        for i in range(50):
            try:
                img_data = util.http_get(p, use_proxy)
                util.print_err("%s download succeeded" % p)
                break
            except Exception as e:
                util.print_err("%s %s" % (e, p))
                use_proxy = not use_proxy
                util.print_err("toggling proxy")
                time.sleep(1)
        if img_data is not None and len(img_data) > 1000:
            # reject blurry images; retry the clarity service on failure
            for i in range(30):
                try:
                    q0 = cvtools.req_clarity_bvc(img_data)
                    if q0 < 0.3:
                        return None
                    q_all.append(q0)
                    break
                except Exception:
                    util.print_err("fail_clarity")
                    time.sleep(2)
            with open(output_path, 'wb') as fn:
                fn.write(img_data)
            img1 = cv2.imread(output_path)
            img1 = cvtools.img_resize(img1, (370, 245))
            cv2.imwrite(output_path, img1)
            ready_pics += 1
        else:
            util.print_err("%s download failed!!!" % p)

    if ready_pics != 3:
        util.print_err("not enough images %s" % len(pic_file_names))
        return

    img_name1, img_name2, img_name3 = pic_file_names
    fimg1, fimg2, fimg3 = pic_local_paths
    img1, img2, img3 = cv2.imread(fimg1), cv2.imread(fimg2), cv2.imread(fimg3)
    res_dir_chaofen = merged_dir_name
    fn_path1 = os.path.join(res_dir_chaofen, img_name1)
    fn_path2 = os.path.join(res_dir_chaofen, img_name2)
    fn_path3 = os.path.join(res_dir_chaofen, img_name3)

    cv2.imwrite(fn_path1, img1)
    cv2.imwrite(fn_path2, img2)
    cv2.imwrite(fn_path3, img3)

    # super-resolve each image, pad it to a fixed width, and base64-encode it
    bases = []
    for fn_path in (fn_path1, fn_path2, fn_path3):
        c0 = cvtools.super_resolution(fn_path, svr_url, is_local=False)
        h, w, d = c0.shape[:3]
        target_sz = (h, 867, d)
        c2_p = cvtools.img_padding(c0, target_sz, dir='h', method='gblur')
        cv2.imwrite(fn_path, c2_p)
        with open(fn_path, 'rb') as f:
            bases.append(base64.b64encode(f.read()).decode('ascii'))
    base1, base2, base3 = bases

    templte = random.choice(['99', '98', '97', '96'])
    templte = "99"  # currently pinned to template 99
    prjson = '{"video_key":"%s","company":"%s","audio":["%s"],"pic_and_desc":[{"pic_binary":"%s","desc":"%s"},{"pic_binary":"%s","desc":"%s"},{"pic_binary":"%s","desc":"%s"}],"trade":[{"trade_id_1st":"%s","trade_name_1st":"feed"}],"ad_info":{"userid":"%s","planid":"123","unitid":"123","winfoid":"123"},"other_info":{"lp_url":""}}' % (ideaid, title, audio_url, base1, title, base2, brand, base3, title, templte, ideaid)
    print(prjson)
Example #12
if __name__ == '__main__':
    
 
    O = {}
    NODE_KEY = "a88b92531ba974f68bc1fd5938fc77"
    NODE_DEBUG = 0
    SERVER = "http://w/uauc/playweb/"
    util.msg("PlayWeb Node 1.0")
    util.msg("Server:%s Key:%s Debug:%d" % (SERVER, NODE_KEY, NODE_DEBUG))
    util.msg("Listening server project...")
    while 1:
        r = util.http_get(SERVER + "/index.php?m=node&a=get_task")
        if r['data'] != " " :
            O = eval(util.decode_str(r['data'], NODE_KEY))
            break
        time.sleep(1)
    O['debug'] = NODE_DEBUG
    util.msg("[Project] Target:%s  Time:%s Module:%s  Thread:%s" % (O['target'], util.date(O['start_time']), O['module'], O['thread']), 1)
    O['target'] = "w"
    O['key'] = NODE_KEY
    #O['depth'] = 5  # notice
    O['server_url'] = SERVER + "?m=node&a="
    O['web-ports'] = util.csv2array(O['web-ports'])
    O['app-ports'] = util.csv2array(O['app-ports'])
    O['file-ext'] = util.csv2array(O['file-ext'])
    O['module'] = util.csv2array(O['module'])
    g.config(O)
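Note that the task payload is passed to eval after decoding, which trusts the server completely. If the payload is a plain Python literal (dict, list, string, number), ast.literal_eval accepts the same data without executing arbitrary code; a sketch under that assumption:

import ast

# safer variant, assuming the decoded payload is a Python literal
O = ast.literal_eval(util.decode_str(r['data'], NODE_KEY))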
Example #13
def get_cd_info(rel, discid):
    discno = None
    album = rel.get("title")
    for medium in rel["medium-list"]:
        for disc in medium["disc-list"]:
            if disc["id"] == discid:
                discno = int(medium["position"])
                album = medium.get("title", album)
                break
        if discno:
            break
    else:
        raise Exception("could not find disc no")

    relid = rel["id"]
    ret = mb.get_release_by_id(
        relid, includes=["artists", "recordings", "media", "artist-credits"])
    rel = ret.get("release")

    date = rel["date"]
    fmts = [
        "%Y-%m-%d",
        "%Y-%m",
        "%Y",
    ]
    for fmt in fmts:
        try:
            year = datetime.datetime.strptime(date, fmt).year
            break
        except ValueError:
            pass
    else:
        print(f"Can't figure out album year: {date}")
        year = 1900

    set_size = rel["medium-count"]
    for medium in rel["medium-list"]:
        if int(medium["position"]) == discno:
            tracks = medium["track-list"]
            break
    else:
        raise Exception("cannot find tracks")

    atracks = []
    found_artists = set()
    for t in tracks:
        artists = t["artist-credit"]
        if len(artists) > 1:
            artist = "Various"
        else:
            artist = artists[0]["artist"]["name"]
            found_artists.add(artist)

        track = TrackInfo(
            artist=artist,
            album=album,
            title=t.get("title") or t["recording"]["title"],
            trackno=int(t["position"]),
        )
        atracks.append(track)

    album_artist = "Various"
    if len(found_artists) == 1:
        album_artist = list(found_artists)[0]

    atracks = sorted(atracks, key=lambda t: t.trackno)

    cover_art = None
    if rel.get("cover-art-archive").get("artwork") == "true":
        try:
            art = mb.get_image_list(relid)
            pos = 0
            for img in art["images"]:
                if not "Front" in img.get("types", []):
                    continue

                if not cover_art or pos == discno:
                    cover_art = util.http_get(img["image"])

                if pos == discno:
                    break

                pos += 1
        except Exception:
            util.print_error()

    return CDInfo(
        artist=album_artist,
        album=album,
        tracks=atracks,
        discno=discno,
        year=year,
        set_size=set_size,
        multi_artist=len(found_artists) > 1,
        cover_art=cover_art,
        disambiguation=rel.get("disambiguation"),
    )
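The mb module looks like musicbrainzngs, given get_release_by_id and get_image_list. Under that assumption, a hypothetical lookup that produces the rel and discid arguments:

# hypothetical: resolve a disc id to its first matching release
result = mb.get_releases_by_discid(discid, includes=["artists", "recordings"])
rel = result["disc"]["release-list"][0]
info = get_cd_info(rel, discid)
print(info.artist, '-', info.album, 'disc', info.discno)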
Example #14
if __name__ == '__main__':

    O = {}
    NODE_KEY = "a88b92531ba974f68bc1fd5938fc77"
    NODE_DEBUG = 0
    SERVER = "http://w/uauc/playweb/"
    util.msg("PlayWeb Node 1.0")
    util.msg("Server:%s Key:%s Debug:%d" % (SERVER, NODE_KEY, NODE_DEBUG))
    util.msg("Listening server project...")
    while 1:
        r = util.http_get(SERVER + "/index.php?m=node&a=get_task")
        if r['data'] != " ":
            O = eval(util.decode_str(r['data'], NODE_KEY))
            break
        time.sleep(1)
    O['debug'] = NODE_DEBUG
    util.msg(
        "[Project] Target:%s  Time:%s Module:%s  Thread:%s" %
        (O['target'], util.date(O['start_time']), O['module'], O['thread']), 1)
    O['target'] = "w"
    O['key'] = NODE_KEY
    #O['depth'] = 5  # notice
    O['server_url'] = SERVER + "?m=node&a="
    O['web-ports'] = util.csv2array(O['web-ports'])
    O['app-ports'] = util.csv2array(O['app-ports'])
    O['file-ext'] = util.csv2array(O['file-ext'])
Example #15
    def __init__(self,
                 model_name_or_path: str = None,
                 modules: Iterable[nn.Module] = None,
                 device: str = None):
        if modules is not None and not isinstance(modules, OrderedDict):
            modules = OrderedDict([(str(idx), module)
                                   for idx, module in enumerate(modules)])

        if model_name_or_path is not None and model_name_or_path != "":
            logging.info("Load pretrained SentenceTransformer: {}".format(
                model_name_or_path))

            if '/' not in model_name_or_path and '\\' not in model_name_or_path and not os.path.isdir(
                    model_name_or_path):
                logging.info(
                    "Did not find a / or \\ in the name. Assume to download model from server"
                )
                model_name_or_path = __DOWNLOAD_SERVER__ + model_name_or_path + '.zip'

            if model_name_or_path.startswith(
                    'http://') or model_name_or_path.startswith('https://'):
                model_url = model_name_or_path
                folder_name = model_url.replace("https://", "").replace(
                    "http://", "").replace("/", "_")[:250]

                try:
                    from torch.hub import _get_torch_home
                    torch_cache_home = _get_torch_home()
                except ImportError:
                    torch_cache_home = os.path.expanduser(
                        os.getenv(
                            'TORCH_HOME',
                            os.path.join(
                                os.getenv('XDG_CACHE_HOME', '~/.cache'),
                                'torch')))
                default_cache_path = os.path.join(torch_cache_home,
                                                  'sentence_transformers')
                model_path = os.path.join(default_cache_path, folder_name)
                os.makedirs(model_path, exist_ok=True)

                if not os.listdir(model_path):
                    if model_url.endswith("/"):
                        model_url = model_url[:-1]
                    logging.info(
                        "Downloading sentence transformer model from {} and saving it at {}"
                        .format(model_url, model_path))
                    try:
                        zip_save_path = os.path.join(model_path, 'model.zip')
                        http_get(model_url, zip_save_path)
                        with ZipFile(zip_save_path, 'r') as zf:
                            zf.extractall(model_path)
                    except Exception as e:
                        shutil.rmtree(model_path)
                        raise e
            else:
                model_path = model_name_or_path

            #### Load from disk
            if model_path is not None:
                logging.info("Load SentenceTransformer from folder: {}".format(
                    model_path))
                with open(os.path.join(model_path, 'modules.json')) as fIn:
                    contained_modules = json.load(fIn)

                modules = OrderedDict()
                for module_config in contained_modules:
                    module_class = import_from_string(module_config['type'])
                    module = module_class.load(
                        os.path.join(model_path, module_config['path']))
                    modules[module_config['name']] = module

        super().__init__(modules)
        if device is None:
            # note: falls back to the global `args` for the GPU index
            device = 'cuda:{}'.format(
                args.gpu) if torch.cuda.is_available() else "cpu"
            logging.info("Use pytorch device: {}".format(device))
        self.device = torch.device(device)
        self.to(device)
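Typical construction, assuming this fork keeps the upstream sentence-transformers API (the model name is one of the standard pretrained bundles, and encode is defined elsewhere in the class). Passing device explicitly also sidesteps the global args.gpu fallback:

# hypothetical usage: load a pretrained model and embed two sentences
model = SentenceTransformer('bert-base-nli-mean-tokens', device='cpu')
embeddings = model.encode(['first sentence', 'second sentence'])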