def _findClientAttribute(self, index, CLIENT, listClient, mySheet, model):
    # Map the spreadsheet columns of one client row to named attributes.
    ClientAttributes = []
    dicti = {
        "index": index,
        "client": str(mySheet.cell_value(index, 1)),
        "Service-id": str(mySheet.cell_value(index, 50)),
        "service": str(mySheet.cell_value(index, 20)),
        "OMIPpba": str(mySheet.cell_value(index, 12)),
        "VPNIPpba": str(mySheet.cell_value(index, 34)),
        "HSIIPpba": str(mySheet.cell_value(index, 32)),
        "port": str(mySheet.cell_value(index, 15)),
        "OMvlan": str(mySheet.cell_value(index, 18)),
        "HSIvlan": str(mySheet.cell_value(index, 47)),
        "VPNvlan": str(mySheet.cell_value(index, 49)),
        "VOIPvlan": str(mySheet.cell_value(index, 48)),
        "Protocol_type": str(quote(mySheet.cell_value(index, 42).encode("utf-8"))),
        "Authentication_Key": mySheet.cell_value(index, 39),
        "Interface_Name": str(model._find_Interface_Name(mySheet, index)),
        "QOS": mySheet.cell_value(index, 19),
        "VpnDescription": str(mySheet.cell_value(index, 2).encode('utf-8')),
        "publicIP": str(mySheet.cell_value(index, 22)),
        "IPCpeHSIAdress": str(mySheet.cell_value(index, 28)),
        "VOIPIPpba": str(mySheet.cell_value(index, 33)),
        "loopbackOM": str(mySheet.cell_value(index, 13)),
        "IPOMCpeAdress": str(mySheet.cell_value(index, 11)),
        "LoopbackVOIP": str(mySheet.cell_value(index, 31)),
        "routes": mySheet.cell_value(index, 9),
        "IPVPNCpeAdress": str(mySheet.cell_value(index, 30))
    }
    ClientAttributes.append(dicti)
    return ClientAttributes
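# A minimal usage sketch for the attribute mapper above. It assumes the sheet
# comes from an xlrd workbook (cell_value(row, col) matches xlrd's Sheet API);
# the workbook path, the Parser class, and the `model` object are hypothetical.
import xlrd

book = xlrd.open_workbook("clients.xls")    # hypothetical path
sheet = book.sheet_by_index(0)
parser = Parser()                           # hypothetical owner of _findClientAttribute
for row in range(1, sheet.nrows):           # skip the header row
    attrs = parser._findClientAttribute(row, None, [], sheet, model)
    print(attrs[0]["client"], attrs[0]["port"])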
def fetch_prices(symbol, out_name):
    """
    Fetch daily stock prices for stock `symbol`, since 1980-01-01.

    Args:
        symbol (str): a stock abbr. symbol, like "GOOG" or "AAPL".

    Returns:
        a bool, whether the fetch succeeded.
    """
    # Format today's date to match Google's finance history api.
    now_datetime = datetime.now().strftime("%b+%d,+%Y")
    BASE_URL = "https://finance.google.com/finance/historical?output=csv&q={0}&startdate=Jan+1%2C+1980&enddate={1}"
    # urllib3 has no quote(); URL escaping lives in urllib.parse.
    symbol_url = BASE_URL.format(
        urllib.parse.quote(symbol),
        urllib.parse.quote(now_datetime, '+')
    )
    print("Fetching {} ...".format(symbol))
    print(symbol_url)
    try:
        http = urllib3.PoolManager()
        resp = http.request('GET', symbol_url)
        with open(out_name, 'w') as fout:
            # resp.data is bytes; decode before writing text.
            fout.write(resp.data.decode('utf-8'))
    except urllib3.exceptions.HTTPError:
        print("Failed when fetching {}".format(symbol))
        return False

    data = pd.read_csv(out_name)
    if data.empty:
        print("Remove {} because the data set is empty.".format(out_name))
        os.remove(out_name)
    else:
        dates = data.iloc[:, 0].tolist()
        print("# Fetched rows: %d [%s to %s]" % (data.shape[0], dates[-1], dates[0]))

    # Take a rest
    sleep_time = random.randint(*RANDOM_SLEEP_TIMES)
    print("Sleeping ... %ds" % sleep_time)
    time.sleep(sleep_time)
    return True
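# A minimal driver for fetch_prices(), assuming the module-level names it
# relies on (RANDOM_SLEEP_TIMES plus the datetime/os/random/time/pandas/urllib3
# imports) are in scope; the symbol list here is purely illustrative.
RANDOM_SLEEP_TIMES = (1, 5)  # assumed range, in seconds

for sym in ["GOOG", "AAPL"]:
    ok = fetch_prices(sym, "{}.csv".format(sym))
    if not ok:
        print("Giving up on {}".format(sym))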
def copy(self, source, target):
    ''':raises: NoSuchFilesytemObjectError if source does not exist'''
    target = quote(target)
    self._get_client().copy(self.root + source, self.root + target, overwrite=True)
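# A hypothetical call site for copy(); the Storage class and its constructor
# are assumptions -- only the source/target handling above is from the source.
storage = Storage(root="/backups")  # hypothetical adapter exposing copy()
storage.copy("/reports/2017.csv", "/reports/2017 (archived).csv")
# The target goes through quote(), so spaces and parentheses are percent-encoded
# before the underlying client sees the path.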
def get_rent_percommunity(communityname):
    # urllib3 has no quote(); use urllib.parse.quote for URL escaping.
    url = BASE_URL + u"zufang/rs" + \
        urllib.parse.quote(communityname.encode('utf8')) + "/"
    source_code = misc.get_source_code(url)
    soup = BeautifulSoup(source_code, 'lxml')
    if check_block(soup):
        return
    total_pages = misc.get_sh_total_pages(url)
    if total_pages is None:
        row = model.Rentinfo.select().count()
        raise RuntimeError("Finish at %s because total_pages is None" % row)
    for page in range(total_pages):
        if page > 0:
            url_page = BASE_URL + \
                u"rent/d%drs%s/" % (page, urllib.parse.quote(communityname.encode('utf8')))
            source_code = misc.get_source_code(url_page)
            soup = BeautifulSoup(source_code, 'lxml')
        i = 0
        log_progress("GetRentByCommunitylist", communityname, page + 1, total_pages)
        data_source = []
        nameList = soup.findAll("div", {"class": "info-panel"})
        for name in nameList:
            i = i + 1
            info_dict = {}
            try:
                info = name.find("a", {"name": "selectDetail"})
                info_dict.update({u'title': info.get('title')})
                info_dict.update({u'link': info.get('href')})
                info_dict.update({u'houseID': info.get('key')})
                where = name.find("div", {"class": "where"})
                wheres = where.find_all("span")
                info_dict.update({u'region': wheres[0].get_text().strip()})
                info_dict.update({u'zone': wheres[1].get_text().strip()})
                info_dict.update({u'meters': wheres[2].get_text().strip()})
                other = name.find("div", {"class": "con"})
                info_dict.update({u'other': "".join(other.get_text().split())})
                info_dict.update({u'subway': ""})
                info_dict.update({u'decoration': ""})
                info_dict.update({u'heating': ""})
                price = name.find("div", {"class": "price"})
                info_dict.update({u'price': int(price.span.get_text().strip())})
                pricepre = name.find("div", {"class": "price-pre"})
                info_dict.update({u'pricepre': "".join(pricepre.get_text().split())})
            except Exception:
                # Skip listings whose markup is missing any expected field.
                continue
            # Rentinfo insert into mysql
            data_source.append(info_dict)
            # model.Rentinfo.insert(**info_dict).upsert().execute()
        with model.database.atomic():
            model.Rentinfo.insert_many(data_source).upsert().execute()
        time.sleep(1)
def get_house_percommunity(communityname):
    # urllib3 has no quote(); use urllib.parse.quote for URL escaping.
    url = BASE_URL + u"ershoufang/rs" + \
        urllib.parse.quote(communityname.encode('utf8')) + "/"
    source_code = misc.get_source_code(url)
    soup = BeautifulSoup(source_code, 'lxml')
    if check_block(soup):
        return
    total_pages = misc.get_sh_total_pages(url)
    if total_pages is None:
        row = model.Houseinfo.select().count()
        raise RuntimeError("Finish at %s because total_pages is None" % row)
    for page in range(total_pages):
        if page > 0:
            url_page = BASE_URL + \
                u"ershoufang/d%drs%s/" % (page, urllib.parse.quote(communityname.encode('utf8')))
            source_code = misc.get_source_code(url_page)
            soup = BeautifulSoup(source_code, 'lxml')
        nameList = soup.findAll("div", {"class": "info"})
        i = 0
        log_progress("GetHouseByCommunitylist", communityname, page + 1, total_pages)
        data_source = []
        hisprice_data_source = []
        for name in nameList:  # per house loop
            i = i + 1
            info_dict = {}
            try:
                housetitle = name.find("div", {"class": "prop-title"})
                info_dict.update({u'title': housetitle.a.get('title')})
                info_dict.update({u'link': housetitle.a.get('href')})
                info_dict.update({u'houseID': housetitle.a.get('key')})
                houseaddr = name.find("span", {"class": "info-col row1-text"})
                info = houseaddr.get_text().split('|')
                info_dict.update({u'housetype': info[0].strip()})
                info_dict.update({u'square': info[1].strip()})
                info_dict.update({u'floor': info[2].strip()})
                try:
                    info_dict.update({u'direction': info[3].strip()})
                except IndexError:
                    info_dict.update({u'direction': ''})
                info_dict.update({u'decoration': ''})
                housefloor = name.find("span", {"class": "info-col row2-text"})
                detail = housefloor.get_text().split('|')
                info_dict.update({u'years': detail[-1].strip()})
                community = name.find("a", {"class": "laisuzhou"})
                info_dict.update({u'community': community.span.get_text().strip()})
                info_dict.update({u'followInfo': ''})
                tax = name.find("div", {"class": "property-tag-container"})
                info_dict.update({u'taxtype': "".join(tax.get_text().split())})
                totalPrice = name.find("span", {"class": "total-price strong-num"})
                info_dict.update({u'totalPrice': totalPrice.get_text().strip()})
                unitPrice = name.find("span", {"class": "info-col price-item minor"})
                info_dict.update({u'unitPrice': unitPrice.get_text().strip()})
            except Exception:
                # Skip listings whose markup is missing any expected field.
                continue
            # houseinfo insert into mysql
            data_source.append(info_dict)
            hisprice_data_source.append({
                "houseID": info_dict["houseID"],
                "totalPrice": info_dict["totalPrice"]
            })
            # model.Houseinfo.insert(**info_dict).upsert().execute()
            # model.Hisprice.insert(houseID=info_dict['houseID'], totalPrice=info_dict['totalPrice']).upsert().execute()
        with model.database.atomic():
            model.Houseinfo.insert_many(data_source).upsert().execute()
            model.Hisprice.insert_many(hisprice_data_source).upsert().execute()
        time.sleep(1)
def get_sell_percommunity(communityname):
    # urllib3 has no quote(); use urllib.parse.quote for URL escaping.
    url = BASE_URL + u"chengjiao/rs" + \
        urllib.parse.quote(communityname.encode('utf8')) + "/"
    source_code = misc.get_source_code(url)
    soup = BeautifulSoup(source_code, 'lxml')
    if check_block(soup):
        return
    total_pages = misc.get_sh_total_pages(url)
    if total_pages is None:
        row = model.Sellinfo.select().count()
        raise RuntimeError("Finish at %s because total_pages is None" % row)
    for page in range(total_pages):
        if page > 0:
            url_page = BASE_URL + \
                u"chengjiao/d%drs%s/" % (page, urllib.parse.quote(communityname.encode('utf8')))
            source_code = misc.get_source_code(url_page)
            soup = BeautifulSoup(source_code, 'lxml')
        i = 0
        log_progress("GetSellByCommunitylist", communityname, page + 1, total_pages)
        data_source = []
        for name in soup.findAll("div", {"class": "info"}):
            i = i + 1
            info_dict = {}
            try:
                housetitle = name.findAll("div", {"class": "info-row"})[0]
                info_dict.update({u'title': housetitle.a.get('title')})
                info_dict.update({u'link': housetitle.a.get('href')})
                info_dict.update({u'houseID': housetitle.a.get('key')})
                houseinfo = housetitle.get_text().strip().split(' ')
                info_dict.update({u'housetype': houseinfo[1].strip()})
                info_dict.update({u'square': houseinfo[2].strip('').split('\n')[0]})
                houseaddr = name.find("div", {"class": "row1-text"})
                info = houseaddr.get_text().split('|')
                info_dict.update({u'floor': info[0].strip()})
                try:
                    info_dict.update({u'direction': info[1].strip()})
                except IndexError:
                    info_dict.update({u'direction': ''})
                info_dict.update({u'status': info[2].strip()})
                years = name.find("span", {"class": "c-prop-tag2"})
                info_dict.update({u'years': years.get_text().strip()})
                community = name.find("span", {"class": "cj-text"})
                info_dict.update({u'community': community.get_text().strip()})
                totalPrice = name.find("span", {"class": "strong-num"})
                info_dict.update({u'totalPrice': totalPrice.get_text().strip()})
                unitPrice = name.find("div", {"class": "info-col price-item minor"})
                info_dict.update({u'unitPrice': unitPrice.get_text().strip()})
                source = name.find("div", {"class": "info-col deal-item minor"})
                info_dict.update({u'source': source.get_text().strip()})
                dealdate = name.find("div", {"class": "info-col deal-item main strong-num"})
                info_dict.update({u'dealdate': dealdate.get_text().strip().replace('.', '-')})
            except Exception:
                # Skip listings whose markup is missing any expected field.
                continue
            # Sellinfo insert into mysql
            data_source.append(info_dict)
            # model.Sellinfo.insert(**info_dict).upsert().execute()
        with model.database.atomic():
            model.Sellinfo.insert_many(data_source).upsert().execute()
        time.sleep(1)
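# A hypothetical driver for the three per-community scrapers above; in practice
# the community list would come from a model table or a user-supplied file, and
# the names here are illustrative.
for community in [u'仁恒河滨城', u'世茂滨江花园']:
    get_house_percommunity(community)
    get_sell_percommunity(community)
    get_rent_percommunity(community)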
def get_share_message(self):
    full_url = "%s%s" % (settings.FULL_DOMAIN_NAME, self.get_absolute_url())
    # urllib3 has no quote(); use urllib.parse.quote for URL escaping.
    return urllib.parse.quote("%s %s" % (self.share_message, full_url))
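# A quick illustration of what get_share_message() produces; the message and
# URL are hypothetical stand-ins for self.share_message and the absolute URL.
from urllib.parse import quote
print(quote("Check this out https://example.com/posts/42/"))
# -> Check%20this%20out%20https%3A//example.com/posts/42/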
def main_job(argv):
    jobStatus = ''
    jobName = ''
    jobId = ''
    long = ''
    group = ''
    user = ''
    past = ''
    try:
        # The long-option list was scrubbed in the original; it is rebuilt here
        # to match the short options "hu:ls:n:g:p:".
        opts, args = getopt.getopt(
            argv, "hu:ls:n:g:p:",
            ['help', 'user=', 'long', 'status=', 'name=', 'group=', 'past='])
    except getopt.GetoptError:
        job_usage()
        return
    for opt, arg in opts:
        if opt in ("-h", "--help"):
            job_usage()
            return
        elif opt in ('-u', '--user'):
            user = arg
        elif opt in ('-l', '--long'):
            long = 'yes'
        elif opt in ('-s', '--status'):
            jobStatus = arg
        elif opt in ('-n', '--name'):
            jobName = urllib.parse.quote(arg)
        elif opt in ('-g', '--group'):
            group = urllib.parse.quote(arg)
        elif opt in ('-p', '--past'):
            past = arg
    if len(args) > 0:
        jobId = args[0]
        p = re.compile(r'^[1-9]{1}[0-9]{0,}$')
        pl = re.compile(r'^[1-9]{1}[0-9]{0,}\[{1}[0-9]{0,}\]{1}$')
        if (len(jobId) == 0) or \
           ((p.match(jobId.lower()) is None) and (pl.match(jobId.lower()) is None)) or \
           (len(args) > 1):
            job_usage()
            return
    # --status, --name, --group and a job id are mutually exclusive filters.
    if (jobStatus and jobName) or (jobStatus and jobId) or (jobId and jobName) or \
       (jobId and group) or (jobName and group) or (jobStatus and group):
        print(_getmsg("job_usage_error"))
        return
    status = ''
    message = ''
    statusFlag = False
    nameFlag = False
    groupFlag = False
    # The "&user=..." query fragments below were scrubbed in the original and
    # are reconstructed here around the `user` option parsed above.
    if len(jobStatus) > 0:
        status, message = getJobListInfo(
            'status=' + jobStatus + '&user=' + user + '&details=' + long + '&past=' + past)
        statusFlag = True
    elif len(jobName) > 0:
        status, message = getJobListInfo(
            'name=' + jobName + '&user=' + user + '&details=' + long + '&past=' + past)
        nameFlag = True
    elif len(group) > 0:
        status, message = getJobListInfo(
            'group=' + group + '&user=' + user + '&details=' + long + '&past=' + past)
        groupFlag = True
    if status != '':
        if status == 'ok':
            tree = ET.fromstring(message)
            jobs = tree.iter("Job")
            count = len(tree.findall('Job'))
            if count == 0:
                if statusFlag:
                    print(_getmsg("job_nomatch_status").format(jobStatus))
                elif nameFlag:
                    print(_getmsg("job_nomatch_name").format(jobName))
                elif groupFlag:
                    print(_getmsg("job_nomatch_group").format(group))
                return
            showJobinfo(jobs, long, count)
        else:
            print(message)
        return
    if len(jobId) > 0:
        status, message = getJobListInfo(
            'id=' + jobId + '&user=' + user + '&details=' + long + '&past=' + past)
        if status == 'ok':
            tree = ET.fromstring(message)
            jobs = tree.iter("Job")
            count = len(tree.findall('Job'))
            showJobinfo(jobs, long, count)
        else:
            print(message)
        return
    else:
        status, message = getJobListInfo(
            'user=' + user + '&details=' + long + '&past=' + past)
        if status == 'ok':
            tree = ET.fromstring(message)
            jobs = tree.iter("Job")
            count = len(tree.findall('Job'))
            showJobinfo(jobs, long, count)
        else:
            print(message)
        return
import urllib.request
import urllib.error
from datetime import datetime
from urllib.parse import quote

BASE_URL = "https://www.google.com/finance/historical?output=csv&q={0}&startdate=Jan+1%2C+1980&enddate={1}"
# urllib3 exposes neither quote() nor urlopen(); the standard-library
# urllib.parse / urllib.request equivalents are used instead.
symbol_url = BASE_URL.format(
    quote('GOOG'),  # Replace with any stock you are interested in.
    quote(datetime.now().strftime("%b+%d,+%Y"), '+'))

# Handle the case when the symbol is invalid.
try:
    f = urllib.request.urlopen(symbol_url)
    with open("GOOG.csv", 'w') as fout:
        fout.write(f.read().decode('utf-8'))
except urllib.error.HTTPError:
    print("Fetching Failed: {}".format(symbol_url))
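# For reference, quote() with safe='+' keeps the '+' separators this API
# expects while still escaping the comma; shown here with a fixed date:
from urllib.parse import quote
print(quote("Jul+04,+2017", '+'))  # -> Jul+04%2C+2017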
def get_house_percommunity(city, communityname):
    baseUrl = u"http://%s.lianjia.com/" % (city)
    # urllib3 has no quote(); use urllib.parse.quote for URL escaping.
    url = baseUrl + u"ershoufang/rs" + \
        urllib.parse.quote(communityname.encode('utf8')) + "/"
    source_code = misc.get_source_code(url)
    soup = BeautifulSoup(source_code, 'lxml')
    if check_block(soup):
        return
    total_pages = misc.get_total_pages(url)
    if total_pages is None:
        row = model.Houseinfo.select().count()
        raise RuntimeError("Finish at %s because total_pages is None" % row)
    for page in range(total_pages):
        if page > 0:
            url_page = baseUrl + \
                u"ershoufang/pg%drs%s/" % (page, urllib.parse.quote(communityname.encode('utf8')))
            source_code = misc.get_source_code(url_page)
            soup = BeautifulSoup(source_code, 'lxml')
        nameList = soup.findAll("li", {"class": "clear"})
        i = 0
        log_progress("GetHouseByCommunitylist", communityname, page + 1, total_pages)
        data_source = []
        hisprice_data_source = []
        for name in nameList:  # per house loop
            i = i + 1
            info_dict = {}
            try:
                housetitle = name.find("div", {"class": "title"})
                info_dict.update({u'title': housetitle.a.get_text().strip()})
                info_dict.update({u'link': housetitle.a.get('href')})
                houseaddr = name.find("div", {"class": "address"})
                info = houseaddr.div.get_text().split('|')
                info_dict.update({u'community': communityname})
                info_dict.update({u'housetype': info[1].strip()})
                info_dict.update({u'square': info[2].strip()})
                info_dict.update({u'direction': info[3].strip()})
                info_dict.update({u'decoration': info[4].strip()})
                housefloor = name.find("div", {"class": "flood"})
                floor_all = housefloor.div.get_text().split('-')[0].strip().split(' ')
                info_dict.update({u'floor': floor_all[0].strip()})
                info_dict.update({u'years': floor_all[-1].strip()})
                followInfo = name.find("div", {"class": "followInfo"})
                info_dict.update({u'followInfo': followInfo.get_text()})
                tax = name.find("div", {"class": "tag"})
                info_dict.update({u'taxtype': tax.get_text().strip()})
                totalPrice = name.find("div", {"class": "totalPrice"})
                info_dict.update({u'totalPrice': totalPrice.span.get_text()})
                unitPrice = name.find("div", {"class": "unitPrice"})
                info_dict.update({u'unitPrice': unitPrice.get('data-price')})
                info_dict.update({u'houseID': unitPrice.get('data-hid')})
            except Exception:
                # Skip listings whose markup is missing any expected field.
                continue
            # houseinfo insert into mysql
            data_source.append(info_dict)
            hisprice_data_source.append({
                "houseID": info_dict["houseID"],
                "totalPrice": info_dict["totalPrice"]
            })
            # model.Houseinfo.insert(**info_dict).upsert().execute()
            # model.Hisprice.insert(houseID=info_dict['houseID'], totalPrice=info_dict['totalPrice']).upsert().execute()
        with model.database.atomic():
            if data_source:
                model.Houseinfo.insert_many(data_source).upsert().execute()
            if hisprice_data_source:
                model.Hisprice.insert_many(hisprice_data_source).upsert().execute()
        time.sleep(1)
def get_rent_percommunity(city, communityname):
    baseUrl = u"http://%s.lianjia.com/" % (city)
    # urllib3 has no quote(); use urllib.parse.quote for URL escaping.
    url = baseUrl + u"zufang/rs" + \
        urllib.parse.quote(communityname.encode('utf8')) + "/"
    source_code = misc.get_source_code(url)
    soup = BeautifulSoup(source_code, 'lxml')
    if check_block(soup):
        return
    total_pages = misc.get_total_pages(url)
    if total_pages is None:
        row = model.Rentinfo.select().count()
        raise RuntimeError("Finish at %s because total_pages is None" % row)
    for page in range(total_pages):
        if page > 0:
            url_page = baseUrl + \
                u"rent/pg%drs%s/" % (page, urllib.parse.quote(communityname.encode('utf8')))
            source_code = misc.get_source_code(url_page)
            soup = BeautifulSoup(source_code, 'lxml')
        i = 0
        log_progress("GetRentByCommunitylist", communityname, page + 1, total_pages)
        data_source = []
        for ultag in soup.findAll("ul", {"class": "house-lst"}):
            for name in ultag.find_all('li'):
                i = i + 1
                info_dict = {}
                try:
                    housetitle = name.find("div", {"class": "info-panel"})
                    info_dict.update({u'title': housetitle.get_text().strip()})
                    info_dict.update({u'link': housetitle.a.get('href')})
                    houseID = housetitle.a.get('href').split("/")[-1].split(".")[0]
                    info_dict.update({u'houseID': houseID})
                    region = name.find("span", {"class": "region"})
                    info_dict.update({u'region': region.get_text().strip()})
                    zone = name.find("span", {"class": "zone"})
                    info_dict.update({u'zone': zone.get_text().strip()})
                    meters = name.find("span", {"class": "meters"})
                    info_dict.update({u'meters': meters.get_text().strip()})
                    other = name.find("div", {"class": "con"})
                    info_dict.update({u'other': other.get_text().strip()})
                    subway = name.find("span", {"class": "fang-subway-ex"})
                    if subway is None:
                        info_dict.update({u'subway': ""})
                    else:
                        info_dict.update({u'subway': subway.span.get_text().strip()})
                    decoration = name.find("span", {"class": "decoration-ex"})
                    if decoration is None:
                        info_dict.update({u'decoration': ""})
                    else:
                        info_dict.update({u'decoration': decoration.span.get_text().strip()})
                    heating = name.find("span", {"class": "heating-ex"})
                    info_dict.update({u'heating': heating.span.get_text().strip()})
                    price = name.find("div", {"class": "price"})
                    info_dict.update({u'price': int(price.span.get_text().strip())})
                    pricepre = name.find("div", {"class": "price-pre"})
                    info_dict.update({u'pricepre': pricepre.get_text().strip()})
                except Exception:
                    # Skip listings whose markup is missing any expected field.
                    continue
                # Rentinfo insert into mysql
                data_source.append(info_dict)
                # model.Rentinfo.insert(**info_dict).upsert().execute()
        with model.database.atomic():
            if data_source:
                model.Rentinfo.insert_many(data_source).upsert().execute()
        time.sleep(1)
def get_sell_percommunity(city, communityname):
    baseUrl = u"http://%s.lianjia.com/" % (city)
    # urllib3 has no quote(); use urllib.parse.quote for URL escaping.
    url = baseUrl + u"chengjiao/rs" + \
        urllib.parse.quote(communityname.encode('utf8')) + "/"
    source_code = misc.get_source_code(url)
    soup = BeautifulSoup(source_code, 'lxml')
    if check_block(soup):
        return
    total_pages = misc.get_total_pages(url)
    if total_pages is None:
        row = model.Sellinfo.select().count()
        raise RuntimeError("Finish at %s because total_pages is None" % row)
    for page in range(total_pages):
        if page > 0:
            url_page = baseUrl + \
                u"chengjiao/pg%drs%s/" % (page, urllib.parse.quote(communityname.encode('utf8')))
            source_code = misc.get_source_code(url_page)
            soup = BeautifulSoup(source_code, 'lxml')
        log_progress("GetSellByCommunitylist", communityname, page + 1, total_pages)
        data_source = []
        for ultag in soup.findAll("ul", {"class": "listContent"}):
            for name in ultag.find_all('li'):
                info_dict = {}
                try:
                    housetitle = name.find("div", {"class": "title"})
                    info_dict.update({u'title': housetitle.get_text().strip()})
                    info_dict.update({u'link': housetitle.a.get('href')})
                    houseID = housetitle.a.get('href').split("/")[-1].split(".")[0]
                    info_dict.update({u'houseID': houseID.strip()})
                    house = housetitle.get_text().strip().split(' ')
                    info_dict.update({u'community': communityname})
                    info_dict.update({u'housetype': house[1].strip() if 1 < len(house) else ''})
                    info_dict.update({u'square': house[2].strip() if 2 < len(house) else ''})
                    houseinfo = name.find("div", {"class": "houseInfo"})
                    info = houseinfo.get_text().split('|')
                    info_dict.update({u'direction': info[0].strip()})
                    info_dict.update({u'status': info[1].strip() if 1 < len(info) else ''})
                    housefloor = name.find("div", {"class": "positionInfo"})
                    floor_all = housefloor.get_text().strip().split(' ')
                    info_dict.update({u'floor': floor_all[0].strip()})
                    info_dict.update({u'years': floor_all[-1].strip()})
                    followInfo = name.find("div", {"class": "source"})
                    info_dict.update({u'source': followInfo.get_text().strip()})
                    totalPrice = name.find("div", {"class": "totalPrice"})
                    if totalPrice.span is None:
                        info_dict.update({u'totalPrice': totalPrice.get_text().strip()})
                    else:
                        info_dict.update({u'totalPrice': totalPrice.span.get_text().strip()})
                    unitPrice = name.find("div", {"class": "unitPrice"})
                    if unitPrice.span is None:
                        info_dict.update({u'unitPrice': unitPrice.get_text().strip()})
                    else:
                        info_dict.update({u'unitPrice': unitPrice.span.get_text().strip()})
                    dealDate = name.find("div", {"class": "dealDate"})
                    info_dict.update({u'dealdate': dealDate.get_text().strip().replace('.', '-')})
                except Exception:
                    # Skip listings whose markup is missing any expected field.
                    continue
                # Sellinfo insert into mysql
                data_source.append(info_dict)
                # model.Sellinfo.insert(**info_dict).upsert().execute()
        with model.database.atomic():
            if data_source:
                model.Sellinfo.insert_many(data_source).upsert().execute()
        time.sleep(1)
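# Same idea for the city-parameterised variants above: `city` is the lianjia
# subdomain (e.g. "bj" or "sh"), and the community name here is illustrative.
get_house_percommunity("bj", u"芍药居北里")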
def sendsong(msg):
    username = msg['from']['first_name']
    chat_id = msg['from']['id']
    command = msg['text']
    command = codecs.encode(command, 'utf-8')
    # User-facing bot messages translated from Russian.
    bot.sendMessage(chat_id, username + ', your song is on its way...')
    query = urllib.parse.quote(command)
    url = "https://www.youtube.com/results?search_query=" + query
    response = urllib.request.urlopen(url)
    html = response.read()
    soup = BeautifulSoup(html, "lxml")
    for vid in soup.findAll(attrs={'class': 'yt-uix-tile-link'}):
        VIDEO_URL = 'https://www.youtube.com' + vid['href']
        JSON_URL = BASE_URL + VIDEO_URL
        print('JSON URL : ' + JSON_URL)
        response = urllib.request.urlopen(JSON_URL)
        try:
            data = json.loads(response.read())
            print(data)
            if 'length' not in data:
                raise ValueError("No duration")
            if 'link' not in data:
                raise ValueError("No download link")
            if 'title' not in data:
                raise ValueError("No title")
            length = data['length']
            DOWNLOAD_URL = data['link']
            title = data['title']
            title = slugify(title)
            upload_file = path + title.lower() + '.mp3'
            if not os.path.exists(upload_file):
                bot.sendMessage(chat_id, 'Download started.')
                downloadSong(DOWNLOAD_URL, upload_file)
                file_size = checkFileSize(upload_file)
                if file_size < ONE_MB:
                    os.remove(upload_file)
                    continue
                bot.sendMessage(chat_id, SONG_SENT_MESSAGE)
                print('Download finished')
            else:
                print('File already exists')
            audio = open(upload_file, 'rb')
            bot.sendAudio(chat_id, audio, length, '', title)
            songdata = {
                'searchterm': command,
                'searchresult': title.lower(),
                'date': msg['date']
            }
            savedata(songdata, songdata_file_path)
            break
        except ValueError:
            bot.sendMessage(
                chat_id, 'Song not found. Try a different search query')
            break
    return
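# A minimal, hypothetical Telegram update for exercising sendsong() locally;
# the field names follow the message shape the handler reads above, and the
# values are made up. Note a real call still hits YouTube and the bot API.
fake_msg = {
    'from': {'first_name': 'Alice', 'id': 123456789},
    'text': 'Daft Punk - Get Lucky',
    'date': 1500000000,
}
sendsong(fake_msg)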