async def fetch_category_shopping_trending_keywords(category_id, start_date: datetime.date, end_date: datetime.date):
    """Fetch the top (up to 100) trending shopping keywords for a Naver
    DataLab category over a date range.

    :param category_id: Naver shopping category id (the 'cid' form field).
    :param start_date: start of the reporting window.
    :param end_date: end of the reporting window.
    :return: the 'ranks' list from the DataLab JSON response.
    """
    headers = {
        'authority': 'datalab.naver.com',
        'sec-ch-ua': '" Not;A Brand";v="99", "Google Chrome";v="91", "Chromium";v="91"',
        'accept': '*/*',
        'x-requested-with': 'XMLHttpRequest',
        'sec-ch-ua-mobile': '?0',
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36',
        'content-type': 'application/x-www-form-urlencoded; charset=UTF-8',
        'origin': 'https://datalab.naver.com',
        'sec-fetch-site': 'same-origin',
        'sec-fetch-mode': 'cors',
        'sec-fetch-dest': 'empty',
        'referer': 'https://datalab.naver.com/shoppingInsight/sCategory.naver',
        'accept-language': 'ko-KR,ko;q=0.9,en-US;q=0.8,en;q=0.7',
    }
    form = {
        'cid': category_id,
        'timeUnit': 'date',
        'startDate': start_date.isoformat(),
        'endDate': end_date.isoformat(),
        'age': '',
        'gender': '',
        'device': '',
        'page': '1',
        'count': '100'
    }
    # BUG FIX: this is a coroutine, but it used blocking requests.post(),
    # which stalls the whole event loop for the duration of the HTTP call.
    # Use aiohttp like the other fetch_* coroutines in this module.
    async with aiohttp.ClientSession() as session:
        async with session.post('https://datalab.naver.com/shoppingInsight/getCategoryKeywordRank.naver',
                                headers=headers, data=form) as response:
            payload = await response.json(content_type=None)
    return payload['ranks']
async def fetch_cafe_post_published_count(keyword, start_date: datetime.date, end_date: datetime.date):
    """Return how many Naver Cafe posts matching *keyword* were published
    between *start_date* and *end_date* (dates sent as YYYYMMDD strings).
    """
    headers = {
        'authority': 'apis.naver.com',
        'sec-ch-ua': '" Not A;Brand";v="99", "Chromium";v="90", "Google Chrome";v="90"',
        'accept': 'application/json, text/plain, */*',
        'x-cafe-product': 'pc',
        'sec-ch-ua-mobile': '?0',
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36',
        'content-type': 'application/json;charset=UTF-8',
        'origin': 'https://cafe.naver.com',
        'sec-fetch-site': 'same-site',
        'sec-fetch-mode': 'cors',
        'sec-fetch-dest': 'empty',
        'referer': 'https://cafe.naver.com/ca-fe/home/search/articles?q=%EA%B0%80%EC%9C%84&pr=3',
        'accept-language': 'en',
    }
    search_body = json.dumps({
        "query": keyword,
        "page": 1,
        "sortBy": 0,
        "period": [start_date.strftime("%Y%m%d"), end_date.strftime("%Y%m%d")]
    }).encode('utf-8')
    async with aiohttp.ClientSession() as session:
        async with session.post('https://apis.naver.com/cafe-home-web/cafe-home/v1/search/articles',
                                headers=headers, data=search_body) as response:
            payload = await response.json()
    return int(payload['message']['result']['totalCount'])
async def fetch_keyword_graph_statistics(keyword, category_id, time_unit: TimeUnit, start_date: datetime.date, end_date: datetime.date):
    """Collect the DataLab chart series for one shopping keyword
    (used to draw graphs).

    :return: dict with 'clickTrend', 'genderRate' and 'ageRate' series.
    """
    headers = {
        'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.114 Safari/537.36',
        'referer': 'https://datalab.naver.com/shoppingInsight/sKeyword.naver',
        'origin': 'https://datalab.naver.com'
    }
    form = {
        'cid': category_id,                   # category id
        'timeUnit': time_unit,                # date / week / month
        'startDate': start_date.isoformat(),
        'endDate': end_date.isoformat(),
        'age': '',                            # e.g. '30,40' (decades, comma separated); '' = all
        'gender': '',                         # 'f', 'm', 'f,m' or '' = all
        'device': '',                         # 'pc', 'mo', 'pc,mo' or '' = all
        'keyword': keyword
    }
    # deviceRate (getKeywordDeviceRate.naver) was intentionally left out.
    endpoints = (
        ('clickTrend', 'https://datalab.naver.com/shoppingInsight/getKeywordClickTrend.naver'),
        ('genderRate', 'https://datalab.naver.com/shoppingInsight/getKeywordGenderRate.naver'),
        ('ageRate', 'https://datalab.naver.com/shoppingInsight/getKeywordAgeRate.naver'),
    )
    statistics = {}
    async with aiohttp.ClientSession() as session:
        for stat_name, endpoint in endpoints:
            async with session.post(endpoint, headers=headers, data=form) as response:
                payload = await response.json(content_type=None)
                statistics[stat_name] = payload['result'][0]['data']
    return statistics
def add_time(start_dt: datetime.date, delta, quantity):
    """Shift *start_dt* forward (or back) by *quantity* deltas.

    :param start_dt: base date (or datetime).
    :param delta: either a plain number of seconds per unit, or a
        ``(multiplier, unit)`` tuple where unit is 'month' or 'year';
        a falsy delta leaves start_dt unchanged (with a warning).
    :param quantity: number of deltas to apply; may be negative.
    :return: the shifted date; the day-of-month is clamped to the target
        month's length (e.g. Jan 31 + 1 month -> Feb 28).
    """
    if isinstance(delta, Number):
        return start_dt + timedelta(seconds=delta * quantity)
    if not delta:
        # BUG FIX: delta[0] used to be dereferenced before this guard, so a
        # None delta crashed with TypeError instead of reaching this fallback.
        log.warning('"None" timedelta supplied when adding time, '
                    'not adding any time')
        return start_dt
    quantity = delta[0] * quantity
    if delta[1] == 'month':
        years = int(quantity / 12)  # truncates toward zero, matching quantity's sign
        months = quantity - years * 12
        months_result = start_dt.month + months
        # months is in (-11, 11), so at most one wrap in either direction.
        if months_result < 1:
            years -= 1
            months_result += 12
        elif 12 < months_result:
            years += 1
            months_result -= 12
        years_result = start_dt.year + years
        last_calendar_day = calendar.monthrange(years_result, months_result)[1]
        return start_dt.replace(day=min(start_dt.day, last_calendar_day),
                                month=months_result, year=years_result)
    # else: delta[1] == 'year'
    years_result = start_dt.year + quantity
    # BUG FIX: clamp the day as well — a bare replace(year=...) raised
    # ValueError for Feb 29 shifted into a non-leap year.
    last_calendar_day = calendar.monthrange(years_result, start_dt.month)[1]
    return start_dt.replace(day=min(start_dt.day, last_calendar_day),
                            year=years_result)
async def fetch_blog_post_published_count(keyword, start_date: datetime.date, end_date: datetime.date):
    """Return how many Naver blog posts matching *keyword* were published
    between *start_date* and *end_date*.
    """
    headers = {
        'authority': 'section.blog.naver.com',
        'sec-ch-ua': '"Google Chrome";v="89", "Chromium";v="89", ";Not A Brand";v="99"',
        'accept': 'application/json, text/plain, */*',
        'sec-ch-ua-mobile': '?0',
        'user-agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/89.0.4389.128 Safari/537.36',
        'sec-fetch-site': 'same-origin',
        'sec-fetch-mode': 'cors',
        'sec-fetch-dest': 'empty',
        'referer': 'https://section.blog.naver.com/Search/Post.nhn?pageNo=1&rangeType=ALL&orderBy=sim&keyword=%ED%8F%BC%ED%81%B4%EB%A0%8C%EC%A7%95',
        'accept-language': 'ko-KR,ko;q=0.9,en-US;q=0.8,en;q=0.7',
    }
    params = {
        'countPerPage': '7',
        'currentPage': '1',
        'keyword': keyword,
        'orderBy': 'sim',
        'type': 'post',
        'startDate': start_date.isoformat(),
        'endDate': end_date.isoformat(),
    }
    async with aiohttp.ClientSession() as session:
        async with session.get('https://section.blog.naver.com/ajax/SearchList.nhn',
                               headers=headers, params=params) as response:
            body = await response.text()
    # The endpoint prepends 6 junk characters before the JSON payload; skip them.
    payload = json.loads(body[6:])
    return int(payload['result']['totalCount'])
def add_new_sheet_element(date: datetime.date, name: str, amount: float, start_range: str):
    """Append a [date, name, amount] row to the spreadsheet tab named after
    the date's month (e.g. 'March'), starting at *start_range*.

    :return: whatever the sheet service reports for the append operation.
    """
    sheet_tab = date.strftime("%B")
    row = [[date.strftime('%d/%m/%Y'), name, amount]]
    return SheetService().write_append_sheet(SPREADSHEET_ID, f'{sheet_tab}!{start_range}', row)
def get_beginning_and_end_of_month(date: datetime.date):
    """Get first and last day of a month.

    :param date: any date within the month of interest.
    :return: tuple (first_day, last_day) of that month.
    """
    _, days_in_month = calendar.monthrange(date.year, date.month)
    return date.replace(day=1), date.replace(day=days_in_month)
async def fetch_relative_ratio(keywords: List[str], start_date: datetime.date, end_date: datetime.date, time_unit: TimeUnit):
    """Fetch relative search-volume ratios for *keywords* from the Naver
    DataLab open API.

    Each keyword becomes its own single-keyword group; the API returns a
    per-period 'ratio' series (normalized so the max in range is 100),
    e.g. [{"period": "2021-05-01", "ratio": 93.98}, ...].

    :return: list of {'keyword': str, 'data': [{'period', 'ratio'}, ...]}.
    """
    # SECURITY NOTE(review): client id/secret are hard-coded here; they should
    # be moved to configuration or environment variables.
    headers = {
        'X-Naver-Client-Id': '8VcP69maRqven9qJWV1b',
        'X-Naver-Client-Secret': 'BH21bBnIJz'
    }
    body = {
        'startDate': start_date.isoformat(),
        'endDate': end_date.isoformat(),
        'timeUnit': time_unit.value,
        'keywordGroups': [
            {'groupName': keyword, 'keywords': [keyword]} for keyword in keywords
        ]
    }
    # BUG FIX: this is a coroutine, but it used blocking requests.post(),
    # which stalls the event loop; use aiohttp like the sibling coroutines.
    async with aiohttp.ClientSession() as session:
        async with session.post("https://openapi.naver.com/v1/datalab/search",
                                headers=headers, data=json.dumps(body)) as response:
            raw = (await response.json())['results']
    return [{'keyword': d['keywords'][0], 'data': d['data']} for d in raw]
def list_feriados(dataInicial: datetime.date, dataFinal: datetime.date):
    """List holiday ('feriado') entities between two dates, inclusive.

    Queries the Google Cloud Datastore FERIADOS kind, filtered and ordered
    by the ISO-formatted 'dt_feriado' property.

    :return: list of holiday entities converted via from_datastore.
    """
    client = get_client()
    query = client.query(kind=TipoEntidade.FERIADOS.value)
    query.add_filter('dt_feriado', '>=', dataInicial.isoformat())
    query.add_filter('dt_feriado', '<=', dataFinal.isoformat())
    query.order = ['dt_feriado']
    return builtin_list(map(from_datastore, query.fetch()))
def get_todoist_formatted_string_from_datetime(dt: datetime.date):
    """Returns a todoist API accepted string encoding for a given date/date-time.

    Implicitly returns None for any other input type.

    :param dt: a datetime or a date.
    :return: the formatted string, or None when *dt* is neither type.
    """
    logging.debug("ALIVE. got:{}".format(dt))
    # Check datetime before date: datetime is a subclass of date.
    if isinstance(dt, datetime):
        chosen_format = todoist_datetime_format
    elif isinstance(dt, date):
        chosen_format = todoist_date_format
    else:
        return None
    logging.debug("Formatting {} with {}".format(dt, chosen_format))
    return dt.strftime(chosen_format)
def _do_validate(self, validated_date: datetime.date) -> bool:
    """Check that *validated_date* matches the expected date pattern.

    A falsy value (e.g. None) is treated as valid, same as the original
    contract: validation only applies when a date is actually supplied.
    """
    if not validated_date:
        return True
    try:
        datetime.strptime(validated_date.isoformat(), self.__DATE_PATTERN)
    except ValueError:
        return False
    return True
def get_dogs_as_json(dogs_df: pd.DataFrame, report_date: datetime.date):
    """Serialize a dogs dataframe plus its report date to a JSON string.

    :param dogs_df: dataframe of dog records (serialized via DataFrame.to_dict()).
    :param report_date: report date or datetime; a plain date is interpreted
        as local midnight when converted to a unix timestamp.
    :return: JSON string with keys 'report_date' (unix timestamp) and 'dogs'.
    """
    from datetime import datetime as _datetime, time as _time

    if isinstance(report_date, _datetime):
        timestamp = report_date.timestamp()
    else:
        # BUG FIX: datetime.date has no .timestamp(), so the original raised
        # AttributeError for the annotated date type; promote to midnight.
        timestamp = _datetime.combine(report_date, _time.min).timestamp()
    dogs_dict = dogs_df.to_dict()
    return json.dumps({
        'report_date': timestamp,
        'dogs': dogs_dict
    })
def move_from_archive_to_emission(self, program, date: datetime.date): source = settings.ARCHIVE_SERVER_UPLOAD_DIRECTORY + program.normalized_name( ) + "/" + program.get_filename_for_date(date.strftime("%Y%m%d")) # Replacement is for scp destination = "\"\'" + settings.UPLOAD_SERVER_UPLOAD_DIRECTORY + program.get_filename_for_date( date.strftime("%Y%m%d")).replace(date.strftime("%Y%m%d"), "") + "\'\"" # command may error if there is no upload, but we don't care command = "scp,{}@{}:{},{}@{}:{}".format( settings.ARCHIVE_SERVER_USERNAME, settings.ARCHIVE_SERVER_IP, source, settings.UPLOAD_SERVER_USERNAME, settings.UPLOAD_SERVER_IP, destination) print("Running:" + command) subprocess.run(command.split(","))
def get_movies(self, session_date: datetime.date):
    """Return (id, title, duration) rows for movies with a session on
    *session_date*.

    The date is bound as a 'DD.MM.YYYY' string — presumably matching how
    session.date is stored in the database; confirm against the schema.
    """
    # NOTE(review): the f-prefix is unnecessary (no placeholders); the '?'
    # placeholder is bound by the DB layer, so this is parameterized safely.
    request = f"""
        SELECT movie.id, movie.title, movie.duration
        FROM movie
        INNER JOIN session ON movie.id = session.movie_title_id
        WHERE session.date = ?
    """
    return self.data_base.select_all(request, (session_date.strftime('%d.%m.%Y'),))
def crawl_library_group_room_availability(session: WebSession, date: datetime.date):
    """Fetch and parse the library group-room availability page for *date*."""
    form = {
        'submit:reservas:es': 'Ver+disponibilidade',
        'data': date.isoformat()
    }
    soup = session.get_broken_simplified_soup(urls.LIBRARY_GROUP_ROOMS, post_data=form)
    return parser.get_library_group_room_availability(soup)
def format_date(date: datetime.date) -> str:
    """ Convert date object to string with format 'DD-MM-YYYY'.

    :param date: Date as datetime object.
    :return: Date in string type.
    """
    # date.__format__ delegates to strftime, so this is the same formatting.
    return "{:%d-%m-%Y}".format(date)
def is_working_day(target: datetime.date) -> bool:
    """Return True when *target* is a working day, i.e. neither a weekend
    (Saturday/Sunday) nor a Japanese public holiday."""
    weekend = target.weekday() >= 5
    # Short-circuits: the holiday lookup is skipped on weekends.
    return not (weekend or jpholiday.is_holiday(target))
def list_indices(indexador: str, dataInicial: datetime.date, dataFinal: datetime.date):
    """List index values (e.g. IPCA) for one indexer between two dates, inclusive.

    Queries the Google Cloud Datastore INDICES kind, filtered by indexer type
    and the ISO-formatted 'dt_referencia' range, ordered by reference date.

    :return: list of index entities converted via from_datastore.
    """
    client = get_client()
    query = client.query(kind=TipoEntidade.INDICES.value)
    query.add_filter('tp_indice', '=', indexador)
    query.add_filter('dt_referencia', '>=', dataInicial.isoformat())
    query.add_filter('dt_referencia', '<=', dataFinal.isoformat())
    query.order = ['dt_referencia']
    return builtin_list(map(from_datastore, query.fetch()))
def read_weather_for_state_for_date(bucket: Bucket, bucket_raw_base_path: str, selected_state: str, date: datetime.date):
    """Load the gzipped weather JSON for one state and date from the bucket.

    :return: the parsed JSON document, or None when the blob does not exist.
    """
    date_key: str = date.strftime("%Y%m%d")
    blob_path = f"{bucket_raw_base_path.format(date=date_key)}/{selected_state}.json.gz"
    blob = bucket.blob(blob_path)
    try:
        return json.loads(gunzip_bytes(blob.download_as_string()))
    except NotFound:
        return None
def getting_shutter_open_time_info(
    start_date: datetime.date,
    end_date: datetime.date,
    dome_status_id: int,
    telescope_id: int,
) -> Tuple:
    """This function retrieves information from the dome_shutter_open_time
    table in the Dashboard database. The start date and end date give you a
    time range, with the start date inclusive and the end date exclusive
    from the query, meaning it doesn't include the final day.

    :param start_date: the starting date(inclusive) for your query
    :param end_date: the ending date(exclusive) for your query
    :param dome_status_id: The dome status id tells us if the dome is open
        or closed, with 1 representing OPEN and 2 representing CLOSED
    :param telescope_id: The id of each of the steerable telescopes to
        identify it based on its name
    :return: A tuple of the results from the database query
    """
    # Both bounds are anchored at 12:00 — presumably so a "day" spans one
    # noon-to-noon observing night; confirm against the table's convention.
    start_time = start_date.strftime("%Y-%m-%d") + " 12:00"
    end_time = end_date.strftime("%Y-%m-%d") + " 12:00"
    # DictCursor returns each row as a column-name -> value mapping.
    with connection.cursor(
            MySQLdb.cursors.DictCursor) as shutter_open_time_cursor:
        # Parameterized with %(name)s placeholders — bound by the driver,
        # never string-interpolated.
        mysql_statement = """SELECT converted_julian_date, telescope_id, dome_status_id FROM dome_shutter_open_time WHERE converted_julian_date >= %(start_date)s and converted_julian_date < %(end_date)s and dome_status_id= %(dome_status_id)s and telescope_id=%(telescope_id)s"""
        shutter_open_time_cursor.execute(
            mysql_statement,
            dict(
                start_date=start_time,
                end_date=end_time,
                dome_status_id=dome_status_id,
                telescope_id=telescope_id,
            ),
        )
        results = shutter_open_time_cursor.fetchall()
    return results
def part_due_date_analysis(self, part: Part, ref_date: datetime.date) -> DueDateAnalysis:
    """Memoized wrapper around DueDateValidator.part_status_on_date.

    Results are cached on the instance keyed by (DDMMYY, hash(part)), so the
    cache only lives as long as this object (hash() is not stable across runs).
    """
    cache_key = ref_date.strftime("%d%m%y") + str(hash(part))
    hit = self.parts_due_date_cache.get(cache_key)
    if hit is not None:
        return hit
    analysis = DueDateValidator.part_status_on_date(part, ref_date)
    self.parts_due_date_cache[cache_key] = analysis
    return analysis
def render_score_page(request, page: str, date: datetime.date, title: str):
    """Render generic score page.

    :param request: Django request; a valid POSTed DateForm redirects to
        that date's score page instead of rendering.
    :param page: template path to render.
    :param date: date whose games are listed.
    :param title: page title passed through to the template context.
    """
    games = Game.objects.filter(game_date=date.strftime("%b %d, %Y"))
    # Validate date input
    if request.method == 'POST':
        form = DateForm(request.POST)
        if form.is_valid():
            # BUG FIX: the original parsed form.data.get('date') BEFORE
            # is_valid(), so a missing/malformed date crashed in
            # parser.parse() instead of falling through to re-render.
            date_input = parser.parse(form.data.get('date')).date()
            return redirect('main:score', date=date_input.strftime("%m-%d-%Y"))
    context = {
        'title': title,
        'date': date.strftime("%b %d, %Y"),
        'games': games,
        'closest_date': Game.get_closest_game_date(date).strftime("%m-%d-%Y")
    }
    return render(request, page, context)
def is_first_working_day(today: datetime.date) -> bool:
    """Return True when *today* is the first working day of its month."""
    # Scan the month from the 1st up to and including today; the first
    # working day found decides the answer.
    for day in range(1, today.day + 1):
        if is_working_day(today.replace(day=day)):
            # Earliest working day this month — is it today itself?
            return day == today.day
    # Today is not even a working day (nor was any earlier day this month).
    return False
def sensible_date2str(x: datetime.date) -> str:
    """ Returns string of the form: '15-Dec-2012 [Saturday]' """
    # __format__ on a date delegates to strftime, so this is equivalent to
    # x.strftime("%d-%b-%Y [%A]").
    return "{:%d-%b-%Y [%A]}".format(x)
def component_due_date_analysis(
        self, component: Component, ref_date: datetime.date) -> DueDateAnalysis:
    """Memoized wrapper around DueDateValidator.component_status_on_date.

    Results are cached on the instance keyed by (DDMMYY, hash(component));
    hash() is per-process, so the cache is only valid within one run.
    """
    cache_key = ref_date.strftime("%d%m%y") + str(hash(component))
    hit = self.component_due_date_cache.get(cache_key)
    if hit is not None:
        return hit
    analysis = DueDateValidator.component_status_on_date(component, ref_date)
    self.component_due_date_cache[cache_key] = analysis
    return analysis
def order_cake(
    self,
    contact_id: int,
    address: str,
    delivery_date: datetime.date,
    content: str,
) -> int:
    """Create a cake-order lead in AmoCRM and return the new lead's id.

    https://www.amocrm.ru/developers/content/crm_platform/leads-api

    :param contact_id: existing AmoCRM contact to attach to the lead.
    :param address: delivery address (custom field).
    :param delivery_date: delivery date, sent as a unix timestamp
        (local midnight).
    :param content: cake description (product custom field).
    :return: id of the newly created lead.
    """
    import time  # local: only needed for the portable timestamp below

    product_field_id = 608539
    address_field_id = 608543
    order_date_field_id = 472649
    # BUG FIX: strftime("%s") is a non-portable glibc extension (absent on
    # e.g. Windows); compute the local-midnight epoch second explicitly —
    # time.mktime(timetuple()) matches what "%s" produced on glibc.
    order_timestamp = int(time.mktime(delivery_date.timetuple()))
    data = self._send_post(
        "api/v4/leads",
        [{
            "pipeline_id": 3428428,
            "created_by": self.ROBOT_ID,
            "custom_fields_values": [
                {
                    "field_id": product_field_id,
                    "values": [{
                        "value": content
                    }],
                },
                {
                    "field_id": address_field_id,
                    "values": [{
                        "value": address
                    }],
                },
                {
                    "field_id": order_date_field_id,
                    # send as unix timestamp
                    "values": [{
                        "value": order_timestamp
                    }],
                },
            ],
            "_embedded": {
                "contacts": [{
                    "id": contact_id,
                }]
            },
        }],
    )
    order_id = data["_embedded"]["leads"][0]["id"]
    return order_id
def get_previous_by_day(index: int, start_date: datetime.date = None):
    """Return the most recent date strictly before *start_date* that falls
    on weekday *index*.

    :param index: Monday 0 ~ Sunday 6
    :param start_date: anchor date; defaults to today when omitted.
    :return: the previous occurrence of that weekday (7 days back when the
        anchor itself is on that weekday).
    """
    anchor = datetime.today().date() if start_date is None else start_date
    offset = (7 + anchor.weekday() - index) % 7
    # An offset of 0 means "same weekday": go a full week back instead.
    return anchor - timedelta(days=offset or 7)
def __init__(self,
             repository: str,
             date: datetime.date = None,
             ports: Tuple[str] = ('443', ),
             cpu_cores: int = 1):
    """Initialize the dataset manager.

    :param repository: dataset repository identifier.
    :param date: dataset date; defaults to *today at call time*.
        (BUG FIX: the default was previously ``datetime.today().date()``
        evaluated once at import, so long-running processes silently kept
        the date the module was first loaded.)
    :param ports: port or tuple of ports; a bare string is wrapped.
    :param cpu_cores: number of CPU cores to use.
    """
    if date is None:
        date = datetime.today().date()
    self._repository = repository
    self._date = date
    self._ports = (ports, ) if isinstance(ports, str) else ports
    self._cpu_cores = cpu_cores
    # Compact YYYYMMDD form used as the dataset identifier.
    self.__date_id = date.strftime('%Y%m%d')
    log.info(
        'RapidDatasetManager initialized with repository=%s, date=%s, ports=%s',
        repository, date, ports)
def extract_raw_weather_for_county_for_date(base_url: str, api_key: str, selected_state: str, county: str, date: datetime.date):
    """Call the weather API for one county and date and return the parsed JSON.

    :raises RuntimeError: when the API responds with an error payload.
    """
    location = parse.quote(f"{county}, {selected_state}")
    request_url = base_url.format(key=api_key, q=location, dt=date.strftime('%Y-%m-%d'))
    result = requests.get(request_url).json()
    # Sanity check result
    if "error" in result:
        raise RuntimeError(
            f"Error encountered for {county}, {selected_state} on {date}: {result['error']['message']}"
        )
    return result
def build_html_for_crime_type(url: str, crime_type: str, filter_month: datetime.date, html_template_body: str):
    """Fill the HTML body template for one crime type's map.

    Returns None when *url* exceeds the Google static-maps URL size limit:
    https://developers.google.com/maps/documentation/maps-static/start#url-size-restriction

    :return: the formatted HTML body, or None when the URL is too long.
    """
    print(f'\n\n"{crime_type}"... has string length = {len(url)}')
    if len(url) > 8192:
        return None
    return html_template_body.format(
        filter_month=filter_month.strftime('%b-%Y'),
        crime_type=crime_type,
        url=url,
    )