def parse():
    """Interactive entry point: crawl a new-cars listing from auto.ria.com.

    Prompts the user for a listing URL, determines the number of result
    pages, then walks every page and hands each parsed car record to a
    FileSaver keyed by the car mark. Aborts early with an error message
    if the initial request does not return HTTP 200.
    """
    car_parser = CarParser()
    url = input('Введите URL с сайта auto.ria.com из раздела новые автомобили:').strip()

    # Initial fetch: used both to validate the URL and to discover
    # pagination and the car mark before the page loop starts.
    response = car_parser.get_html(url)
    if response.status_code != 200:
        print('Error')
        return

    pages_count = car_parser.get_pages_count(response.text)
    listing_soup = BeautifulSoup(response.text, 'html.parser')
    mark = car_parser.get_mark(listing_soup)
    saver = FileSaver(mark)

    for page in range(1, pages_count + 1):
        print(f'Парсинг страницы {page} из {pages_count}...')
        response = car_parser.get_html(url, params={'page': page})
        listing_soup = BeautifulSoup(response.text, 'html.parser')
        for new_car in car_parser.get_content(listing_soup):
            saver.save(new_car, mark)

    print(f'Файл записан')
def saveFile(self, text):
    """Write *text* to the currently tracked file location.

    Clears the unsaved-changes flag first, then delegates the actual
    write to a FileSaver constructed for the current file location.
    NOTE(review): the flag is cleared before the save runs, so a failed
    write still leaves the document marked as saved — confirm intent.
    """
    self._isTextUnsave = False
    FileSaver(self._currentFileLocation, text).save()
def save(self, py_object):
    """Persist *py_object* under a timestamped subfolder and return the result.

    Builds a folder path of the form ``<folder>/yy_mm_dd/HH_MM_SS`` from
    the current local time, then delegates serialization to a FileSaver
    configured with this object's file path, extension, and save method.
    Returns whatever ``FileSaver.save`` returns.
    """
    # Local wall-clock time; one subfolder per day, one leaf per second.
    stamp = datetime.datetime.now().strftime("%y_%m_%d/%H_%M_%S")
    target_dir = os.path.join(self._folder_path, stamp)
    saver = FileSaver(
        target_dir,
        self._file_path,
        self._file_extension,
        self._save_method,
    )
    return saver.save(py_object)