def write_to_csv(fp, contents):
    """Write every row from *contents* to a per-search CSV output file.

    Parameters:
        fp: base output file name; resolved to a path via get_filepath_name.
        contents: mapping of brand name -> iterable of row records.
    """
    filepath = get_filepath_name(fp)
    # NOTE(review): `fieldname` is not defined anywhere in this file as shown --
    # confirm it is a module-level global (presumably the CSV header list).
    output_file_handle = CsvCreator(filepath, fieldname)
    # The brand keys are never used, so iterate only over the row lists.
    for rows in contents.values():
        for row in rows:
            output_file_handle.write_to_file(row)
def write_overall_result(contents):
    """Write all rows from *contents* into the configured overall output CSV.

    Parameters:
        contents: mapping of key -> iterable of row records; keys are ignored.
    """
    filepath = get_filepath_name(OUTPUT_FILE)
    # NOTE(review): `fieldname` is not defined anywhere in this file as shown --
    # confirm it is a module-level global (presumably the CSV header list).
    op = CsvCreator(filepath, fieldname)
    # Only the row lists matter, so skip the unused dict keys.
    for rows in contents.values():
        for row in rows:
            op.write_to_file(row)
import sqlite3
from contextlib import contextmanager
import csv

from config import OUTPUT_FILE
from utils import get_filepath_name, build_db_format, format_price

# Local SQLite database file the product rows are persisted into.
DB_FILE = 'products.db'
# Pre-resolved path for the overall output file.
FILEPATH = get_filepath_name(OUTPUT_FILE)

# Schema and statement constants for the products table.
CREATE_PRODUCTS_TABLE = """ CREATE TABLE products(product_id INTEGER PRIMARY KEY AUTOINCREMENT, brand_name VARCHAR(30), title text, price float, aggregateRating VARCHAR(7), image_url text, description text, url_link text );"""
DROP_PRODUCTS_TABLE = """DROP TABLE IF EXISTS products;"""
INSERT_INTO_PRODUCTS_STATEMENT = """INSERT INTO products(brand_name,title,price,aggregateRating,image_url,description,url_link) VALUES (?,?,?,?,?,?,?);"""


@contextmanager
def get_connection():
    """Yield a sqlite3 connection to DB_FILE, committing on clean exit.

    The connection is always closed, even when the managed block raises;
    commit happens only on the success path, so a failed transaction is
    not persisted. (The original committed/closed only when no exception
    occurred, leaking the connection on error.)
    """
    connection = sqlite3.connect(DB_FILE)
    try:
        yield connection
        connection.commit()
    finally:
        connection.close()
def test_get_filepath_name():
    """get_filepath_name should join a directory and a file name with '/'.

    NOTE(review): this calls get_filepath_name with two arguments while the
    module-level code uses one -- presumably the second has a default; verify.
    """
    expected = 'home/lf/rishi/searchfile'
    assert get_filepath_name('home/lf/rishi', 'searchfile') == expected
def write_to_json(fp, contents):
    """Serialize *contents* as JSON to '<resolved fp>.json'.

    Parameters:
        fp: base output file name; resolved via get_filepath_name.
        contents: any json-serializable object.
    """
    # Function-scope import: `json` is not imported at module level in the
    # visible file, which would raise NameError at call time.
    import json

    filepath = get_filepath_name(fp) + '.json'
    with open(filepath, 'w') as json_file:
        json.dump(contents, json_file)
def write_to_yaml(fp, contents):
    """Dump *contents* as YAML into '<resolved fp>.yaml'."""
    target = get_filepath_name(fp) + '.yaml'
    with open(target, 'w') as out:
        yaml.dump(contents, out)
def write_empty_result_sets(empty_search_result_list):
    """Write the search terms that produced no results as one quoted CSV row.

    Parameters:
        empty_search_result_list: iterable of search terms; written as a
            single row with every field quoted.
    """
    filepath = get_filepath_name('empty_result.csv')
    # newline='' is required by the csv module so it controls line endings
    # itself; without it, rows are mangled on platforms that translate '\n'.
    with open(filepath, mode='w', newline='') as wr:
        writer = csv.writer(wr, quoting=csv.QUOTE_ALL)
        writer.writerow(empty_search_result_list)