def store_stock_reco_json():
	"""Backfill the `reconciliation_json` field of Stock Reconciliation records.

	For each Stock Reconciliation, locate its first attached file under
	public/files (also matching legacy filenames that had "-" stripped),
	parse it as CSV and store the rows as JSON in `reconciliation_json`.
	Records with no usable attachment, and files that are not valid CSV,
	are skipped silently (best-effort migration).
	"""
	import os
	import json
	from webnotes.utils.datautils import read_csv_content
	from webnotes.utils import get_base_path

	files_path = os.path.join(get_base_path(), "public", "files")
	list_of_files = os.listdir(files_path)
	# attachments were historically renamed with "-" removed; keep a
	# parallel list so both spellings can be matched below
	replaced_list_of_files = [f.replace("-", "") for f in list_of_files]

	for reco, file_list in webnotes.conn.sql("""select name, file_list
			from `tabStock Reconciliation`"""):
		if not file_list:
			continue

		# each file_list line looks like "<label>,<fileid>"; only the
		# first attachment is relevant.  Guard against malformed lines
		# without a comma (the original code raised IndexError here).
		first_line_parts = file_list.split("\n")[0].split(",")
		if len(first_line_parts) < 2:
			continue
		stock_reco_file = first_line_parts[1]

		stock_reco_file_path = os.path.join(files_path, stock_reco_file)
		if not os.path.exists(stock_reco_file_path):
			if stock_reco_file in replaced_list_of_files:
				# fall back to the original (dash-containing) filename
				stock_reco_file_path = os.path.join(files_path,
					list_of_files[replaced_list_of_files.index(stock_reco_file)])
			else:
				stock_reco_file_path = ""

		if stock_reco_file_path:
			with open(stock_reco_file_path, "r") as open_reco_file:
				content = open_reco_file.read()

			try:
				content = read_csv_content(content)
				reconciliation_json = json.dumps(content, separators=(',', ': '))
				webnotes.conn.sql("""update `tabStock Reconciliation`
					set reconciliation_json=%s where name=%s""",
					(reconciliation_json, reco))
			except Exception:
				# if not a valid CSV file, do nothing
				pass
def import_file_by_path(path, ignore_links=False, overwrite=False): from webnotes.utils.datautils import read_csv_content print "Importing " + path with open(path, "r") as infile: upload(rows=read_csv_content(infile.read()), ignore_links=ignore_links, overwrite=overwrite)
def read_csv_content(self, submit = 1):
	"""Return the CSV rows for this document.

	With submit truthy (default) the data comes from the attached file;
	otherwise it is parsed from the document's `diff_info` field.
	"""
	if submit:
		from webnotes.utils.datautils import read_csv_content_from_attached_file
		return read_csv_content_from_attached_file(self.doc)

	from webnotes.utils.datautils import read_csv_content
	return read_csv_content(self.doc.diff_info)
def find_charts():
	"""Scan for OpenERP-style localization (l10n_*) folders and collect
	chart-of-accounts data.

	NOTE(review): relies on module-level names not visible in this chunk:
	`path` (root directory to walk), `chart_roots` (list), `accounts`
	(dict), `charts` (dict), `cstr`, `ET` (presumably
	xml.etree.ElementTree -- confirm) and `read_csv_content`.
	"""
	print "finding charts..."
	for basepath, folders, files in os.walk(path):
		basename = os.path.basename(basepath)
		# localization packages are conventionally named l10n_<country>
		if basename.startswith("l10n"):
			for fname in files:
				fname = cstr(fname)
				filepath = os.path.join(basepath, fname)
				if fname.endswith(".xml"):
					tree = ET.parse(filepath)
					root = tree.getroot()
					# keep the whole XML root once any account-related
					# record is found; tag it with its source folder
					for node in root[0].findall("record"):
						if node.get("model") in [
							"account.account.template",
							"account.chart.template",
							"account.account.type"
						]:
							chart_roots.append(root)
							root.set("folder", basename)
							break
				if fname.endswith(".csv"):
					with open(filepath, "r") as csvfile:
						try:
							content = read_csv_content(csvfile.read())
						except Exception, e:
							# unreadable/invalid CSV -- skip this file
							continue
						# only process account CSVs (header starts with "id")
						if content[0][0] == "id":
							for row in content[1:]:
								data = dict(zip(content[0], row))
								account = {
									"name": data.get("name"),
									"parent_id": data.get("parent_id:id"),
									"children": []
								}
								accounts[data.get("id")] = account
								# a row without a parent is a chart's root account
								if not account.get("parent_id"):
									chart_id = data.get("chart_id:id")
									charts.setdefault(chart_id, {}).update(
										{"account_root_id": data.get("id")})
def store_stock_reco_json():
	"""Populate `reconciliation_json` on Stock Reconciliation records
	from their first attached CSV file under public/files."""
	import os
	import json
	from webnotes.utils.datautils import read_csv_content
	from webnotes.utils import get_base_path

	attachments_dir = os.path.join(get_base_path(), "public", "files")
	available = os.listdir(attachments_dir)
	# legacy copies of the same files exist with "-" removed from the name
	dashless = [n.replace("-", "") for n in available]

	for name, attached in webnotes.conn.sql("""select name, file_list
			from `tabStock Reconciliation`"""):
		if not attached:
			continue

		# first attachment line is "<label>,<fileid>"
		fname = attached.split("\n")[0].split(",")[1]
		fpath = os.path.join(attachments_dir, fname)

		if not os.path.exists(fpath):
			# try the dashless spelling; give up if neither exists
			if fname in dashless:
				fpath = os.path.join(attachments_dir,
					available[dashless.index(fname)])
			else:
				fpath = ""

		if not fpath:
			continue

		with open(fpath, "r") as reco_file:
			raw = reco_file.read()

		try:
			rows = read_csv_content(raw)
			webnotes.conn.sql("""update `tabStock Reconciliation`
				set reconciliation_json=%s where name=%s""",
				(json.dumps(rows, separators=(',', ': ')), name))
		except Exception:
			# if not a valid CSV file, do nothing
			pass
def find_charts(): print "finding charts..." for basepath, folders, files in os.walk(path): basename = os.path.basename(basepath) if basename.startswith("l10n"): for fname in files: fname = cstr(fname) filepath = os.path.join(basepath, fname) if fname.endswith(".xml"): tree = ET.parse(filepath) root = tree.getroot() for node in root[0].findall("record"): if node.get("model") in ["account.account.template", "account.chart.template", "account.account.type"]: chart_roots.append(root) root.set("folder", basename) break if fname.endswith(".csv"): with open(filepath, "r") as csvfile: try: content = read_csv_content(csvfile.read()) except Exception, e: continue if content[0][0]=="id": for row in content[1:]: data = dict(zip(content[0], row)) account = { "name": data.get("name"), "parent_id": data.get("parent_id:id"), "children": [] } accounts[data.get("id")] = account if not account.get("parent_id"): chart_id = data.get("chart_id:id") charts.setdefault(chart_id, {}).update({ "account_root_id": data.get("id")})
def import_file_by_path(path, ignore_links=False, overwrite=False): from webnotes.utils.datautils import read_csv_content print "Importing " + path with open(path, "r") as infile: upload(rows = read_csv_content(infile.read()), ignore_links=ignore_links, overwrite=overwrite)
def import_file_by_path(path): from webnotes.utils.datautils import read_csv_content print "Importing " + path with open(path, "r") as infile: upload(rows=read_csv_content(infile.read()))
def import_file_by_path(path): from webnotes.utils.datautils import read_csv_content print "Importing " + path with open(path, "r") as infile: upload(rows = read_csv_content(infile.read()))