def get_doclist(start_idx):
	"""Assemble the doc dicts for one import record starting at row *start_idx*.

	Walks the data rows from start_idx onward. For each row that belongs to the
	current record (the first row, or any later row whose main-doc columns are
	empty per main_doc_empty), builds one dict per doctype in the import by
	mapping column indices to fieldnames, coercing Int/Check values with cint()
	and Float/Currency values with flt(). Child-table dicts (dt != doctype) are
	linked to the first doc via parent / parenttype / parentfield. Stops at the
	first row that starts a new main document.

	NOTE(review): relies on names from the enclosing scope — doctypes, rows,
	doctype, overwrite, main_doc_empty, column_idx_to_fieldname,
	column_idx_to_fieldtype, doctype_parentfield, cint, flt.

	Returns the list of doc dicts; returns None when doctypes is empty
	(placement of the return relative to `if doctypes:` is ambiguous in the
	whitespace-collapsed original — TODO confirm against upstream).
	"""
	if doctypes:
		doclist = []
		for idx in xrange(start_idx, len(rows)):
			# First row always belongs to this record; subsequent rows only
			# when their main-doc columns are blank (i.e. child rows).
			if (not len(doclist)) or main_doc_empty(rows[idx]):
				for dt in doctypes:
					d = {}
					for column_idx in column_idx_to_fieldname[dt]:
						try:
							fieldname = column_idx_to_fieldname[dt][column_idx]
							fieldtype = column_idx_to_fieldtype[dt][column_idx]
							d[fieldname] = rows[idx][column_idx]
							if fieldtype in ("Int", "Check"):
								d[fieldname] = cint(d[fieldname])
							elif fieldtype in ("Float", "Currency"):
								d[fieldname] = flt(d[fieldname])
						except IndexError:
							# Short row: this column is absent — leave unset.
							# (was `except IndexError, e` with `e` unused)
							pass
					# Keep the doc only if at least one value is truthy.
					# (was sum([0 if not val else 1 for val in d.values()]) —
					# same truth test, without building a throwaway list)
					if any(d.values()):
						d["doctype"] = dt
						if dt != doctype:
							# Child-table row: link back to the parent doc.
							if not overwrite:
								d["parent"] = doclist[0]["name"]
							d["parenttype"] = doctype
							d["parentfield"] = doctype_parentfield[dt]
						doclist.append(d)
			else:
				# Next main document begins at this row — stop collecting.
				break
		return doclist
def get_doclist(start_idx):
	"""Assemble the doc dicts for one import record starting at row *start_idx*.

	Duplicate variant of get_doclist: identical structure, but a doc is kept
	when at least one value differs from the empty string '' (so None counts
	as a value here, unlike the truthiness-based variant).

	Walks the data rows from start_idx onward; for each row belonging to the
	current record, maps column indices to fieldnames per doctype, coercing
	Int/Check via cint() and Float/Currency via flt(). Child-table dicts are
	linked to the first doc via parent / parenttype / parentfield. Stops at
	the first row that starts a new main document.

	NOTE(review): relies on names from the enclosing scope — doctypes, rows,
	doctype, overwrite, main_doc_empty, column_idx_to_fieldname,
	column_idx_to_fieldtype, doctype_parentfield, cint, flt.

	Returns the list of doc dicts; returns None when doctypes is empty
	(placement of the return relative to `if doctypes:` is ambiguous in the
	whitespace-collapsed original — TODO confirm against upstream).
	"""
	if doctypes:
		doclist = []
		for idx in xrange(start_idx, len(rows)):
			# First row always belongs to this record; subsequent rows only
			# when their main-doc columns are blank (i.e. child rows).
			if (not len(doclist)) or main_doc_empty(rows[idx]):
				for dt in doctypes:
					d = {}
					for column_idx in column_idx_to_fieldname[dt]:
						try:
							fieldname = column_idx_to_fieldname[dt][column_idx]
							fieldtype = column_idx_to_fieldtype[dt][column_idx]
							d[fieldname] = rows[idx][column_idx]
							if fieldtype in ("Int", "Check"):
								d[fieldname] = cint(d[fieldname])
							elif fieldtype in ("Float", "Currency"):
								d[fieldname] = flt(d[fieldname])
						except IndexError:
							# Short row: this column is absent — leave unset.
							# (was `except IndexError, e` with `e` unused)
							pass
					# Keep the doc if any value is not the empty string.
					# (was sum([0 if val == '' else 1 for val in d.values()])
					# — same truth test, without building a throwaway list)
					if any(val != '' for val in d.values()):
						d['doctype'] = dt
						if dt != doctype:
							# Child-table row: link back to the parent doc.
							if not overwrite:
								d['parent'] = doclist[0]["name"]
							d['parenttype'] = doctype
							d['parentfield'] = doctype_parentfield[dt]
						doclist.append(d)
			else:
				# Next main document begins at this row — stop collecting.
				break
		return doclist
# NOTE(review): whitespace-collapsed fragment — left byte-identical because the
# original line structure cannot be safely reconstructed (the opening `def` of
# the helper this `return` belongs to lies outside this view).
# It begins with the tail of a row-emptiness helper (presumably main_doc_empty:
# a row is "empty" when cells at index 1 and 2 are both blank — TODO confirm),
# then the flat setup section of a CSV data-import routine: read the uploaded
# CSV if rows were not passed in, split header rows from data rows, resolve the
# target doctype and its columns from header rows, disable submit_after_import
# for non-submittable doctypes, resolve parenttype/parentfield, refuse the
# import when the user lacks permission (unmuting emails first), and cap the
# number of rows via check_data_length(). Free names (rows, doctype, webnotes,
# data_keys, ...) come from the surrounding scope not visible here.
return not (row and ((len(row) > 1 and row[1]) or (len(row) > 2 and row[2]))) # header if not rows: rows = read_csv_content_from_uploaded_file(ignore_encoding_errors) start_row = get_start_row() header = rows[:start_row] data = rows[start_row:] doctype = get_header_row(data_keys.main_table)[1] columns = filter_empty_columns(get_header_row(data_keys.columns)[1:]) doctypes = [] doctype_parentfield = {} column_idx_to_fieldname = {} column_idx_to_fieldtype = {} if submit_after_import and not cint(webnotes.conn.get_value("DocType", doctype, "is_submittable")): submit_after_import = False parenttype = get_header_row(data_keys.parent_table) if len(parenttype) > 1: parenttype = parenttype[1] parentfield = get_parent_field(doctype, parenttype) # check permissions if not webnotes.permissions.can_import(parenttype or doctype): webnotes.flags.mute_emails = False return {"messages": [_("Not allowed to Import") + ": " + _(doctype)], "error": True} # allow limit rows to be uploaded check_data_length()
# NOTE(review): whitespace-collapsed fragment — left byte-identical because it
# starts mid-expression (the closing half of the row-emptiness test seen on the
# previous fragment) and its original line structure cannot be reconstructed.
# It is a near-duplicate variant of the same import setup: read the uploaded
# CSV if rows were not passed in, split header/data rows, resolve the target
# doctype and columns, disable submit_after_import for non-submittable
# doctypes, resolve parenttype/parentfield, cap the row count, build the
# column map, open a DB transaction, and fall back to params['overwrite'] when
# the overwrite flag was not given. Free names (rows, doctype, webnotes,
# params, data_keys, ...) come from the surrounding scope not visible here.
(len(row) > 2 and row[2]))) # header if not rows: rows = read_csv_content_from_uploaded_file(ignore_encoding_errors) start_row = get_start_row() header = rows[:start_row] data = rows[start_row:] doctype = get_header_row(data_keys.main_table)[1] columns = filter_empty_columns(get_header_row(data_keys.columns)[1:]) doctypes = [] doctype_parentfield = {} column_idx_to_fieldname = {} column_idx_to_fieldtype = {} if submit_after_import and not cint( webnotes.conn.get_value("DocType", doctype, "is_submittable")): submit_after_import = False parenttype = get_header_row(data_keys.parent_table) if len(parenttype) > 1: parenttype = parenttype[1] parentfield = get_parent_field(doctype, parenttype) # allow limit rows to be uploaded check_data_length() make_column_map() webnotes.conn.begin() if not overwrite: overwrite = params.get('overwrite')