def extend_entry(entry, dataset):
    """Prepare a flattened entry dict for indexing (presumably a Solr
    document -- see the ISolrSearch hook below; confirm against the
    search backend).

    Mutates and returns ``entry``: tags it with the owning dataset,
    flattens nested keys, derives a globally unique ``_id``, coerces
    values to indexable text, and finally lets plugins post-process
    the document.
    """
    # Tag the entry with its dataset so search results can be scoped.
    entry['dataset'] = dataset.name
    entry['dataset.id'] = dataset.id
    # flatten() collapses nested structures into dotted keys
    # (project helper -- exact semantics defined elsewhere).
    entry = flatten(entry)
    # Unique document id: "<dataset>::<entry id>".
    entry['_id'] = dataset.name + '::' + unicode(entry['id'])
    # NOTE(review): new keys are inserted into `entry` inside this loop.
    # That is safe here only because Python 2's items() returns a list
    # snapshot; under Python 3 this would raise RuntimeError.
    for k, v in entry.items():
        # this is similar to json encoding, but not the same.
        if isinstance(v, datetime.datetime) and not v.tzinfo:
            # Naive datetimes are assumed to be UTC -- TODO confirm
            # that upstream producers really emit UTC.
            entry[k] = datetime.datetime(v.year, v.month, v.day,
                                         v.hour, v.minute, v.second,
                                         tzinfo=tzutc())
        elif '.' in k and isinstance(v, (list, tuple)):
            # Multi-valued nested fields become one space-joined string.
            entry[k] = " ".join([unicode(vi) for vi in v])
        else:
            # Everything else is coerced to text (project helper).
            entry[k] = safe_unicode(entry[k])
        if k.endswith(".name"):
            # Mirror "foo.name" under the bare key "foo".
            # Note this stores the ORIGINAL value `v`, not the
            # converted entry[k] -- presumably intentional; verify.
            vk = k[:len(k) - len(".name")]
            entry[vk] = v
        if k.endswith(".label"):
            # Extra copies used for string matching and faceting.
            entry[k + "_str"] = entry[k]
            entry[k + "_facet"] = entry[k]
    # Give search plugins a chance to rewrite the document.
    for item in PluginImplementations(ISolrSearch):
        entry = item.update_index(entry)
    return entry
def extend_entry(entry, dataset):
    """Turn ``entry`` into a flat, text-valued index document for
    ``dataset`` and return it.

    Adds dataset tags and a unique ``_id``, flattens nested keys,
    normalizes values to unicode (naive datetimes become UTC-aware),
    and mirrors ``*.name`` / ``*.label`` keys for lookup and faceting.
    ``*.taxonomy`` and ``*.color`` keys are left untouched.
    """
    entry['dataset'] = dataset.name
    entry['dataset.id'] = dataset.id
    entry = flatten(entry)
    entry['_id'] = dataset.name + '::' + unicode(entry['id'])
    # Iterate over a snapshot of the pairs; the dict grows below.
    for key, value in list(entry.items()):
        # Presentation-only fields are skipped entirely.
        if key.endswith((".taxonomy", '.color')):
            continue
        # this is similar to json encoding, but not the same.
        is_naive_dt = isinstance(value, datetime.datetime) and not value.tzinfo
        if is_naive_dt:
            # Re-build the timestamp with an explicit UTC zone.
            entry[key] = datetime.datetime(
                value.year, value.month, value.day,
                value.hour, value.minute, value.second,
                tzinfo=UTC())
        elif isinstance(value, (list, tuple)) and '.' in key:
            # Nested multi-valued field: join the items into one string.
            entry[key] = " ".join([unicode(item) for item in value])
        else:
            entry[key] = _safe_unicode(entry[key])
        if key.endswith(".name"):
            # Expose "foo.name" additionally under the bare key "foo",
            # keeping the original (unconverted) value.
            entry[key[:len(key) - len(".name")]] = value
        if key.endswith(".label"):
            # Label fields get a faceting twin.
            entry[key + "_facet"] = entry[key]
    return entry
def generate_csv_row(entry, generate_headers=True):
    """Render one entry as a CSV fragment and return it as a string.

    The entry is flattened first; container values are dropped,
    datetimes become ISO-8601 strings and floats are fixed to two
    decimals. When ``generate_headers`` is true a header line
    precedes the data row.
    """
    row = {}
    for key, value in flatten(entry).items():
        # Containers cannot be represented in a single cell.
        if isinstance(value, (list, tuple, dict)):
            continue
        if isinstance(value, datetime):
            value = value.isoformat()
        elif isinstance(value, float):
            value = u'%.2f' % value
        # UTF-8-encoded bytes for both key and value (Python 2 csv).
        row[unicode(key).encode('utf8')] = unicode(value).encode('utf8')
    fields = sorted(row.keys())
    buf = StringIO()
    writer = csv.DictWriter(buf, fields)
    if generate_headers:
        writer.writerow(dict((f, f) for f in fields))
    writer.writerow(row)
    return buf.getvalue()
def extend_entry(entry, dataset):
    """Build a flat, text-valued index document for ``dataset`` from
    ``entry`` and return it.

    Tags the entry with dataset name/id, flattens nested keys, derives
    a unique ``_id``, converts values to unicode (naive datetimes are
    made UTC-aware), and mirrors ``*.name`` / ``*.label`` keys.
    """
    entry['dataset'] = dataset.name
    entry['dataset.id'] = dataset.id
    entry = flatten(entry)
    entry['_id'] = dataset.name + '::' + unicode(entry['id'])
    # Snapshot the pairs up front; new keys are inserted while looping.
    for key, value in list(entry.items()):
        # this is similar to json encoding, but not the same.
        if isinstance(value, datetime.datetime) and not value.tzinfo:
            # Attach an explicit UTC zone to naive timestamps.
            entry[key] = datetime.datetime(
                value.year, value.month, value.day,
                value.hour, value.minute, value.second,
                tzinfo=UTC())
        elif isinstance(value, (list, tuple)) and '.' in key:
            # Multi-valued nested field: collapse into one string.
            entry[key] = " ".join([unicode(item) for item in value])
        else:
            entry[key] = _safe_unicode(entry[key])
        if key.endswith(".name"):
            # Also expose "foo.name" under the bare key "foo",
            # keeping the original (unconverted) value.
            entry[key[:len(key) - len(".name")]] = value
        if key.endswith(".label"):
            # Label fields get a faceting twin.
            entry[key + "_facet"] = entry[key]
    return entry
def generate_csv(entries):
    """Stream ``entries`` as CSV, yielding one string chunk per entry.

    Each entry is flattened; container values are skipped and datetimes
    are rendered in ISO-8601. A single header line is emitted before
    the first data row.

    Fix: the column set (and the ``csv.DictWriter``) is now pinned to
    the keys of the FIRST entry. Previously each row rebuilt ``fields``
    from its own keys, so entries with differing keys silently produced
    data rows whose columns no longer lined up with the header. Rows
    missing a pinned field now emit an empty cell (``restval=''``) and
    extra keys are dropped (``extrasaction='ignore'``); homogeneous
    input yields byte-identical output to before.
    """
    fields = None
    generate_headers = True
    for entry in entries:
        row = {}
        for k, v in flatten(entry).items():
            # Containers cannot be represented in a single CSV cell.
            if isinstance(v, (list, tuple, dict)):
                continue
            elif isinstance(v, datetime):
                v = v.isoformat()
            # UTF-8-encoded bytes for key and value (Python 2 csv).
            row[unicode(k).encode('utf8')] = unicode(v).encode('utf8')
        if fields is None:
            # Column order is fixed by the first entry, sorted for
            # determinism.
            fields = sorted(row.keys())
        sio = StringIO()
        writer = csv.DictWriter(sio, fields, restval='',
                                extrasaction='ignore')
        if generate_headers:
            writer.writerow(dict(zip(fields, fields)))
            generate_headers = False
        writer.writerow(row)
        yield sio.getvalue()
def generate_csv(entries):
    """Lazily render ``entries`` as CSV, one string chunk per entry.

    Every entry is flattened first; container values are dropped and
    datetimes are serialized to ISO-8601. The header line is emitted
    only with the first yielded chunk. Note each row derives its own
    column list from its own keys.
    """
    emit_header = True
    for entry in entries:
        cells = {}
        for key, value in flatten(entry).items():
            # A container cannot live in a single cell -- skip it.
            if isinstance(value, (list, tuple, dict)):
                continue
            if isinstance(value, datetime):
                value = value.isoformat()
            # UTF-8-encoded bytes for key and value (Python 2 csv).
            cells[unicode(key).encode('utf8')] = unicode(value).encode('utf8')
        columns = sorted(cells.keys())
        buf = StringIO()
        writer = csv.DictWriter(buf, columns)
        if emit_header:
            writer.writerow(dict((c, c) for c in columns))
            emit_header = False
        writer.writerow(cells)
        yield buf.getvalue()