Example #1
0
def render_non_html(encoding, querySet):
    '''Render ``querySet`` as a downloadable attachment and return the response.

    Supported encodings are ``'csv'``, ``'xml'`` and ``'json'``; each produces
    an ``HttpResponse`` with the matching MIME type and a
    ``Content-Disposition`` header naming the file ``genesets<count>.<ext>``.

    Raises:
        ValueError: if ``encoding`` is not one of the supported formats.
        (Previously an unknown encoding crashed with ``UnboundLocalError``
        because ``response`` was never assigned.)
    '''
    # NOTE(review): ``mimetype=`` was removed in Django 1.7 — kept here because
    # the surrounding project appears to target an older Django; confirm.
    if encoding == 'csv':
        vals = querySet.values()
        response = HttpResponse(mimetype='text/csv')
        response['Content-Disposition'] = \
                'attachment; filename=genesets%d.csv' % len(vals)
        csvW = DictWriter(response, GenesetFieldNames)
        # writeheader() emits the field-name row; replaces the old manual
        # {name: name} dict passed to writerow().
        csvW.writeheader()
        csvW.writerows(vals)
    elif encoding == 'xml':
        response = HttpResponse(mimetype='text/xml')
        response['Content-Disposition'] = \
                'attachment; filename=genesets%d.xml' % len(querySet)
        serializers.serialize("xml", querySet, stream=response)
    elif encoding == "json":
        response = HttpResponse(mimetype='application/json')
        response['Content-Disposition'] = \
                'attachment; filename=genesets%d.js' % len(querySet)
        serializers.serialize("json", querySet, stream=response)
    else:
        # Fail loudly instead of the former UnboundLocalError on return.
        raise ValueError("unsupported encoding: %r" % (encoding,))

    return response
Example #2
0
def main():
    """Copy the first ``--count`` rows of a CSV file to ``<stem>_trimmed_<count><ext>``.

    Command line:
        file     path to the source CSV (must exist and be readable)
        --count  number of data rows to keep (default 100, must be positive)

    Prints the trimmed file's contents to stdout when done.
    """
    prs = argparse.ArgumentParser()

    prs.add_argument('--count', type=int, default=100)

    # FileType replaces the Python-2-only builtin ``file``; argparse still
    # validates that the path exists and opens it for us.
    prs.add_argument('file', type=argparse.FileType('r'))

    args = prs.parse_args()

    count = args.count
    if count <= 0:
        # parser.error() instead of assert: asserts vanish under ``python -O``.
        prs.error('--count must be a positive integer')

    path = os.path.abspath(args.file.name)
    root, ext = os.path.splitext(path)
    new_path = '%s_trimmed_%s%s' % (root, count, ext)

    # Reuse the handle argparse opened instead of opening the path a second
    # time (the old second handle was never closed).
    with args.file as in_file:
        reader = DictReader(in_file)
        # islice stops cleanly on short files; the old next(reader) loop
        # raised StopIteration when the file had fewer than ``count`` rows.
        new_entries = list(islice(reader, count))
        fieldnames = reader.fieldnames

    with open(new_path, 'w') as new_file:
        # ``fieldnames`` is the standard csv.DictReader attribute
        # (``unicode_fieldnames`` was unicodecsv-specific).
        writer = DictWriter(new_file, fieldnames)
        writer.writeheader()
        writer.writerows(new_entries)

    with open(new_path) as trimmed:
        print(trimmed.read())
Example #3
0
def from_files(
        basedir="/Users/rikhoekstra/surfdrive/Shared/Documents/NIOD2017/International_MIgration",
        out_path='wileyrecs.csv'):
    """Parse RIS-like text files in ``basedir`` into dicts and write them to CSV.

    Each input file is split into records on blank lines (the part before the
    first blank line is skipped); within a record, lines of the form
    ``TAG - value`` are kept for the tags AU, TI, PY and JO.

    Args:
        basedir:  directory whose files are read (parameterized; the old
                  hard-coded path is kept as the default for compatibility).
        out_path: CSV file to write (default ``wileyrecs.csv``, as before).

    Returns:
        list of dicts, one per record, also written to ``out_path``.
    """
    fields = ['AU', 'TI', 'PY', 'JO']
    result = []

    for fl in os.listdir(basedir):
        # 'r' instead of the deprecated (removed in 3.11) 'rU' mode;
        # ``with`` closes the handle, which the original leaked.
        with open(os.path.join(basedir, fl), 'r') as infl:
            txt = infl.read()
        for r in txt.split("\n\n")[1:]:
            res = {}
            for line in r.split('\n'):
                item = line.split(' - ')
                if len(item) > 1:
                    key = item[0].strip()  # hoisted: was stripped twice
                    if key in fields:
                        res[key] = item[1].strip()
            result.append(res)

    with open(out_path, 'w') as flout:
        w = DictWriter(flout, fields)
        w.writeheader()
        w.writerows(result)
        print('written: ', flout.name)
    return result
Example #4
0
    def edr_export(self, request):
        """Export the EDRPOU records selected in the admin form as a CSV attachment.

        Reads record ids from the POSTed ``iswear`` list, resolves each through
        ``EDRPOU.get``, flattens the ``founders`` list into one cell, and
        streams the rows back as ``edr_<timestamp>.csv``. Redirects back to the
        search page with a message when nothing was selected/found.
        """
        data = []

        for rec_id in request.POST.getlist("iswear"):
            meta_id = request.POST.get("company_%s_id" % rec_id)
            res = EDRPOU.get(id=meta_id)
            if res:
                rec = res.to_dict()

                # founders arrive as a list; join so the CSV cell stays flat.
                if isinstance(rec.get("founders"), list):
                    rec["founders"] = ";;;".join(rec["founders"])
                data.append(rec)

        if not data:
            self.message_user(request, "Нічого експортувати")
            return redirect(reverse("admin:edr_search"))

        # Bug fix: records need not share a key set. DictWriter raises
        # ValueError on keys absent from ``fieldnames``, so use the
        # first-seen-order union of every record's keys instead of data[0].
        fieldnames = []
        seen = set()
        for rec in data:
            for key in rec:
                if key not in seen:
                    seen.add(key)
                    fieldnames.append(key)

        fp = StringIO()
        w = DictWriter(fp, fieldnames=fieldnames)
        w.writeheader()
        w.writerows(data)
        payload = fp.getvalue()
        fp.close()

        response = HttpResponse(payload, content_type="text/csv")

        response[
            "Content-Disposition"] = "attachment; filename=edr_{:%Y%m%d_%H%M}.csv".format(
                datetime.datetime.now())

        response["Content-Length"] = len(response.content)

        return response
Example #5
0
    def load_data(self, options):
        """Yield LDAP user records, optionally archiving them to a CSV file.

        When the ``__save_data__`` setting is truthy, forces full records,
        ensures ``./saved_data`` exists, buffers every user, writes them to
        ``./saved_data/ldap.csv`` (query fields first, then any unmapped
        fields after an ``'unmapped ->'`` marker column), and yields from the
        buffer. Otherwise streams users straight through from the query.
        """
        save_data = self.settings.get("__save_data__", False)

        if save_data:
            options['full_record'] = True
            try:
                os.makedirs("./saved_data")
                LOG.info("Saving data to %s.", os.path.abspath("./saved_data"))
            except OSError as exc:
                # Tolerate a pre-existing directory; re-raise anything else.
                if not (exc.errno == errno.EEXIST
                        and os.path.isdir("./saved_data")):
                    raise

        # Protocol 2 uses the plain queries; everything else goes paged.
        use_paged = self.settings['protocol_version'] != '2'
        if self.settings['group_dn']:
            users = (self.query_group_paged(options) if use_paged
                     else self.query_group(options))
        else:
            users = (self.query_objects_paged(options) if use_paged
                     else self.query_objects(options))

        if save_data:
            buffered = []
            all_keys = set()
            for user in users:
                # Not every user dict carries every field, so the key set is
                # accumulated across the whole result set.
                all_keys.update(user.keys())
                buffered.append(user)

            mapped = set(self.ldap_query_fields)
            unmapped = all_keys - mapped
            if unmapped:
                header = sorted(mapped) + ['unmapped ->'] + sorted(unmapped)
            else:
                header = sorted(mapped)

            with open('./saved_data/ldap.csv', 'w') as save_file:
                writer = DictUnicodeWriter(save_file, header)
                writer.writeheader()
                writer.writerows(buffered)

            users = buffered

        for user in users:
            yield user
Example #6
0
# Collect confirmed NACP declarations for every person linked to company 63
# and dump them to /tmp/mp_decls.csv.
res = []
for p2c in Person2Company.objects.filter(to_company_id=63).prefetch_related("from_person"):
    for d in Declaration.objects.filter(nacp_declaration=True, person=p2c.from_person, confirmed="a").order_by("year"):
        res.append({
            "name": p2c.from_person.full_name,
            "year": d.year,
            # strip the storage prefix once to recover the raw NACP id
            "id": d.declaration_id.replace("nacp_", "", 1)
        })


# Bug fixes: the original had broken (one-space) indentation on this block,
# indexed res[0] without guarding against an empty result, and never wrote
# the header row.
if res:
    with open("/tmp/mp_decls.csv", "w") as fp:
        from unicodecsv import DictWriter
        w = DictWriter(fp, fieldnames=res[0].keys())
        w.writeheader()
        w.writerows(res)
        if not isinstance(s2, dict):
            continue
        if s2.get("previous_firstname") or s2.get(
                "previous_lastname") or s2.get("previous_middlename"):
            changes.append({
                "person": d.person_id,
                "first_name": s2.get("firstname", ""),
                "patronymic": s2.get("middlename", ""),
                "last_name": s2.get("lastname", ""),
                "prev_first_name": s2.get("previous_firstname", ""),
                "prev_patronymic": s2.get("previous_middlename", ""),
                "prev_last_name": s2.get("previous_lastname", ""),
            })

    if step_1.get("previous_firstname") or step_1.get(
            "previous_lastname") or step_1.get("previous_middlename"):
        changes.append({
            "person": d.person_id,
            "first_name": d.first_name,
            "patronymic": d.patronymic,
            "last_name": d.last_name,
            "prev_first_name": step_1.get("previous_firstname", ""),
            "prev_patronymic": step_1.get("previous_middlename", ""),
            "prev_last_name": step_1.get("previous_lastname", ""),
        })

with open("/tmp/changed_names.csv", "w") as fp:
    w = DictWriter(fp, fieldnames=changes[0].keys())
    w.writeheader()
    w.writerows(changes)
Example #8
0
                        for d in t.getchildren():
                            row[d.tag] = d.text
                    else:
                        row[t.tag] = t.text
        for k in row.keys():
            if k not in fieldnames:
                del row[k]
        rows.append(row)
    return rows


# Parse every .xml file in ``outdir``: extract its <item> elements with
# parsefl() and collect the resulting row dicts.
outrows = []
for fl in os.listdir(outdir):
    if os.path.splitext(fl)[1] == '.xml':
        infl = os.path.join(outdir, fl)
        doc = etree.parse(infl)
        root = doc.getroot()
        items = [item for item in root if item.tag == 'item']
        result = parsefl(items)
        outrows.extend(result)

fieldnames = ['n', 'page', 'from', 'to', 'd', 'm', 'y']

outfl = os.path.join(outdir, 'heinbrieven.csv')

# ``with`` guarantees the file is closed even if writing fails; the original
# open()/close() pair leaked the handle on any exception.
with open(outfl, 'w') as out:
    w = DictWriter(out, fieldnames)
    w.writeheader()
    w.writerows(outrows)