Example #1
 def test_unicode_csv_dict_reader(self):
     #@todo cleanup
     fp = tempfile.NamedTemporaryFile(delete=True)
     fp.write((
         'abc,xyz\n' +
         # \x9d is some control char - will be ignored??
         'blah\x9d,\x93windows quotes\x94\n'))
     fp.seek(0)
     rows = list(unicode_csv_dict_reader(fp))
     # cp1252 quotes get translated to unicode
     self.assertEqual(u'\u201cwindows quotes\u201d', rows[0]['xyz'])
     fp = tempfile.NamedTemporaryFile(delete=True)
     fp.write((u'abc,xyz\n' +
               u'blah,\u201cunicode quotes\u201d\n').encode('utf-8'))
     fp.seek(0)
     rows = list(unicode_csv_dict_reader(fp))
     self.assertEqual(u'\u201cunicode quotes\u201d', rows[0]['xyz'])
Example #2
 def test_unicode_csv_dict_reader(self):
     #@todo cleanup
     fp = tempfile.NamedTemporaryFile(delete=True)
     fp.write((
         'abc,xyz\n' +
         # \x9d is some control char - will be ignored??
         'blah\x9d,\x93windows quotes\x94\n'
     ))
     fp.seek(0)
     rows = list(unicode_csv_dict_reader(fp))
     # cp1252 quotes get translated to unicode
     self.assertEqual(u'\u201cwindows quotes\u201d', rows[0]['xyz'])
     fp = tempfile.NamedTemporaryFile(delete=True)
     fp.write((
         u'abc,xyz\n' +
         u'blah,\u201cunicode quotes\u201d\n'
     ).encode('utf-8'))
     fp.seek(0)
     rows = list(unicode_csv_dict_reader(fp))
     self.assertEqual(u'\u201cunicode quotes\u201d', rows[0]['xyz'])
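The helper under test, unicode_csv_dict_reader, is not shown in this listing. Below is a minimal sketch of what it could look like, assuming only the behaviour the tests above rely on: UTF-8 input is accepted as-is, byte strings that are not valid UTF-8 fall back to cp1252 (which maps \x93/\x94 to the curly quotes U+201C/U+201D and, with errors='ignore', drops undefined bytes such as \x9d), and either a file-like object or a plain string may be passed in. This is an illustrative implementation, not the project's actual one.

import csv
from StringIO import StringIO


def unicode_csv_dict_reader(data):
    # hypothetical sketch - decodes raw CSV bytes and yields dicts of unicode values
    if isinstance(data, str):
        # the tests pass both file objects and raw response bodies
        data = StringIO(data)

    def decoded_lines():
        for line in data:
            try:
                text = line.decode('utf-8')
            except UnicodeDecodeError:
                # cp1252 maps \x93/\x94 to U+201C/U+201D; errors='ignore'
                # silently drops bytes with no cp1252 mapping (e.g. \x9d)
                text = line.decode('cp1252', 'ignore')
            # Python 2's csv module wants byte strings, so feed it UTF-8
            # and decode the parsed values back to unicode afterwards
            yield text.encode('utf-8')

    for row in csv.DictReader(decoded_lines()):
        yield dict((k, v.decode('utf-8')) for k, v in row.items())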
Example #3
    def test_csv_upload(self):
        '''test csv upload with update and insert'''

        #@todo cleanup and break out into simpler cases

        self.make_annotations(self.dummy, 2)

        header = u"id,title,content,lat,lon,start_time,end_time,appearance\n"

        # first row is insert, second update (as it has an id)
        fp = tempfile.NamedTemporaryFile(delete=True)
        fp.write((
            header +
            u'"",foo bar,blah,5,10,2001/01/01,2005\n'
            u"1,bar foo,halb,10,20,2010-01-01,,\n"
            u"2,bunk,\u201c,20,30,,,"
        ).encode('utf-8'))
        fp.seek(0)
        # verify failure before login
        resp = self.c.post(reverse('annotations',args=[self.dummy.id]),{'csv':fp})
        self.assertEqual(403, resp.status_code)
        # still only 2 annotations
        self.assertEqual(2, Annotation.objects.filter(map=self.dummy.id).count())

        # login, rewind the buffer and verify
        self.c.login(username='******',password='******')
        fp.seek(0)
        resp = self.c.post(reverse('annotations',args=[self.dummy.id]),{'csv':fp})
        # response type must be text/html for ext fileupload
        self.assertEqual('text/html', resp['content-type'])
        jsresp = json.loads(resp.content)
        self.assertEqual(True, jsresp['success'])
        ann = Annotation.objects.filter(map=self.dummy.id)
        # we uploaded 3, the other 2 should be deleted (overwrite mode)
        self.assertEqual(3, ann.count())
        ann = Annotation.objects.get(title='bar foo')
        self.assertEqual(ann.the_geom.x, 20.)
        ann = Annotation.objects.get(title='bunk')
        self.assertEqual(u'\u201c', ann.content)
        ann = Annotation.objects.get(title='foo bar')
        self.assertEqual('foo bar', ann.title)
        self.assertEqual(ann.the_geom.x, 10.)

        resp = self.c.get(reverse('annotations',args=[self.dummy.id]) + "?csv")
        x = list(unicode_csv_dict_reader(resp.content))
        self.assertEqual(3, len(x))
        by_title = dict( [(v['title'],v) for v in x] )
        # verify round trip of unicode quote
        self.assertEqual(u'\u201c', by_title['bunk']['content'])
        # and times
        self.assertEqual('2010-01-01T00:00:00', by_title['bar foo']['start_time'])
        self.assertEqual('2001-01-01T00:00:00', by_title['foo bar']['start_time'])
        self.assertEqual('2005-01-01T00:00:00', by_title['foo bar']['end_time'])

        # verify windows codepage quotes
        fp = tempfile.NamedTemporaryFile(delete=True)
        fp.write((
            str(header) +
            ',\x93windows quotes\x94,yay,,,,'
        ))
        fp.seek(0)
        resp = self.c.post(reverse('annotations',args=[self.dummy.id]),{'csv':fp})
        ann = Annotation.objects.get(map=self.dummy.id)
        # windows quotes are unicode now
        self.assertEqual(u'\u201cwindows quotes\u201d', ann.title)

        # make sure a bad upload aborts the transaction (and prevents dropping existing)
        fp = tempfile.NamedTemporaryFile(delete=True)
        fp.write((
            str(header) * 2
        ))
        fp.seek(0)
        resp = self.c.post(reverse('annotations',args=[self.dummy.id]),{'csv':fp})
        self.assertEqual(400, resp.status_code)
        # only the annotation uploaded earlier should remain
        ann = Annotation.objects.get(map=self.dummy.id)
        self.assertEqual('yay', ann.content)
Example #4
def annotations(req, mapid):
    '''management of annotations for a given mapid'''
    #todo cleanup and break apart
    if req.method == 'GET':
        cols = [
            'title', 'content', 'start_time', 'end_time', 'in_map',
            'in_timeline', 'appearance'
        ]

        mapobj = _resolve_object(req,
                                 models.Map,
                                 'maps.view_map',
                                 allow_owner=True,
                                 id=mapid)
        ann = models.Annotation.objects.filter(map=mapid)
        ann = ann.order_by('start_time', 'end_time')
        if bool(req.GET.get('in_map', False)):
            ann = ann.filter(in_map=True)
        if bool(req.GET.get('in_timeline', False)):
            ann = ann.filter(in_timeline=True)
        if 'page' in req.GET:
            page = int(req.GET['page'])
            page_size = 25
            start = page * page_size
            end = start + page_size
            ann = ann[start:end]
        if 'csv' in req.GET:
            response = HttpResponse(mimetype='text/csv')
            response['Content-Disposition'] = (
                'attachment; filename=map-%s-annotations.csv' % mapobj.id)
            response['Content-Encoding'] = 'utf-8'
            writer = csv.writer(response)
            writer.writerow(cols)
            sidx = cols.index('start_time')
            eidx = cols.index('end_time')
            # default csv writer chokes on unicode
            encode = lambda v: v.encode('utf-8') if isinstance(v, basestring) else str(v)
            get_value = lambda a, c: getattr(a, c) if c not in ('start_time', 'end_time') else ''
            for a in ann:
                vals = [encode(get_value(a, c)) for c in cols]
                vals[sidx] = a.start_time_str
                vals[eidx] = a.end_time_str
                writer.writerow(vals)
            return response
        # strip the superfluous id, it will be added at the feature level
        props = [c for c in cols if c != 'id']

        def encoder(query_set):
            results = []
            for res in query_set:
                feature = {'id': res.id}
                if res.the_geom:
                    geometry = {}
                    geometry['type'] = res.the_geom.geom_type
                    geometry['coordinates'] = res.the_geom.coords
                    feature['geometry'] = geometry
                fp = feature['properties'] = {}
                for p in props:
                    val = getattr(res, p)
                    if val is not None:
                        fp[p] = val
                results.append(feature)
            return results

        return json_response({
            'type': 'FeatureCollection',
            'features': encoder(ann)
        })

    if req.method == 'POST':
        mapobj = _resolve_object(req,
                                 models.Map,
                                 'maps.change_map',
                                 allow_owner=True,
                                 id=mapid)
        # either a bulk upload or a JSON change
        action = 'upsert'
        get_props = lambda r: r
        finish = lambda: None
        created = []
        form_mode = 'client'
        content_type = None
        overwrite = False
        error_format = None

        def id_collector(form):
            created.append(form.instance.id)

        if req.FILES:
            fp = iter(req.FILES.values()).next()
            # ugh, builtin csv reader chokes on unicode
            data = unicode_csv_dict_reader(fp)
            id_collector = lambda f: None
            form_mode = 'csv'
            content_type = 'text/html'
            ids = list(
                models.Annotation.objects.filter(map=mapobj).values_list(
                    'id', flat=True))
            # delete existing, we overwrite
            finish = lambda: models.Annotation.objects.filter(id__in=ids).delete()
            overwrite = True

            def error_format(row_errors):
                response = []
                for re in row_errors:
                    row = re[0] + 1
                    for e in re[1]:
                        response.append('[%s] %s : %s' % (row, e, re[1][e]))
                return 'The following rows had problems:<ul><li>' + '</li><li>'.join(
                    response) + "</li></ul>"
        else:
            data = json.loads(req.body)
            if isinstance(data, dict):
                action = data.get('action', action)
            if 'features' in data:
                data = data.get('features')
                get_props = lambda r: r['properties']

        if action == 'delete':
            models.Annotation.objects.filter(pk__in=data['ids'],
                                             map=mapobj).delete()
            return json_response({'success': True})

        errors = []
        i = None
        for i, r in enumerate(data):
            props = get_props(r)
            props['map'] = mapobj.id
            ann = None
            id = r.get('id', None)
            if id and not overwrite:
                ann = models.Annotation.objects.get(map=mapobj, pk=id)

            # form expects everything in the props, copy geometry in
            if 'geometry' in r:
                props['geometry'] = r['geometry']
            props.pop('id', None)
            form = AnnotationForm(props, instance=ann, form_mode=form_mode)
            if not form.is_valid():
                errors.append((i, form.errors))
            else:
                form.save()
            if id is None:
                id_collector(form)
        if i is None:
            errors = [(0, 'No data could be read')]
        if errors:
            body = None
            if error_format:
                return HttpResponse(error_format(errors), status=400)
        else:
            finish()
            body = {'success': True}
            if created:
                body['ids'] = created
        return json_response(body=body,
                             errors=errors,
                             content_type=content_type)

    return HttpResponse(status=400)
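Besides the CSV file upload exercised in the tests, the POST branch above also accepts JSON bodies for bulk delete and for feature upserts. A hypothetical usage sketch with Django's test client follows; the map id, annotation ids and field values are illustrative, not taken from the project's test suite.

import json
from django.core.urlresolvers import reverse
from django.test import Client

mapid = 1  # illustrative map id
c = Client()
c.login(username='******', password='******')
url = reverse('annotations', args=[mapid])

# bulk delete: a dict with action='delete' and the annotation ids to remove
c.post(url, json.dumps({'action': 'delete', 'ids': [1, 2]}),
       content_type='application/json')

# upsert: a FeatureCollection-style body; a feature without an id is inserted,
# a feature with an id updates that existing annotation
payload = {
    'features': [{
        'id': 3,  # assumes annotation 3 already exists on this map
        'geometry': {'type': 'Point', 'coordinates': [10, 20]},
        'properties': {'title': 'updated title', 'content': 'updated text'},
    }]
}
c.post(url, json.dumps(payload), content_type='application/json')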
Example #5
def annotations(req, mapid):
    '''management of annotations for a given mapid'''
    #todo cleanup and break apart
    if req.method == 'GET':
        cols = [ f.name for f in models.Annotation._meta.fields if f.name not in ('map','the_geom') ]

        mapobj = _resolve_object(req, models.Map, 'maps.view_map',
                                 allow_owner=True, id=mapid)
        ann = models.Annotation.objects.filter(map=mapid)
        if bool(req.GET.get('in_map', False)):
            ann = ann.filter(in_map=True)
        if bool(req.GET.get('in_timeline', False)):
            ann = ann.filter(in_timeline=True)
        if 'page' in req.GET:
            page = int(req.GET['page'])
            page_size = 25
            start = page * page_size
            end = start + page_size
            ann = ann[start:end]
        if 'csv' in req.GET:
            response = HttpResponse(mimetype='text/csv')
            response['Content-Disposition'] = 'attachment; filename=map-%s-annotations.csv' % mapobj.id
            response['Content-Encoding'] = 'utf-8'
            writer = csv.writer(response)
            cols.remove('id')
            writer.writerow(cols)
            sidx = cols.index('start_time')
            eidx = cols.index('end_time')
            # default csv writer chokes on unicode
            encode = lambda v: v.encode('utf-8') if isinstance(v, basestring) else v
            for a in ann:
                vals = [ encode(getattr(a, c)) if c not in ('start_time','end_time') else '' for c in cols ]
                vals[sidx] = a.start_time_str
                vals[eidx] = a.end_time_str
                writer.writerow(vals)
            return response
        # strip the superfluous id, it will be added at the feature level
        props = [ c for c in cols if c != 'id' ]
        def encoder(query_set):
            results = []
            for res in query_set:
                feature = { 'id' : res.id}
                if res.the_geom:
                    geometry = {}
                    geometry['type'] = res.the_geom.geom_type
                    geometry['coordinates'] = res.the_geom.coords
                    feature['geometry'] = geometry
                fp = feature['properties'] = {}
                for p in props:
                    val = getattr(res, p)
                    if val is not None:
                        fp[p] = val
                results.append(feature)
            return results

        return json_response({'type':'FeatureCollection','features':encoder(ann)})

    if req.method == 'POST':
        mapobj = _resolve_object(req, models.Map, 'maps.change_map',
                                 allow_owner=True, id=mapid)
        # either a bulk upload or a JSON change
        action = 'upsert'
        get_props = lambda r: r
        finish = lambda: None
        created = []
        form_mode = 'client'
        content_type = None
        overwrite = False
        error_format = None

        def id_collector(form):
            created.append(form.instance.id)

        if req.FILES:
            fp = iter(req.FILES.values()).next()
            # ugh, builtin csv reader chokes on unicode
            data = unicode_csv_dict_reader(fp)
            id_collector = lambda f: None
            form_mode = 'csv'
            content_type = 'text/html'
            ids = list(models.Annotation.objects.filter(map=mapobj).values_list('id', flat=True))
            # delete existing, we overwrite
            finish = lambda: models.Annotation.objects.filter(id__in=ids).delete()
            overwrite = True
            def error_format(row_errors):
                response = []
                for re in row_errors:
                    row = re[0] + 1
                    for e in re[1]:
                        response.append('[%s] %s : %s' % (row, e, re[1][e]))
                return 'The following rows had problems:<ul><li>' + '</li><li>'.join(response) + "</li></ul>"
        else:
            data = json.loads(req.body)
            if isinstance(data, dict):
                action = data.get('action', action)
            if 'features' in data:
                data = data.get('features')
                get_props = lambda r: r['properties']

        if action == 'delete':
            models.Annotation.objects.filter(pk__in=data['ids'], map=mapobj).delete()
            return json_response({'success' : True})

        errors = []
        i = None
        for i,r in enumerate(data):
            props = get_props(r)
            props['map'] = mapobj.id
            ann = None
            id = r.get('id', None)
            if id and not overwrite:
                ann = models.Annotation.objects.get(map=mapobj, pk=id)

            # form expects everything in the props, copy geometry in
            if 'geometry' in r:
                props['geometry'] = r['geometry']
            props.pop('id', None)
            form = AnnotationForm(props, instance=ann, form_mode=form_mode)
            if not form.is_valid():
                errors.append((i, form.errors))
            else:
                form.save()
            if id is None:
                id_collector(form)
        if i is None:
            errors = [ (0, 'No data could be read')]
        if errors:
            body = None
            if error_format:
                return HttpResponse(error_format(errors), status=400)
        else:
            finish()
            body = {'success' : True}
            if created:
                body['ids'] = created
        return json_response(body=body, errors=errors, content_type=content_type)

    return HttpResponse(status=400)
Example #6
    def test_csv_upload(self):
        '''test csv upload with update and insert'''

        #@todo cleanup and break out into simpler cases

        self.make_annotations(self.dummy, 2)

        header = u"id,title,content,lat,lon,start_time,end_time,appearance\n"

        # first row is insert, second update (as it has an id)
        fp = tempfile.NamedTemporaryFile(delete=True)
        fp.write((header + u'"",foo bar,blah,5,10,2001/01/01,2005\n'
                  u"1,bar foo,halb,10,20,2010-01-01,,\n"
                  u"2,bunk,\u201c,20,30,,,").encode('utf-8'))
        fp.seek(0)
        # verify failure before login
        resp = self.c.post(reverse('annotations', args=[self.dummy.id]),
                           {'csv': fp})
        self.assertEqual(403, resp.status_code)
        # still only 2 annotations
        self.assertEqual(2,
                         Annotation.objects.filter(map=self.dummy.id).count())

        # login, rewind the buffer and verify
        self.c.login(username='******', password='******')
        fp.seek(0)
        resp = self.c.post(reverse('annotations', args=[self.dummy.id]),
                           {'csv': fp})
        # response type must be text/html for ext fileupload
        self.assertEqual('text/html', resp['content-type'])
        jsresp = json.loads(resp.content)
        self.assertEqual(True, jsresp['success'])
        ann = Annotation.objects.filter(map=self.dummy.id)
        # we uploaded 3, the other 2 should be deleted (overwrite mode)
        self.assertEqual(3, ann.count())
        ann = Annotation.objects.get(title='bar foo')
        self.assertEqual(ann.the_geom.x, 20.)
        ann = Annotation.objects.get(title='bunk')
        self.assertEqual(u'\u201c', ann.content)
        ann = Annotation.objects.get(title='foo bar')
        self.assertEqual('foo bar', ann.title)
        self.assertEqual(ann.the_geom.x, 10.)

        resp = self.c.get(
            reverse('annotations', args=[self.dummy.id]) + "?csv")
        x = list(unicode_csv_dict_reader(resp.content))
        self.assertEqual(3, len(x))
        by_title = dict([(v['title'], v) for v in x])
        # verify round trip of unicode quote
        self.assertEqual(u'\u201c', by_title['bunk']['content'])
        # and times
        self.assertEqual('2010-01-01T00:00:00',
                         by_title['bar foo']['start_time'])
        self.assertEqual('2001-01-01T00:00:00',
                         by_title['foo bar']['start_time'])
        self.assertEqual('2005-01-01T00:00:00',
                         by_title['foo bar']['end_time'])

        # verify windows codepage quotes
        fp = tempfile.NamedTemporaryFile(delete=True)
        fp.write((str(header) + ',\x93windows quotes\x94,yay,,,,'))
        fp.seek(0)
        resp = self.c.post(reverse('annotations', args=[self.dummy.id]),
                           {'csv': fp})
        ann = Annotation.objects.get(map=self.dummy.id)
        # windows quotes are unicode now
        self.assertEqual(u'\u201cwindows quotes\u201d', ann.title)

        # make sure a bad upload aborts the transaction (and prevents dropping existing)
        fp = tempfile.NamedTemporaryFile(delete=True)
        fp.write((str(header) * 2))
        fp.seek(0)
        resp = self.c.post(reverse('annotations', args=[self.dummy.id]),
                           {'csv': fp})
        self.assertEqual(400, resp.status_code)
        # only the annotation uploaded earlier should remain
        ann = Annotation.objects.get(map=self.dummy.id)
        self.assertEqual('yay', ann.content)