Code example #1
def s_ka_association_download(request):
    dbsession = DBSession()
    
    try:
        all = dbsession.query(square_keyarea_association).all()
    except DBAPIError:
        # Return the error right away, otherwise `all` below would be undefined
        # (the message means "database connection error").
        dbsession.close()
        return {'success': False, 'msg': 'Ошибка подключения к БД'}
    
    
    names = ['square_id', 'key_area_id']
    rows = [names, ]
    for row in all:
        data = []
        for name in names:
            data.append(try_encode(getattr(row, name)))
        rows.append(data)
        
    fname = tempfile.mktemp()
    try:
        file = open(fname, 'w')
        writer = csv.writer(file, delimiter = '\t')
        writer.writerows(rows)
        file.close()
        file = open(fname, 'r')
        data = file.read()
        resname = 'square_karea_association.csv'
    finally: # always remove the temporary file
        os.remove(fname)

    dbsession.close()
    return Response(content_type="application/octet-stream", 
            content_disposition="attachment; filename=%s" % (resname, ), body=data)
Code example #2
File: squares.py  Project: nextgis/nextgisbio
def s_ka_association_download(request):
    dbsession = DBSession()

    try:
        all = dbsession.query(square_keyarea_association).all()
    except DBAPIError:
        # Return the error right away, otherwise `all` below would be undefined
        # (the message means "database connection error").
        dbsession.close()
        return {'success': False, 'msg': 'Ошибка подключения к БД'}

    names = ['square_id', 'key_area_id']
    rows = [
        names,
    ]
    for row in all:
        data = []
        for name in names:
            data.append(try_encode(getattr(row, name)))
        rows.append(data)

    fname = tempfile.mktemp()
    try:
        file = open(fname, 'w')
        writer = csv.writer(file, delimiter='\t')
        writer.writerows(rows)
        file.close()
        file = open(fname, 'r')
        data = file.read()
        resname = 'square_karea_association.csv'
    finally:  # always remove the temporary file
        os.remove(fname)

    dbsession.close()
    return Response(content_type="application/octet-stream",
                    content_disposition="attachment; filename=%s" %
                    (resname, ),
                    body=data)
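
Both variants above stage the CSV in a temporary file created with tempfile.mktemp(), which the standard library documents as deprecated and unsafe. A minimal sketch of the same download built around an in-memory buffer instead, assuming the project's DBSession, square_keyarea_association and try_encode are available (the function name here is hypothetical):

from cStringIO import StringIO
import csv

from pyramid.response import Response
from sqlalchemy.exc import DBAPIError


def s_ka_association_download_inmemory(request):
    # Sketch only: same query and encoding as above, but the CSV is written
    # to an in-memory buffer, so there is no temporary file to clean up.
    dbsession = DBSession()
    try:
        rows = dbsession.query(square_keyarea_association).all()
    except DBAPIError:
        dbsession.close()
        return {'success': False, 'msg': 'Ошибка подключения к БД'}

    names = ['square_id', 'key_area_id']
    buf = StringIO()
    writer = csv.writer(buf, delimiter='\t')
    writer.writerow(names)
    for row in rows:
        writer.writerow([try_encode(getattr(row, name)) for name in names])
    dbsession.close()

    return Response(content_type="application/octet-stream",
                    content_disposition="attachment; filename=%s"
                    % ('square_karea_association.csv', ),
                    body=buf.getvalue())

Since the whole table is read into memory anyway, buffering the CSV this way avoids both the deprecated call and the cleanup step in the finally block.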
Code example #3
def anns_download(request):
    format = request.matchdict['format']

    if format not in ['csv', 'shp']:
        return Response()
    
    try:
        # taxon_list -- a list analogous to the querystring in view.cards.points_text
        # (taxon_id1,taxon_id2)
        taxons = request.params['taxon_list']
        if taxons == 'root':
            taxon_list = None
        elif taxons != '':
            taxons = urllib.unquote(taxons)
            taxons = taxons.split(',')
            taxons = [t.split('_') for t in taxons]
            taxon_list = [id for (t,id) in taxons]
        else:
            taxon_list = None
    except KeyError:
        taxon_list = None
    
    anns = Annotation.as_join_list(taxon_list)
    
    if format == 'csv':
        fname = tempfile.mktemp()
        try:
            file = open(fname, 'w')
            writer = csv.writer(file, delimiter = '\t')
            
            # Write the rows to the file
            for ann in anns:
                x = [try_encode(v) for v in ann]
                writer.writerow(x)
            file.close()
            # It would probably make sense to archive before sending; no archiving here.
            file = open(fname, 'r')
            data = file.read()
            resname = 'annotations.csv'
        finally: # always remove the temporary file
            os.remove(fname)
            
    elif format == 'shp':
        # SHP export is not implemented; return here so the code below does not
        # hit the undefined `data` and `resname`.
        return Response('Not implemented')

    return Response(content_type="application/octet-stream", 
            content_disposition="attachment; filename=%s" % (resname, ), body=data)
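
For reference, the taxon_list parsing above expects comma-separated entries of the form <prefix>_<id>; a tiny illustration with a made-up value:

# Illustrative only; 'genus_12,species_345' is a hypothetical parameter value.
taxons = urllib.unquote('genus_12,species_345')
taxons = [t.split('_') for t in taxons.split(',')]   # [['genus', '12'], ['species', '345']]
taxon_list = [id for (t, id) in taxons]              # ['12', '345']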
Code example #4
File: __init__.py  Project: nextgis/nextgisbio
def table_download(request):
    dbsession = DBSession()
    modelname = request.matchdict['table']
    try:
        model = table_by_name(modelname)
    except KeyError:
        return {
            'success': False,
            'msg': 'Ошибка: отсутствует таблица с указанным именем'
        }

    try:
        all = dbsession.query(model).all()
        dbsession.close()
    except DBAPIError:
        # Return the error right away, otherwise `all` below would be undefined
        # (the message means "database connection error").
        dbsession.close()
        return {'success': False, 'msg': 'Ошибка подключения к БД'}

    names = []
    for c in model.__table__.columns:
        names.append(c.name)
    rows = [
        names,
    ]
    for row in all:
        data = []
        for name in names:
            data.append(try_encode(getattr(row, name)))
        rows.append(data)

    fname = tempfile.mktemp()
    try:
        file = open(fname, 'w')
        writer = csv.writer(file, delimiter='\t')
        writer.writerows(rows)
        file.close()
        file = open(fname, 'r')
        data = file.read()
        resname = modelname + '.csv'
    finally:  # always remove the temporary file
        os.remove(fname)

    return Response(content_type="application/octet-stream",
                    content_disposition="attachment; filename=%s" %
                    (resname, ),
                    body=data)
Code example #5
File: __init__.py  Project: nextgis/nextgisbio
def table_download(request):
    dbsession = DBSession()
    modelname = request.matchdict['table']
    try:
        model = table_by_name(modelname)
    except KeyError:
        return {'success': False, 'msg': 'Ошибка: отсутствует таблица с указанным именем'}
    
    try:
        all = dbsession.query(model).all()
        dbsession.close()
    except DBAPIError:
        # Return the error right away, otherwise `all` below would be undefined
        # (the message means "database connection error").
        dbsession.close()
        return {'success': False, 'msg': 'Ошибка подключения к БД'}
    
    
    names = []
    for c in model.__table__.columns:
        names.append(c.name)
    rows = [names, ]
    for row in all:
        data = []
        for name in names:
            data.append(try_encode(getattr(row, name)))
        rows.append(data)
        
    fname = tempfile.mktemp()
    try:
        file = open(fname, 'w')
        writer = csv.writer(file, delimiter = '\t')
        writer.writerows(rows)
        file.close()
        file = open(fname, 'r')
        data = file.read()
        resname = modelname + '.csv'
    finally: # always remove the temporary file
        os.remove(fname)
        
    return Response(content_type="application/octet-stream", 
            content_disposition="attachment; filename=%s" % (resname, ), body=data)
Code example #6
File: cards.py  Project: nextgis/nextgisbio
def cards_download(request):
    format = request.matchdict['format']

    if format not in ['csv', 'shp']:
        return Response()

    try:
        # taxon_list -- a list analogous to the querystring in points_text
        # (taxon_id1,taxon_id2)
        taxons = request.params['taxon_list']
        if taxons != '':
            taxons = urllib.unquote(taxons)
            taxons = taxons.split(',')
            taxons = [t.split('_') for t in taxons]
            taxon_list = [id for (t, id) in taxons]
            if any('root' in s for s in taxon_list):
                taxon_list = None
        else:
            taxon_list = None
    except KeyError:
        taxon_list = None

    cards = Cards.as_join_list(taxon_list)

    if format == 'csv':
        fname = tempfile.mktemp()
        try:
            file = open(fname, 'w')
            writer = csv.writer(file, delimiter='\t')

            # Write the rows to the file
            for card in cards:
                x = [try_encode(v) for v in card]
                writer.writerow(x)
            file.close()
            # It would probably make sense to archive before sending; no archiving here.
            file = open(fname, 'r')
            data = file.read()
            resname = 'cards.csv'
        finally:  # always remove the temporary file
            os.remove(fname)

    elif format == 'shp':
        workdir = tempfile.mkdtemp()
        try:
            driver = ogr.GetDriverByName('ESRI Shapefile')
            sr = osr.SpatialReference()
            sr.ImportFromProj4("+init=epsg:4326")

            ds = driver.CreateDataSource(workdir)
            lyr = ds.CreateLayer('point_out', sr, ogr.wkbPoint)

            # Create the dbf fields, truncating the field names to 10 characters
            # so they can be stored in the dbf
            fieldnames = [name[:10] for name in cards[0]]
            fieldsize = 254
            for name in fieldnames:
                field_defn = ogr.FieldDefn(name, ogr.OFTString)
                field_defn.SetWidth(fieldsize)
                if lyr.CreateField(field_defn) != 0:
                    print "Creating Name field failed.\n"

            # Fill in the data
            lon_idx, lat_idx = 36, 37  # indexes of the lon and lat fields in cards
            for row in cards[1:]:  # skip the header row
                row = [try_encode(v, 'cp1251') for v in row]
                x = row[lon_idx]
                y = row[lat_idx]
                if x and y:
                    x = float(row[lon_idx])
                    y = float(row[lat_idx])
                    feat = ogr.Feature(lyr.GetLayerDefn())
                    for i, name in enumerate(fieldnames):
                        if row[i]:
                            feat.SetField(name, row[i])
                    pt = ogr.Geometry(ogr.wkbPoint)
                    pt.SetPoint_2D(0, x, y)
                    feat.SetGeometry(pt)
                    if lyr.CreateFeature(feat) != 0:
                        print "Failed to create feature in shapefile.\n"
                    feat.Destroy()
            ds = None

            zipfd = tempfile.NamedTemporaryFile(delete=False, suffix='.zip', prefix='')
            zipa = zipfile.ZipFile(zipfd, 'w')
            for dirname, dirnames, filenames in os.walk(workdir):
                for filename in filenames:
                    zipa.write(os.path.join(dirname, filename),
                               os.path.join(dirname, filename).replace(workdir + os.sep, ''), zipfile.ZIP_DEFLATED)

            zipa.close()
            zipfd.close()  # flush the buffered archive before reading it back
            file = open(zipfd.name, 'rb')
            data = file.read()
            file.close()
            os.remove(zipfd.name)  # delete=False above, so remove the temporary zip here
            resname = 'cards.zip'
        finally:
            # always clean up the directory with the assembled data
            shutil.rmtree(workdir)

    return Response(content_type="application/octet-stream",
                    content_disposition="attachment; filename=%s" % (resname,), body=data)
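
The shp branch above stages the archive in a NamedTemporaryFile before reading it back. A small sketch of packing the generated shapefile directory into an in-memory zip instead, assuming the same workdir produced by the OGR code (the helper name is made up):

import os
import zipfile
from cStringIO import StringIO


def zip_directory_to_bytes(workdir):
    # Sketch only: walk workdir and pack every file into a zip archive held
    # in memory, returning the raw bytes to use as the Response body.
    buf = StringIO()
    archive = zipfile.ZipFile(buf, 'w', zipfile.ZIP_DEFLATED)
    for dirname, dirnames, filenames in os.walk(workdir):
        for filename in filenames:
            path = os.path.join(dirname, filename)
            archive.write(path, os.path.relpath(path, workdir))
    archive.close()
    return buf.getvalue()

With data = zip_directory_to_bytes(workdir), only shutil.rmtree(workdir) is left to clean up, and no zip file ever touches the disk.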