def redbook_filter(request):
    """Filter red books by case-insensitive name substring, paginated.

    Returns a dojo-grid payload: 'items', 'numRows', 'identity'.
    """
    dbsession = DBSession()
    query_str = request.params['name'].encode('utf-8').decode('utf-8')
    start = int(request.params['start'])
    count = int(request.params['count'])
    try:
        query_str_upper = query_str.upper()
        # NOTE(review): the search string is interpolated into a raw LIKE
        # filter unescaped -- potential SQL injection; consider
        # RedBook.name.ilike() with a bound parameter.
        aFilter = u"UPPER({0}) LIKE '%{1}%'".format('name', query_str_upper)
        order_by_clauses = dojo.parse_sort(request)
        red_books = dbsession.query(RedBook.id, RedBook.name)\
            .filter(aFilter)\
            .order_by(order_by_clauses)\
            .all()
        itemsPage = red_books[start:start + count]
    except DBAPIError:
        # Fix: the session was leaked on the error path before.
        dbsession.close()
        return {'success': False, 'msg': 'Ошибка подключения к БД'}
    rows = [{'id': id, 'name': name} for id, name in itemsPage]
    dbsession.close()
    return {
        'items': rows,
        'success': True,
        'numRows': len(itemsPage),
        'identity': 'id'
    }
def get_child_taxons_by_parent(request):
    """Return jsTree child nodes of the taxon given by request param 'id'.

    The special id '#' means the (virtual) root: children are then wrapped
    into the root jsTree item.
    """
    raw_id = request.params['id']
    is_full_data = request.params.get('isFullData') == 'true'
    root_requested = raw_id == '#'
    parent_taxon_id = None if root_requested else int(raw_id)
    dbsession = DBSession()
    children = (dbsession.query(Taxon)
                .filter_by(parent_id=parent_taxon_id)
                .order_by(Taxon.name)
                .all())
    dbsession.close()
    children_json = [_taxon_to_jsTree_item(t, is_full_data) for t in children]
    if root_requested:
        root_item = _get_root_jsTree_item()
        root_item['children'] = children_json
        return root_item
    return children_json
def s_ka_association_download(request):
    """Dump the square<->key-area association table as a TSV attachment."""
    dbsession = DBSession()
    try:
        associations = dbsession.query(square_keyarea_association).all()
    except DBAPIError:
        # Fix: the original only assigned an error dict and then fell
        # through, crashing with a NameError on the undefined result set;
        # close the session and return the error payload instead.
        dbsession.close()
        return {'success': False, 'msg': 'Ошибка подключения к БД'}
    names = ['square_id', 'key_area_id']
    rows = [names]
    for row in associations:
        rows.append([try_encode(getattr(row, name)) for name in names])
    # NOTE(review): tempfile.mktemp is race-prone; mkstemp would be safer.
    fname = tempfile.mktemp()
    try:
        out = open(fname, 'w')
        try:
            csv.writer(out, delimiter='\t').writerows(rows)
        finally:
            # Fix: always close the handle, even if writing fails.
            out.close()
        src = open(fname, 'r')
        try:
            data = src.read()
        finally:
            src.close()
        resname = 'square_karea_association.csv'
    finally:
        # Remove the temp file in any case.
        os.remove(fname)
        dbsession.close()
    return Response(content_type="application/octet-stream",
                    content_disposition="attachment; filename=%s" % (resname, ),
                    body=data)
def _get_squares_by_taxonlist(taxons, geomtype='geojson'):
    '''Select the squares covered by annotated lists of the given taxons.

    ``taxons`` is 'taxon_id1,taxon_id2,...'; the sentinel '#' selects every
    square.  Each square's geometry is returned according to
    geomtype = ['geojson', 'wkt'].
    '''
    assert geomtype in ['geojson', 'wkt']
    dbsession = DBSession()
    if '#' in taxons:
        if geomtype == 'geojson':
            all = dbsession.query(Squares.id, sqlalchemy.func.st_asgeojson(Squares.geom.RAW)).all()
        else:
            all = dbsession.query(Squares.id, sqlalchemy.func.st_astext(Squares.geom.RAW)).all()
    else:
        # Pick the key areas where the taxon occurs, then the ids of the
        # squares those key areas fall onto.
        # NOTE(review): taxon ids are spliced into raw SQL -- presumably
        # numeric strings; verify callers never pass user-controlled text.
        subquery = TAXON_ID_QUERY % (", ".join([ str(num) for num in taxons]), TAXON_TYPES[len(TAXON_TYPES)-1])
        qs = """ SELECT DISTINCT square_id from square_karea_association WHERE square_karea_association.key_area_id in (SELECT DISTINCT key_area.id FROM annotation INNER JOIN key_area ON annotation.key_area = key_area.id""" + ' AND annotation.species IN (' + subquery +'));'
        k_set = dbsession.query(Squares.id).from_statement(qs).all()
        k_set = [k[0] for k in k_set]
        if geomtype == 'geojson':
            all = dbsession.query(Squares.id, sqlalchemy.func.st_asgeojson(Squares.geom.RAW)).filter(Squares.id.in_(k_set)).all()
        else:
            all = dbsession.query(Squares.id, sqlalchemy.func.st_astext(Squares.geom.RAW)).filter(Squares.id.in_(k_set)).all()
    dbsession.close()
    return all
def table_view(request):
    """Return one card as JSON, hiding exact coordinates from non-editors.

    'editable' is True for admins, otherwise only when the current user
    inserted the card.
    """
    can_i_edit = isinstance(
        has_permission('edit', request.context, request), ACLAllowed)
    user_id = authenticated_userid(request)
    dbsession = DBSession()
    try:
        card = dbsession.query(Cards).filter_by(
            id=request.matchdict['id']).one()
        user = dbsession.query(User).filter_by(
            id=user_id).one() if can_i_edit else None
        result = card.as_json_dict()
    except NoResultFound:
        # Fix: return the error early -- the original kept going with
        # card=None, injected lat/lon into the error dict and reported
        # success=True (and could crash on card.inserter).
        dbsession.close()
        return {'success': False,
                'msg': 'Результатов, соответствующих запросу, не найдено'}
    if not can_i_edit:
        # Zero out the coordinates before showing them to non-editors.
        result['lat'] = 0
        result['lon'] = 0
    if isinstance(has_permission('admin', request.context, request),
                  ACLAllowed):
        is_editable = True
    else:
        is_editable = card.inserter == user.person_id if user else False
    dbsession.close()
    return {'data': result, 'editable': is_editable, 'success': True}
def taxon_filter(request):
    """Search taxons (latin and russian names) and synonyms by substring.

    Returns a paginated dojo-grid payload.  Python 2 only (iterator
    ``.next()``).
    """
    query_str = request.params['name'].encode('utf-8').decode('utf-8')
    start = int(request.params['start'])
    count = int(request.params['count'])
    # Pull ids, names and authors (for synonyms) out of the taxon and
    # synonym tables.
    dbsession = DBSession()
    try:
        query_str_upper = query_str.upper()
        # search in the taxon table:
        # NOTE(review): the search string goes into a raw LIKE filter
        # unescaped -- potential SQL injection; consider ilike() bindings.
        aFilter = u"UPPER({0}) LIKE '%{1}%'".format('name', query_str_upper)
        tax_all = dbsession.query(Taxon.id, Taxon.name, Taxon.author).filter(aFilter).all()
        aFilter = u"UPPER({0}) LIKE '%{1}%'".format('russian_name', query_str_upper)
        rus_all = dbsession.query(Taxon.id, Taxon.russian_name, Taxon.author).filter(aFilter).all()
        # search in the synonym table:
        aFilter = u"UPPER({0}) LIKE '%{1}%'".format('synonym', query_str_upper)
        s_all = dbsession.query(Synonym.species_id, Synonym.synonym, Synonym.author).filter(aFilter).all()
        all = [tax_all + s_all + rus_all][0]
        itemsPage = all[start:start + count]
        dbsession.close()
    except DBAPIError:
        dbsession.close()
        return {'success': False, 'msg': 'Ошибка подключения к БД'}
    rows = []
    if all:
        # recId is a synthetic running number, distinct from the (possibly
        # duplicated) taxon/species id.
        rec_id = itertools.count()
        rows = [{'recId': rec_id.next(), 'id': id, 'name': name, 'author': author} for id, name, author in itemsPage]
    return {'items': rows, 'success': True, 'numRows': len(all), 'identity': 'id'}
def update_related_items(table, db_relative_field, relative_field_name, old_id_from_csv, new_corrected_id):
    """Re-point rows of ``table`` from an old Person id to a corrected one.

    db_relative_field -- the column object holding the Person foreign key;
    relative_field_name -- the same column's attribute name (for update());
    old_id_from_csv / new_corrected_id -- Person ids to migrate from / to.
    """
    session = DBSession()
    new_person = session.query(Person)\
        .filter(Person.id == new_corrected_id)\
        .one()
    count_items = session.query(table)\
        .outerjoin(Person, db_relative_field == Person.id)\
        .filter(db_relative_field == old_id_from_csv)\
        .count()
    items = session.query(table)\
        .outerjoin(Person, db_relative_field == Person.id)\
        .filter(db_relative_field == old_id_from_csv)
    if count_items > 0:
        # Updates run in a fresh session inside one zope transaction and
        # are committed explicitly before the manager exits.
        with transaction.manager:
            save_session = DBSession()
            for item in items:
                save_session.query(table).filter_by(id=item.id).update(
                    {relative_field_name: new_person.id})
            transaction.commit()
    session.close()
def get_child_taxons_by_parent(request):
    """Build jsTree items for the children of the requested taxon ('#' = root)."""
    requested_id = request.params['id']
    full_data = ('isFullData' in request.params
                 and request.params['isFullData'] == 'true')
    at_root = requested_id == '#'
    if at_root:
        parent_id = None
    else:
        parent_id = int(requested_id)
    session = DBSession()
    taxons = session.query(Taxon).filter_by(parent_id=parent_id) \
        .order_by(Taxon.name).all()
    session.close()
    items = []
    for child in taxons:
        items.append(_taxon_to_jsTree_item(child, full_data))
    if not at_root:
        return items
    root_item = _get_root_jsTree_item()
    root_item['children'] = items
    return root_item
def s_ka_association_download(request):
    """Serve the square/key-area association table as a TSV download."""
    dbsession = DBSession()
    try:
        links = dbsession.query(square_keyarea_association).all()
    except DBAPIError:
        # Fix: previously the error dict was assigned and execution
        # continued into a NameError; return it (and close) instead.
        dbsession.close()
        return {'success': False, 'msg': 'Ошибка подключения к БД'}
    names = ['square_id', 'key_area_id']
    rows = [names]
    for link in links:
        rows.append([try_encode(getattr(link, name)) for name in names])
    # NOTE(review): tempfile.mktemp is race-prone; mkstemp would be safer.
    fname = tempfile.mktemp()
    try:
        handle = open(fname, 'w')
        try:
            csv.writer(handle, delimiter='\t').writerows(rows)
        finally:
            handle.close()  # Fix: close even when writing fails.
        handle = open(fname, 'r')
        try:
            data = handle.read()
        finally:
            handle.close()
        resname = 'square_karea_association.csv'
    finally:
        # Remove the temporary file in any case.
        os.remove(fname)
        dbsession.close()
    return Response(content_type="application/octet-stream",
                    content_disposition="attachment; filename=%s" % (resname, ),
                    body=data)
def table_view(request):
    """Return one row of the table named in the URL as JSON.

    'editable' is True for admins, for tables without an 'inserter'
    column, or when the current user inserted the row.
    """
    can_i_edit = isinstance(
        has_permission('edit', request.context, request), ACLAllowed)
    user_id = authenticated_userid(request)
    try:
        model = table_by_name(request.matchdict['table'])
    except KeyError:
        return {'success': False,
                'msg': 'Ошибка: отсутствует таблица с указанным именем'}
    dbsession = DBSession()
    try:
        entity = dbsession.query(model).filter_by(
            id=request.matchdict['id']).one()
        user = dbsession.query(User).filter_by(
            id=user_id).one() if can_i_edit else None
    except NoResultFound:
        # Fix: the original continued with ``entity`` undefined and crashed
        # with a NameError on hasattr(); return the error payload instead.
        dbsession.close()
        return {'success': False,
                'msg': 'Результатов, соответствующих запросу, не найдено'}
    result = {'data': entity.as_json_dict(), 'success': True}
    if hasattr(entity, 'inserter'):
        if isinstance(has_permission('admin', request.context, request),
                      ACLAllowed):
            is_editable = True
        else:
            is_editable = entity.inserter == user.person_id if user else False
    else:
        is_editable = True
    result['editable'] = is_editable
    dbsession.close()
    return result
def export_to_file(filename):
    """Dump red-book species (joined with their taxons) via dump_to_file."""
    from nextgisbio.utils.dump_to_file import dump
    session = DBSession()
    records = (session.query(RedBook, RedBookSpecies, Taxon)
               .join(RedBookSpecies, RedBook.id == RedBookSpecies.red_book_id)
               .join(Taxon, RedBookSpecies.specie_id == Taxon.id)
               .order_by(RedBook.id, RedBookSpecies.specie_id)
               .all())
    session.close()
    attribute_names = ['region', 'orig_name', 'lat_name', 'author',
                       'population', 'status', 'univ_status', 'year', 'bibl']
    objects_for_dump = []
    for book, species, taxon in records:
        objects_for_dump.append([
            species.region, species.orig_name, taxon.name, species.author,
            species.population, species.status, species.univ_status,
            species.year, book.name,
        ])
    dump(filename, attribute_names, objects_for_dump, is_array=True)
def redbook_filter(request):
    """Paginated red-book search by case-insensitive name substring."""
    dbsession = DBSession()
    query_str = request.params['name'].encode('utf-8').decode('utf-8')
    start = int(request.params['start'])
    count = int(request.params['count'])
    try:
        query_str_upper = query_str.upper()
        # NOTE(review): raw-string LIKE filter with the user string spliced
        # in unescaped -- potential SQL injection; prefer ilike() bindings.
        aFilter = u"UPPER({0}) LIKE '%{1}%'".format('name', query_str_upper)
        order_by_clauses = dojo.parse_sort(request)
        red_books = dbsession.query(RedBook.id, RedBook.name)\
            .filter(aFilter)\
            .order_by(order_by_clauses)\
            .all()
        itemsPage = red_books[start:start + count]
    except DBAPIError:
        # Fix: close the session on the error path too (it leaked before).
        dbsession.close()
        return {'success': False, 'msg': 'Ошибка подключения к БД'}
    rows = [{'id': id, 'name': name} for id, name in itemsPage]
    dbsession.close()
    return {'items': rows, 'success': True, 'numRows': len(itemsPage),
            'identity': 'id'}
def square(request):
    """Return a square's id together with its key areas (id + name)."""
    session = DBSession()
    square_id = request.matchdict['id']
    sq = session.query(Squares).filter_by(id=square_id).one()
    areas = []
    for area in sq.key_areas:
        areas.append({'id': area.id, 'name': area.name})
    session.close()
    return {'id': sq.id, 'key_areas': areas}
def square(request):
    """Fetch one square and list its key areas as {'id', 'name'} dicts."""
    dbsession = DBSession()
    sq = dbsession.query(Squares).filter_by(id=request.matchdict['id']).one()
    key_areas = [{'id': ka.id, 'name': ka.name} for ka in sq.key_areas]
    result = {'id': sq.id, 'key_areas': key_areas}
    dbsession.close()
    return result
def squares_text(request):
    """Return every square's id together with its GeoJSON geometry."""
    session = DBSession()
    rows = session.query(
        Squares, sqlalchemy.func.st_asgeojson(Squares.geom.RAW)).all()
    squares = [{'id': sq.id, 'geom': geom} for sq, geom in rows]
    session.close()
    return {'squares': squares}
def points_text(request):
    """Return observation points (cards) for the selected taxons.

    The query string carries 'nodes=taxon_id1,taxon_id2' -- e.g.
    "nodes=taxon_1,taxon_5" means the user picked taxon rows with id=1 and
    id=5; the observation cards of those taxons are returned.  The edge
    case of selecting all cards is nodes="root_".  Optional 'red_book'
    restricts cards to species of that red book (-1 means no restriction).
    """
    dbsession = DBSession()
    try:
        taxons = request.params['nodes']
    except KeyError:
        taxons = ''
    red_book_id = None
    if 'red_book' in request.params:
        red_book_id = int(request.params['red_book'])
        if red_book_id == -1:
            red_book_id = None
    can_i_edit = has_permission('edit', request.context, request)
    can_i_edit = isinstance(can_i_edit, ACLAllowed)
    if taxons:
        taxons = urllib.unquote(taxons)
        taxon_id = taxons.split(',')
        if 'root' in taxons:
            cards = dbsession.query(Cards, Taxon).join(Taxon).all()
        else:
            # Descendant species of the chosen taxons and their cards.
            # NOTE(review): ids are spliced into raw SQL -- presumably
            # numeric; verify they cannot carry user-controlled text.
            subquery = TAXON_ID_QUERY % (", ".join([str(num) for num in taxon_id]), TAXON_TYPES[len(TAXON_TYPES) - 1])
            qs = """ SELECT cards.id,cards.species,cards.lat,cards.lon, taxon.name FROM cards INNER JOIN taxon ON cards.species = taxon.id %s WHERE """ % ( 'INNER JOIN red_books_species ON cards.species = red_books_species.specie_id' if red_book_id else '') \
                + ((' red_books_species.red_book_id = ' + str(red_book_id) + ' AND ') if red_book_id else '') \
                + ' cards.species IN (' + subquery + ');'
            cards = dbsession.query(Cards, Taxon).from_statement(qs).all()
        points = []
        for card, taxon in cards:
            id, spec_id, lat, lon = card.id, card.species, card.lat, card.lon
            name = taxon.name
            if lat and lon:
                if not can_i_edit:
                    # Real coordinates must not be shown: shift them by
                    # roughly 10 km in a random direction before display.
                    lat = lat + (random() - random()) / 7
                    lon = lon + (random() - random()) / 4
                points.append({'lat': lat, 'lon': lon, 'name': name, 'card_id': id, 'spec_id': spec_id})
    else:
        # NOTE(review): the empty selection yields a dict while the branch
        # above yields a list -- clients apparently tolerate both; confirm.
        points = {}
    dbsession.close()
    return {'points': points}
def table_item_save(request):
    """Create or update a Person together with its User account (jTable).

    POST fields are namespaced as 'person_<field>' / 'user_<field>';
    a numeric 'person_id' switches the view into update mode.
    """
    session = DBSession()
    session.expire_on_commit = False
    if ('person_id' in request.POST) and request.POST['person_id'].isdigit():
        person_id = int(request.POST['person_id'])
        person = session.query(Person) \
            .options(joinedload('user')) \
            .filter(Person.id == person_id) \
            .all()[0]
        user = person.user
    else:
        person = Person()
        user = User()
        session.add(user)
        person.user = user
    for attr in request.POST:
        # Field names look like '<table>_<field>'.
        # NOTE(review): split('_') assumes exactly one underscore per field
        # name -- a field like 'user_is_active' would raise; confirm.
        table_name, field = attr.split('_')
        if field == 'id':
            # Never overwrite primary keys from POST data.
            continue
        if table_name == 'person':
            setattr(person, field, request.POST[attr])
        if table_name == 'user':
            setattr(user, field, request.POST[attr])
    # Checkbox semantics: absent or empty 'user_active' means inactive.
    if 'user_active' in request.POST and request.POST['user_active']:
        user.active = True
    else:
        user.active = False
    # Only re-hash the password when a new one was actually submitted.
    if 'user_password' in request.POST and request.POST['user_password']:
        user.password = User.password_hash(request.POST['user_password'])
    session.add(person)
    try:
        transaction.commit()
    except IntegrityError:
        transaction.abort()
        return {
            'Result': 'Error',
            'Message': u'Такой логин уже присутствует в системе'
        }
    person_json = person.as_json_dict('person_')
    user_json = user.as_json_dict('user_')
    item_json = person_json.copy()
    item_json.update(user_json)
    session.close()
    return {
        'Result': 'OK',
        'Record': item_json
    }
def anns_text(request):
    """Return annotated lists for the selected taxons within one square.

    Query string 'nodes=taxon_id1,taxon_id2' selects taxon subtrees; the
    special node "root" selects everything.  The square id comes from the
    URL match dict; results are limited to that square's key areas.
    """
    dbsession = DBSession()
    # Key areas belonging to the square:
    square_id = request.matchdict['id']
    square = dbsession.query(Squares).filter_by(id=square_id).one()
    key_areas = ", ".join([str(s.id) for s in square.key_areas])
    try:
        taxons_id = request.params['nodes']
    except KeyError:
        taxons_id = ''
    data = []
    if taxons_id:
        taxons_id = urllib.unquote(taxons_id)
        taxons_id = taxons_id.split(',')
        if "root" in taxons_id:
            # All annotations from the square's key areas.  (The original
            # also ran a discarded ORM query here -- dead work, removed.)
            qs = """ SELECT annotation.id,annotation.species, taxon.name FROM annotation INNER JOIN taxon ON annotation.species = taxon.id """ + ' AND annotation.key_area IN ( %s ) ;' % (key_areas, )
        else:
            # Descendant species of the chosen taxons, restricted to the
            # square's key areas.
            subquery = TAXON_ID_QUERY % (", ".join([str(num) for num in taxons_id]), TAXON_TYPES[len(TAXON_TYPES) - 1])
            qs = """ SELECT annotation.id,annotation.species, taxon.name FROM annotation INNER JOIN taxon ON annotation.species = taxon.id """ + ' AND annotation.key_area IN ( %s ) ' % (key_areas, ) + ' AND annotation.species IN (' + subquery + ');'
        anns = dbsession.query(Annotation, Taxon).from_statement(qs).all()
        for ann, taxon in anns:
            data.append({'name': taxon.name, 'ann_id': ann.id,
                         'spec_id': ann.species})
    # Fix: the original left the result list undefined for an empty
    # 'nodes' request and crashed with a NameError on return.
    dbsession.close()
    return {'data': data}
def inforesources_name(request):
    """List inforesources (id + filename): one by id, or a filtered page."""
    dbsession = DBSession()
    numRows = 0
    inforesources = []
    success = True
    params = request.params
    if 'id' in params and params['id'].isdigit():
        resource_id = int(params['id'])
        try:
            inforesources = (dbsession
                             .query(Inforesources.id, Inforesources.filename)
                             .filter(Inforesources.id == resource_id)
                             .all())
            numRows = 1
        except DBAPIError:
            success = False
    else:
        start, count = helpers.get_paging_params(params)
        parsed_filename = helpers.get_parsed_search_attr(params, 'filename')
        conditions = []
        if parsed_filename:
            conditions.append(Inforesources.filename.ilike(parsed_filename))
        try:
            base = (dbsession
                    .query(Inforesources.id, Inforesources.filename)
                    .filter(*conditions)
                    .order_by(Inforesources.filename))
            if start is not None and count is not None:
                inforesources = base.slice(start, start + count).all()
                numRows = (dbsession.query(Inforesources)
                           .filter(*conditions)
                           .count())
            else:
                inforesources = base.all()
                numRows = len(inforesources)
        except DBAPIError:
            success = False
    items = [{'id': rid, 'filename': name} for rid, name in inforesources]
    dbsession.close()
    return {'items': items, 'success': success,
            'numRows': numRows, 'identifier': 'id'}
def anns_text(request):
    """Return annotated lists of the selected taxons within one square.

    Query string holds 'nodes=taxon_id1,taxon_id2' (e.g.
    "nodes=taxon_1,taxon_5"); the special node "root_" selects everything.
    """
    dbsession = DBSession()
    # Key areas of the square:
    id = request.matchdict['id']
    square = dbsession.query(Squares).filter_by(id=id).one()
    key_areas = [str(s.id) for s in square.key_areas]
    key_areas = ", ".join(key_areas)
    try:
        taxons_id = request.params['nodes']
    except KeyError:
        taxons_id = ''
    can_i_edit = has_permission('edit', request.context, request)
    can_i_edit = isinstance(can_i_edit, ACLAllowed)
    if taxons_id:
        taxons_id = urllib.unquote(taxons_id)
        taxons_id = taxons_id.split(',')
        if "root" in taxons_id:
            # NOTE(review): this ORM query is immediately overwritten by
            # the raw-SQL query below -- dead work.
            anns = dbsession.query(Annotation,Taxon).join(Taxon).all()
            qs = """ SELECT annotation.id,annotation.species, taxon.name FROM annotation INNER JOIN taxon ON annotation.species = taxon.id """ + ' AND annotation.key_area IN ( %s ) ;' % (key_areas, )
            anns = dbsession.query(Annotation, Taxon).from_statement(qs).all()
        else:
            # Descendant species of the chosen taxons, restricted to the
            # key areas of square `id`.
            subquery = TAXON_ID_QUERY % (", ".join([ str(num) for num in taxons_id]), TAXON_TYPES[len(TAXON_TYPES)-1])
            qs = """ SELECT annotation.id,annotation.species, taxon.name FROM annotation INNER JOIN taxon ON annotation.species = taxon.id """ + ' AND annotation.key_area IN ( %s ) ' % (key_areas, ) + ' AND annotation.species IN (' + subquery +');'
            anns = dbsession.query(Annotation, Taxon).from_statement(qs).all()
        squares = []
        for ann, taxon in anns:
            id, spec_id= ann.id, ann.species
            name = taxon.name
            squares.append({'name': name, 'ann_id': id, 'spec_id': spec_id})
    else:
        # NOTE(review): this assigns 'points', but the return below reads
        # 'squares' -- an empty 'nodes' request raises NameError here.
        points = {}
    dbsession.close()
    return {'data': squares}
def squares_text(request):
    """Serialize all squares as {'id', 'geom'} pairs (GeoJSON geometry)."""
    dbsession = DBSession()
    result = []
    for square, geojson in dbsession.query(
            Squares, sqlalchemy.func.st_asgeojson(Squares.geom.RAW)).all():
        result.append({'id': square.id, 'geom': geojson})
    dbsession.close()
    return {'squares': result}
def taxon_cbtree(request):
    """Serve a dojo cbtree block for the taxon hierarchy.

    The requested path is either '.' (tree root) or a '/'-separated chain
    of taxon ids whose last element is the parent to expand.
    """
    path_param = 'path' if 'path' in request.params else 'basePath'
    hierarchical_path = request.params[path_param].replace('"', '')
    at_root = hierarchical_path == '.'
    if at_root:
        parent_id = None
    else:
        parent_id = int(str.split(str(hierarchical_path), '/')[-1])
    dbsession = DBSession()
    parent_taxon = dbsession.query(Taxon).filter_by(id=parent_id).first()
    children = (dbsession.query(Taxon)
                .filter_by(parent_id=parent_id)
                .order_by(Taxon.name)
                .all())
    dbsession.close()
    if at_root:
        block = {
            'name': '.',
            'path': hierarchical_path,
            'directory': True,
            'total': 1,
            'status': 200,
            'items': [{'name': '.', 'id': -1, 'path': hierarchical_path,
                       'directory': True}],
        }
    else:
        block = {
            'name': parent_taxon.name,
            'path': hierarchical_path,
            'directory': True,
            'total': 1,
            'status': 200,
            'items': [],
        }
    nodes = [_taxon_to_node(hierarchical_path, taxon) for taxon in children]
    if at_root:
        block['items'][0]['children'] = nodes
    else:
        block['items'] = nodes
    # 'block' is always truthy here; fallback kept for parity with the
    # original code.
    return block if block else nodes
def karea_ann(request):
    """List annotations of one key area: id, species name, species id."""
    session = DBSession()
    karea = session.query(Key_area).filter_by(
        id=request.matchdict['id']).one()
    annotations = [{'id': a.id,
                    'name': a.species_link.name,
                    'species': a.species} for a in karea.annotations]
    session.close()
    return {'data': annotations}
def table_browse_jtable(request):
    """jTable browse endpoint: one row by id, or a filtered/sorted page."""
    session = DBSession()
    table, table_name = helpers.get_table_by_name(request)
    # jtSorting arrives as e.g. 'name asc'; default to primary-key order.
    sorting = request.GET[
        'jtSorting'] if 'jtSorting' in request.GET else 'id asc'
    rows_count = 0
    items = []
    success = True
    if ('id' in request.params) and request.params['id'].isdigit():
        id = int(request.params['id'])
        try:
            items = session.query(table) \
                .filter(table.id == id) \
                .all()
            rows_count = 1
        except DBAPIError:
            success = False
    else:
        start, count = helpers.get_jtable_paging_params(request.params)
        filter_conditions = _get_filter_conditions(request, table)
        try:
            if (start is not None) and (count is not None):
                items = session.query(table) \
                    .filter(or_(*filter_conditions)) \
                    .order_by(sorting) \
                    .slice(start, start+count) \
                    .all()
                # NOTE(review): the page combines conditions with or_() but
                # this count uses implicit AND -- the two can disagree;
                # confirm which is intended.
                rows_count = session.query(table) \
                    .filter(*filter_conditions) \
                    .count()
            else:
                items = session.query(table) \
                    .filter(or_(*filter_conditions)) \
                    .order_by(sorting) \
                    .all()
                rows_count = len(items)
        except DBAPIError:
            success = False
    session.close()
    items_json = []
    for row in items:
        items_json.append(row.as_json_dict())
    # On failure 'Result' is boolean False (not 'Error') -- kept as-is.
    return {
        'Result': 'OK' if success else False,
        'Records': items_json,
        'TotalRecordCount': rows_count
    }
def inforesources_name(request):
    """Return inforesource names: a single record by id, or a search page."""
    session = DBSession()
    num_rows = 0
    found = []
    ok = True
    if ('id' in request.params) and request.params['id'].isdigit():
        wanted = int(request.params['id'])
        try:
            found = session.query(Inforesources.id, Inforesources.filename) \
                .filter(Inforesources.id == wanted).all()
            num_rows = 1
        except DBAPIError:
            ok = False
    else:
        start, count = helpers.get_paging_params(request.params)
        pattern = helpers.get_parsed_search_attr(request.params, 'filename')
        conds = [Inforesources.filename.ilike(pattern)] if pattern else []
        try:
            query = session.query(Inforesources.id, Inforesources.filename) \
                .filter(*conds) \
                .order_by(Inforesources.filename)
            if (start is not None) and (count is not None):
                found = query.slice(start, start + count).all()
                num_rows = session.query(Inforesources).filter(*conds).count()
            else:
                found = query.all()
                num_rows = len(found)
        except DBAPIError:
            ok = False
    items = [{'id': rec_id, 'filename': rec_name}
             for rec_id, rec_name in found]
    session.close()
    return {
        'items': items,
        'success': ok,
        'numRows': num_rows,
        'identifier': 'id'
    }
def export_to_file(filename):
    """Write the card<->image association table to a UTF-8 CSV file."""
    fieldnames = ['card_id', 'image_id']
    with open(filename, 'wb') as out:
        writer = csv_utf.UnicodeWriter(out)
        writer.writerow(fieldnames)
        session = DBSession()
        rows = []
        for link in session.query(CardsImages).order_by(
                CardsImages.card_id, CardsImages.image_id).all():
            rows.append([link.card_id, link.image_id])
        session.close()
        writer.writerows(rows)
def table_browse_jtable(request):
    """Browse a table for jTable: single record by id or a sorted page."""
    session = DBSession()
    table, table_name = helpers.get_table_by_name(request)
    sorting = request.GET.get('jtSorting', 'id asc')
    total = 0
    records = []
    ok = True
    if ('id' in request.params) and request.params['id'].isdigit():
        record_id = int(request.params['id'])
        try:
            records = session.query(table).filter(
                table.id == record_id).all()
            total = 1
        except DBAPIError:
            ok = False
    else:
        start, count = helpers.get_jtable_paging_params(request.params)
        conditions = _get_filter_conditions(request, table)
        try:
            query = session.query(table) \
                .filter(or_(*conditions)) \
                .order_by(sorting)
            if (start is not None) and (count is not None):
                records = query.slice(start, start + count).all()
                # Count combines conditions with implicit AND, as before.
                total = session.query(table).filter(*conditions).count()
            else:
                records = query.all()
                total = len(records)
        except DBAPIError:
            ok = False
    session.close()
    return {
        'Result': 'OK' if ok else False,
        'Records': [rec.as_json_dict() for rec in records],
        'TotalRecordCount': total
    }
def export_to_file(filename):
    """Export all card-image links, ordered by card id then image id."""
    fieldnames = ['card_id', 'image_id']
    with open(filename, 'wb') as file:
        writer = csv_utf.UnicodeWriter(file)
        writer.writerow(fieldnames)
        session = DBSession()
        links = session.query(CardsImages) \
            .order_by(CardsImages.card_id, CardsImages.image_id).all()
        pairs = [[link.card_id, link.image_id] for link in links]
        session.close()
        writer.writerows(pairs)
def export_to_file(filename):
    """Export the images table to a UTF-8 CSV file, ordered by id."""
    fieldnames = ['id', 'name', 'description', 'url', 'local', 'size']
    with open(filename, 'wb') as out:
        writer = csv_utf.UnicodeWriter(out)
        writer.writerow(fieldnames)
        session = DBSession()
        rows = []
        for img in session.query(Images).order_by(Images.id).all():
            rows.append([img.id, img.name, img.description,
                         img.url, img.local, img.size])
        session.close()
        writer.writerows(rows)
def cards_jtable_browse(request):
    """jTable page of card inserters with per-person card counts."""
    if not security.authenticated_userid(request):
        raise exc.HTTPForbidden()
    rows_count = 0
    items = []
    success = True
    # Person participates twice (observer / inserter); aliases keep the
    # filter and sort helpers unambiguous.
    observer = aliased(Person)
    inserter = aliased(Person)
    aliased_info = {
        'observer': observer,
        'inserter': inserter
    }
    start, count = helpers.get_jtable_paging_params(request.params)
    filter_conditions = _get_filter_conditions(request, aliased_info)
    sorting = _get_sorting_param(request, aliased_info)
    session = DBSession()
    try:
        items = session.query(inserter, func.count(Cards.id).label('cards_count')) \
            .outerjoin(Cards, inserter.id == Cards.inserter) \
            .filter(and_(*filter_conditions)) \
            .group_by(inserter.id) \
            .order_by(sorting) \
            .slice(start, start+count) \
            .all()
        # Total = number of groups (distinct inserters), not of cards.
        rows_count = session.query(inserter, func.count(Cards.id).label('cards_count')) \
            .outerjoin(Cards, inserter.id == Cards.inserter) \
            .filter(and_(*filter_conditions)) \
            .group_by(inserter.id) \
            .count()
    except DBAPIError as err:
        print("DBAPIError error: {0}".format(err))
        success = False
    session.close()
    items_json = []
    for row in items:
        # Prefix person fields with 'inserter__' and attach the count.
        item_json = row[0].as_json_dict('inserter__')
        item_json['__cards_count'] = row[1]
        items_json.append(item_json)
    return {
        'Result': 'OK' if success else False,
        'Records': items_json,
        'TotalRecordCount': rows_count
    }
def users_manager(request):
    """Admin-only page listing all users with their linked persons."""
    if not security.has_permission('admin', request.context, request):
        raise exc.HTTPForbidden()
    session = DBSession()
    all_users = session.query(User).options(joinedload('person')).all()
    session.close()
    return {
        'title': u'Управление пользователями',
        'users': all_users,
        'is_auth': security.authenticated_userid(request),
        'is_admin': security.has_permission('admin', request.context, request)
    }
def export_to_file(filename):
    """Dump every card (row form) to a UTF-8 CSV with a header line."""
    from nextgisbio.utils import csv_utf
    fieldnames = Cards.get_all_fields_names()
    with open(filename, 'wb') as out:
        writer = csv_utf.UnicodeWriter(out)
        writer.writerow(fieldnames)
        session = DBSession()
        rows = []
        for card in session.query(Cards).order_by(Cards.id).all():
            rows.append(card.to_row())
        session.close()
        writer.writerows(rows)
def cards_table(request):
    """Cards-table page (auth required); lists persons for the filters."""
    if not security.authenticated_userid(request):
        raise exc.HTTPForbidden()
    session = DBSession()
    people = session.query(Person).order_by(Person.name).all()
    session.close()
    return {
        'title': u'Таблица карточек',
        'persons': people,
        'is_auth': security.authenticated_userid(request),
        'is_admin': security.has_permission('admin', request.context, request)
    }
def cards_table(request):
    """Render data for the cards table view; authentication required."""
    if not security.authenticated_userid(request):
        raise exc.HTTPForbidden()
    db = DBSession()
    persons = db.query(Person).order_by(Person.name).all()
    db.close()
    is_admin = security.has_permission('admin', request.context, request)
    return {
        'title': u'Таблица карточек',
        'persons': persons,
        'is_auth': security.authenticated_userid(request),
        'is_admin': is_admin
    }
def users_manager(request):
    """Render the user-management page; admins only."""
    if not security.has_permission('admin', request.context, request):
        raise exc.HTTPForbidden()
    db = DBSession()
    accounts = db.query(User).options(joinedload('person')).all()
    db.close()
    return {
        'title': u'Управление пользователями',
        'users': accounts,
        'is_auth': security.authenticated_userid(request),
        'is_admin': security.has_permission('admin', request.context, request)
    }
def persons_get_users_options(request):
    """jTable options: users without a linked person, plus 'not assigned'."""
    session = DBSession()
    unassigned = session.query(User).filter(~User.person.has()).all()
    session.close()
    options = [{'DisplayText': 'Не присвоен', 'Value': -1}]
    options.extend({'DisplayText': acc.login, 'Value': acc.id}
                   for acc in unassigned)
    return {'Result': 'OK', 'Options': options}
def table_delete_jtable(request):
    """Delete one row of the table named in the request (jTable endpoint).

    Raises when no valid numeric 'id' is posted or the row is missing.
    """
    session = DBSession()
    table, table_name = helpers.get_table_by_name(request)
    if ('id' in request.POST) and request.POST['id'].isdigit():
        item_id = int(request.POST['id'])
        item = session.query(table).get(item_id)
    else:
        raise Exception('Deleting item: id is not applied')
    if item is None:
        # Fix: .get() returns None for a missing id; the original then
        # crashed inside session.delete(None) with an unhelpful error and
        # leaked the session.
        session.close()
        raise Exception('Deleting item: item not found')
    session.delete(item)
    transaction.commit()
    session.close()
    return {'Result': 'OK'}
def taxon_cbtree(request):
    """Serve one dojo cbtree block of the taxon tree.

    'path'/'basePath' is '.' for the root, or a '/'-joined id chain whose
    last element is the parent taxon to expand.
    """
    path_name = 'path' if 'path' in request.params else 'basePath'
    hierarchical_path = request.params[path_name].replace('"', '')
    if hierarchical_path == '.':
        parent_id = None
    else:
        parent_id = int(str.split(str(hierarchical_path), '/')[-1])
    dbsession = DBSession()
    parent_taxon = dbsession.query(Taxon).filter_by(id=parent_id).first()
    children_taxons = dbsession.query(Taxon).filter_by(parent_id=parent_id).order_by(Taxon.name).all()
    dbsession.close()
    if hierarchical_path == '.':
        # Synthetic root item; its children are attached below.
        block = {
            'name': '.',
            'path': hierarchical_path,
            'directory': True,
            'total': 1,
            'status': 200,
            'items': [{
                'name': '.',
                'id': -1,
                'path': hierarchical_path,
                'directory': True
            }]
        }
    else:
        block = {
            'name': parent_taxon.name,
            'path': hierarchical_path,
            'directory': True,
            'total': 1,
            'status': 200,
            'items': []
        }
    children_taxons_json = []
    for taxon in children_taxons:
        children_taxons_json.append(_taxon_to_node(hierarchical_path, taxon))
    if hierarchical_path == '.':
        block['items'][0]['children'] = children_taxons_json
    else:
        block['items'] = children_taxons_json
    # NOTE(review): 'block' is always a non-empty dict here, so the
    # fallback branch never fires.
    return block if block else children_taxons_json
def table_delete_jtable(request):
    """Remove a person row and the user account linked to it."""
    session = DBSession()
    post = request.POST
    if 'person_id' not in post or not post['person_id'].isdigit():
        raise Exception('Deleting item: id is not applied')
    target = session.query(Person) \
        .options(joinedload('user')) \
        .get(int(post['person_id']))
    session.delete(target)
    session.delete(target.user)
    transaction.commit()
    session.close()
    return {'Result': 'OK'}
def export_to_file(filename):
    """Serialize all Images rows into a UTF-8 CSV file."""
    fieldnames = ['id', 'name', 'description', 'url', 'local', 'size']
    with open(filename, 'wb') as file:
        writer = csv_utf.UnicodeWriter(file)
        writer.writerow(fieldnames)
        session = DBSession()
        table = [
            [rec.id, rec.name, rec.description, rec.url, rec.local, rec.size]
            for rec in session.query(Images).order_by(Images.id).all()
        ]
        session.close()
        writer.writerows(table)
def karea_ann(request):
    """Return the annotations of the key area identified by the 'id' route match."""
    dbsession = DBSession()
    key_area_id = request.matchdict['id']
    karea = dbsession.query(Key_area).filter_by(id=key_area_id).one()
    annotations = [
        {'id': ann.id, 'name': ann.species_link.name, 'species': ann.species}
        for ann in karea.annotations
    ]
    dbsession.close()
    return {'data': annotations}
def species_by_redbook(request):
    """List species of a red book as merged Taxon + RedBookSpecies JSON dicts."""
    dbsession = DBSession()
    redbook_id = request.matchdict['redbook_id']
    sort_clauses = dojo.parse_sort(request)
    pairs = (dbsession.query(Taxon, RedBookSpecies)
             .join(RedBookSpecies, Taxon.id == RedBookSpecies.specie_id)
             .filter(RedBookSpecies.red_book_id == redbook_id)
             .order_by(sort_clauses)
             .all())
    rows = []
    for taxon, rb_species in pairs:
        # Python 2: dict.items() returns lists, so '+' merges the two dicts.
        rows.append(dict(taxon.as_json_dict().items() +
                         rb_species.as_json_dict().items()))
    dbsession.close()
    return rows
def export_to_file(filename):
    """Dump all Cards rows to *filename* as a UTF-8 CSV with a header row."""
    from nextgisbio.utils import csv_utf
    fieldnames = Cards.get_all_fields_names()
    with open(filename, 'wb') as file:
        writer = csv_utf.UnicodeWriter(file)
        writer.writerow(fieldnames)
        dbsession = DBSession()
        rows = [card.to_row()
                for card in dbsession.query(Cards).order_by(Cards.id).all()]
        dbsession.close()
        writer.writerows(rows)
def table_delete_jtable(request):
    """jTable delete handler: remove a Person and its associated User record."""
    session = DBSession()
    if 'person_id' in request.POST and request.POST['person_id'].isdigit():
        target_id = int(request.POST['person_id'])
        # Eager-load the user so it is available after the person is deleted.
        person = session.query(Person).options(joinedload('user')).get(target_id)
    else:
        raise Exception('Deleting item: id is not applied')
    session.delete(person)
    session.delete(person.user)
    transaction.commit()
    session.close()
    return {'Result': 'OK'}
def table_delete_jtable(request):
    """jTable delete handler for an arbitrary table resolved from the request."""
    session = DBSession()
    table, table_name = helpers.get_table_by_name(request)
    if not ('id' in request.POST and request.POST['id'].isdigit()):
        raise Exception('Deleting item: id is not applied')
    item_id = int(request.POST['id'])
    item = session.query(table).get(item_id)
    session.delete(item)
    transaction.commit()
    session.close()
    return {'Result': 'OK'}
def taxon_filter(request):
    """Case-insensitive substring search over taxon names, russian names and synonyms.

    Returns a dojo-store page ('start'/'count' params) of (id, name, author)
    matches, plus the total number of matches in 'numRows'.
    """
    query_str = request.params['name'].encode('utf-8').decode('utf-8')
    start = int(request.params['start'])
    count = int(request.params['count'])
    # We need ids, taxon names and authors (for synonyms) from the taxon and
    # synonym tables.
    dbsession = DBSession()
    try:
        # Use a bound LIKE parameter instead of interpolating user input into
        # a raw SQL string: the previous string-formatted filter was
        # SQL-injectable through the 'name' request parameter.
        pattern = u'%' + query_str.upper() + u'%'
        # search the taxon table:
        tax_all = dbsession.query(Taxon.id, Taxon.name, Taxon.author) \
            .filter(sqlalchemy.func.upper(Taxon.name).like(pattern)).all()
        rus_all = dbsession.query(Taxon.id, Taxon.russian_name, Taxon.author) \
            .filter(sqlalchemy.func.upper(Taxon.russian_name).like(pattern)).all()
        # search the synonym table:
        s_all = dbsession.query(Synonym.species_id, Synonym.synonym, Synonym.author) \
            .filter(sqlalchemy.func.upper(Synonym.synonym).like(pattern)).all()
        matches = tax_all + s_all + rus_all
        items_page = matches[start:start + count]
        dbsession.close()
    except DBAPIError:
        dbsession.close()
        return {'success': False, 'msg': 'Ошибка подключения к БД'}
    rows = []
    if matches:
        rec_id = itertools.count()
        rows = [{
            'recId': next(rec_id),
            'id': id_,
            'name': name,
            'author': author
        } for id_, name, author in items_page]
    return {
        'items': rows,
        'success': True,
        'numRows': len(matches),
        'identity': 'id'
    }
def table_download(request):
    """Stream the named table as a tab-separated CSV attachment.

    Returns an error dict when the table name is unknown or the DB is
    unreachable; otherwise a Response with the CSV body.
    """
    dbsession = DBSession()
    modelname = request.matchdict['table']
    try:
        model = table_by_name(modelname)
    except KeyError:
        return {
            'success': False,
            'msg': 'Ошибка: отсутствует таблица с указанным именем'
        }
    try:
        all_rows = dbsession.query(model).all()
        dbsession.close()
    except DBAPIError:
        # The old code built this dict but never returned it, which caused a
        # NameError on the undefined row list below; also close the session.
        dbsession.close()
        return {'success': False, 'msg': 'Ошибка подключения к БД'}
    names = [c.name for c in model.__table__.columns]
    rows = [names]
    for row in all_rows:
        rows.append([try_encode(getattr(row, name)) for name in names])
    # NOTE(review): tempfile.mktemp is race-prone; kept for compatibility with
    # the rest of the file — consider NamedTemporaryFile(delete=False).
    fname = tempfile.mktemp()
    try:
        with open(fname, 'w') as file:
            csv.writer(file, delimiter='\t').writerows(rows)
        with open(fname, 'r') as file:
            data = file.read()
        resname = modelname + '.csv'
    finally:
        # always remove the temp file
        os.remove(fname)
    return Response(content_type="application/octet-stream",
                    content_disposition="attachment; filename=%s" % (resname, ),
                    body=data)
def direct_child(request):
    """Children of a taxon node for Ext.treepanel.

    The client sends 'node' as either 'root' (top of the taxon tree) or
    'taxon_<id>' where <id> is a taxons-table record id. A node is a leaf
    when the taxon reports is_last_taxon().
    """
    node = request.params['node']
    dbsession = DBSession()
    try:
        parent_id = None if node == 'root' else int(node.split('_')[1])
        childern = dbsession.query(Taxon).filter_by(parent_id=parent_id).all()
        dbsession.close()
    except NoResultFound:
        dbsession.close()
        return {
            'success': False,
            'msg': 'Результатов, соответствующих запросу, не найдено'
        }
    # Ext reads the node label from the 'text' field: name plus author,
    # with leaf names rendered in bold.
    rows = []
    for taxon in childern:
        author = taxon.author or ''
        is_last = taxon.is_last_taxon()
        template = "<b>%s</b> %s" if is_last else "%s %s"
        rows.append({
            'id': 'taxon_' + str(taxon.id),
            'leaf': is_last,
            'text': template % (taxon.name, author)
        })
    return rows
def _get_squares_by_taxonlist(taxons, geomtype='geojson'):
    '''
    Select from the DB the squares covered by annotation lists of the taxons
    in taxons='taxon_id1,taxon_id2,...'. Return each square's geometry in the
    requested format, geomtype in ['geojson', 'wkt'].
    '''
    assert geomtype in ['geojson', 'wkt']
    dbsession = DBSession()
    if 'root' in taxons:
        # 'root' stands for the whole taxon tree: return every square.
        if geomtype == 'geojson':
            all = dbsession.query(
                Squares.id,
                sqlalchemy.func.st_asgeojson(Squares.geom.RAW)).all()
        else:
            all = dbsession.query(Squares.id,
                                  sqlalchemy.func.st_astext(
                                      Squares.geom.RAW)).all()
    else:
        # Pick the key areas where a taxon was observed, and from them the ids
        # of the squares that cover those areas.
        # NOTE(review): the subquery is spliced into raw SQL by string
        # formatting; taxons is presumably a list of numeric ids — confirm it
        # is never user-controlled text (SQL injection risk otherwise).
        subquery = TAXON_ID_QUERY % (", ".join(
            [str(num) for num in taxons]), TAXON_TYPES[len(TAXON_TYPES) - 1])
        qs = """
        SELECT DISTINCT square_id from square_karea_association
        WHERE square_karea_association.key_area_id in
        (SELECT DISTINCT key_area.id
        FROM annotation INNER JOIN key_area
        ON annotation.key_area = key_area.id""" + ' AND annotation.species IN (' + subquery + '));'
        k_set = dbsession.query(Squares.id).from_statement(qs).all()
        # from_statement yields 1-tuples; flatten to plain ids.
        k_set = [k[0] for k in k_set]
        if geomtype == 'geojson':
            all = dbsession.query(
                Squares.id,
                sqlalchemy.func.st_asgeojson(Squares.geom.RAW)).filter(
                    Squares.id.in_(k_set)).all()
        else:
            all = dbsession.query(Squares.id,
                                  sqlalchemy.func.st_astext(
                                      Squares.geom.RAW)).filter(
                                          Squares.id.in_(k_set)).all()
    dbsession.close()
    return all
def direct_child(request):
    """Return Ext.treepanel child nodes for the requested taxon node.

    'node' is 'root' for the tree top (children = taxons without a parent)
    or 'taxon_<id>' for a taxons-table record; leaves are taxons for which
    is_last_taxon() is true.
    """
    node = request.params['node']
    dbsession = DBSession()
    try:
        if node == 'root':
            childern = dbsession.query(Taxon).filter_by(parent_id=None).all()
        else:
            taxon_id = int(node.split('_')[1])
            childern = dbsession.query(Taxon).filter_by(parent_id=taxon_id).all()
        dbsession.close()
    except NoResultFound:
        dbsession.close()
        return {
            'success': False,
            'msg': 'Результатов, соответствующих запросу, не найдено'
        }
    # Ext reads the node label from 'text': taxon name plus author, bold for leaves.
    rows = []
    for taxon in childern:
        author = taxon.author if taxon.author else ''
        is_last = taxon.is_last_taxon()
        if is_last:
            text = "<b>%s</b> %s" % (taxon.name, author)
        else:
            text = "%s %s" % (taxon.name, author)
        rows.append({'id': 'taxon_' + str(taxon.id), 'leaf': is_last, 'text': text})
    return rows
def species_by_redbook(request):
    """Species of one red book, each row merging the taxon and red-book JSON dicts."""
    dbsession = DBSession()
    redbook_id = request.matchdict['redbook_id']
    order_by_clauses = dojo.parse_sort(request)
    query = dbsession.query(Taxon, RedBookSpecies)
    query = query.join(RedBookSpecies, Taxon.id == RedBookSpecies.specie_id)
    query = query.filter(RedBookSpecies.red_book_id == redbook_id)
    species = query.order_by(order_by_clauses).all()
    # Python 2: '+' concatenates the items() lists, merging both dicts.
    rows = [dict(pair[0].as_json_dict().items() + pair[1].as_json_dict().items())
            for pair in species]
    dbsession.close()
    return rows
def table_item_save(request):
    """Create or update a row of the table named in the request from POST data.

    A numeric POST 'id' selects an existing row; otherwise a new row is made.
    Every other POST field is copied onto the item verbatim.
    """
    session = DBSession()
    table, table_name = helpers.get_table_by_name(request)
    post = request.POST
    if 'id' in post and post['id'].isdigit():
        item = session.query(table).get(int(post['id']))
    else:
        item = table()
    for attr in post:
        if attr == 'id':
            continue
        setattr(item, attr, post[attr])
    session.add(item)
    # Serialize before commit: the instance is detached afterwards.
    item_as_json = item.as_json_dict()
    transaction.commit()
    session.close()
    return {'Result': 'OK', 'Record': item_as_json}
def table_download(request):
    """Download the named table as a tab-separated CSV attachment.

    Returns an error dict when the table name is unknown or the DB is
    unreachable; otherwise a Response with the CSV body.
    """
    dbsession = DBSession()
    modelname = request.matchdict['table']
    try:
        model = table_by_name(modelname)
    except KeyError:
        return {'success': False,
                'msg': 'Ошибка: отсутствует таблица с указанным именем'}
    try:
        all_rows = dbsession.query(model).all()
        dbsession.close()
    except DBAPIError:
        # The old code built this dict but never returned it, which caused a
        # NameError on the undefined row list below; also close the session.
        dbsession.close()
        return {'success': False, 'msg': 'Ошибка подключения к БД'}
    names = [c.name for c in model.__table__.columns]
    rows = [names]
    for row in all_rows:
        rows.append([try_encode(getattr(row, name)) for name in names])
    # NOTE(review): tempfile.mktemp is race-prone; kept for compatibility with
    # the rest of the file — consider NamedTemporaryFile(delete=False).
    fname = tempfile.mktemp()
    try:
        with open(fname, 'w') as file:
            csv.writer(file, delimiter='\t').writerows(rows)
        with open(fname, 'r') as file:
            data = file.read()
        resname = modelname + '.csv'
    finally:
        # always remove the temp file
        os.remove(fname)
    return Response(content_type="application/octet-stream",
                    content_disposition="attachment; filename=%s" % (resname, ),
                    body=data)
def persons_jtable_browse(request):
    """jTable browse handler: one page of Person+User rows plus the total count."""
    session = DBSession()
    rows_count = 0
    items = []
    success = True
    start, count = helpers.get_jtable_paging_params(request.params)
    filter_conditions = _get_filter_conditions(request)
    sorting = _get_sorting_param(request)
    try:
        items = session.query(Person, User) \
            .join(User) \
            .filter(or_(*filter_conditions)) \
            .order_by(sorting) \
            .slice(start, start + count) \
            .all()
        # Count with the same joined, OR-combined filter as the page query:
        # the old code AND-ed the conditions (and skipped the join), so
        # TotalRecordCount disagreed with the rows actually listed.
        rows_count = session.query(Person) \
            .join(User) \
            .filter(or_(*filter_conditions)) \
            .count()
    except DBAPIError:
        success = False
    session.close()
    items_json = []
    for row in items:
        person = row[0].as_json_dict('person_')
        user = row[1].as_json_dict('user_')
        item_json = person.copy()
        item_json.update(user)
        items_json.append(item_json)
    return {
        'Result': 'OK' if success else False,
        'Records': items_json,
        'TotalRecordCount': rows_count
    }
def cards_by_user(request):
    """Page data for the "cards added per year" report.

    Raises HTTPForbidden for anonymous users. 'years' spans every year from
    the earliest to the latest card creation date, inclusive.
    """
    if not security.authenticated_userid(request):
        raise exc.HTTPForbidden()
    session = DBSession()
    query = session.query(
        func.max(Cards.added_date).label('max_date'),
        func.min(Cards.added_date).label('min_date')
    )
    min_max = query.one()
    max_year = min_max.max_date.year
    min_year = min_max.min_date.year
    session.close()
    return {
        'title': u'Отчет о внесении карточек',
        # min_year/max_year are already ints; the old code called .year on
        # them again (AttributeError whenever min != max) and excluded
        # max_year from the range. This inclusive range also yields
        # [min_year] when the two are equal, matching the old special case.
        'years': range(min_year, max_year + 1),
        'is_auth': security.authenticated_userid(request),
        'is_admin': security.has_permission('admin', request.context, request)
    }
def persons_jtable_browse(request):
    """jTable browse handler: a page of Person+User records and the matching total."""
    session = DBSession()
    rows_count = 0
    items = []
    success = True
    start, count = helpers.get_jtable_paging_params(request.params)
    filter_conditions = _get_filter_conditions(request)
    sorting = _get_sorting_param(request)
    try:
        items = session.query(Person, User) \
            .join(User) \
            .filter(or_(*filter_conditions)) \
            .order_by(sorting) \
            .slice(start, start + count) \
            .all()
        # Use the same joined, OR-combined filter as the page query above:
        # the old count AND-ed the conditions (and skipped the join), so
        # TotalRecordCount disagreed with the listed rows.
        rows_count = session.query(Person) \
            .join(User) \
            .filter(or_(*filter_conditions)) \
            .count()
    except DBAPIError:
        success = False
    session.close()
    items_json = []
    for person_row, user_row in items:
        item_json = person_row.as_json_dict('person_').copy()
        item_json.update(user_row.as_json_dict('user_'))
        items_json.append(item_json)
    return {
        'Result': 'OK' if success else False,
        'Records': items_json,
        'TotalRecordCount': rows_count
    }
def table_view(request):
    """Fetch one row of the named table as JSON plus an 'editable' flag.

    'editable' is true for admins, for the row's inserter, or for rows whose
    model has no 'inserter' attribute at all.
    """
    can_i_edit = has_permission('edit', request.context, request)
    can_i_edit = isinstance(can_i_edit, ACLAllowed)
    user_id = authenticated_userid(request)
    try:
        model = table_by_name(request.matchdict['table'])
    except KeyError:
        return {
            'success': False,
            'msg': 'Ошибка: отсутствует таблица с указанным именем'
        }
    dbsession = DBSession()
    try:
        entity = dbsession.query(model).filter_by(
            id=request.matchdict['id']).one()
        user = dbsession.query(User).filter_by(
            id=user_id).one() if can_i_edit else None
        result = {'data': entity.as_json_dict(), 'success': True}
    except NoResultFound:
        # The old code fell through here and touched the undefined 'entity'
        # below, raising NameError instead of reporting the miss; return the
        # error dict directly and close the session.
        dbsession.close()
        return {
            'success': False,
            'msg': 'Результатов, соответствующих запросу, не найдено'
        }
    if hasattr(entity, 'inserter'):
        if isinstance(has_permission('admin', request.context, request), ACLAllowed):
            is_editable = True
        else:
            is_editable = entity.inserter == user.person_id if user else False
    else:
        is_editable = True
    result['editable'] = is_editable
    dbsession.close()
    return result
def export_to_file(filename):
    """Dump every red book species record (joined with its taxon) via dump()."""
    from nextgisbio.utils.dump_to_file import dump
    dbsession = DBSession()
    records = (dbsession.query(RedBook, RedBookSpecies, Taxon)
               .join(RedBookSpecies, RedBook.id == RedBookSpecies.red_book_id)
               .join(Taxon, RedBookSpecies.specie_id == Taxon.id)
               .order_by(RedBook.id, RedBookSpecies.specie_id)
               .all())
    dbsession.close()
    attribute_names = [
        'region', 'orig_name', 'lat_name', 'author', 'population', 'status',
        'univ_status', 'year', 'bibl'
    ]
    objects_for_dump = []
    for red_book, rb_species, taxon in records:
        objects_for_dump.append([
            rb_species.region, rb_species.orig_name, taxon.name,
            rb_species.author, rb_species.population, rb_species.status,
            rb_species.univ_status, rb_species.year, red_book.name
        ])
    dump(filename, attribute_names, objects_for_dump, is_array=True)