def update_related_items(table, db_relative_field, relative_field_name,
                         old_id_from_csv, new_corrected_id):
    """Repoint rows of `table` from a stale person id to the corrected one.

    :param table: mapped class whose rows reference Person.
    :param db_relative_field: column object used in the join/filter.
    :param relative_field_name: name (string) of the attribute to update.
    :param old_id_from_csv: stale Person id found in the imported data.
    :param new_corrected_id: id of the Person row to point to instead.
    """
    session = DBSession()
    # Fail fast if the corrected person does not exist (.one() raises).
    new_person = session.query(Person)\
        .filter(Person.id == new_corrected_id)\
        .one()
    # One query object serves both the count and the iteration (previously
    # the same join/filter was built twice).
    items = session.query(table)\
        .outerjoin(Person, db_relative_field == Person.id)\
        .filter(db_relative_field == old_id_from_csv)
    if items.count() > 0:
        with transaction.manager:
            save_session = DBSession()
            for item in items:
                save_session.query(table).filter_by(id=item.id).update(
                    {relative_field_name: new_person.id})
            # NOTE: the explicit transaction.commit() that used to sit here
            # was removed -- transaction.manager already commits on scope
            # exit, so the extra call was redundant.
    session.close()
def get_child_taxons_by_parent(request):
    """Return the children of a taxon as jsTree items.

    The parent id comes from request param 'id'; the value '#' denotes the
    virtual root node.  When 'isFullData' == 'true', full taxon data is
    embedded in each item.
    """
    raw_parent_id = request.params['id']
    full_data = ('isFullData' in request.params
                 ) and request.params['isFullData'] == 'true'
    root_requested = raw_parent_id == '#'
    parent_id = None if root_requested else int(raw_parent_id)

    session = DBSession()
    children = session.query(Taxon).filter_by(
        parent_id=parent_id).order_by(Taxon.name).all()
    session.close()

    items = [_taxon_to_jsTree_item(taxon, full_data) for taxon in children]

    if root_requested:
        root_item = _get_root_jsTree_item()
        root_item['children'] = items
        return root_item
    return items
def table_view(request):
    """Return one observation card as JSON plus an 'editable' flag.

    Coordinates are zeroed for users without the 'edit' permission so the
    exact observation location is not disclosed.
    """
    can_i_edit = has_permission('edit', request.context, request)
    can_i_edit = isinstance(can_i_edit, ACLAllowed)
    user_id = authenticated_userid(request)
    dbsession = DBSession()
    card, user = None, None
    try:
        card = dbsession.query(Cards).filter_by(id=request.matchdict['id']).one()
        user = dbsession.query(User).filter_by(id=user_id).one() if can_i_edit else None
        result = card.as_json_dict()
    except NoResultFound:
        result = {'success': False,
                  'msg': 'Результатов, соответствующих запросу, не найдено'}
    # Hide the real coordinates from non-editors.  Only when a card was
    # actually found: previously the keys were injected into the error
    # payload as well.
    if not can_i_edit and card is not None:
        result['lat'] = 0
        result['lon'] = 0
    if isinstance(has_permission('admin', request.context, request), ACLAllowed):
        is_editable = True
    else:
        # Guard against card being None (lookup failed): previously this
        # raised AttributeError for authenticated editors on missing cards.
        is_editable = (card is not None and user is not None
                       and card.inserter == user.person_id)
    dbsession.close()
    return {'data': result, 'editable': is_editable, 'success': True}
def post(self):
    """Update an existing Synonym row from the JSON request body.

    The body must contain the synonym 'id'; all supplied keys are written
    back to the matching row.  Returns the received dict unchanged.
    """
    payload = dict(self.request.json)
    with transaction.manager:
        session = DBSession()
        session.query(Synonym).filter_by(id=payload['id']).update(payload)
    return payload
def taxon_tree(request):
    """Return a taxon and its direct children as a JSON tree node.

    The matchdict value 'root' selects the virtual tree root; any other
    value is interpreted as a numeric taxon id.
    """
    raw_id = request.matchdict['taxon_parent_id']
    is_root = raw_id == 'root'
    parent_id = None if is_root else int(raw_id)

    with transaction.manager:
        session = DBSession()
        parent = session.query(Taxon).filter_by(id=parent_id).first()
        children = session.query(Taxon).filter_by(parent_id=parent_id).all()

    if is_root:
        node = {'id': 'root', 'name': 'Все таксоны'}
    else:
        node = parent.as_json_dict()

    node['children'] = [_taxon_to_json(child) for child in children]
    return node
def redbook_filter(request):
    """Filter red books by a case-insensitive substring of their name.

    Request params: 'name' -- search string; 'start'/'count' -- paging
    window applied to the sorted result.  Returns a dojo-store payload.
    """
    dbsession = DBSession()
    query_str = request.params['name'].encode('utf-8').decode('utf-8')
    start = int(request.params['start'])
    count = int(request.params['count'])
    try:
        # Parameterized ilike instead of the previous hand-built
        # "UPPER(name) LIKE '%...%'" string, which interpolated user input
        # straight into SQL (injection risk).  ilike is case-insensitive,
        # matching the old UPPER/LIKE semantics.
        name_filter = RedBook.name.ilike(u'%{0}%'.format(query_str))
        order_by_clauses = dojo.parse_sort(request)
        red_books = dbsession.query(RedBook.id, RedBook.name)\
            .filter(name_filter)\
            .order_by(order_by_clauses)\
            .all()
        items_page = red_books[start:start + count]
    except DBAPIError:
        # close the session before bailing out (it used to leak here)
        dbsession.close()
        return {'success': False, 'msg': 'Ошибка подключения к БД'}
    rows = [{'id': book_id, 'name': name} for book_id, name in items_page]
    dbsession.close()
    return {
        'items': rows,
        'success': True,
        # NOTE(review): this is the page size, not the total match count --
        # kept as-is since the client apparently expects it.
        'numRows': len(items_page),
        'identity': 'id'
    }
def s_ka_association_download(request):
    """Download the square<->key-area association table as a TSV attachment."""
    dbsession = DBSession()
    try:
        associations = dbsession.query(square_keyarea_association).all()
    except DBAPIError:
        # Previously the error dict was only assigned and execution fell
        # through, crashing on the undefined result set; return it instead.
        dbsession.close()
        return {'success': False, 'msg': 'Ошибка подключения к БД'}

    names = ['square_id', 'key_area_id']
    rows = [names]
    for row in associations:
        rows.append([try_encode(getattr(row, name)) for name in names])

    fname = tempfile.mktemp()
    try:
        # context managers close the handles even on error (they used to leak)
        with open(fname, 'w') as csv_file:
            writer = csv.writer(csv_file, delimiter='\t')
            writer.writerows(rows)
        with open(fname, 'r') as csv_file:
            data = csv_file.read()
        resname = 'square_karea_association.csv'
    finally:
        # remove the temp file in any case
        os.remove(fname)
    dbsession.close()
    return Response(content_type="application/octet-stream",
                    content_disposition="attachment; filename=%s" % (resname, ),
                    body=data)
def add_from_file(filename):
    '''
    Add rows to the taxon table from the file `filename` (tab-delimited).

    The file is CSV with columns:
    id  parent_id  old_id  taxon_type  name  russian_name  author  source
    (the 'id' column is read but not passed on: the database assigns ids).
    Empty cells become NULL.
    '''
    import transaction
    with transaction.manager:
        dbsession = DBSession()
        # `with open` closes the handle (it used to leak); next(reader)
        # replaces the Python-2-only reader.next().
        with open(filename) as csv_file:
            reader = csv.reader(csv_file, delimiter='\t')
            next(reader)  # skip the header row
            for row in reader:
                (taxon_id, parent_id, old_id, taxon_type, name,
                 russian_name, author, source) = \
                    [None if x == '' else x for x in row]
                taxon = Taxon(parent_id=parent_id, old_id=old_id,
                              taxon_type=taxon_type, name=name,
                              russian_name=russian_name, author=author,
                              source=source)
                dbsession.add(taxon)
def add_from_file(filename):
    '''
    Add rows to the person table from the delimited file `filename`.

    The file is CSV (tab-delimited) with columns:
    id name fullname speciality degree organization position email phone address
    (the 'id' column is read but not passed on: the database assigns ids).
    '''
    import transaction
    with transaction.manager:
        dbsession = DBSession()
        # `with open` closes the handle (it used to leak); next(reader)
        # replaces the Python-2-only reader.next().
        with open(filename) as csv_file:
            reader = csv.reader(csv_file, delimiter='\t')
            next(reader)  # skip the header row
            for (person_id, name, fullname, speciality, degree, organization,
                 position, email, phone, address) in reader:
                person = Person(name=name, fullname=fullname,
                                speciality=speciality, degree=degree,
                                organization=organization, position=position,
                                email=email, phone=phone, address=address)
                dbsession.add(person)
def square(request):
    """Return the square identified by matchdict 'id' with its key areas."""
    session = DBSession()
    square_id = request.matchdict['id']
    found = session.query(Squares).filter_by(id=square_id).one()
    key_areas = []
    for area in found.key_areas:
        key_areas.append({'id': area.id, 'name': area.name})
    session.close()
    return {'id': found.id, 'key_areas': key_areas}
def new_card(request):
    """Create a new observation card from the POSTed form data."""
    with transaction.manager:
        session = DBSession()
        card = Cards()
        _update_card_attributes(card, dict(request.POST))
        # a freshly created card starts with added == edited timestamp
        card.added_date = datetime.now()
        card.edited_date = card.added_date
        session.add(card)
    return {}
def export_to_file(filename):
    """Dump the whole taxon table (ordered by id) into a delimited file."""
    from nextgisbio.utils.dump_to_file import dump
    fieldnames = ['id', 'parent_id', 'old_id', 'taxon_type', 'name',
                  'russian_name', 'author', 'source']
    session = DBSession()
    taxons = session.query(Taxon).order_by(Taxon.id).all()
    dump(filename, fieldnames, taxons)
def export_to_file(filename):
    """Dump the whole person table (ordered by id) into a delimited file."""
    from nextgisbio.utils.dump_to_file import dump
    fieldnames = ['id', 'name', 'fullname', 'speciality', 'degree',
                  'organization', 'position', 'email', 'phone', 'address']
    dump(filename, fieldnames,
         DBSession().query(Person).order_by(Person.id).all())
def points_text(request):
    """Return observation points (cards) for the taxons selected by the client.

    The querystring carries 'nodes=taxon_id1,taxon_id2', e.g.
    "nodes=taxon_1,taxon_5", meaning the user selected the taxon table rows
    with id=1 and id=5; the observation cards of those taxons are returned.
    The boundary case where all cards are requested is nodes="root_".
    An optional 'red_book' param restricts cards to species listed in the
    given red book (-1 means no restriction).
    """
    dbsession = DBSession()
    try:
        taxons = request.params['nodes']
    except KeyError:
        taxons = ''
    red_book_id = None
    if 'red_book' in request.params:
        red_book_id = int(request.params['red_book'])
        if red_book_id == -1:
            red_book_id = None
    can_i_edit = has_permission('edit', request.context, request)
    can_i_edit = isinstance(can_i_edit, ACLAllowed)
    if taxons:
        taxons = urllib.unquote(taxons)
        taxon_id = taxons.split(',')
        if 'root' in taxons:
            cards = dbsession.query(Cards, Taxon).join(Taxon).all()
        else:
            # Collect the descendant species of the selected taxons and the
            # cards linked to them.
            # NOTE(review): the ids are interpolated into raw SQL and come
            # from the client -- looks injectable; consider bind parameters.
            subquery = TAXON_ID_QUERY % (", ".join([str(num) for num in taxon_id]), TAXON_TYPES[len(TAXON_TYPES) - 1])
            qs = """ SELECT cards.id,cards.species,cards.lat,cards.lon, taxon.name FROM cards INNER JOIN taxon ON cards.species = taxon.id %s WHERE """ % (
                'INNER JOIN red_books_species ON cards.species = red_books_species.specie_id' if red_book_id else '') \
                + ((' red_books_species.red_book_id = ' + str(red_book_id) + ' AND ') if red_book_id else '') \
                + ' cards.species IN (' + subquery + ');'
            cards = dbsession.query(Cards, Taxon).from_statement(qs).all()
        points = []
        for card, taxon in cards:
            id, spec_id, lat, lon = card.id, card.species, card.lat, card.lon
            name = taxon.name
            if lat and lon:
                if not can_i_edit:
                    # The real coordinates must not be shown: shift them by
                    # roughly 10 km in a random direction before display.
                    lat = lat + (random() - random()) / 7
                    lon = lon + (random() - random()) / 4
                points.append({'lat': lat, 'lon': lon, 'name': name, 'card_id': id, 'spec_id': spec_id})
    else:
        # NOTE(review): the empty selection yields a dict while the branch
        # above yields a list -- presumably the client tolerates both; verify.
        points = {}
    dbsession.close()
    return {'points': points}
def export_to_file(filename):
    """Dump the whole inforesources table (ordered by id) into a delimited file."""
    from nextgisbio.utils.dump_to_file import dump
    fieldnames = [
        'id', 'doc_type_id', 'filename', 'fullname', 'author', 'magazine',
        'pages', 'mammals', 'birds', 'reptiles', 'amphibians', 'fish',
        'invertebrates', 'vascular', 'bryophytes', 'lichens', 'fungi', 'maps'
    ]
    session = DBSession()
    resources = session.query(Inforesources).order_by(Inforesources.id).all()
    dump(filename, fieldnames, resources)
def put(self):
    """Create a Synonym for the taxon in the URL from the POSTed fields.

    Empty string values are stored as NULL; keys that do not match a
    Synonym attribute are ignored.
    """
    fields = dict(self.request.POST)
    with transaction.manager:
        session = DBSession()
        synonym = Synonym()
        for key, value in fields.items():
            if value == '':
                value = None
            if hasattr(synonym, key):
                setattr(synonym, key, value)
        synonym.species_id = int(self.request.matchdict['taxon_id'])
        session.add(synonym)
def inforesources_name(request):
    """List inforesources as {id, filename} pairs.

    With a numeric 'id' param a single resource is looked up; otherwise the
    listing is optionally filtered by 'filename' (ilike) and paged via the
    standard paging params.  Returns a dojo-store payload.
    """
    session = DBSession()
    success = True
    num_rows = 0
    found = []

    if ('id' in request.params) and request.params['id'].isdigit():
        resource_id = int(request.params['id'])
        try:
            found = session.query(Inforesources.id, Inforesources.filename)\
                .filter(Inforesources.id == resource_id).all()
            num_rows = 1
        except DBAPIError:
            success = False
    else:
        start, count = helpers.get_paging_params(request.params)
        parsed_filename = helpers.get_parsed_search_attr(
            request.params, 'filename')
        conditions = []
        if parsed_filename:
            conditions.append(Inforesources.filename.ilike(parsed_filename))
        # building the query does not touch the DB; execution happens
        # inside the try below
        listing = session.query(Inforesources.id, Inforesources.filename) \
            .filter(*conditions) \
            .order_by(Inforesources.filename)
        try:
            if (start is not None) and (count is not None):
                found = listing.slice(start, start + count).all()
                num_rows = session.query(Inforesources) \
                    .filter(*conditions) \
                    .count()
            else:
                found = listing.all()
                num_rows = len(found)
        except DBAPIError:
            success = False

    items = [{'id': rid, 'filename': name} for (rid, name) in found]
    session.close()
    return {
        'items': items,
        'success': success,
        'numRows': num_rows,
        'identifier': 'id'
    }
def anns_text(request):
    """Return annotated species lists for the selected taxons in one square.

    The querystring carries 'nodes=taxon_id1,taxon_id2' (e.g.
    "nodes=taxon_1,taxon_5"): the annotations of the selected taxons (or of
    all taxons when nodes contains "root") are returned, restricted to the
    key areas of the square given by matchdict 'id'.
    """
    dbsession = DBSession()
    # Key areas belonging to the square:
    id = request.matchdict['id']
    square = dbsession.query(Squares).filter_by(id=id).one()
    key_areas = [str(s.id) for s in square.key_areas]
    key_areas = ", ".join(key_areas)
    try:
        taxons_id = request.params['nodes']
    except KeyError:
        taxons_id = ''
    can_i_edit = has_permission('edit', request.context, request)
    can_i_edit = isinstance(can_i_edit, ACLAllowed)
    # fix: result list initialized up front -- it used to be created only
    # inside the branch below, so an empty 'nodes' selection raised
    # NameError on return.
    squares = []
    if taxons_id:
        taxons_id = urllib.unquote(taxons_id)
        taxons_id = taxons_id.split(',')
        if "root" in taxons_id:
            # fix: removed a full ORM query whose result was immediately
            # overwritten by the raw-SQL query below.
            qs = """ SELECT annotation.id,annotation.species, taxon.name FROM annotation INNER JOIN taxon ON annotation.species = taxon.id """ \
                + ' AND annotation.key_area IN ( %s ) ;' % (key_areas, )
            anns = dbsession.query(Annotation, Taxon).from_statement(qs).all()
        else:
            # Descendant species of the selected taxons with their
            # annotations in the key areas of square `id`.
            # NOTE(review): ids are interpolated into raw SQL (client
            # input); consider bind parameters.
            subquery = TAXON_ID_QUERY % (", ".join([str(num) for num in taxons_id]), TAXON_TYPES[len(TAXON_TYPES) - 1])
            qs = """ SELECT annotation.id,annotation.species, taxon.name FROM annotation INNER JOIN taxon ON annotation.species = taxon.id """ \
                + ' AND annotation.key_area IN ( %s ) ' % (key_areas, ) \
                + ' AND annotation.species IN (' + subquery + ');'
            anns = dbsession.query(Annotation, Taxon).from_statement(qs).all()
        for ann, taxon in anns:
            squares.append({'name': taxon.name,
                            'ann_id': ann.id,
                            'spec_id': ann.species})
    dbsession.close()
    return {'data': squares}
def squares_text(request):
    """Return every square with its geometry serialized as GeoJSON."""
    session = DBSession()
    rows = session.query(Squares, sqlalchemy.func.st_asgeojson(
        Squares.geom.RAW)).all()
    squares = [{'id': sq.id, 'geom': geom} for sq, geom in rows]
    session.close()
    return {'squares': squares}
def delete_anlist(request):
    """Delete the annotation given by matchdict 'id'; report success as a flag."""
    annotation_id = request.matchdict['id']
    success = True
    try:
        with transaction.manager:
            dbsession = DBSession()
            annotation = dbsession.query(Annotation).filter_by(id=annotation_id).one()
            dbsession.delete(annotation)
    except Exception:
        # narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; deliberately best-effort otherwise
        # (missing row or DB error both just yield success=False).
        success = False
    return {'success': success}
def delete_card(request):
    """Delete the card given by matchdict 'id'; report success as a flag."""
    card_id = request.matchdict['id']
    success = True
    try:
        with transaction.manager:
            dbsession = DBSession()
            card = dbsession.query(Cards).filter_by(id=card_id).one()
            dbsession.delete(card)
    except Exception:
        # narrowed from a bare `except:` which also swallowed
        # KeyboardInterrupt/SystemExit; deliberately best-effort otherwise
        # (missing row or DB error both just yield success=False).
        success = False
    return {'success': success}
def taxon_cbtree(request):
    """Return a cbtree 'directory' block for one level of the taxon hierarchy.

    The 'path' (or 'basePath') param is a '/'-joined id path; '.' denotes
    the virtual root.  Children of the last path element are returned as
    the block's items.
    """
    path_name = 'path' if 'path' in request.params else 'basePath'
    hierarchical_path = request.params[path_name].replace('"', '')
    is_root = hierarchical_path == '.'
    if is_root:
        parent_id = None
    else:
        # the last path component is the parent taxon id
        parent_id = int(str.split(str(hierarchical_path), '/')[-1])
    dbsession = DBSession()
    parent_taxon = dbsession.query(Taxon).filter_by(id=parent_id).first()
    children_taxons = dbsession.query(Taxon).filter_by(
        parent_id=parent_id).order_by(Taxon.name).all()
    dbsession.close()
    children_taxons_json = [_taxon_to_node(hierarchical_path, taxon)
                            for taxon in children_taxons]
    if is_root:
        block = {
            'name': '.',
            'path': hierarchical_path,
            'directory': True,
            'total': 1,
            'status': 200,
            'items': [{
                'name': '.',
                'id': -1,
                'path': hierarchical_path,
                'directory': True,
                'children': children_taxons_json
            }]
        }
    else:
        block = {
            'name': parent_taxon.name,
            'path': hierarchical_path,
            'directory': True,
            'total': 1,
            'status': 200,
            'items': children_taxons_json
        }
    # fix: removed dead `block if block else children_taxons_json` --
    # block is always a non-empty dict at this point.
    return block
def __parent_in_list__(self, list):
    '''
    Return True if this taxon is a child of any taxon whose name appears
    in `list`.
    '''
    session = DBSession()
    matches = session.query(Taxon).filter(Taxon.name.in_(list)).all()
    # any() short-circuits exactly like the original early-return loop
    return any(self.child_of(taxon.id) for taxon in matches)
def get_synonyms(request):
    """Return the synonyms of the taxon from matchdict 'taxon_id' as JSON,
    setting a Content-Range header with the item count.
    """
    session = DBSession()
    taxon_id = int(request.matchdict['taxon_id'])
    synonyms = session.query(Synonym).filter_by(species_id=taxon_id).all()
    result = [synonym.as_json_dict() for synonym in synonyms]
    total = len(result)
    request.response.headerlist = [
        ('Content-Range', '{0}-{1}/{2}'.format(0, total, total))
    ]
    return result
def upload_image(request):
    """Store an uploaded image for a card (or another object).

    The file from the multipart POST is saved under
    static/data/images/<YYYY-MM-DD>/<uuid4>.jpg, a thumbnail is generated
    for every size in THUMBNAIL_SIZES, and an Images row (plus a
    CardsImages link when the target type is 'card') is created.
    Returns the image record as a JSON dict.
    """
    filename = request.POST['file'].filename
    input_file = request.POST['file'].file
    obj_id = request.matchdict['id']
    obj_type = request.matchdict['type']
    path_to_images = os.path.join(os.path.dirname(nextgisbio.__file__), 'static/data/images')
    date_now = datetime.datetime.now().strftime('%Y-%m-%d')
    path_to_images_now = os.path.join(path_to_images, date_now)
    # only the date subdirectory is created -- the base images directory is
    # assumed to exist already
    if not os.path.exists(path_to_images_now):
        os.mkdir(path_to_images_now)
    # from http://stackoverflow.com/questions/2782229/most-lightweight-way-to-create-a-random-string-and-a-random-hexadecimal-number
    random_file_name = str(uuid.uuid4())
    base_file_path = os.path.join(path_to_images_now, '.'.join([random_file_name, 'jpg']))
    with open(base_file_path, 'wb') as output_file:
        shutil.copyfileobj(input_file, output_file)
    # one thumbnail per configured size; failures are logged but do not
    # abort the upload
    for key_size in THUMBNAIL_SIZES:
        try:
            im = Image.open(base_file_path)
            im.thumbnail(THUMBNAIL_SIZES[key_size], Image.BICUBIC)
            im.save(os.path.join(
                path_to_images_now,
                '.'.join([random_file_name + '_' + key_size, 'jpg'])),
                'JPEG', quality=70)
        except IOError:
            print "cannot create thumbnail for '%s'" % base_file_path
    with transaction.manager:
        dbSession = DBSession()
        image = Images()
        image.name = filename
        image.url = '/static/data/images/%s/%s.jpg' % (date_now, random_file_name)
        image.size = os.path.getsize(base_file_path)
        image.local = base_file_path
        dbSession.add(image)
        if obj_type == 'card':
            card_image = CardsImages()
            card_image.image = image
            card_image.card = dbSession.query(Cards).filter_by(id=obj_id).one()
            dbSession.add(card_image)
        # serialized inside the transaction so the instance is still bound
        # to the session
        photo_json = image.as_json_dict()
    return photo_json
def table_browse_jtable(request):
    """Browse an arbitrary table for jTable: a single row by 'id', or a
    filtered, sorted, paged listing.
    """
    session = DBSession()
    table, table_name = helpers.get_table_by_name(request)
    sorting = request.GET[
        'jtSorting'] if 'jtSorting' in request.GET else 'id asc'
    rows_count = 0
    items = []
    success = True
    if ('id' in request.params) and request.params['id'].isdigit():
        row_id = int(request.params['id'])
        try:
            items = session.query(table) \
                .filter(table.id == row_id) \
                .all()
            rows_count = 1
        except DBAPIError:
            success = False
    else:
        start, count = helpers.get_jtable_paging_params(request.params)
        filter_conditions = _get_filter_conditions(request, table)
        try:
            if (start is not None) and (count is not None):
                items = session.query(table) \
                    .filter(or_(*filter_conditions)) \
                    .order_by(sorting) \
                    .slice(start, start + count) \
                    .all()
                # fix: the count used to combine the conditions with AND
                # (.filter(*conditions)) while the listing used OR, so the
                # reported total disagreed with the rows actually returned.
                rows_count = session.query(table) \
                    .filter(or_(*filter_conditions)) \
                    .count()
            else:
                items = session.query(table) \
                    .filter(or_(*filter_conditions)) \
                    .order_by(sorting) \
                    .all()
                rows_count = len(items)
        except DBAPIError:
            success = False
    session.close()
    items_json = [row.as_json_dict() for row in items]
    return {
        'Result': 'OK' if success else False,
        'Records': items_json,
        'TotalRecordCount': rows_count
    }
def import_from_csv(path_to_file):
    """Load card<->image links from a tab-delimited CSV file.

    Expected columns: card_id, image_id; a header row is skipped and empty
    cells are stored as NULL.
    """
    with transaction.manager:
        session = DBSession()
        # `with open` closes the handle (it used to leak); next(reader)
        # replaces the Python-2-only reader.next().
        with open(path_to_file) as csv_file:
            reader = csv.reader(csv_file, delimiter='\t')
            next(reader)  # skip the header row
            for row in reader:
                card_id, image_id = [None if x == '' else x for x in row]
                session.add(CardsImages(card_id=card_id, image_id=image_id))
def export_to_file(filename):
    """Write all card<->image links to `filename` via UnicodeWriter."""
    fieldnames = ['card_id', 'image_id']
    with open(filename, 'wb') as out:
        writer = csv_utf.UnicodeWriter(out)
        writer.writerow(fieldnames)
        session = DBSession()
        rows = []
        for link in session.query(CardsImages).order_by(
                CardsImages.card_id, CardsImages.image_id).all():
            rows.append([link.card_id, link.image_id])
        session.close()
        writer.writerows(rows)
def export_to_file(filename):
    """Write the whole images table to `filename` via UnicodeWriter."""
    fieldnames = ['id', 'name', 'description', 'url', 'local', 'size']
    with open(filename, 'wb') as out:
        writer = csv_utf.UnicodeWriter(out)
        writer.writerow(fieldnames)
        session = DBSession()
        rows = [
            [img.id, img.name, img.description, img.url, img.local, img.size]
            for img in session.query(Images).order_by(Images.id).all()
        ]
        session.close()
        writer.writerows(rows)
def export_to_file(filename):
    """Dump square<->key-area pairs to `filename` via the shared dump helper."""
    from nextgisbio.utils.dump_to_file import dump
    fieldnames = ['square_id', 'key_area_id']
    session = DBSession()
    squares_from_db = session.query(Squares).join(
        Squares.key_areas).order_by(Squares.id).all()
    # flatten each square into one [square_id, key_area_id] row per key area
    pairs = []
    for square in squares_from_db:
        for area in square.key_areas:
            pairs.append([square.id, area.id])
    dump(filename, fieldnames, pairs, is_array=True)