def table_view(request):
    """Return one row of the table named in the URL as JSON.

    Adds an 'editable' flag based on the current user's edit/admin
    permissions and ownership of the record.
    """
    can_i_edit = has_permission('edit', request.context, request)
    can_i_edit = isinstance(can_i_edit, ACLAllowed)
    user_id = authenticated_userid(request)
    try:
        model = table_by_name(request.matchdict['table'])
    except KeyError:
        return {'success': False, 'msg': 'Ошибка: отсутствует таблица с указанным именем'}
    dbsession = DBSession()
    try:
        entity = dbsession.query(model).filter_by(id=request.matchdict['id']).one()
        user = dbsession.query(User).filter_by(id=user_id).one() if can_i_edit else None
        result = {'data': entity.as_json_dict(), 'success': True}
    except NoResultFound:
        # BUG FIX: the original fell through after this handler and
        # dereferenced the unbound 'entity', raising UnboundLocalError.
        # Return the error payload immediately instead.
        dbsession.close()
        return {'success': False, 'msg': 'Результатов, соответствующих запросу, не найдено'}
    if hasattr(entity, 'inserter'):
        if isinstance(has_permission('admin', request.context, request), ACLAllowed):
            is_editable = True
        else:
            # Non-admins may only edit records they inserted themselves.
            is_editable = entity.inserter == user.person_id if user else False
    else:
        # Tables without an 'inserter' column are editable by anyone with
        # the 'edit' permission.
        is_editable = True
    result['editable'] = is_editable
    dbsession.close()
    return result
def s_ka_association_download(request):
    """Dump the square<->key-area association table as a TSV attachment."""
    dbsession = DBSession()
    try:
        associations = dbsession.query(square_keyarea_association).all()
    except DBAPIError:
        # BUG FIX: the original built this error dict but then fell through
        # and used the unbound query result, raising NameError.
        dbsession.close()
        return {'success': False, 'msg': 'Ошибка подключения к БД'}
    names = ['square_id', 'key_area_id']
    rows = [names]
    for row in associations:
        rows.append([try_encode(getattr(row, name)) for name in names])
    # FIX: mkstemp() instead of the race-prone mktemp().
    fd, fname = tempfile.mkstemp()
    try:
        tmp = os.fdopen(fd, 'w')
        try:
            writer = csv.writer(tmp, delimiter='\t')
            writer.writerows(rows)
        finally:
            tmp.close()
        tmp = open(fname, 'r')
        try:
            data = tmp.read()
        finally:
            tmp.close()
        resname = 'square_karea_association.csv'
    finally:
        # в любом случае удаляем файл (always remove the temp file)
        os.remove(fname)
        dbsession.close()
    return Response(content_type="application/octet-stream",
                    content_disposition="attachment; filename=%s" % (resname, ),
                    body=data)
def export_to_file(filename):
    """Export red-book species (joined with their taxa) to *filename*."""
    from nextgisbio.utils.dump_to_file import dump
    session = DBSession()
    query = session.query(RedBook, RedBookSpecies, Taxon)
    query = query.join(RedBookSpecies, RedBook.id == RedBookSpecies.red_book_id)
    query = query.join(Taxon, RedBookSpecies.specie_id == Taxon.id)
    query = query.order_by(RedBook.id, RedBookSpecies.specie_id)
    records = query.all()
    session.close()
    attribute_names = ['region', 'orig_name', 'lat_name', 'author',
                       'population', 'status', 'univ_status', 'year', 'bibl']
    objects_for_dump = []
    for book, species, taxon in records:
        objects_for_dump.append([
            species.region, species.orig_name, taxon.name, species.author,
            species.population, species.status, species.univ_status,
            species.year, book.name,
        ])
    dump(filename, attribute_names, objects_for_dump, is_array=True)
def add_from_file(filename):
    '''
    Add rows to the Person table from the tab-separated file *filename*.
    CSV columns: id name fullname speciality degree organization position
    email phone address
    '''
    import transaction
    with transaction.manager:
        dbsession = DBSession()
        # FIX: close the csv file instead of leaking the handle.
        csv_file = open(filename)
        try:
            reader = csv.reader(csv_file, delimiter='\t')
            next(reader)  # skip the header row
            for (id_, name, fullname, speciality, degree, organization,
                 position, email, phone, address) in reader:
                person = Person(
                    name=name, fullname=fullname, speciality=speciality,
                    degree=degree, organization=organization,
                    position=position, email=email, phone=phone,
                    address=address)
                dbsession.add(person)
        finally:
            csv_file.close()
def parent_taxons(taxon_id):
    """ Return the parent taxa of the given taxon (kingdoms first, species last). """
    dbsession = DBSession()
    # FIX: coerce to int before interpolating into raw SQL so a malicious
    # taxon_id cannot inject SQL.
    qs = """
        WITH RECURSIVE subtree AS (
            SELECT * FROM taxon WHERE id=%s
            UNION ALL
            SELECT t.* FROM taxon AS t, subtree AS st
            WHERE (st.parent_id = t.id)
        )
        SELECT * FROM subtree ;
    """ % (int(taxon_id), )
    taxons = dbsession.query(Taxon).from_statement(qs).all()
    # Sort so that kingdoms come first and species last.
    taxons.sort(key=lambda x: TAXON_TYPES.index(x.taxon_type))
    return taxons
def add_from_file(filename):
    """
    Add rows to the taxon table from the tab-separated file *filename*.
    CSV columns: id parent_id old_id taxon_type name russian_name author source
    """
    import transaction
    with transaction.manager:
        dbsession = DBSession()
        # FIX: close the csv file instead of leaking the handle.
        csv_file = open(filename)
        try:
            reader = csv.reader(csv_file, delimiter="\t")
            next(reader)  # skip the header row
            for row in reader:
                id_, parent_id, old_id, taxon_type, name, russian_name, author, source = [
                    None if x == "" else x for x in row
                ]
                taxon = Taxon(
                    parent_id=parent_id,
                    old_id=old_id,
                    taxon_type=taxon_type,
                    name=name,
                    russian_name=russian_name,
                    author=author,
                    source=source,
                )
                dbsession.add(taxon)
        finally:
            csv_file.close()
def taxon_tree(request):
    """Return the taxon named in the URL together with its direct children."""
    taxon_parent_id = request.matchdict['taxon_parent_id']
    is_root = taxon_parent_id == 'root'
    parent_id = None if is_root else int(taxon_parent_id)
    with transaction.manager:
        dbsession = DBSession()
        parent_taxon = dbsession.query(Taxon).filter_by(id=parent_id).first()
        children_taxons = dbsession.query(Taxon).filter_by(parent_id=parent_id).all()
        if is_root:
            parent_taxon_json = {'id': 'root', 'name': 'Все таксоны'}
        else:
            parent_taxon_json = parent_taxon.as_json_dict()
        if is_root:
            parent_taxon_json['id'] = 'root'
        parent_taxon_json['children'] = [
            _taxon_to_json(taxon) for taxon in children_taxons
        ]
        return parent_taxon_json
def redbook_filter(request):
    """Filter red books by a case-insensitive name substring, with paging."""
    dbsession = DBSession()
    query_str = request.params['name'].encode('utf-8').decode('utf-8')
    start = int(request.params['start'])
    count = int(request.params['count'])
    try:
        # FIX: the original interpolated user input into a raw SQL LIKE
        # clause (SQL injection). ilike() performs the same case-insensitive
        # match with proper parameter binding.
        name_filter = RedBook.name.ilike(u'%' + query_str + u'%')
        order_by_clauses = dojo.parse_sort(request)
        red_books = dbsession.query(RedBook.id, RedBook.name)\
            .filter(name_filter)\
            .order_by(order_by_clauses)\
            .all()
        itemsPage = red_books[start:start + count]
    except DBAPIError:
        # FIX: close the session on the error path too.
        dbsession.close()
        return {'success': False, 'msg': 'Ошибка подключения к БД'}
    rows = [{'id': book_id, 'name': name} for book_id, name in itemsPage]
    dbsession.close()
    return {'items': rows, 'success': True, 'numRows': len(itemsPage), 'identity': 'id'}
def get_child_taxons_by_parent(request):
    """Return jsTree children of the given parent taxon ('#' means root)."""
    parent_taxon_id = request.params['id']
    is_full_data = ('isFullData' in request.params) and request.params['isFullData'] == 'true'
    is_root_node_requsted = parent_taxon_id == '#'
    parent_taxon_id = None if is_root_node_requsted else int(parent_taxon_id)
    session = DBSession()
    children = session.query(Taxon)\
        .filter_by(parent_id=parent_taxon_id)\
        .order_by(Taxon.name)\
        .all()
    session.close()
    children_json = [_taxon_to_jsTree_item(taxon, is_full_data) for taxon in children]
    if is_root_node_requsted:
        result = _get_root_jsTree_item()
        result['children'] = children_json
        return result
    return children_json
def get_synonyms(request):
    """Return all synonyms of a taxon; sets a Content-Range header."""
    dbsession = DBSession()
    taxon_id = int(request.matchdict['taxon_id'])
    synonyms = dbsession.query(Synonym).filter_by(species_id=taxon_id).all()
    synonyms_json = [synonym.as_json_dict() for synonym in synonyms]
    # FIX: the session was never closed; serialize first, then close.
    dbsession.close()
    count_synonyms = len(synonyms_json)
    request.response.headerlist = [
        ('Content-Range', '{0}-{1}/{2}'.format(0, count_synonyms, count_synonyms))]
    return synonyms_json
def square(request):
    """Return one square and its key areas as JSON."""
    session = DBSession()
    square_id = request.matchdict['id']
    sq = session.query(Squares).filter_by(id=square_id).one()
    key_areas = [{'id': area.id, 'name': area.name} for area in sq.key_areas]
    session.close()
    return {'id': sq.id, 'key_areas': key_areas}
def new_card(request):
    """Create a Cards record from POSTed attributes."""
    with transaction.manager:
        session = DBSession()
        card = Cards()
        _update_card_attributes(card, dict(request.POST))
        now = datetime.now()
        card.added_date = now
        card.edited_date = now
        session.add(card)
    return {}
def squares_text(request):
    """Return every square's id with its geometry as GeoJSON."""
    session = DBSession()
    records = session.query(
        Squares, sqlalchemy.func.st_asgeojson(Squares.geom.RAW)).all()
    squares = [{'id': sq.id, 'geom': geom} for sq, geom in records]
    session.close()
    return {'squares': squares}
def put(self):
    """Create a Synonym for the taxon in the URL from POSTed fields."""
    new_synonym_dict = dict(self.request.POST)
    with transaction.manager:
        session = DBSession()
        synonym = Synonym()
        for field, value in new_synonym_dict.items():
            if value == '':
                value = None
            if hasattr(synonym, field):
                setattr(synonym, field, value)
        synonym.species_id = int(self.request.matchdict['taxon_id'])
        session.add(synonym)
def add_from_file(filename):
    '''
    Add rows to the Inforesources table from the tab-separated file *filename*.
    CSV columns: id doc_type_id filename fullname author magazine pages
    mammals birds reptiles amphibians fish invertebrates vascular bryophytes
    lichens fungi maps
    '''
    import transaction
    with transaction.manager:
        dbsession = DBSession()
        # FIX: close the csv file instead of leaking the handle.
        csv_file = open(filename)
        try:
            reader = csv.reader(csv_file, delimiter='\t')
            next(reader)  # skip the header row
            for row in reader:
                (id_, doc_type_id, filename_col, fullname, author, magazine,
                 pages, mammals, birds, reptiles, amphibians, fish,
                 invertebrates, vascular, bryophytes, lichens, fungi,
                 maps) = [None if x == '' else x for x in row]
                infores = Inforesources(
                    doc_type_id=doc_type_id, filename=filename_col,
                    fullname=fullname, author=author, magazine=magazine,
                    pages=pages, mammals=mammals, birds=birds,
                    reptiles=reptiles, amphibians=amphibians, fish=fish,
                    invertebrates=invertebrates, vascular=vascular,
                    bryophytes=bryophytes, lichens=lichens, fungi=fungi,
                    maps=maps)
                dbsession.add(infores)
        finally:
            csv_file.close()
def taxon_type(request):
    """Classify the taxon from the URL into broad biological groups."""
    taxon_id = request.matchdict['id']
    with transaction.manager:
        session = DBSession()
        taxon = session.query(Taxon).filter(Taxon.id == taxon_id).one()
        return {
            'mammalia': taxon.is_mammalia(),
            'aves': taxon.is_aves(),
            'plantae': taxon.is_plantae(),
            'ara': taxon.is_ara(),
            'arthropoda': taxon.is_arthropoda(),
            'moss': taxon.is_moss(),
            'lichenes': taxon.is_lichenes(),
        }
def main(argv=sys.argv):
    """CLI entry point: dump data; optionally renumber ids from zero."""
    if len(argv) not in (2, 3):
        usage(argv)
    config_uri = argv[1]
    setup_logging(config_uri)
    settings = get_appsettings(config_uri)
    engine = engine_from_config(settings, 'sqlalchemy.')
    DBSession.configure(bind=engine)
    dir_name = dump_data()
    if len(argv) == 3 and argv[2] == '--make-id-start-0':
        id_verificator.start(dir_name)
def delete_anlist(request):
    """Delete the annotation with the id from the URL."""
    annotation_id = request.matchdict['id']
    success = True
    try:
        with transaction.manager:
            dbsession = DBSession()
            annotation = dbsession.query(Annotation).filter_by(id=annotation_id).one()
            dbsession.delete(annotation)
    except Exception:
        # FIX: narrowed from a bare 'except:' which also swallowed
        # SystemExit/KeyboardInterrupt.
        success = False
    return {'success': success}
def delete_card(request):
    """Delete the card with the id from the URL."""
    card_id = request.matchdict['id']
    success = True
    try:
        with transaction.manager:
            dbsession = DBSession()
            card = dbsession.query(Cards).filter_by(id=card_id).one()
            dbsession.delete(card)
    except Exception:
        # FIX: narrowed from a bare 'except:' which also swallowed
        # SystemExit/KeyboardInterrupt.
        success = False
    return {'success': success}
def karea_ann(request):
    """Return the annotations attached to one key area."""
    session = DBSession()
    karea_id = request.matchdict['id']
    karea = session.query(Key_area).filter_by(id=karea_id).one()
    annotations = [
        {'id': ann.id, 'name': ann.species_link.name, 'species': ann.species}
        for ann in karea.annotations
    ]
    session.close()
    return {'data': annotations}
def __parent_in_list__(self, list):
    """ Return True if this taxon descends from any taxon whose name
    appears in *list*. """
    session = DBSession()
    matches = session.query(Taxon).filter(Taxon.name.in_(list)).all()
    # Short-circuits on the first ancestor hit, like the original loop.
    return any(self.child_of(taxon.id) for taxon in matches)
def species_by_taxon(taxon_ids):
    """ Return the species that are descendants of the taxa in taxon_ids. """
    session = DBSession()
    ids_csv = ", ".join(str(num) for num in taxon_ids)
    qs = TAXON_ALL_QUERY % (ids_csv, TAXON_TYPES[-1]) + ";"
    rows = session.query(Taxon.id, Taxon.taxon_type, Taxon.name,
                         Taxon.author, Taxon.source).from_statement(qs).all()
    keys = ("id", "taxon_type", "name", "author", "source")
    return [dict(zip(keys, row)) for row in rows]
def import_from_csv(path_to_file):
    """Import image rows from the tab-separated file *path_to_file*."""
    with transaction.manager:
        session = DBSession()
        # FIX: close the csv file instead of leaking the handle.
        csv_file = open(path_to_file)
        try:
            reader = csv.reader(csv_file, delimiter='\t')
            next(reader)  # skip the header row
            for row in reader:
                id_image, name, description, url, local, size = [
                    None if x == '' else x for x in row]
                image = Images(name=name, description=description, url=url,
                               local=local, size=size)
                session.add(image)
        finally:
            csv_file.close()
def import_from_csv(path_to_file):
    """Import card<->image links from the tab-separated file *path_to_file*."""
    with transaction.manager:
        session = DBSession()
        # FIX: close the csv file instead of leaking the handle.
        csv_file = open(path_to_file)
        try:
            reader = csv.reader(csv_file, delimiter='\t')
            next(reader)  # skip the header row
            for row in reader:
                card_id, image_id = [None if x == '' else x for x in row]
                session.add(CardsImages(card_id=card_id, image_id=image_id))
        finally:
            csv_file.close()
def export_to_file(filename):
    """Export card<->image links to *filename* as UTF-8 CSV."""
    fieldnames = ['card_id', 'image_id']
    with open(filename, 'wb') as out:
        writer = csv_utf.UnicodeWriter(out)
        writer.writerow(fieldnames)
        session = DBSession()
        links = session.query(CardsImages)\
            .order_by(CardsImages.card_id, CardsImages.image_id)\
            .all()
        rows = [[link.card_id, link.image_id] for link in links]
        session.close()
        writer.writerows(rows)
def save_card(request):
    """Update an existing card from POSTed attributes."""
    card_from_client = dict(request.POST)
    card_id = request.matchdict['id']
    success = True
    try:
        with transaction.manager:
            dbsession = DBSession()
            card = dbsession.query(Cards).filter_by(id=card_id).one()
            card.edited_date = datetime.now()
            _update_card_attributes(card, card_from_client)
    except Exception:
        # FIX: narrowed from a bare 'except:' which also swallowed
        # SystemExit/KeyboardInterrupt.
        success = False
    return {'success': success}
def main(argv=sys.argv):
    """CLI entry point: parse the bundled source data and verify ids."""
    if len(argv) not in (2, 3):
        usage(argv)
    config_uri = argv[1]
    setup_logging(config_uri)
    settings = get_appsettings(config_uri)
    engine = engine_from_config(settings, 'sqlalchemy.')
    DBSession.configure(bind=engine)
    script_dir = os.path.dirname(__file__)
    source_data_dir = os.path.join(script_dir, "../source_data/")
    parse_data(source_data_dir)
    verify_ids()
def cards_jtable_browse(request):
    """jTable endpoint: group cards by inserter with per-person counts."""
    if not security.authenticated_userid(request):
        raise exc.HTTPForbidden()
    rows_count = 0
    items = []
    success = True
    observer = aliased(Person)
    inserter = aliased(Person)
    aliased_info = {'observer': observer, 'inserter': inserter}
    start, count = helpers.get_jtable_paging_params(request.params)
    filter_conditions = _get_filter_conditions(request, aliased_info)
    sorting = _get_sorting_param(request, aliased_info)
    session = DBSession()
    # The same grouped query serves both the page slice and the total count.
    base_query = session.query(inserter, func.count(Cards.id).label('cards_count')) \
        .outerjoin(Cards, inserter.id == Cards.inserter) \
        .filter(and_(*filter_conditions)) \
        .group_by(inserter.id)
    try:
        items = base_query.order_by(sorting).slice(start, start + count).all()
        rows_count = base_query.count()
    except DBAPIError as err:
        print("DBAPIError error: {0}".format(err))
        success = False
    session.close()
    items_json = []
    for person_row, cards_count in items:
        item_json = person_row.as_json_dict('inserter__')
        item_json['__cards_count'] = cards_count
        items_json.append(item_json)
    return {
        'Result': 'OK' if success else False,
        'Records': items_json,
        'TotalRecordCount': rows_count
    }
def taxon_type(request):
    """Report which broad biological groups the taxon belongs to."""
    taxon_id = request.matchdict['id']
    group_checks = (('mammalia', 'is_mammalia'), ('aves', 'is_aves'),
                    ('plantae', 'is_plantae'), ('ara', 'is_ara'),
                    ('arthropoda', 'is_arthropoda'), ('moss', 'is_moss'),
                    ('lichenes', 'is_lichenes'))
    with transaction.manager:
        session = DBSession()
        taxon = session.query(Taxon).filter(Taxon.id == taxon_id).one()
        return dict((key, getattr(taxon, method)()) for key, method in group_checks)
def add_from_file(filename):
    '''
    Add rows to the Museum table from the tab-separated file *filename*.
    CSV columns: id museum
    '''
    import transaction
    with transaction.manager:
        dbsession = DBSession()
        # FIX: close the csv file instead of leaking the handle.
        csv_file = open(filename)
        try:
            reader = csv.reader(csv_file, delimiter='\t')
            next(reader)  # skip the header row
            for id_, m in reader:
                dbsession.add(Museum(museum=m))
        finally:
            csv_file.close()
def add_from_file(filename):
    '''
    Add rows to the Key_area table from the tab-separated file *filename*.
    CSV columns: id area_type legend name
    '''
    import transaction
    with transaction.manager:
        dbsession = DBSession()
        # FIX: close the csv file instead of leaking the handle.
        csv_file = open(filename)
        try:
            reader = csv.reader(csv_file, delimiter='\t')
            next(reader)  # skip the header row
            for id_, atype_id, pr_id, k in reader:
                dbsession.add(Key_area(area_type=atype_id, legend=pr_id, name=k))
        finally:
            csv_file.close()
def add_from_file(filename):
    '''
    Add rows to the Taxa_scheme table from the tab-separated file *filename*.
    CSV columns: id taxa_scheme
    '''
    import transaction
    with transaction.manager:
        dbsession = DBSession()
        # FIX: close the csv file instead of leaking the handle.
        csv_file = open(filename)
        try:
            reader = csv.reader(csv_file, delimiter='\t')
            next(reader)  # skip the header row
            for id_, t_scheme in reader:
                dbsession.add(Taxa_scheme(taxa_scheme=t_scheme))
        finally:
            csv_file.close()
def add_from_file(users_csv_file_path, md5_pass):
    """Import users from a TSV file.

    When *md5_pass* is true the password column is stored as-is (already
    hashed); otherwise it is hashed with User.password_hash.
    """
    import transaction
    with transaction.manager:
        dbsession = DBSession()
        # FIX: close the csv file instead of leaking the handle.
        csv_file = open(users_csv_file_path)
        try:
            reader = csv.reader(csv_file, delimiter='\t')
            next(reader)  # skip the header row
            for row in reader:
                (id_, login, password, person_id, role) = [
                    None if x == '' else x for x in row]
                user = User(
                    login=login,
                    password=password if md5_pass else User.password_hash(password),
                    role=role, person_id=person_id, active=True)
                dbsession.add(user)
        finally:
            csv_file.close()
def export_to_file(filename):
    """Dump the whole Inforesources table to *filename*."""
    from nextgisbio.utils.dump_to_file import dump
    fieldnames = [
        'id', 'doc_type_id', 'filename', 'fullname', 'author', 'magazine',
        'pages', 'mammals', 'birds', 'reptiles', 'amphibians', 'fish',
        'invertebrates', 'vascular', 'bryophytes', 'lichens', 'fungi', 'maps'
    ]
    # FIX: keep a reference to the session so it can be closed; the
    # original created an anonymous session that was never released.
    session = DBSession()
    records = session.query(Inforesources).order_by(Inforesources.id).all()
    dump(filename, fieldnames, records)
    session.close()
def table_browse_jtable(request):
    """jTable browse endpoint for an arbitrary table.

    Returns one record when an 'id' parameter is given, otherwise a
    filtered/sorted/paged listing.
    """
    session = DBSession()
    table, table_name = helpers.get_table_by_name(request)
    # NOTE(review): jtSorting comes straight from the client and is passed
    # to order_by() as raw SQL -- confirm it is validated upstream.
    sorting = request.GET[
        'jtSorting'] if 'jtSorting' in request.GET else 'id asc'
    rows_count = 0
    items = []
    success = True
    if ('id' in request.params) and request.params['id'].isdigit():
        record_id = int(request.params['id'])
        try:
            items = session.query(table) \
                .filter(table.id == record_id) \
                .all()
            rows_count = 1
        except DBAPIError:
            success = False
    else:
        start, count = helpers.get_jtable_paging_params(request.params)
        filter_conditions = _get_filter_conditions(request, table)
        try:
            if (start is not None) and (count is not None):
                items = session.query(table) \
                    .filter(or_(*filter_conditions)) \
                    .order_by(sorting) \
                    .slice(start, start + count) \
                    .all()
                # FIX: the count query previously AND-ed the conditions
                # while the listing OR-ed them, so TotalRecordCount could
                # disagree with the rows actually shown.
                rows_count = session.query(table) \
                    .filter(or_(*filter_conditions)) \
                    .count()
            else:
                items = session.query(table) \
                    .filter(or_(*filter_conditions)) \
                    .order_by(sorting) \
                    .all()
                rows_count = len(items)
        except DBAPIError:
            success = False
    session.close()
    items_json = [row.as_json_dict() for row in items]
    return {
        'Result': 'OK' if success else False,
        'Records': items_json,
        'TotalRecordCount': rows_count
    }
def species_by_taxon(taxon_ids):
    '''
    Return the species that are descendants of the taxa listed in taxon_ids.
    '''
    session = DBSession()
    qs = TAXON_ALL_QUERY % (
        ", ".join([str(num) for num in taxon_ids]),
        TAXON_TYPES[len(TAXON_TYPES) - 1],
    ) + ';'
    raw = session.query(Taxon.id, Taxon.taxon_type, Taxon.name,
                        Taxon.author, Taxon.source).from_statement(qs).all()
    result = []
    for taxon_id, taxon_type, name, author, source in raw:
        result.append({'id': taxon_id, 'taxon_type': taxon_type,
                       'name': name, 'author': author, 'source': source})
    return result
def main(argv=sys.argv):
    """CLI entry point: read a CSV and load it into the database."""
    usage = 'usage: %prog -i INPUTFILE -c <config_uri>\n (example: "%prog -i data.scv development.ini")'
    parser = OptionParser(usage=usage)
    parser.add_option("-i", "--input", dest="infile", action='store',
                      type="string", help="read csv from INPUT file")
    parser.add_option("-c", "--config", dest="config", action='store',
                      type="string", help="config file")
    (options, args) = parser.parse_args()
    # FIX: compare against None with 'is', not '=='.
    if options.infile is None or options.config is None:
        parser.error("incorrect number of arguments")
    config_uri = options.config
    setup_logging(config_uri)
    settings = get_appsettings(config_uri)
    engine = engine_from_config(settings, 'sqlalchemy.')
    DBSession.configure(bind=engine)
    # Populate the tables with the parsed data:
    gen_sql(read_data(options.infile))
def table_delete_jtable(request):
    """Delete a person (and the linked user) by POSTed person_id."""
    session = DBSession()
    if ('person_id' in request.POST) and request.POST['person_id'].isdigit():
        person_id = int(request.POST['person_id'])
        person = session.query(Person).options(
            joinedload('user')).get(person_id)
    else:
        raise Exception('Deleting item: id is not applied')
    if person is None:
        # FIX: query.get() returns None for a missing id; the original then
        # crashed with AttributeError instead of reporting the problem.
        session.close()
        raise Exception('Deleting item: person not found')
    session.delete(person)
    if person.user is not None:
        # FIX: guard against a person record without an attached user.
        session.delete(person.user)
    transaction.commit()
    session.close()
    return {'Result': 'OK'}
def species_by_redbook(request):
    """List species of one red book, merged with their taxon attributes."""
    session = DBSession()
    redbook_id = request.matchdict['redbook_id']
    order_by_clauses = dojo.parse_sort(request)
    species = session.query(Taxon, RedBookSpecies) \
        .join(RedBookSpecies, Taxon.id == RedBookSpecies.specie_id) \
        .filter(RedBookSpecies.red_book_id == redbook_id) \
        .order_by(order_by_clauses) \
        .all()
    rows = []
    for taxon, redbook_species in species:
        merged = dict(taxon.as_json_dict().items() +
                      redbook_species.as_json_dict().items())
        rows.append(merged)
    session.close()
    return rows
def person_name(request):
    """Dojo store endpoint: person ids and names, by id or by name search."""
    session = DBSession()
    numRows = 0
    persons = []
    success = True
    if ('id' in request.params) and request.params['id'].isdigit():
        person_id = int(request.params['id'])
        try:
            persons = session.query(Person.id, Person.name)\
                .filter(Person.id == person_id).all()
            numRows = 1
        except DBAPIError:
            success = False
    else:
        start, count = helpers.get_paging_params(request.params)
        parsed_name = helpers.get_parsed_search_attr(request.params)
        filter_conditions = []
        if parsed_name:
            filter_conditions.append(Person.name.ilike(parsed_name))
        try:
            base = session.query(Person.id, Person.name)\
                .filter(*filter_conditions)\
                .order_by(Person.name)
            if (start is not None) and (count is not None):
                persons = base.slice(start, start + count).all()
                numRows = session.query(Person)\
                    .filter(*filter_conditions)\
                    .count()
            else:
                persons = base.all()
                numRows = len(persons)
        except DBAPIError:
            success = False
    persons_json = [{'id': pid, 'name': pname} for pid, pname in persons]
    session.close()
    return {
        'items': persons_json,
        'success': success,
        'numRows': numRows,
        'identifier': 'id'
    }
def taxon_filter(request):
    """Search taxa and synonyms by name substring; return a paged
    id/name/author listing."""
    query_str = request.params['name'].encode('utf-8').decode('utf-8')
    start = int(request.params['start'])
    count = int(request.params['count'])
    # Pull ids, names and authors (for synonyms) out of the taxon and
    # synonym tables.
    session = DBSession()
    try:
        upper = query_str.upper()
        # search in the taxon table:
        name_filter = u"UPPER({0}) LIKE '%{1}%'".format('name', upper)
        tax_all = session.query(Taxon.id, Taxon.name, Taxon.author)\
            .filter(name_filter).all()
        rus_filter = u"UPPER({0}) LIKE '%{1}%'".format('russian_name', upper)
        rus_all = session.query(Taxon.id, Taxon.russian_name, Taxon.author)\
            .filter(rus_filter).all()
        # search in the synonym table:
        syn_filter = u"UPPER({0}) LIKE '%{1}%'".format('synonym', upper)
        s_all = session.query(Synonym.species_id, Synonym.synonym, Synonym.author)\
            .filter(syn_filter).all()
        combined = tax_all + s_all + rus_all
        itemsPage = combined[start:start + count]
        session.close()
    except DBAPIError:
        session.close()
        return {'success': False, 'msg': 'Ошибка подключения к БД'}
    rows = []
    if combined:
        rec_id = itertools.count()
        rows = [{'recId': next(rec_id), 'id': taxon_id, 'name': name,
                 'author': author}
                for taxon_id, name, author in itemsPage]
    return {
        'items': rows,
        'success': True,
        'numRows': len(combined),
        'identity': 'id'
    }
def table_view(request):
    """Return one card as JSON with an 'editable' flag.

    Coordinates are zeroed out for users without edit permission.
    """
    can_i_edit = has_permission('edit', request.context, request)
    can_i_edit = isinstance(can_i_edit, ACLAllowed)
    user_id = authenticated_userid(request)
    dbsession = DBSession()
    try:
        card = dbsession.query(Cards).filter_by(id=request.matchdict['id']).one()
        user = dbsession.query(User).filter_by(id=user_id).one() if can_i_edit else None
        result = card.as_json_dict()
    except NoResultFound:
        # FIX: the original fell through with card=None, injected lat/lon
        # into the error dict and then crashed on card.inserter. Report the
        # failure right away instead.
        dbsession.close()
        return {'data': {'success': False,
                         'msg': 'Результатов, соответствующих запросу, не найдено'},
                'editable': False, 'success': False}
    if not can_i_edit:
        # zero out the coordinates before showing them to non-editors
        result['lat'] = 0
        result['lon'] = 0
    if isinstance(has_permission('admin', request.context, request), ACLAllowed):
        is_editable = True
    else:
        # Non-admins may edit only the cards they inserted themselves.
        is_editable = card.inserter == user.person_id if user else False
    dbsession.close()
    return {'data': result, 'editable': is_editable, 'success': True}
def create_taxon(request):
    """Create a Taxon from POSTed attributes and return it as JSON."""
    new_data = dict(request.POST)
    session = DBSession()
    taxon = Taxon()
    for field, value in new_data.items():
        if hasattr(taxon, field):
            setattr(taxon, field, None if value == '' else value)
    session.add(taxon)
    session.flush()
    session.refresh(taxon)
    return {'item': taxon.as_json_dict()}
def export_to_file(filename):
    """Dump square<->key-area id pairs via the generic dump helper."""
    from nextgisbio.utils.dump_to_file import dump
    fieldnames = ['square_id', 'key_area_id']
    squares_from_db = DBSession().query(Squares)\
        .join(Squares.key_areas)\
        .order_by(Squares.id)\
        .all()
    pairs = [[sq.id, area.id]
             for sq in squares_from_db
             for area in sq.key_areas]
    dump(filename, fieldnames, pairs, is_array=True)
def upload_image(request):
    """Store an uploaded image on disk, create thumbnails and register it
    in the database; optionally links it to a card.

    Returns the new image record as a JSON dict.
    """
    filename = request.POST['file'].filename
    input_file = request.POST['file'].file
    obj_id = request.matchdict['id']
    obj_type = request.matchdict['type']
    # Images are stored under static/data/images/<YYYY-MM-DD>/.
    path_to_images = os.path.join(os.path.dirname(nextgisbio.__file__), 'static/data/images')
    date_now = datetime.datetime.now().strftime('%Y-%m-%d')
    path_to_images_now = os.path.join(path_to_images, date_now)
    if not os.path.exists(path_to_images_now):
        os.mkdir(path_to_images_now)
    # from http://stackoverflow.com/questions/2782229/most-lightweight-way-to-create-a-random-string-and-a-random-hexadecimal-number
    # A random UUID serves as the on-disk file name to avoid collisions.
    random_file_name = str(uuid.uuid4())
    base_file_path = os.path.join(path_to_images_now, '.'.join([random_file_name, 'jpg']))
    with open(base_file_path, 'wb') as output_file:
        shutil.copyfileobj(input_file, output_file)
    # One resized copy per configured thumbnail size, saved next to the
    # original as <name>_<size>.jpg. Failures are logged, not fatal.
    for key_size in THUMBNAIL_SIZES:
        try:
            im = Image.open(base_file_path)
            im.thumbnail(THUMBNAIL_SIZES[key_size], Image.BICUBIC)
            im.save(os.path.join(
                path_to_images_now,
                '.'.join([random_file_name + '_' + key_size, 'jpg'])),
                'JPEG', quality=70)
        except IOError:
            print "cannot create thumbnail for '%s'" % base_file_path
    with transaction.manager:
        dbSession = DBSession()
        image = Images()
        image.name = filename
        image.url = '/static/data/images/%s/%s.jpg' % (date_now, random_file_name)
        image.size = os.path.getsize(base_file_path)
        image.local = base_file_path
        dbSession.add(image)
        if obj_type == 'card':
            # Link the image to the card named in the URL.
            card_image = CardsImages()
            card_image.image = image
            card_image.card = dbSession.query(Cards).filter_by(id=obj_id).one()
            dbSession.add(card_image)
        photo_json = image.as_json_dict()
    return photo_json
def _get_squares_by_taxonlist(taxons, geomtype='geojson'):
    '''
    Select the squares that contain annotation lists for the taxa in
    taxons='taxon_id1,taxon_id2,...'. The geometry of each square is
    returned in the requested format, geomtype in ['geojson', 'wkt'].
    '''
    assert geomtype in ['geojson', 'wkt']
    # Pick the geometry serializer once instead of branching per query.
    if geomtype == 'geojson':
        geom_func = sqlalchemy.func.st_asgeojson
    else:
        geom_func = sqlalchemy.func.st_astext
    dbsession = DBSession()
    if 'root' in taxons:
        results = dbsession.query(Squares.id, geom_func(Squares.geom.RAW)).all()
    else:
        # Find the key areas where the taxa were observed, then the ids of
        # the squares covering those key areas:
        subquery = TAXON_ID_QUERY % (", ".join(
            [str(num) for num in taxons]), TAXON_TYPES[len(TAXON_TYPES) - 1])
        qs = """ SELECT DISTINCT square_id from square_karea_association WHERE square_karea_association.key_area_id in (SELECT DISTINCT key_area.id FROM annotation INNER JOIN key_area ON annotation.key_area = key_area.id""" \
            + ' AND annotation.species IN (' + subquery + '));'
        square_ids = [row[0] for row in
                      dbsession.query(Squares.id).from_statement(qs).all()]
        results = dbsession.query(Squares.id, geom_func(Squares.geom.RAW))\
            .filter(Squares.id.in_(square_ids)).all()
    dbsession.close()
    return results
def add_from_file(filename):
    '''
    Add annotation rows from the tab-separated file *filename*.
    CSV columns: id species key_area identifier collecter biblioref
    original_name location lon lat biotop difference substrat status
    frequency quantity annotation infosourse year month day exposure
    '''
    import transaction
    with transaction.manager:
        dbsession = DBSession()
        # FIX: close the csv file instead of leaking the handle.
        csv_file = open(filename)
        try:
            reader = csv.reader(csv_file, delimiter='\t')
            next(reader)  # skip the header row
            for row in reader:
                # FIX: the original normalised the row twice and printed a
                # leftover debug line for every record.
                (id_, species, inserter, key_area, identifier, collecter,
                 biblioref, original_name, location, lon, lat, biotop,
                 difference, substrat, status, frequency, quantity,
                 annotation, infosourse, year, month, day, exposure
                 ) = [None if x == '' else x for x in row]
                ann = Annotation(
                    species=species, inserter=inserter, key_area=key_area,
                    identifier=identifier, collecter=collecter,
                    biblioref=biblioref, original_name=original_name,
                    location=location, lon=lon, lat=lat, biotop=biotop,
                    difference=difference, substrat=substrat, status=status,
                    frequency=frequency, quantity=quantity,
                    annotation=annotation, infosourse=infosourse, year=year,
                    month=month, day=day, exposure=exposure)
                dbsession.add(ann)
        finally:
            csv_file.close()
def db_handle(csv_data, data_structure_item):
    """Aggregate rows of the main table by their relation and print the
    per-relation counts, most frequent first."""
    main_field = data_structure_item['main']['field']
    main_table = data_structure_item['main']['db_table']
    relation_table = data_structure_item['relation']['db_table']
    relation_table_name = data_structure_item['relation']['db_table_name']
    left_field = data_structure_item['main']['db_table_id']
    right_field = data_structure_item['relation']['db_table_id']
    session = DBSession()
    db_items = session.query(main_table, left_field, relation_table_name)\
        .outerjoin(relation_table, left_field == right_field)\
        .all()
    aggregated = {}
    for db_item in db_items:
        rel_id = db_item[1]
        entry = aggregated.get(rel_id)
        if entry is None:
            aggregated[rel_id] = {'id': rel_id, 'name': db_item[2], 'count': 1}
        else:
            entry['count'] += 1
    ordered = sorted(aggregated.values(), key=lambda entry: entry['count'],
                     reverse=True)
    print('\n -------------------')
    print('DB - ' + data_structure_item['main']['table'] + ' -> '
          + data_structure_item['relation']['table'])
    print('by field "' + main_field + '"')
    print('-------------------')
    for entry in ordered:
        print(u'{0} - {1} = {2}'.format(entry['id'], entry['name'],
                                        entry['count']))
def export_to_file(filename):
    """Export red-book species joined with their taxa to *filename*."""
    from nextgisbio.utils.dump_to_file import dump
    session = DBSession()
    rows = (session.query(RedBook, RedBookSpecies, Taxon)
            .join(RedBookSpecies, RedBook.id == RedBookSpecies.red_book_id)
            .join(Taxon, RedBookSpecies.specie_id == Taxon.id)
            .order_by(RedBook.id, RedBookSpecies.specie_id)
            .all())
    session.close()
    attribute_names = ['region', 'orig_name', 'lat_name', 'author',
                       'population', 'status', 'univ_status', 'year', 'bibl']
    objects_for_dump = [
        [sp.region, sp.orig_name, tx.name, sp.author, sp.population,
         sp.status, sp.univ_status, sp.year, rb.name]
        for rb, sp, tx in rows
    ]
    dump(filename, attribute_names, objects_for_dump, is_array=True)
def add_from_file(filename):
    '''
    Add taxon synonyms from the tab-separated file *filename*.
    CSV columns: id species_id synonym author source
    '''
    import transaction
    with transaction.manager:
        dbsession = DBSession()
        # FIX: close the csv file instead of leaking the handle.
        csv_file = open(filename)
        try:
            reader = csv.reader(csv_file, delimiter='\t')
            next(reader)  # skip the header row
            for row in reader:
                id_, species_id, synonym_name, author, source = [
                    None if x == '' else x for x in row
                ]
                # FIX: the original reused the name 'synonym' for both the
                # column value and the ORM object.
                record = Synonym(species_id=species_id, synonym=synonym_name,
                                 author=author, source=source)
                dbsession.add(record)
        finally:
            csv_file.close()
def export_to_file(filename):
    """Export all annotations to *filename* as UTF-8 CSV."""
    fieldnames = ['id', 'species', 'inserter', 'key_area', 'identifier',
                  'collecter', 'biblioref', 'original_name', 'location',
                  'lon', 'lat', 'biotop', 'difference', 'substrat', 'status',
                  'frequency', 'quantity', 'annotation', 'infosourse',
                  'year', 'month', 'day', 'exposure']
    with open(filename, 'wb') as out:
        writer = csv_utf.UnicodeWriter(out)
        writer.writerow(fieldnames)
        session = DBSession()
        rows = []
        for ann in session.query(Annotation).order_by(Annotation.id).all():
            # The header names match the model attributes one-to-one.
            rows.append([getattr(ann, field) for field in fieldnames])
        writer.writerows(rows)
def add_from_file(filename):
    """Load information-resource records from a tab-delimited CSV file.

    Expected columns: id, doc_type_id, filename, fullname, author,
    magazine, pages, mammals, birds, reptiles, amphibians, fish,
    invertebrates, vascular, bryophytes, lichens, fungi, maps.
    The first (header) row is skipped; empty cells become ``None``.
    The ``id`` column is read but not used (the DB assigns ids).

    :param filename: path to the tab-delimited CSV file.
    """
    import transaction
    with transaction.manager:
        dbsession = DBSession()
        # fix: the original leaked the file handle (open() without close).
        with open(filename) as csv_file:
            reader = csv.reader(csv_file, delimiter='\t')
            next(reader)  # skip the header row (next() works on Py2.6+ and Py3)
            for row in reader:
                # fix: the CSV "filename" column used to rebind the
                # function parameter `filename`; use a distinct name.
                (_id, doc_type_id, file_name, fullname, author, magazine,
                 pages, mammals, birds, reptiles, amphibians, fish,
                 invertebrates, vascular, bryophytes, lichens, fungi,
                 maps) = [None if cell == '' else cell for cell in row]
                dbsession.add(Inforesources(
                    doc_type_id=doc_type_id, filename=file_name,
                    fullname=fullname, author=author, magazine=magazine,
                    pages=pages, mammals=mammals, birds=birds,
                    reptiles=reptiles, amphibians=amphibians, fish=fish,
                    invertebrates=invertebrates, vascular=vascular,
                    bryophytes=bryophytes, lichens=lichens, fungi=fungi,
                    maps=maps))
def login(request):
    """Render / process the login form.

    On a successful form submission, remembers the user and redirects to
    the home page; otherwise re-renders the form with an error message.

    :returns: ``HTTPFound`` on success, or a dict of template variables.
    """
    message = None
    if hasattr(request, 'exception') and isinstance(request.exception,
                                                    HTTPForbidden):
        message = u"Недостаточно прав доступа для выполнения указанной операции!"
    login_url = route_url('login', request)
    referrer = request.url
    if referrer == login_url:
        # Never redirect back to the login form itself.
        referrer = route_url('home', request)
    next_url = route_url('home', request)
    login = ''
    password = ''
    if 'form.submitted' in request.params:
        login = request.params['login']
        password = request.params['password']
        dbsession = DBSession()
        try:
            user = dbsession.query(User)\
                .filter_by(login=login,
                           password=User.password_hash(password),
                           active=True)\
                .one()
        except NoResultFound:
            message = u"Неверный логин или пароль!"
        else:
            headers = remember(request, user.id)
            return HTTPFound(location=next_url, headers=headers)
        finally:
            # fix: the session was leaked when the login failed
            # (it was only closed on the success path).
            dbsession.close()
    return dict(
        message=message,
        url=request.application_url + '/login',
        next_url=next_url,
        login=login,
        password=password,
    )
def parent_taxons(taxon_id):
    """Return the ancestor chain of the given taxon (itself included).

    Walks the ``taxon.parent_id`` hierarchy upwards with a recursive CTE
    and sorts the result so that kingdoms come first and species last.

    :param taxon_id: id of the taxon whose ancestors are wanted.
    :returns: list of ``Taxon`` instances, ordered root-to-leaf.
    """
    dbsession = DBSession()
    # fix: int() guards the raw-SQL interpolation against injection —
    # the original interpolated taxon_id unvalidated.
    qs = '''
    WITH RECURSIVE subtree AS (
        SELECT * FROM taxon WHERE id=%s
        UNION ALL
        SELECT t.* FROM taxon AS t, subtree AS st WHERE (st.parent_id = t.id)
    )
    SELECT * FROM subtree ;
    ''' % (int(taxon_id), )
    try:
        taxons = dbsession.query(Taxon).from_statement(qs).all()
        # Sort so kingdoms lead the list and species close it.
        taxons.sort(key=lambda x: TAXON_TYPES.index(x.taxon_type))
    finally:
        # fix: the session was never closed
        dbsession.close()
    return taxons
def points_text(request):
    """Return observation-card points for the taxa selected in the tree.

    The query string carries 'nodes=taxon_id1,taxon_id2' (e.g.
    "nodes=taxon_1,taxon_5"), meaning the user selected taxon rows with
    id=1 and id=5; the view returns the observation cards of those taxa.
    Boundary case: nodes="root_" selects ALL cards.
    An optional 'red_book' parameter (-1 meaning "any") restricts cards
    to species listed in that red book.
    For users without 'edit' permission, coordinates are blurred by
    roughly 10 km in a random direction before being returned.
    """
    dbsession = DBSession()
    try:
        taxons = request.params['nodes']
    except KeyError:
        taxons = ''
    red_book_id = None
    if 'red_book' in request.params:
        red_book_id = int(request.params['red_book'])
        if red_book_id == -1:
            # -1 is the UI's "no red-book filter" sentinel
            red_book_id = None
    can_i_edit = has_permission('edit', request.context, request)
    can_i_edit = isinstance(can_i_edit, ACLAllowed)
    if taxons:
        taxons = urllib.unquote(taxons)
        taxon_id = taxons.split(',')
        if 'root' in taxons:
            # Root selected: every card, joined to its taxon
            cards = dbsession.query(Cards, Taxon).join(Taxon).all()
        else:
            # Build the list of descendant species of the selected taxa
            # and fetch the cards linked to them.
            subquery = TAXON_ID_QUERY % (", ".join([str(num) for num in taxon_id]), TAXON_TYPES[len(TAXON_TYPES) - 1])
            qs = """ SELECT cards.id,cards.species,cards.lat,cards.lon, taxon.name FROM cards INNER JOIN taxon ON cards.species = taxon.id %s WHERE """ % (
                'INNER JOIN red_books_species ON cards.species = red_books_species.specie_id' if red_book_id else '') \
                + ((' red_books_species.red_book_id = ' + str(red_book_id) + ' AND ') if red_book_id else '') \
                + ' cards.species IN (' + subquery + ');'
            cards = dbsession.query(Cards, Taxon).from_statement(qs).all()
        points = []
        for card, taxon in cards:
            id, spec_id, lat, lon = card.id, card.species, card.lat, card.lon
            name = taxon.name
            if lat and lon:
                if not can_i_edit:
                    # Real coordinates must not be shown: shift them by
                    # roughly 10 km in a random direction before display.
                    lat = lat + (random() - random()) / 7
                    lon = lon + (random() - random()) / 4
                points.append({'lat': lat, 'lon': lon, 'name': name, 'card_id': id, 'spec_id': spec_id})
    else:
        points = {}
    dbsession.close()
    return {'points': points}
def direct_child(request):
    """Return the direct children of a taxon tree node for Ext.treepanel.

    Ext sends 'node'='taxon_<id>', where <id> is a taxon table id.
    Boundary cases: node == 'root' means the tree root (children are the
    top-level taxa with no parent), and a taxon for which
    ``is_last_taxon()`` is true is a leaf (``leaf: True``).
    """
    node_param = request.params['node']
    dbsession = DBSession()
    try:
        if node_param == 'root':
            parent_id = None
        else:
            parent_id = int(node_param.split('_')[1])
        children = dbsession.query(Taxon).filter_by(parent_id=parent_id).all()
        dbsession.close()
    except NoResultFound:
        dbsession.close()
        return {
            'success': False,
            'msg': 'Результатов, соответствующих запросу, не найдено'
        }
    # Build the Ext.treepanel node descriptions. Ext reads the 'text'
    # field, which combines the taxon name with its author.
    rows = []
    for taxon in children:
        author = taxon.author if taxon.author else ''
        is_last = taxon.is_last_taxon()
        template = "<b>%s</b> %s" if is_last else "%s %s"
        rows.append({
            'id': 'taxon_' + str(taxon.id),
            'leaf': is_last,
            'text': template % (taxon.name, author),
        })
    return rows
def taxon_cbtree(request):
    """Build a cbtree store response for the children of a taxon path.

    The request carries a 'path' (or 'basePath') parameter; '.' denotes
    the tree root, otherwise the last '/'-separated component is the
    parent taxon id. Children are returned ordered by name.
    """
    param = 'path' if 'path' in request.params else 'basePath'
    hierarchical_path = request.params[param].replace('"', '')
    at_root = hierarchical_path == '.'
    parent_id = None if at_root else int(str(hierarchical_path).split('/')[-1])

    dbsession = DBSession()
    parent_taxon = dbsession.query(Taxon).filter_by(id=parent_id).first()
    children = (dbsession.query(Taxon)
                .filter_by(parent_id=parent_id)
                .order_by(Taxon.name)
                .all())
    dbsession.close()

    child_nodes = [_taxon_to_node(hierarchical_path, t) for t in children]

    if at_root:
        # Synthetic root item with id -1 carrying the children inline.
        items = [{
            'name': '.',
            'id': -1,
            'path': hierarchical_path,
            'directory': True,
            'children': child_nodes,
        }]
        name = '.'
    else:
        items = child_nodes
        name = parent_taxon.name

    return {
        'name': name,
        'path': hierarchical_path,
        'directory': True,
        'total': 1,
        'status': 200,
        'items': items,
    }
def table_delete_jtable(request):
    """Delete one row, identified by the POSTed ``id``, from the table
    resolved by ``helpers.get_table_by_name``.

    :returns: ``{'Result': 'OK'}`` on success (jTable protocol).
    :raises Exception: if no numeric ``id`` was POSTed, or no row with
        that id exists.
    """
    session = DBSession()
    try:
        table, table_name = helpers.get_table_by_name(request)
        if ('id' in request.POST) and request.POST['id'].isdigit():
            item_id = int(request.POST['id'])
            item = session.query(table).get(item_id)
        else:
            raise Exception('Deleting item: id is not applied')
        if item is None:
            # fix: the original passed None to session.delete(), which
            # fails with an opaque SQLAlchemy error.
            raise Exception('Deleting item: no row with id %d' % item_id)
        session.delete(item)
        transaction.commit()
    finally:
        # fix: the session was leaked whenever an exception was raised
        # before the close() call.
        session.close()
    return {'Result': 'OK'}