def table_item_save(request):
    """Create or update a Person and its associated User from POST data.

    POST keys are namespaced as '<table>_<field>' (e.g. 'user_login',
    'person_name'); 'user_login' is mandatory.  Returns an ExtJS-style
    dict {'Result': 'OK'|'Error', 'Record'|'Message': ...}.

    Raises HTTPBadRequest when 'user_login' is missing or empty.
    """
    person_id = None
    if ('person_id' in request.POST) and request.POST['person_id'].isdigit():
        person_id = int(request.POST['person_id'])

    user_login = request.POST['user_login'] if 'user_login' in request.POST else None
    if not user_login:
        raise HTTPBadRequest('"user_login" is required parameter')

    # On creation, refuse duplicate logins.
    if not person_id:
        users = DBSession.query(User).filter(User.login == user_login).all()
        if len(users) > 0:
            return {
                'Result': 'Error',
                'Message': u'Такой логин уже присутствует в системе'
            }

    with transaction.manager:
        if person_id:
            # .one() raises a clear NoResultFound on a stale id instead
            # of the bare IndexError the original .all()[0] produced.
            person = DBSession.query(Person) \
                .options(joinedload('user')) \
                .filter(Person.id == person_id) \
                .one()
            user = person.user
        else:
            person = Person()
            DBSession.add(person)
            user = User()
            DBSession.add(user)
            person.user = user

        for attr in request.POST:
            # BUG FIX: split only on the FIRST underscore so keys whose
            # field part itself contains '_' (e.g. 'user_first_name') no
            # longer raise ValueError from tuple unpacking.
            table_name, _, field = attr.partition('_')
            if field == 'id':
                continue
            if table_name == 'person':
                setattr(person, field, request.POST[attr])
            if table_name == 'user':
                setattr(user, field, request.POST[attr])

        # Checkbox semantics: absent or empty value means inactive.
        if 'user_active' in request.POST and request.POST['user_active']:
            user.active = True
        else:
            user.active = False

        # Only re-hash when a non-empty password was supplied.
        if 'user_password' in request.POST and request.POST['user_password']:
            user.password = User.password_hash(request.POST['user_password'])

        DBSession.flush()
        DBSession.refresh(user)
        DBSession.refresh(person)

        person_json = person.as_json_dict('person_')
        user_json = user.as_json_dict('user_')
        item_json = person_json.copy()
        item_json.update(user_json)

        return {'Result': 'OK', 'Record': item_json}
def add_from_file(associations_filename, shp_filename):
    """Load squares from the shp-file *shp_filename* and link them to
    key areas.

    The first field of the shp attribute table is the square identifier.
    *associations_filename* is a tab-separated csv with a header row and
    columns: square_id, key_area_id.
    """
    import transaction
    with transaction.manager:
        dbsession = DBSession()

        ogr_data = ogr.Open(shp_filename)
        layer = ogr_data.GetLayer(0)
        feature = layer.GetNextFeature()
        while feature is not None:
            # 'square_id' instead of 'id' -- do not shadow the builtin.
            square_id = feature.GetFieldAsString(0)
            wkt = feature.GetGeometryRef().ExportToWkt()
            dbsession.add(Squares(id=square_id,
                                  geom=WKTSpatialElement(wkt, srid=3857)))
            feature = layer.GetNextFeature()
        dbsession.flush()

        # FIX: close the csv file deterministically instead of leaking
        # the handle (original passed a bare open() to csv.reader).
        with open(associations_filename) as csv_file:
            reader = csv.reader(csv_file, delimiter='\t')
            reader.next()  # skip header row
            records = [line for line in reader]

        for square_id, key_area_id in records:
            # Resolve the key area by its id
            key_a = dbsession.query(Key_area).filter_by(id=key_area_id).one()
            # Resolve the square polygon by its id
            square = dbsession.query(Squares).filter_by(id=square_id).one()
            square.key_areas.append(key_a)
def create_taxon(request):
    """Create a Taxon from the request's POST fields.

    Empty-string values are stored as NULL; keys that are not Taxon
    attributes are silently ignored.  Returns the persisted record as
    {'item': <json dict>}.
    """
    dbsession = DBSession()
    taxon = Taxon()
    for key, value in dict(request.POST).items():
        if hasattr(taxon, key):
            setattr(taxon, key, value if value != '' else None)
    dbsession.add(taxon)
    dbsession.flush()
    dbsession.refresh(taxon)
    return {'item': taxon.as_json_dict()}
def create_taxon(request):
    """Persist a new Taxon built from POST parameters.

    Blank values become NULL; unknown keys are ignored.  Returns
    {'item': <json dict of the new row>}.
    """
    dbsession = DBSession()
    taxon = Taxon()
    new_data = dict(request.POST)
    for field in new_data:
        value = new_data[field]
        if value == '':
            value = None
        if hasattr(taxon, field):
            setattr(taxon, field, value)
    dbsession.add(taxon)
    dbsession.flush()
    dbsession.refresh(taxon)
    return {'item': taxon.as_json_dict()}
def gen_sql(records): with transaction.manager: for r in records: id = None for t in 'Kingdom', 'Phylum', 'Class', 'Order', 'Family', 'Genus', 'Species': try: print t, r[t] taxon = DBSession.query(Taxon).filter_by(taxon_type = t, name = r[t], parent_id = id).one() print taxon except NoResultFound: taxon = Taxon(taxon_type = t, name = r[t], parent_id = id, author=r[t+'_author'], source=r[t+'_source']) DBSession.add(taxon) DBSession.flush() id = taxon.id print taxon.id, taxon.taxon_type, taxon.name, taxon.parent_id
def save_anlist(request):
    """Update an Annotation (id taken from the route match) from POST.

    Empty-string values become NULL; keys that are not Annotation
    attributes are ignored.  Returns {'success': bool} rather than
    propagating errors to the caller.
    """
    dbsession = DBSession()
    new_data = dict(request.POST)
    annotation_id = request.matchdict['id']
    success = True
    try:
        anlist = dbsession.query(Annotation).filter_by(
            id=annotation_id).one()
        for key, value in new_data.items():
            if value == '':
                value = None
            if hasattr(anlist, key):
                setattr(anlist, key, value)
        dbsession.flush()
    except Exception:
        # FIX: narrowed from a bare 'except:' so KeyboardInterrupt and
        # SystemExit are no longer swallowed; lookup/DB errors still
        # yield success=False as before.
        success = False
    return {'success': success}
def save_anlist(request):
    """Apply POST data to the Annotation whose id is in the route match.

    Empty strings are saved as NULL; unknown keys are skipped.
    Returns {'success': bool} instead of raising on failure.
    """
    dbsession = DBSession()
    new_data = dict(request.POST)
    annotation_id = request.matchdict['id']
    success = True
    try:
        anlist = dbsession.query(Annotation).filter_by(
            id=annotation_id).one()
        for key, value in new_data.items():
            if value == '':
                value = None
            if hasattr(anlist, key):
                setattr(anlist, key, value)
        dbsession.flush()
    except Exception:
        # FIX: bare 'except:' replaced -- it also caught
        # KeyboardInterrupt/SystemExit.  Expected failures (missing row,
        # DB errors) still just report success=False.
        success = False
    return {'success': success}
def add_from_file(filename):
    """Populate the Doc_type table from the csv file *filename*.

    Expected columns (comma-separated, with a header row):
    idDocType,docType.  The id column is ignored -- the database
    assigns its own keys.
    """
    dbsession = DBSession()
    # FIX: close the file deterministically instead of leaking the
    # handle (original passed a bare open() to csv.reader).
    with open(filename) as csv_file:
        reader = csv.reader(csv_file, delimiter=',')
        reader.next()  # skip header row
        records = [line for line in reader]
    # '_id' instead of 'id': unused, and 'id' shadows the builtin.
    for _id, doc_type in records:
        dbsession.add(Doc_type(doc_type=doc_type))
    dbsession.flush()
def add_from_file(filename):
    """Load document types into Doc_type from the csv *filename*.

    Columns (comma-separated, header row skipped): idDocType,docType.
    The source id is discarded; the DB generates primary keys.
    """
    dbsession = DBSession()
    # FIX: use a context manager so the file handle is not leaked.
    with open(filename) as csv_file:
        reader = csv.reader(csv_file, delimiter=',')
        reader.next()  # skip header row
        records = [line for line in reader]
    # '_id' instead of 'id': unused, and 'id' shadows the builtin.
    for _id, doc_type in records:
        dbsession.add(Doc_type(doc_type=doc_type))
    dbsession.flush()
def add_from_file(associations_filename, shp_filename):
    """Import squares from *shp_filename* and their key-area links.

    The first attribute field of the shp layer is the square id.
    *associations_filename* is a tab-separated csv with a header row and
    columns: square_id, key_area_id.
    """
    import transaction
    with transaction.manager:
        dbsession = DBSession()

        ogr_data = ogr.Open(shp_filename)
        layer = ogr_data.GetLayer(0)
        feature = layer.GetNextFeature()
        while feature is not None:
            # 'square_id' instead of 'id' -- do not shadow the builtin.
            square_id = feature.GetFieldAsString(0)
            wkt = feature.GetGeometryRef().ExportToWkt()
            dbsession.add(Squares(id=square_id,
                                  geom=WKTSpatialElement(wkt, srid=3857)))
            feature = layer.GetNextFeature()
        dbsession.flush()

        # FIX: close the csv file deterministically instead of leaking
        # the handle until garbage collection.
        with open(associations_filename) as csv_file:
            reader = csv.reader(csv_file, delimiter='\t')
            reader.next()  # skip header row
            records = [line for line in reader]

        for square_id, key_area_id in records:
            # Resolve the key area by its id
            key_a = dbsession.query(Key_area).filter_by(
                id=key_area_id).one()
            # Resolve the square polygon by its id
            square = dbsession.query(Squares).filter_by(id=square_id).one()
            square.key_areas.append(key_a)
def table_item_save(request):
    """Create or update a Person + User pair from namespaced POST data.

    POST keys look like '<table>_<field>' ('user_login', 'person_name');
    'user_login' is required.  Returns {'Result': 'OK', 'Record': ...}
    or {'Result': 'Error', 'Message': ...} on a duplicate login.

    Raises HTTPBadRequest when 'user_login' is missing or empty.
    """
    person_id = None
    if ('person_id' in request.POST) and request.POST['person_id'].isdigit():
        person_id = int(request.POST['person_id'])

    user_login = request.POST['user_login'] if 'user_login' in request.POST else None
    if not user_login:
        raise HTTPBadRequest('"user_login" is required parameter')

    # On creation, refuse duplicate logins.
    if not person_id:
        users = DBSession.query(User).filter(User.login == user_login).all()
        if len(users) > 0:
            return {
                'Result': 'Error',
                'Message': u'Такой логин уже присутствует в системе'
            }

    with transaction.manager:
        if person_id:
            # .one() raises a clear NoResultFound for a stale id instead
            # of the bare IndexError the original .all()[0] produced.
            person = DBSession.query(Person) \
                .options(joinedload('user')) \
                .filter(Person.id == person_id) \
                .one()
            user = person.user
        else:
            person = Person()
            DBSession.add(person)
            user = User()
            DBSession.add(user)
            person.user = user

        for attr in request.POST:
            # BUG FIX: split on the FIRST underscore only; keys whose
            # field part contains '_' (e.g. 'user_first_name') made the
            # original two-value unpack raise ValueError.
            table_name, _, field = attr.partition('_')
            if field == 'id':
                continue
            if table_name == 'person':
                setattr(person, field, request.POST[attr])
            if table_name == 'user':
                setattr(user, field, request.POST[attr])

        # Checkbox semantics: absent or empty value means inactive.
        if 'user_active' in request.POST and request.POST['user_active']:
            user.active = True
        else:
            user.active = False

        # Only re-hash when a non-empty password was supplied.
        if 'user_password' in request.POST and request.POST['user_password']:
            user.password = User.password_hash(request.POST['user_password'])

        DBSession.flush()
        DBSession.refresh(user)
        DBSession.refresh(person)

        person_json = person.as_json_dict('person_')
        user_json = user.as_json_dict('user_')
        item_json = person_json.copy()
        item_json.update(user_json)

        return {
            'Result': 'OK',
            'Record': item_json
        }
def import_from_csv(path_to_file): session = DBSession() log = {'not_found': [], 'duplicates': [], 'multiple': []} reader = csv.reader(open(path_to_file), delimiter='\t') reader.next() records = [line for line in reader] red_books = {} for region, orig_name, lat_name, author, population, status, univ_status, year, bibl in records: if bibl in red_books: continue else: red_books[bibl] = True with transaction.manager: for red_book_name in red_books.keys(): red_book = RedBook(name=red_book_name) session.add(red_book) red_books_db = session.query(RedBook).all() red_books = {} for red_book_db in red_books_db: red_books[red_book_db.name.encode('utf8')] = red_book_db.id with transaction.manager: for region, orig_name, lat_name, author, population, status, univ_status, year, bibl in records: lat_name = lat_name.strip() taxons = session.query(Taxon).filter_by(name=lat_name).all() taxons_count = len(taxons) if taxons_count == 1: taxon_id = taxons[0].id elif taxons_count > 1: taxons = session.query(Taxon).filter_by( name=lat_name).filter_by(author=author).all() taxon_id = taxons[0].id if len(taxons) > 1: log['multiple'].append(lat_name) continue else: log['not_found'].append(lat_name) continue red_book_id = red_books[bibl] count = session.query(func.count(RedBookSpecies.specie_id)) \ .filter(RedBookSpecies.red_book_id == red_book_id) \ .filter(RedBookSpecies.specie_id == taxon_id).scalar() if count > 0: log['duplicates'].append(taxons[0].name) continue red_book_specie = RedBookSpecies( red_book_id=red_book_id, specie_id=taxon_id, population=population, status=status, univ_status=univ_status, year=int(year) if year else None, region=region, author=author, orig_name=orig_name.strip()) session.add(red_book_specie) session.flush() print '\n\rMULTIPLE:\n\r{0}'.format('\n\r'.join(log['multiple'])) print '\n\rNOT FOUND:\n\r{0}'.format('\n\r'.join(log['not_found'])) print '\n\rDUPLICATES:\n\r{0}'.format('\n\r'.join(log['duplicates']))
def import_from_csv(path_to_file): session = DBSession() log = { 'not_found': [], 'duplicates': [], 'multiple': [] } reader = csv.reader(open(path_to_file), delimiter='\t') reader.next() records = [line for line in reader] red_books = {} for region, orig_name, lat_name, author, population, status, univ_status, year, bibl in records: if bibl in red_books: continue else: red_books[bibl] = True with transaction.manager: for red_book_name in red_books.keys(): red_book = RedBook( name=red_book_name ) session.add(red_book) red_books_db = session.query(RedBook).all() red_books = {} for red_book_db in red_books_db: red_books[red_book_db.name.encode('utf8')] = red_book_db.id with transaction.manager: for region, orig_name, lat_name, author, population, status, univ_status, year, bibl in records: lat_name = lat_name.strip() taxons = session.query(Taxon).filter_by(name=lat_name).all() taxons_count = len(taxons) if taxons_count == 1: taxon_id = taxons[0].id elif taxons_count > 1: taxons = session.query(Taxon).filter_by(name=lat_name).filter_by(author=author).all() taxon_id = taxons[0].id if len(taxons) > 1: log['multiple'].append(lat_name) continue else: log['not_found'].append(lat_name) continue red_book_id = red_books[bibl] count = session.query(func.count(RedBookSpecies.specie_id)) \ .filter(RedBookSpecies.red_book_id == red_book_id) \ .filter(RedBookSpecies.specie_id == taxon_id).scalar() if count > 0: log['duplicates'].append(taxons[0].name) continue red_book_specie = RedBookSpecies( red_book_id=red_book_id, specie_id=taxon_id, population=population, status=status, univ_status=univ_status, year=int(year) if year else None, region=region, author=author, orig_name=orig_name.strip() ) session.add(red_book_specie) session.flush() print '\n\rMULTIPLE:\n\r{0}'.format('\n\r'.join(log['multiple'])) print '\n\rNOT FOUND:\n\r{0}'.format('\n\r'.join(log['not_found'])) print '\n\rDUPLICATES:\n\r{0}'.format('\n\r'.join(log['duplicates']))