def row_converters_from_ids(self):
    """Generate a row converter object for every csv row."""
    if self.ignore or not self.object_ids:
        return
    self.row_converters = []
    index = 0
    for ids_pool in list_chunks(self.object_ids, self.ROW_CHUNK_SIZE):
        # SQLAlchemy caches all queries and that takes a lot of memory;
        # this line clears the query cache.
        _app_ctx_stack.top.sqlalchemy_queries = []

        objects = self.object_class.eager_query().filter(
            self.object_class.id.in_(ids_pool)).execution_options(
            stream_results=True)
        for obj in objects:
            yield RowConverter(self, self.object_class, obj=obj,
                               headers=self.headers, index=index)
            index += 1

        # Clear all objects from the session (helps to avoid a memory leak).
        for obj in db.session:
            del obj
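
# The chunked loop above relies on a list_chunks helper that is not shown
# here. Below is a minimal sketch of the behavior assumed from the call
# site (name and signature inferred, not taken from the project source):
# it yields consecutive slices of at most chunk_size ids, so that only
# ROW_CHUNK_SIZE objects are queried and held in memory at a time.
def list_chunks(items, chunk_size):
    """Yield consecutive slices of at most chunk_size items."""
    for start in range(0, len(items), chunk_size):
        yield items[start:start + chunk_size]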
def row_converters_from_csv(self):
    """Generate a row converter object for every csv row."""
    if self.ignore:
        return
    self.row_converters = []
    for i, row in enumerate(self.rows):
        row_converter = RowConverter(self, self.object_class, row=row,
                                     headers=self.headers, index=i)
        self.row_converters.append(row_converter)
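
# Illustrative only: one way self.headers and self.rows could be populated
# before row_converters_from_csv() runs. The real import pipeline parses a
# richer block format; this sketch only shows the assumed shape of the data
# (a list of column names plus a list of cell-value lists, one per row).
import csv

def read_csv_block(path):
    """Hypothetical helper: return (headers, rows) from a plain csv file."""
    with open(path, newline="") as csv_file:
        reader = csv.reader(csv_file)
        headers = next(reader)   # first line holds the column names
        rows = list(reader)      # remaining lines are the data rows
    return headers, rows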
def row_converters_from_ids(self):
    """Generate a row converter object for every csv row."""
    if self.ignore or not self.object_ids:
        return
    self.row_converters = []
    objects = self.object_class.eager_query().filter(
        self.object_class.id.in_(self.object_ids)).all()
    for i, obj in enumerate(objects):
        row_converter = RowConverter(self, self.object_class, obj=obj,
                                     headers=self.headers, index=i)
        self.row_converters.append(row_converter)
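
# Hypothetical consumption sketch for the generator-based
# row_converters_from_ids shown earlier: because it yields RowConverter
# objects lazily, a caller can handle one row at a time and memory use
# stays bounded by ROW_CHUNK_SIZE. The variant directly above instead
# loads every matching object with .all() and stores the converters on
# self.row_converters up front. 'block' and 'handle_row' are placeholder
# names, not taken from the project.
def process_rows(block, handle_row):
    for row_converter in block.row_converters_from_ids():
        handle_row(row_converter)   # process one row converter at a time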