def make_json_error(ex):
    """Turn an exception into a JSON error response.

    HTTP exceptions keep their own status code; anything else maps to 500.
    If the exception carries a ``data`` attribute, that becomes the message,
    otherwise the exception's own text does.
    """
    if isinstance(ex, HTTPException):
        code = ex.code
    else:
        code = 500
    payload = getattr(ex, 'data', None)
    if payload:
        message = to_unicode(payload)
    else:
        message = to_unicode(ex)
    response = jsonify(code=code, message=message)
    response.status_code = code
    return response
def raw_value(self, value):
    """Return the raw (unicode) form of *value*.

    Non-empty list-typed values are delegated to ``self.type.raw_value``;
    plain values are converted with ``to_unicode``.  ``None`` input and
    empty list values fall through and yield ``None``.

    NOTE(review): the source was whitespace-mangled; the ``else`` below is
    read as pairing with the ``isinstance`` check — confirm against history.
    """
    if value is not None:
        if isinstance(self.type, ListArg):
            # Non-empty lists are rendered by the ListArg type itself.
            if len(value) > 0:
                return self.type.raw_value(value)
        else:
            return to_unicode(value)
def fix_sequences(ctx, to_engine=None, tables=None):
    """Realign PostgreSQL primary-key sequences with current max values.

    :param ctx: command context providing ``archive_db``/``current_db``
        and ``log``.
    :param to_engine: target engine; defaults to the archive DB engine.
    :param tables: tables to fix; defaults to all sorted tables of the
        current database.  (Default changed from a mutable ``[]`` to
        ``None`` — behavior is identical since falsy values fall back.)

    No-op when the target dialect is not PostgreSQL.
    """
    if to_engine is None:
        to_engine = ctx.archive_db.engine
    if not tables:
        tables = ctx.current_db.metadata.sorted_tables
    # Sequences only exist on PostgreSQL.
    if to_engine.dialect.name != "postgresql":
        return

    def get_sequences_values():
        # Yield (sequence name, pk column name, table name) for every
        # autoincrement PK whose backing sequence exists on the target.
        for table in tables:
            pks = [c for c in table.c if c.primary_key]
            for pk in pks:
                if not pk.autoincrement:
                    continue
                sequence_name = "%s_%s_seq" % (table.name, pk.name)
                if to_engine.dialect.has_sequence(to_engine, sequence_name):
                    yield sequence_name, pk.name, pk.table.name

    for sequence_name, pk_name, table_name in get_sequences_values():
        ctx.log(green('\r fix') + ' ~> sequence %s' % sequence_name)
        query = "select setval('%s', max(%s)) from %s"
        try:
            to_engine.execute(query % (sequence_name, pk_name, table_name))
        except Exception as ex:
            # Best-effort: log the failure and continue with the next sequence.
            ctx.log(*red(to_unicode(ex)).splitlines(), prefix=(' ' * 9))
def before_cursor_execute(conn, cursor, statement, parameters, context, executemany):
    """SQLAlchemy event hook: append each executed statement to self.buf.

    Statements whose (case-insensitive) prefix matches an entry of
    ``self.ignored`` are skipped.
    """
    sql = to_unicode(statement)
    lowered = sql.lower()
    for prefix in self.ignored:
        if lowered.startswith(prefix.lower()):
            return
    # Normalize trailing-space-before-newline and drop trailing newlines
    # before terminating the statement with ';'.
    cleaned = sql.replace(' \n', '\n').rstrip('\n')
    self.buf.write(cleaned + ";\n")
def inspect_db(ctx):
    """Compare per-table row counts between the current and archive DBs.

    Returns ``(rows, headers)`` where each row is
    ``(table, current size, archive size, diff)`` and rows are sorted by
    table name.  Archive sizes are 0 when the archive DB does not exist.
    """
    stats = {}
    for table_name, size in count_all(ctx.current_db):
        stats[table_name] = {"current_db_size": size,
                             "archive_db_size": 0,
                             "diff": size}
    if database_exists(ctx.archive_db.engine.url):
        for table_name, size in count_all(ctx.archive_db):
            entry = stats[table_name]
            entry["archive_db_size"] = size
            entry["diff"] = entry["current_db_size"] - size
    headers = ["Table", "Current DB size", "Archive DB size", "Diff"]
    rows = [(key,
             to_unicode(stats[key]["current_db_size"]),
             to_unicode(stats[key]["archive_db_size"]),
             to_unicode(stats[key]["diff"]))
            for key in iterkeys(stats)]
    rows.sort(key=lambda row: row[0])
    return rows, headers
def convert(self, value, argtype):
    """Coerce *value* to *argtype* with the matching converter.

    Strings, booleans and integers use dedicated converters; ``ListArg``
    instances convert each item recursively; anything else is called
    directly as a constructor.
    """
    if argtype == str:
        return to_unicode(value)
    if argtype == bool:
        return self.convert_bool(value)
    if argtype in integer_types:
        return self.convert_int(value)
    if isinstance(argtype, ListArg):
        # ListArg converts each element via this method.
        return argtype(value, self.convert)
    return argtype(value)
def sync_schema(ctx, tables, from_engine, to_engine):
    """Create missing tables on *to_engine* and yield freshly reflected tables.

    :param tables: tables to sync; ``None`` reflects and uses all sorted
        tables of the current database.
    :param from_engine: source engine; defaults to the current DB engine.
    :param to_engine: destination engine; defaults to the archive DB engine.
    :yields: each table re-loaded from *from_engine* metadata, so callers
        work with the authoritative column definitions.

    Fix: engine defaults are now resolved BEFORE building the inspector —
    the original called ``Inspector.from_engine(to_engine)`` while
    ``to_engine`` could still be ``None``, which crashed.
    """
    if to_engine is None:
        to_engine = ctx.archive_db.engine
    if from_engine is None:
        from_engine = ctx.current_db.engine
    inspector = Inspector.from_engine(to_engine)
    existing_tables = inspector.get_table_names()
    if tables is None:
        ctx.current_db.reflect()
        tables = ctx.current_db.metadata.sorted_tables
    for table in tables:
        if table.name not in existing_tables:
            ctx.log(' %s ~> table %s' % (green('create'), table.name))
            try:
                table.create(bind=to_engine, checkfirst=True)
            except Exception as ex:
                # Best-effort: log and continue with the remaining tables.
                ctx.log(*red(to_unicode(ex)).splitlines(), prefix=(' ' * 9))
    metadata = MetaData(from_engine)
    for table in tables:
        # Make sure we have the good version of the table
        yield Table(table.name, metadata, autoload=True)
def alembic_apply_diff(ctx, op, op_name, diff, tables=None):
    """Apply a single alembic autogenerate *diff* entry via *op*.

    :param ctx: command context used for logging.
    :param op: alembic ``Operations`` facade.
    :param op_name: diff operation name; must be a key of
        ``SUPPORTED_ALEMBIC_OPERATIONS``.
    :param diff: the raw diff tuple produced by alembic's comparator.
    :param tables: optional tables used as a fallback when a type cast
        fails (the column is then dropped and re-added).  Default changed
        from a mutable ``[]`` to ``None``; behavior is identical.
    :raises ValueError: when *op_name* is not supported.
    """
    tables_dict = dict((table.name, table) for table in (tables or ()))
    if op_name not in SUPPORTED_ALEMBIC_OPERATIONS:
        raise ValueError("Unsupported '%s' operation" % op_name)
    if op_name == "add_table":
        table_name = diff[1].name
        columns = [c.copy() for c in diff[1].columns]
        msg = SUPPORTED_ALEMBIC_OPERATIONS[op_name] % table_name
        op_callback = lambda: op.create_table(table_name, *columns)
    elif op_name in ('add_column', 'remove_column'):
        column = diff[3].copy()
        table_name = diff[2]
        if 'add' in op_name:
            op_callback = lambda: op.add_column(diff[2], column)
        else:
            op_callback = lambda: op.drop_column(diff[2], column.name)
        msg = SUPPORTED_ALEMBIC_OPERATIONS[op_name] % (column.name, table_name)
    elif op_name in ('remove_index', 'add_index'):
        index = diff[1]
        columns = [i for i in index.columns]
        table_name = index.table.name
        # Human-readable "table.column" list for the log message.
        index_columns = tuple("%s.%s" % (column.table.name, column.name)
                              for column in columns)
        if 'add' in op_name:
            args = (index.name, table_name, [c.name for c in columns],)
            kwargs = {'unique': index.unique}
            op_callback = lambda: op.create_index(*args, **kwargs)
        else:
            op_callback = lambda: op.drop_index(index.name)
        msg = SUPPORTED_ALEMBIC_OPERATIONS[op_name] \
            % (index.name, ",".join(index_columns))
    elif op_name in ('modify_type',):
        table_name = diff[0][2]
        column_name = diff[0][3]
        kwargs = diff[0][4]
        type_ = diff[0][6]

        def op_callback():
            try:
                op.alter_column(table_name, column_name, server_default=None)
                op.alter_column(table_name, column_name, type_=type_, **kwargs)
            except Exception:
                # Some types cannot be casted: drop and re-create the column
                # instead.  (Narrowed from a bare ``except:`` which also
                # swallowed KeyboardInterrupt/SystemExit.)
                if table_name in tables_dict:
                    table = tables_dict[table_name]
                    column = table.columns[column_name].copy()
                    op.drop_column(table_name, column_name)
                    op.add_column(table_name, column)

        msg = SUPPORTED_ALEMBIC_OPERATIONS[op_name] \
            % (table_name, column_name, type_)
    elif op_name in ('modify_nullable',):
        table_name = diff[0][2]
        column_name = diff[0][3]
        kwargs = diff[0][4]
        nullable = diff[0][6]
        existing_type = kwargs['existing_type']

        def op_callback():
            op.alter_column(table_name, column_name,
                            nullable=nullable,
                            existing_type=existing_type)

        msg = SUPPORTED_ALEMBIC_OPERATIONS[op_name] \
            % (table_name, column_name, nullable)
    try:
        if msg is not None:
            ctx.log("%s ~> %s" % (yellow('upgrade'), msg))
        op_callback()
    except Exception as ex:
        ctx.log(*red(to_unicode(ex)).splitlines(), prefix=(' ' * 9))
def raw_value(self, values):
    """Join *values* with the configured separator and return it as unicode."""
    joined = self.sep.join("%s" % item for item in values)
    return to_unicode(joined)
def convert():
    """Lazily split the enclosing *value* on ``self.sep`` and convert items.

    Yields nothing when the value converts to an empty/falsy string.
    """
    text = to_unicode(value)
    if not text:
        return
    for part in text.split(self.sep):
        yield callback(part, self.type)