Example #1
import schema  # assumed import: the module providing SCHEMA_VERSION and schema_create_actions()

def main():
    print("""\
/*** THIS FILE IS AUTO-GENERATED BY %s ***/

import { GristObjCode } from "app/plugin/GristData";

// tslint:disable:object-literal-key-quotes

export const SCHEMA_VERSION = %d;

export const schema = {
""" % (__file__, schema.SCHEMA_VERSION))

    for table in schema.schema_create_actions():
        print('  "%s": {' % table.table_id)
        for column in table.columns:
            print('    %-20s: "%s",' % (column['id'], column['type']))
        print('  },\n')

    print("""};

export interface SchemaTypes {
""")
    for table in schema.schema_create_actions():
        print('  "%s": {' % table.table_id)
        for column in table.columns:
            print('    %s: %s;' % (column['id'], get_ts_type(column['type'])))
        print('  };\n')
    print("}")
Example #2
  def test_migrations(self):
    # Build an empty version-0 document, apply all migrations to it, and verify that the
    # resulting schema matches the current schema defined in schema.py.
    tdset = table_data_set.TableDataSet()
    tdset.apply_doc_actions(schema_version0())
    migration_actions = migrations.create_migrations(tdset.all_tables)
    tdset.apply_doc_actions(migration_actions)

    # Compare schema derived from migrations to the current schema.
    migrated_schema = tdset.get_schema()
    current_schema = {a.table_id: {c['id']: c for c in a.columns}
                      for a in schema.schema_create_actions()}
    # pylint: disable=too-many-nested-blocks
    if migrated_schema != current_schema:
      # Figure out the version of the new migration to suggest, and whether to update SCHEMA_VERSION.
      new_version = max(schema.SCHEMA_VERSION, migrations.get_last_migration_version() + 1)

      # Figure out the missing actions.
      doc_actions = []
      for table_id in sorted(six.viewkeys(current_schema) | six.viewkeys(migrated_schema)):
        if table_id not in migrated_schema:
          doc_actions.append(actions.AddTable(table_id, current_schema[table_id].values()))
        elif table_id not in current_schema:
          doc_actions.append(actions.RemoveTable(table_id))
        else:
          current_cols = current_schema[table_id]
          migrated_cols = migrated_schema[table_id]
          for col_id in sorted(six.viewkeys(current_cols) | six.viewkeys(migrated_cols)):
            if col_id not in migrated_cols:
              doc_actions.append(actions.AddColumn(table_id, col_id, current_cols[col_id]))
            elif col_id not in current_cols:
              doc_actions.append(actions.RemoveColumn(table_id, col_id))
            else:
              current_info = current_cols[col_id]
              migrated_info = migrated_cols[col_id]
              delta = {k: v for k, v in six.iteritems(current_info) if v != migrated_info.get(k)}
              if delta:
                doc_actions.append(actions.ModifyColumn(table_id, col_id, delta))

      suggested_migration = (
        "----------------------------------------------------------------------\n" +
        "*** migrations.py ***\n" +
        "----------------------------------------------------------------------\n" +
        "@migration(schema_version=%s)\n" % new_version +
        "def migration%s(tdset):\n" % new_version +
        "  return tdset.apply_doc_actions([\n" +
        "".join(stringify(a) + ",\n" for a in doc_actions) +
        "  ])\n"
      )

      if new_version != schema.SCHEMA_VERSION:
        suggested_schema_update = (
          "----------------------------------------------------------------------\n" +
          "*** schema.py ***\n" +
          "----------------------------------------------------------------------\n" +
          "SCHEMA_VERSION = %s\n" % new_version
        )
      else:
        suggested_schema_update = ""

      self.fail("Migrations are incomplete. Suggested migration to add:\n" +
                suggested_schema_update + suggested_migration)
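
The test renders each suggested DocAction with a stringify helper that is not shown. Here is a minimal sketch, assuming (as the code above does) that DocActions are namedtuple-like objects such as actions.AddColumn(table_id, col_id, col_info); the exact formatting of the real helper may differ.

def stringify(doc_action):
    # Render a namedtuple-like DocAction as Python source, e.g.
    # actions.AddColumn('Foo', 'bar', {'id': 'bar', 'type': 'Text'}).
    return "    actions.%s(%s)" % (
        type(doc_action).__name__,
        ", ".join(repr(arg) for arg in doc_action))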
Example #3
# Python 2 variant of the generator in Example #1 (print statements; the generated
# output here has no SCHEMA_VERSION constant).
def main():
    print """
/*** THIS FILE IS AUTO-GENERATED BY %s ***/
// tslint:disable:object-literal-key-quotes

export const schema = {
""" % __file__

    for table in schema.schema_create_actions():
        print '  "%s": {' % table.table_id
        for column in table.columns:
            print '    %-20s: "%s",' % (column['id'], column['type'])
        print '  },\n'

    print """};

export interface SchemaTypes {
"""
    for table in schema.schema_create_actions():
        print '  "%s": {' % table.table_id
        for column in table.columns:
            print '    %s: %s;' % (column['id'], get_ts_type(column['type']))
        print '  };\n'
    print "}"
Example #4
    def load_sample(self, sample):
        """
        Load _table_data_set with the given sample data. The sample is a dict with keys "SCHEMA"
        and "DATA", each a dictionary mapping table names to actions.TableData objects. "SCHEMA"
        contains the "_grist_Tables" and "_grist_Tables_column" tables.
        """
        self._table_data_set = table_data_set.TableDataSet()
        for a in schema.schema_create_actions():
            if a.table_id not in self._table_data_set.all_tables:
                self._table_data_set.apply_doc_action(a)

        for a in six.itervalues(sample["SCHEMA"]):
            self._table_data_set.BulkAddRecord(*a)

        # Create AddTable actions for each table described in the metadata.
        meta_tables = self._table_data_set.all_tables['_grist_Tables']
        meta_columns = self._table_data_set.all_tables['_grist_Tables_column']

        # Maps the row_id of each table record to the AddTable action being built for it.
        add_tables = {}
        for rec in actions.transpose_bulk_action(meta_tables):
            add_tables[rec.id] = actions.AddTable(rec.tableId, [])

        # Go through all columns, adding them to the appropriate tables.
        for rec in actions.transpose_bulk_action(meta_columns):
            add_tables[rec.parentId].columns.append({
                "id": rec.colId,
                "type": rec.type,
                "widgetOptions": rec.widgetOptions,
                "isFormula": rec.isFormula,
                "formula": rec.formula,
                "label": rec.label,
                "parentPos": rec.parentPos,
            })

        # Sort the columns in the schema according to the parentPos field from the column records.
        for action in six.itervalues(add_tables):
            action.columns.sort(key=lambda r: r["parentPos"])
            self._table_data_set.AddTable(*action)

        for a in six.itervalues(sample["DATA"]):
            self._table_data_set.ReplaceTableData(*a)
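
A hedged sketch of how a sample for load_sample could be assembled, assuming actions.TableData(table_id, row_ids, columns) and a single user table; the table name, the column values, and the trimmed metadata field list (only the fields load_sample reads) are illustrative only.

    def test_sample_loading(self):
        # Hypothetical sample: one user table "Students" with a single Text column.
        sample = {
            "SCHEMA": {
                "_grist_Tables": actions.TableData(
                    "_grist_Tables", [1], {"tableId": ["Students"]}),
                "_grist_Tables_column": actions.TableData(
                    "_grist_Tables_column", [1],
                    {"parentId": [1], "colId": ["name"], "type": ["Text"],
                     "widgetOptions": [""], "isFormula": [False], "formula": [""],
                     "label": ["Name"], "parentPos": [1.0]}),
            },
            "DATA": {
                "Students": actions.TableData("Students", [1], {"name": ["Alice"]}),
            },
        }
        self.load_sample(sample)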
Example #5
def create_migrations(all_tables, metadata_only=False):
    """
    Creates and returns a list of DocActions needed to bring this document to
    schema.SCHEMA_VERSION.
      all_tables: all tables, or just the metadata tables (those named with the _grist_ prefix),
        as a dictionary mapping table name to TableData.
      metadata_only: should be set if only metadata tables are passed in. If ALL tables are
        required to process migrations, this method will raise a "need all tables..." exception.
    """
    try:
        doc_version = all_tables['_grist_DocInfo'].columns["schemaVersion"][0]
    except Exception:
        # A missing or empty _grist_DocInfo table means a version-0 document.
        doc_version = 0

    # We create a TableDataSet, and populate it with the subset of the current schema that matches
    # all_tables. For missing items, we make up tables and incomplete columns, which should be OK
    # since we would not be adding new records to deprecated columns.
    # Note that this approach makes it NOT OK to change column types.
    tdset = table_data_set.TableDataSet()

    # For each table in the provided metadata tables, create an AddTable action.
    user_schema = schema.build_schema(all_tables['_grist_Tables'],
                                      all_tables['_grist_Tables_column'],
                                      include_builtin=False)
    for t in six.itervalues(user_schema):
        tdset.apply_doc_action(
            actions.AddTable(t.tableId, schema.cols_to_dict_list(t.columns)))

    # For each old table/column, construct an AddTable action using the current schema.
    new_schema = {a.table_id: a for a in schema.schema_create_actions()}
    for table_id, data in sorted(six.iteritems(all_tables)):
        # User tables should already be in tdset; the rest must be metadata tables.
        # (If metadata_only is true, there is simply nothing to skip here.)
        if table_id not in tdset.all_tables:
            new_col_info = {}
            if table_id in new_schema:
                new_col_info = {
                    c['id']: c
                    for c in new_schema[table_id].columns
                }
            # Use an incomplete default for unknown (i.e. deprecated) columns; some uses of the column
            # would be invalid, such as adding a new record with missing values.
            col_info = sorted(
                [new_col_info.get(col_id, {'id': col_id}) for col_id in data.columns],
                key=lambda c: list(six.iteritems(c)))
            tdset.apply_doc_action(actions.AddTable(table_id, col_info))

        # And load in the original data, interpreting the TableData object as a BulkAddRecord action.
        tdset.apply_doc_action(actions.BulkAddRecord(*data))

    migration_actions = []
    for version in six.moves.xrange(doc_version + 1, schema.SCHEMA_VERSION + 1):
        migration_func = all_migrations.get(version, noop_migration)
        if migration_func.need_all_tables and metadata_only:
            raise Exception("need all tables for migration to %s" % version)
        migration_actions.extend(migration_func(tdset))

    # Note that if we are downgrading (i.e. doc_version is higher than SCHEMA_VERSION), then the
    # following is the only action we include in the migration.
    migration_actions.append(
        actions.UpdateRecord('_grist_DocInfo', 1,
                             {'schemaVersion': schema.SCHEMA_VERSION}))
    return migration_actions
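
A short usage sketch: when only the _grist_ metadata tables of a document are at hand, create_migrations can be asked to work from those alone, falling back to a full load if some migration needs the user tables. The all_tables dict and the fallback path here are assumptions for illustration.

# Hypothetical caller; all_tables maps table names to actions.TableData.
meta_tables = {t: data for t, data in six.iteritems(all_tables)
               if t.startswith('_grist_')}
try:
    migration_actions = create_migrations(meta_tables, metadata_only=True)
except Exception:
    # Some migration needed the user tables ("need all tables..."); retry with everything.
    migration_actions = create_migrations(all_tables)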