Example #1
    def __init__(self, logger=None, dbi=None, params=None):
        myThread = threading.currentThread()
        DBCreator.__init__(self, myThread.logger, myThread.dbi)
        self.create = {}
        self.constraints = {}

        tablespaceTable = ""

        if params:
            if "tablespace_table" in params:
                tablespaceTable = "TABLESPACE %s" % params["tablespace_table"]

        self.create["rc_threshold"] = """
        CREATE TABLE rc_threshold(
            site_id INTEGER NOT NULL,
            sub_type_id INTEGER NOT NULL,
            pending_slots INTEGER NOT NULL,
            max_slots INTEGER NOT NULL) %s""" % tablespaceTable

        self.constraints["rc_threshold_fk1"] = \
            """ALTER TABLE rc_threshold ADD
                 (CONSTRAINT rc_threshold_fk1 FOREIGN KEY (site_id) REFERENCES wmbs_location(id) ON DELETE CASCADE)"""

        self.constraints["rc_threshold_fk2"] = \
            """ALTER TABLE rc_threshold ADD
Example #2
    def __init__(self, logger=None, dbi=None, param=None):
        if dbi == None:
            myThread = threading.currentThread()
            dbi = myThread.dbi
            logger = myThread.logger
        DBCreator.__init__(self, logger, dbi)

        self.create = {}
        self.constraints = {}
        self.create[
            'i_transfers'] = "CREATE INDEX TM_TASKNAME_IDX ON FILETRANSFERSDB (TM_TASKNAME)"
        self.create[
            'i_workers'] = "CREATE INDEX TM_WORKER_STATE ON FILETRANSFERSDB (TM_ASO_WORKER, TM_TRANSFER_STATE) COMPRESS 2"
        self.create[
            'i_publishers'] = "CREATE INDEX TM_PUBLICATION_STATE ON FILETRANSFERSDB (TM_ASO_WORKER, TM_PUBLICATION_STATE) COMPRESS 2"  # renamed: Oracle index names must be unique, so this cannot reuse TM_WORKER_STATE
        #  //
        # // Define create statements for each table
        # //
        #  //
        # tm_id - unique ID which is also the primary key (len 60 chars)
        # tm_user - username (len 30 chars is ENOUGH)
        # tm_workflow - taskName (len 255 chars)
        # tm_group - group which is used inside cert
        # tm_role - role which is used inside cert

        self.create['b_transfers'] = """
Example #3
    def __init__(self, logger=None, dbi=None):
        """
        _init_

        Call the base class's constructor and add all necessary tables for
        deletion.
        """
        myThread = threading.currentThread()

        if logger == None:
            logger = myThread.logger
        if dbi == None:
            dbi = myThread.dbi

        DBCreator.__init__(self, logger, dbi)
        orderedTables = Create.requiredTables[:]
        orderedTables.reverse()
        i = 0
        for tableName in orderedTables:
            i += 1
            prefix = string.zfill(i, 2)
            if tableName.endswith("_seq"):
                self.create[prefix +
                            tableName] = "DROP SEQUENCE %s" % tableName
            elif tableName.endswith("_trg"):
                self.create[prefix + tableName] = "DROP TRIGGER %s" % tableName
            else:
                self.create[prefix + tableName] = "DROP TABLE %s" % tableName
Example #4
    def __init__(self, logger = None, dbi = None):
        """
        _init_

        Call the base class's constructor and add all necessary tables for 
        deletion.
        """
        myThread = threading.currentThread()

        if logger == None:
            logger = myThread.logger
        if dbi == None:
            dbi = myThread.dbi
            
        DBCreator.__init__(self, logger, dbi)
        orderedTables = Create.requiredTables[:]
        orderedTables.reverse()
        i = 0
        for tableName in orderedTables:
            i += 1
            prefix = string.zfill(i, 2)
            if tableName.endswith("_seq"):
                 self.create[prefix + tableName] = "DROP SEQUENCE %s" % tableName
            elif tableName.endswith("_trg"):
                self.create[prefix + tableName] = "DROP TRIGGER %s" % tableName
            else:
                self.create[prefix + tableName] = "DROP TABLE %s" % tableName
Example #5
    def __init__(self, logger = None, dbi = None):
        """
        _init_

        Call the DBCreator constructor and create the list of required tables.
        """
        myThread = threading.currentThread()

        if logger == None:
            logger = myThread.logger
        if dbi == None:
            dbi = myThread.dbi
            
        DBCreator.__init__(self, logger, dbi)

        self.delete['01bl_runjob']    = "DROP TABLE bl_runjob"
        self.delete['02bl_status']    = "DROP TABLE bl_status"


        j = 50
        for i in Create.sequence_tables:
            seqname = '%s_SEQ' % i
            self.create["%s%s" % (j, seqname)] = \
                           "DROP SEQUENCE %s"  % seqname 

        return
Example #6
    def __init__(self, logger=None, dbi=None):
        """
        _init_

        Call the DBCreator constructor and create the list of required tables.
        """
        myThread = threading.currentThread()

        if logger == None:
            logger = myThread.logger
        if dbi == None:
            dbi = myThread.dbi

        DBCreator.__init__(self, logger, dbi)

        self.delete['01bl_runjob'] = "DROP TABLE bl_runjob"
        self.delete['02bl_status'] = "DROP TABLE bl_status"

        j = 50
        for i in Create.sequence_tables:
            seqname = '%s_SEQ' % i
            self.create["%s%s" % (j, seqname)] = \
                           "DROP SEQUENCE %s"  % seqname

        return
Example #7
    def __init__(self, logger = None, dbi = None, params = None):    
        myThread = threading.currentThread()
        DBCreator.__init__(self, myThread.logger, myThread.dbi)
        self.create = {}
        self.constraints = {}

        tablespaceTable = ""
        tablespaceIndex = ""

        if params:
            if params.has_key("tablespace_table"):
                tablespaceTable = "TABLESPACE %s" % params["tablespace_table"]
            if params.has_key("tablespace_index"):
                tablespaceIndex = "USING INDEX TABLESPACE %s" % params["tablespace_index"]

        self.create["rc_threshold"] = """
        CREATE TABLE rc_threshold(
            site_id INTEGER NOT NULL,
            sub_type_id INTEGER NOT NULL,
            max_slots INTEGER NOT NULL,
            priority  INTEGER DEFAULT 1) %s""" % tablespaceTable

        self.constraints["rc_threshold_fk1"] = \
          """ALTER TABLE rc_threshold ADD
               (CONSTRAINT rc_threshold_fk1 FOREIGN KEY (site_id) REFERENCES wmbs_location(id) ON DELETE CASCADE)"""

        self.constraints["rc_threshold_fk2"] = \
          """ALTER TABLE rc_threshold ADD
Example #8
    def __init__(self, logger=None, dbi=None, params=None):
        myThread = threading.currentThread()
        DBCreator.__init__(self, myThread.logger, myThread.dbi)
        self.create = {}
        self.constraints = {}
        self.create['a_transaction'] = """
SET AUTOCOMMIT = 0; """
        self.create['threadpool'] = """
CREATE TABLE tp_threadpool(
   id                      int(11)      NOT NULL auto_increment,
   event                   varchar(255) NOT NULL,
   component               varchar(255) NOT NULL,
   payload                 text         NOT NULL,
   thread_pool_id          varchar(255) NOT NULL,
   state                 enum('queued','process') default 'queued',
   primary key(id)
   ) ENGINE=InnoDB;
"""
        self.create['threadpool_buffer_in'] = """
CREATE TABLE tp_threadpool_buffer_in(
   id                      int(11)      NOT NULL auto_increment,
   event                   varchar(255) NOT NULL,
   component               varchar(255) NOT NULL,
   payload                 text         NOT NULL,
   thread_pool_id          varchar(255) NOT NULL,
   state                 enum('queued','process') default 'queued',
   primary key(id)
   ) ENGINE=InnoDB;
"""
        self.create['threadpool_buffer_out'] = """
Example #9
    def __init__(self, logger = None, dbi = None, params = None):
        myThread = threading.currentThread()
        DBCreator.__init__(self, myThread.logger, myThread.dbi)
        self.create = {}
        self.constraints = {}
        self.create['a_transaction'] = """
SET AUTOCOMMIT = 0; """
        self.create['threadpool'] = """
CREATE TABLE tp_threadpool(
   id                      int(11)      NOT NULL auto_increment,
   event                   varchar(255) NOT NULL,
   component               varchar(255) NOT NULL,
   payload                 text         NOT NULL,
   thread_pool_id          varchar(255) NOT NULL,
   state                 enum('queued','process') default 'queued',
   primary key(id)
   ) ENGINE=InnoDB;
"""
        self.create['threadpool_buffer_in'] = """
CREATE TABLE tp_threadpool_buffer_in(
   id                      int(11)      NOT NULL auto_increment,
   event                   varchar(255) NOT NULL,
   component               varchar(255) NOT NULL,
   payload                 text         NOT NULL,
   thread_pool_id          varchar(255) NOT NULL,
   state                 enum('queued','process') default 'queued',
   primary key(id)
   ) ENGINE=InnoDB;
"""
        self.create['threadpool_buffer_out'] = """
Example #10
    def execute(self, conn=None, transaction=None):
        """
        _execute_

        """
        DBCreator.execute(self, conn, transaction)

        return True
Example #11
    def execute(self, conn = None, transaction = None):
        """
        _execute_

        """
        DBCreator.execute(self, conn, transaction)

        return True
Example #12
    def __init__(self, logger = None, dbi = None, params = None):
        """
        _init_

        Call the DBCreator constructor and create the list of required tables.
        """
        myThread = threading.currentThread()

        if logger == None:
            logger = myThread.logger
        if dbi == None:
            dbi = myThread.dbi

        DBCreator.__init__(self, logger, dbi)

        self.requiredTables = ["01bl_status", "02bl_runjob"]


        self.create['01bl_status'] = \
        """CREATE TABLE bl_status
            (
            id            INT  auto_increment,
            name          VARCHAR(255),
            PRIMARY KEY (id),
            UNIQUE (name)
            )
            ENGINE = InnoDB DEFAULT CHARSET=latin1;
        """

        
        self.create['02bl_runjob'] = \
        """CREATE TABLE bl_runjob
           (
           id            INT   auto_increment,
           wmbs_id       INT,
           grid_id       VARCHAR(255),
           bulk_id       VARCHAR(255),
           status        CHAR(1)   DEFAULT '1',
           sched_status  INT,
           retry_count   INT,
           status_time   INT,
           location      INT,
           user_id       INT,
           PRIMARY KEY (id),
           FOREIGN KEY (wmbs_id) REFERENCES wmbs_job(id) ON DELETE CASCADE,
           FOREIGN KEY (sched_status) REFERENCES bl_status(id),
           FOREIGN KEY (user_id) REFERENCES wmbs_users(id) ON DELETE CASCADE,
           FOREIGN KEY (location) REFERENCES wmbs_location(id) ON DELETE CASCADE,
           UNIQUE (retry_count, wmbs_id)
           )
           ENGINE = InnoDB DEFAULT CHARSET=latin1;
           
        """


        return
Example #13
    def __init__(self, logger = None, dbi = None, params = None):
        """
        _init_

        Call the DBCreator constructor and create the list of required tables.
        """
        myThread = threading.currentThread()

        if logger == None:
            logger = myThread.logger
        if dbi == None:
            dbi = myThread.dbi

        DBCreator.__init__(self, logger, dbi)

        self.requiredTables = ["01bl_status", "02bl_runjob"]


        self.create['01bl_status'] = \
        """CREATE TABLE bl_status
            (
            id            INT  auto_increment,
            name          VARCHAR(255),
            PRIMARY KEY (id),
            UNIQUE (name)
            )
            ENGINE = InnoDB DEFAULT CHARSET=latin1;
        """


        self.create['02bl_runjob'] = \
        """CREATE TABLE bl_runjob
           (
           id            INT   auto_increment,
           wmbs_id       INT,
           grid_id       VARCHAR(255),
           bulk_id       VARCHAR(255),
           status        CHAR(1)   DEFAULT '1',
           sched_status  INT,
           retry_count   INT,
           status_time   INT,
           location      INT,
           user_id       INT,
           PRIMARY KEY (id),
           FOREIGN KEY (wmbs_id) REFERENCES wmbs_job(id) ON DELETE CASCADE,
           FOREIGN KEY (sched_status) REFERENCES bl_status(id),
           FOREIGN KEY (user_id) REFERENCES wmbs_users(id) ON DELETE CASCADE,
           FOREIGN KEY (location) REFERENCES wmbs_location(id) ON DELETE CASCADE,
           UNIQUE (retry_count, wmbs_id)
           )
           ENGINE = InnoDB DEFAULT CHARSET=latin1;

        """


        return
Example #14
    def __init__(self):
        """
        _init_

        Call the base class's constructor and create all necessary tables,
        constraints and inserts.
        """
        myThread = threading.currentThread()
        DBCreator.__init__(self, myThread.logger, myThread.dbi)
        self.delete["02managed_feeders"] = "DROP TABLE managed_feeders"
        self.delete["01managed_filesets"] = "DROP TABLE managed_filesets"
Example #15
    def __init__(self):
        """
        _init_

        Call the base class's constructor and create all necessary tables,
        constraints and inserts.
        """
        myThread = threading.currentThread()
        DBCreator.__init__(self, myThread.logger, myThread.dbi)
        self.delete["02managed_feeders"] = "DROP TABLE managed_feeders"
        self.delete["01managed_filesets"] = "DROP TABLE managed_filesets"
Example #16
    def __init__(self, logger=None, dbi=None, param=None):
        if dbi == None:
            myThread = threading.currentThread()
            dbi = myThread.dbi
            logger = myThread.logger
        DBCreator.__init__(self, logger, dbi)

        self.create = {}
        self.constraints = {}
        # Define create statements for each table
        self.create['b_filemetadata'] = """
Example #17
    def __init__(self, logger=None, dbi=None, param=None):
        if dbi == None:
            myThread = threading.currentThread()
            dbi = myThread.dbi
            logger = myThread.logger
        DBCreator.__init__(self, logger, dbi)

        self.create = {}
        self.constraints = {}
        # Define create statements for each table
        self.create['b_filemetadata'] = """
Example #18
    def __init__(self):
        myThread = threading.currentThread()
        DBCreator.__init__(self, myThread.logger, myThread.dbi)
        self.create = {}
        self.constraints = {}

        self.create['threadpool'] = """
CREATE TABLE tp_threadpool(
   id                      integer      PRIMARY KEY AUTOINCREMENT,
   event                   varchar(255) NOT NULL,
   component               varchar(255) NOT NULL,
   payload                 text         NOT NULL,
   thread_pool_id          varchar(255) NOT NULL,
   state                   varchar(20) NOT NULL default 'queued',

   FOREIGN KEY(state) references tp_queued_process_enum(value)
   )
"""
        self.create['threadpool_buffer_in'] = """
CREATE TABLE tp_threadpool_buffer_in(
   id                      integer      PRIMARY KEY AUTOINCREMENT,
   event                   varchar(255) NOT NULL,
   component               varchar(255) NOT NULL,
   payload                 text         NOT NULL,
   thread_pool_id          varchar(255) NOT NULL,
   state                   varchar(20) NOT NULL default 'queued',

   FOREIGN KEY(state) references tp_queued_process_enum(value)
   )
"""
        self.create['threadpool_buffer_out'] = """
CREATE TABLE tp_threadpool_buffer_out(
   id                      integer      PRIMARY KEY AUTOINCREMENT,
   event                   varchar(255) NOT NULL,
   component               varchar(255) NOT NULL,
   payload                 text         NOT NULL,
   thread_pool_id          varchar(255) NOT NULL,
   state                   varchar(20) NOT NULL default 'queued',

   FOREIGN KEY(state) references tp_queued_process_enum(value)
   )
"""

        self.create['tp_queued_process_enum'] = """
CREATE TABLE tp_queued_process_enum (
        value varchar(20)       PRIMARY KEY  NOT NULL
        )"""

        self.create['tp_queued_process_enum_insert1'] = """
INSERT INTO tp_queued_process_enum VALUES('queued')
"""

        self.create['tp_queued_process_enum_insert2'] = """
Example #19
    def __init__(self):
        """
        _init_

        Call the base class's constructor and create all necessary tables,
        constraints and inserts.
        """
        myThread = threading.currentThread()
        DBCreator.__init__(self, myThread.logger, myThread.dbi)
        self.delete["02wm_managed_workflow_location"] = \
            "DROP TABLE wm_managed_workflow_location"
        self.delete["01wm_managed_workflow"] = "DROP TABLE wm_managed_workflow"
Example #20
    def __init__(self):
        """
        _init_

        Call the base class's constructor and create all necessary tables,
        constraints and inserts.
        """
        myThread = threading.currentThread()
        DBCreator.__init__(self, myThread.logger, myThread.dbi)
        self.delete["02wm_managed_workflow_location"] = \
            "DROP TABLE wm_managed_workflow_location"
        self.delete["01wm_managed_workflow"] = "DROP TABLE wm_managed_workflow"
Example #21
    def __init__(self):
        myThread = threading.currentThread()
        DBCreator.__init__(self, myThread.logger, myThread.dbi)
        self.create = {}
        self.constraints = {}

        self.create['threadpool'] = """      
CREATE TABLE tp_threadpool(
   id                      integer      PRIMARY KEY AUTOINCREMENT,
   event                   varchar(255) NOT NULL,
   component               varchar(255) NOT NULL,
   payload                 text         NOT NULL,
   thread_pool_id          varchar(255) NOT NULL,
   state                   varchar(20) NOT NULL default 'queued',

   FOREIGN KEY(state) references tp_queued_process_enum(value)
   )
"""
        self.create['threadpool_buffer_in'] = """      
CREATE TABLE tp_threadpool_buffer_in(
   id                      integer      PRIMARY KEY AUTOINCREMENT,
   event                   varchar(255) NOT NULL,
   component               varchar(255) NOT NULL,
   payload                 text         NOT NULL,
   thread_pool_id          varchar(255) NOT NULL,
   state                   varchar(20) NOT NULL default 'queued',

   FOREIGN KEY(state) references tp_queued_process_enum(value)
   )
"""
        self.create['threadpool_buffer_out'] = """      
CREATE TABLE tp_threadpool_buffer_out(
   id                      integer      PRIMARY KEY AUTOINCREMENT,
   event                   varchar(255) NOT NULL,
   component               varchar(255) NOT NULL,
   payload                 text         NOT NULL,
   thread_pool_id          varchar(255) NOT NULL,
   state                   varchar(20) NOT NULL default 'queued',

   FOREIGN KEY(state) references tp_queued_process_enum(value)
   )
"""

        self.create['tp_queued_process_enum'] = """
CREATE TABLE tp_queued_process_enum (
        value varchar(20)       PRIMARY KEY  NOT NULL
        )"""

        self.create['tp_queued_process_enum_insert1'] = """
INSERT INTO tp_queued_process_enum VALUES('queued')
"""
        
        self.create['tp_queued_process_enum_insert2'] = """
Example #22
    def execute(self, conn=None, transaction=None):
        """
        _execute_

        Check to make sure that all required tables have been defined.  If
        everything is in place have the DBCreator make everything.
        """
        for requiredTable in self.requiredTables:
            if requiredTable not in self.create.keys():
                raise WMException("The table '%s' is not defined." % requiredTable, "WMCORE-2")

        DBCreator.execute(self, conn, transaction)
        return True
Example #23
    def execute(self, conn=None, transaction=None):
        """
        _execute_

        Check to make sure that all required tables have been defined.  If
        everything is in place have the DBCreator make everything.
        """
        for requiredTable in self.requiredTables:
            if requiredTable not in self.create.keys():
                raise WMException("The table '%s' is not defined." % \
                                  requiredTable, "WMCORE-2")

        DBCreator.execute(self, conn, transaction)
        return True
Example #24
    def __init__(self, logger=None, dbi=None, param=None):
        if dbi == None:
            myThread = threading.currentThread()
            dbi = myThread.dbi
            logger = myThread.logger
        DBCreator.__init__(self, logger, dbi)

        self.create = {}
        self.constraints = {}
        #  //
        # // Define create statements for each table
        #//
        #  //255 chars for tm_task_status is even too much
        self.create['b_tasks'] = """
Example #25
    def __init__(self, logger=None, dbi=None, params=None):
        """
        _init_

        Call the DBCreator constructor and create the list of required tables.
        """
        myThread = threading.currentThread()

        if logger is None:
            logger = myThread.logger
        if dbi is None:
            dbi = myThread.dbi

        tablespaceTable = ""
        tablespaceIndex = ""
        if params:
            if "tablespace_table" in params:
                tablespaceTable = "TABLESPACE %s" % params["tablespace_table"]
            if "tablespace_index" in params:
                tablespaceIndex = "USING INDEX TABLESPACE %s" % params[
                    "tablespace_index"]

        DBCreator.__init__(self, logger, dbi)

        self.create["01wm_components"] = \
            """CREATE TABLE wm_components (
               id               INTEGER      PRIMARY KEY AUTO_INCREMENT,
               name             VARCHAR(255) NOT NULL,
               pid              INTEGER      NOT NULL,
               update_threshold INTEGER      NOT NULL,
               UNIQUE (name))"""

        self.create["02wm_workers"] = \
            """CREATE TABLE wm_workers (
               component_id  INTEGER NOT NULL,
               name          VARCHAR(255) NOT NULL,
               last_updated  INTEGER      NOT NULL,
               state         VARCHAR(255),
               pid           INTEGER,
               poll_interval INTEGER      NOT NULL,
               last_error    INTEGER,
               cycle_time    FLOAT DEFAULT 0 NOT NULL,
               outcome       VARCHAR(1000),
               error_message VARCHAR(1000),
               UNIQUE (name))"""

        self.constraints["FK_wm_component_worker"] = \
            """ALTER TABLE wm_workers ADD CONSTRAINT FK_wm_component_worker
Example #26
    def __init__(self,logger=None, dbi=None, params = None):
        myThread = threading.currentThread()
        DBCreator.__init__(self, myThread.logger, myThread.dbi)
        self.create = {}
        self.constraints = {}
        self.create['taa'] = """      
SET AUTOCOMMIT = 0; """
        self.create['ta_managed_feeders'] = """
CREATE TABLE `managed_feeders` (
    id           INTEGER      PRIMARY KEY AUTO_INCREMENT,
    feeder_type  VARCHAR(256) NOT NULL,
    feeder_state VARCHAR(256) NOT NULL,
    insert_time  INT(11)      NOT NULL
    ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
"""
        self.create['ta_managed_filesets'] = """
Example #27
    def __init__(self,logger=None, dbi=None, params = None):
        myThread = threading.currentThread()
        DBCreator.__init__(self, myThread.logger, myThread.dbi)
        self.create = {}
        self.constraints = {}
        self.create['taa'] = """
SET AUTOCOMMIT = 0; """
        self.create['ta_managed_feeders'] = """
CREATE TABLE `managed_feeders` (
    id           INTEGER      PRIMARY KEY AUTO_INCREMENT,
    feeder_type  VARCHAR(256) NOT NULL,
    feeder_state VARCHAR(256) NOT NULL,
    insert_time  INT(11)      NOT NULL
    ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
"""
        self.create['ta_managed_filesets'] = """
Example #28
    def __init__(self, logger = None, dbi = None, params = None):
        myThread = threading.currentThread()
        DBCreator.__init__(self, myThread.logger, myThread.dbi)
        self.create = {}
        self.constraints = {}

        self.create["rc_threshold"] = """
        CREATE TABLE rc_threshold(
            site_id     INTEGER NOT NULL,
            sub_type_id INTEGER NOT NULL,
            max_slots   INTEGER NOT NULL,
            priority    INTEGER DEFAULT 1,
            FOREIGN KEY (site_id) REFERENCES wmbs_location(id) ON DELETE CASCADE,
            FOREIGN KEY (sub_type_id) REFERENCES wmbs_sub_types(id) ON DELETE CASCADE)"""

        return
Example #29
    def __init__(self, logger=None, dbi=None, params=None):
        myThread = threading.currentThread()
        DBCreator.__init__(self, myThread.logger, myThread.dbi)
        self.create = {}
        self.constraints = {}

        self.create["rc_threshold"] = """
        CREATE TABLE rc_threshold(
            site_id       INTEGER NOT NULL,
            sub_type_id   INTEGER NOT NULL,
            pending_slots INTEGER NOT NULL,
            max_slots     INTEGER NOT NULL,
            FOREIGN KEY (site_id) REFERENCES wmbs_location(id) ON DELETE CASCADE,
            FOREIGN KEY (sub_type_id) REFERENCES wmbs_sub_types(id) ON DELETE CASCADE)"""

        return
Example #30
    def __init__(self, logger = None, dbi = None, params = None):
        """
        _init_

        Call the DBCreator constructor and create the list of required tables.
        """
        myThread = threading.currentThread()

        if logger == None:
            logger = myThread.logger
        if dbi == None:
            dbi = myThread.dbi

        tablespaceTable = ""
        tablespaceIndex = ""
        if params:
            if "tablespace_table" in params:
                tablespaceTable = "TABLESPACE %s" % params["tablespace_table"]
            if "tablespace_index" in params:
                tablespaceIndex = "USING INDEX TABLESPACE %s" % params["tablespace_index"]

        DBCreator.__init__(self, logger, dbi)

        self.create["01wm_components"] = \
          """CREATE TABLE wm_components (
             id               INTEGER      PRIMARY KEY AUTO_INCREMENT,
             name             VARCHAR(255) NOT NULL,
             pid              INTEGER      NOT NULL,
             update_threshold INTEGER      NOT NULL,
             UNIQUE (name))"""

        self.create["02wm_workers"] = \
          """CREATE TABLE wm_workers (
             component_id  INTEGER NOT NULL,
             name          VARCHAR(255) NOT NULL,
             last_updated  INTEGER      NOT NULL,
             state         VARCHAR(255),
             pid           INTEGER,
             poll_interval INTEGER      NOT NULL,
             last_error    INTEGER,
             cycle_time    DECIMAL(9,4) DEFAULT 0 NOT NULL,
             outcome       VARCHAR(1000),
             error_message VARCHAR(1000),
             UNIQUE (component_id, name))"""

        self.constraints["FK_wm_component_worker"] = \
              """ALTER TABLE wm_workers ADD CONSTRAINT FK_wm_component_worker
Example #31
    def __init__(self, logger = None, dbi = None, params = None):
        """
        __init__

        """
        myThread = threading.currentThread()
        if logger == None:
            logger = myThread.logger
        if dbi == None:
            dbi = myThread.dbi
            
        DBCreator.__init__(self, logger, dbi)
        
        self.delete["01test_tablea"] = "DROP TABLE test_tablea"
        self.delete["02test_tableb"] = "DROP TABLE test_tableb"
        self.delete["03test_tablec"] = "DROP TABLE test_tablec" 
        self.delete["04test_bigcol"] = "DROP TABLE test_bigcol"       

        return
Example #32
    def __init__(self, logger=None, dbi=None, params=None):
        """
        __init__

        """
        myThread = threading.currentThread()
        if logger == None:
            logger = myThread.logger
        if dbi == None:
            dbi = myThread.dbi

        DBCreator.__init__(self, logger, dbi)

        self.delete["01test_tablea"] = "DROP TABLE test_tablea"
        self.delete["02test_tableb"] = "DROP TABLE test_tableb"
        self.delete["03test_tablec"] = "DROP TABLE test_tablec"
        self.delete["04test_bigcol"] = "DROP TABLE test_bigcol"

        return
Example #33
    def __init__(self, logger = None, dbi = None):
        """
        _init_

        Call the DBCreator constructor and create the list of required tables.
        """
        myThread = threading.currentThread()

        if logger == None:
            logger = myThread.logger
        if dbi == None:
            dbi = myThread.dbi
            
        DBCreator.__init__(self, logger, dbi)

        self.create = {}
        self.delete = {}
        self.delete["03tp_threadpool"]             = "DROP TABLE tp_threadpool"
        self.delete["02tp_threadpool_buffer_in"]   = "DROP TABLE tp_threadpool_buffer_in"
        self.delete["01tp_threadpool_buffer_out"]  = "DROP TABLE tp_threadpool_buffer_out"
Example #34
    def __init__(self, logger=None, dbi=None):
        """
        _init_

        Call the DBCreator constructor and create the list of required tables.
        """
        myThread = threading.currentThread()

        if logger == None:
            logger = myThread.logger
        if dbi == None:
            dbi = myThread.dbi

        DBCreator.__init__(self, logger, dbi)

        self.create = {}
        self.delete = {}
        self.delete["rc_threshold"] = "DROP TABLE rc_threshold"

        return
Example #35
    def __init__(self, logger=None, dbi=None, param=None):
        if dbi == None:
            myThread = threading.currentThread()
            dbi = myThread.dbi
            logger = myThread.logger
        DBCreator.__init__(self, logger, dbi)

        self.create = {}
        self.constraints = {}
        #  //
        # // Define create statements for each table
        # //
        #  //
        # tm_id - unique ID which is also the primary key (len 60 chars)
        # tm_user - username (len 30 chars is ENOUGH)
        # tm_workflow - taskName (len 255 chars)
        # tm_group - group which is used inside cert
        # tm_role - role which is used inside cert

        self.create['b_transfers'] = """
Example #36
    def __init__(self, logger = None, dbi = None):
        """
        _init_

        Call the DBCreator constructor and create the list of required tables.
        """
        myThread = threading.currentThread()

        if logger == None:
            logger = myThread.logger
        if dbi == None:
            dbi = myThread.dbi
            
        DBCreator.__init__(self, logger, dbi)

        self.create = {}
        self.delete = {}
        self.delete["rc_threshold"] = "DROP TABLE rc_threshold"

        return
Example #37
    def __init__(self, logger=None, dbi=None):
        """
        _init_

        Call the DBCreator constructor and create the list of required tables.
        """
        myThread = threading.currentThread()

        if logger == None:
            logger = myThread.logger
        if dbi == None:
            dbi = myThread.dbi

        DBCreator.__init__(self, logger, dbi)

        self.create['01bl_runjob'] = "DROP TABLE bl_runjob"
        self.create['02bl_status'] = "DROP TABLE bl_status"

        self.requiredTables = []

        return
Example #38
    def __init__(self, logger = None, dbi = None):
        """
        _init_

        Call the DBCreator constructor and create the list of required tables.
        """
        myThread = threading.currentThread()

        if logger == None:
            logger = myThread.logger
        if dbi == None:
            dbi = myThread.dbi

        DBCreator.__init__(self, logger, dbi)
        orderedTables = Create.requiredTables[:]
        orderedTables.reverse()
        i = 0
        for tableName in orderedTables:
            i += 1
            prefix = string.zfill(i, 2)
            self.create[prefix + tableName] = "DROP TABLE %s" % tableName
Example #39
    def __init__(self,logger=None, dbi=None, params = None):
        myThread = threading.currentThread()
        DBCreator.__init__(self, myThread.logger, myThread.dbi)
        self.create = {}
        self.constraints = {}
        self.create['taa'] = """      
SET AUTOCOMMIT = 0; """
        self.create['ta_wm_managed_workflow'] = """
CREATE TABLE `wm_managed_workflow` (
    id               INTEGER         PRIMARY KEY AUTO_INCREMENT,
    workflow         INT(11)         NOT NULL,
    fileset_match    VARCHAR(256)    NOT NULL,
    split_algo       VARCHAR(256)    NOT NULL,
    type             VARCHAR(256)    NOT NULL,
    UNIQUE (workflow, fileset_match),
    FOREIGN KEY(workflow) REFERENCES wmbs_workflow(id)
        ON DELETE CASCADE
    ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
"""

        self.create['ta_wm_managed_workflow_location'] = """
Example #40
    def __init__(self):
        """
        _init_

        Call the base class's constructor and create all necessary tables,
        constraints and inserts.
        """
        myThread = threading.currentThread()
        DBCreator.__init__(self, myThread.logger, myThread.dbi)

        self.delete["11dbsbuffer_block"]              = "DROP TABLE dbsbuffer_block"
        self.delete["10dbsbuffer_dataset"]            = "DROP TABLE dbsbuffer_dataset"
        self.delete["09dbsbuffer_algo"]               = "DROP TABLE dbsbuffer_algo"
        self.delete["08dbsbuffer_algo_dataset_assoc"] = "DROP TABLE dbsbuffer_algo_dataset_assoc"
        self.delete["07dbsbuffer_file"]               = "DROP TABLE dbsbuffer_file"
        self.delete["06dbsbuffer_file_parent"]        = "DROP TABLE dbsbuffer_file_parent"
        self.delete["05dbsbuffer_file_runlumi_map"]   = "DROP TABLE dbsbuffer_file_runlumi_map"
        self.delete["04dbsbuffer_location"]           = "DROP TABLE dbsbuffer_location"
        self.delete["03dbsbuffer_file_location"]      = "DROP TABLE dbsbuffer_file_location"
        self.delete["02dbsbuffer_checksum_type"]      = "DROP TABLE dbsbuffer_checksum_type"
        self.delete["01dbsbuffer_file_checksums"]     = "DROP TABLE dbsbuffer_file_checksums"
Example #41
    def __init__(self, logger=None, dbi=None, params=None):
        myThread = threading.currentThread()
        DBCreator.__init__(self, myThread.logger, myThread.dbi)
        self.create = {}
        self.constraints = {}
        self.create['taa'] = """
SET AUTOCOMMIT = 0; """
        self.create['ta_wm_managed_workflow'] = """
CREATE TABLE `wm_managed_workflow` (
    id               INTEGER         PRIMARY KEY AUTO_INCREMENT,
    workflow         INT(11)         NOT NULL,
    fileset_match    VARCHAR(256)    NOT NULL,
    split_algo       VARCHAR(256)    NOT NULL,
    type             VARCHAR(256)    NOT NULL,
    UNIQUE (workflow, fileset_match),
    FOREIGN KEY(workflow) REFERENCES wmbs_workflow(id)
        ON DELETE CASCADE
    ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
"""

        self.create['ta_wm_managed_workflow_location'] = """
Example #42
    def __init__(self, logger=None, dbi=None):
        """
        _init_

        Call the DBCreator constructor and create the list of required tables.
        """
        myThread = threading.currentThread()

        if logger == None:
            logger = myThread.logger
        if dbi == None:
            dbi = myThread.dbi

        DBCreator.__init__(self, logger, dbi)
        orderedTables = Create.requiredTables[:]
        orderedTables.reverse()
        i = 0
        for tableName in orderedTables:
            i += 1
            prefix = string.zfill(i, 2)
            self.create[prefix + tableName] = "DROP TABLE %s" % tableName
Example #43
    def __init__(self, logger=None, dbi=None):
        """
        _init_

        Call the DBCreator constructor and create the list of required tables.
        """
        myThread = threading.currentThread()

        if logger == None:
            logger = myThread.logger
        if dbi == None:
            dbi = myThread.dbi

        DBCreator.__init__(self, logger, dbi)

        self.create = {}
        self.delete = {}
        self.delete["03tp_threadpool"] = "DROP TABLE tp_threadpool"
        self.delete[
            "02tp_threadpool_buffer_in"] = "DROP TABLE tp_threadpool_buffer_in"
        self.delete[
            "01tp_threadpool_buffer_out"] = "DROP TABLE tp_threadpool_buffer_out"
Example #44
    def __init__(self, logger = None, dbi = None):
        """
        _init_

        Call the DBCreator constructor and create the list of required tables.
        """
        myThread = threading.currentThread()

        if logger == None:
            logger = myThread.logger
        if dbi == None:
            dbi = myThread.dbi
            
        DBCreator.__init__(self, logger, dbi)

        self.delete["22wmbs_fileset"] = "DROP TABLE wmbs_fileset"
        self.delete["21wmbs_file_details"] = "DROP TABLE wmbs_file_details"
        self.delete["20wmbs_fileset_files"] = "DROP TABLE wmbs_fileset_files"
        self.delete["19wmbs_file_parent"] = "DROP TABLE wmbs_file_parent"
        self.delete["18wmbs_file_runlumi_map"] = "DROP TABLE wmbs_file_runlumi_map"
        self.delete["17wmbs_location"] = "DROP TABLE wmbs_location"
        self.delete["16wmbs_file_location"] = "DROP TABLE wmbs_file_location"
        self.delete["15wmbs_users"] = "DROP TABLE wmbs_users"
        self.delete["14wmbs_workflow"] = "DROP TABLE wmbs_workflow"
        self.delete["13wmbs_workflow_output"] = "DROP TABLE wmbs_workflow_output"
        self.delete["12wmbs_sub_types"] = "DROP TABLE wmbs_sub_types"
        self.delete["11wmbs_subscription"] = "DROP TABLE wmbs_subscription"
        self.delete["10wmbs_subscription_validation"] = "DROP TABLE wmbs_subscription_validation"
        self.delete["10wmbs_sub_files_acquired"] = "DROP TABLE wmbs_sub_files_acquired"
        self.delete["10wmbs_sub_files_available"] = "DROP TABLE wmbs_sub_files_available"
        self.delete["09wmbs_sub_files_failed"] = "DROP TABLE wmbs_sub_files_failed"
        self.delete["08wmbs_sub_files_complete"] = "DROP TABLE wmbs_sub_files_complete"
        self.delete["07wmbs_jobgroup"] = "DROP TABLE wmbs_jobgroup"
        self.delete["06wmbs_job_state"] = "DROP TABLE wmbs_job_state"
        self.delete["05wmbs_job"] = "DROP TABLE wmbs_job"
        self.delete["04wmbs_job_assoc"] = "DROP TABLE wmbs_job_assoc"
        self.delete["03wmbs_job_mask"] = "DROP TABLE wmbs_job_mask"
        self.delete["02wmbs_checksum_type"] = "DROP TABLE wmbs_checksum_type"
        self.delete["01wmbs_file_checksums"] = "DROP TABLE wmbs_file_checksums"
Example #45
    def __init__(self, logger=None, dbi=None, param=None):
        if dbi == None:
            myThread = threading.currentThread()
            dbi = myThread.dbi
            logger = myThread.logger
        DBCreator.__init__(self, logger, dbi)

        self.create = {}
        self.constraints = {}
        self.create['i_transfers'] = "CREATE INDEX TM_TASKNAME_IDX ON FILETRANSFERSDB (TM_TASKNAME)"
        self.create['i_workers'] = "CREATE INDEX TM_WORKER_STATE ON FILETRANSFERSDB (TM_ASO_WORKER, TM_TRANSFER_STATE) COMPRESS 2"
        #  //
        # // Define create statements for each table
        # //
        #  //
        # tm_id - unique ID which is also the primary key (len 60 chars)
        # tm_user - username (len 30 chars is ENOUGH)
        # tm_workflow - taskName (len 255 chars)
        # tm_group - group which is used inside cert
        # tm_role - role which is used inside cert

        self.create['b_transfers'] = """
Example #46
    def __init__(self, logger = None, dbi = None):
        """
        _init_

        Call the DBCreator constructor and create the list of required tables.
        """
        myThread = threading.currentThread()

        if logger == None:
            logger = myThread.logger
        if dbi == None:
            dbi = myThread.dbi


        DBCreator.__init__(self, logger, dbi)
            

        self.create['01bl_runjob']    = "DROP TABLE bl_runjob"
        self.create['02bl_status']    = "DROP TABLE bl_status"

        self.requiredTables = []

        return
Example #47
    def __init__(self, logger=None, dbi=None, params=None):
        """
        __init__

        """
        myThread = threading.currentThread()
        if logger == None:
            logger = myThread.logger
        if dbi == None:
            dbi = myThread.dbi

        DBCreator.__init__(self, logger, dbi)

        self.create["01test_tablea"] = \
          """CREATE TABLE test_tablea (
               column1 INTEGER,
               column2 INTEGER,
               column3 VARCHAR(255))"""

        self.create["01test_tableb"] = \
          """CREATE TABLE test_tableb (
               column1 VARCHAR(255),
               column2 INTEGER,
               column3 VARCHAR(255))"""

        self.create["01test_tablec"] = \
          """CREATE TABLE test_tablec (
               column1 VARCHAR(255),
               column2 VARCHAR(255),
               column3 VARCHAR(255))"""

        self.create["01test_bigcol"] = \
          """CREATE TABLE test_bigcol (
               column1 DEC(35))"""

        return
Example #48
    def __init__(self, logger = None, dbi = None, params = None):
        """
        __init__

        """
        myThread = threading.currentThread()
        if logger == None:
            logger = myThread.logger
        if dbi == None:
            dbi = myThread.dbi
            
        DBCreator.__init__(self, logger, dbi)
        
        self.create["01test_tablea"] = \
          """CREATE TABLE test_tablea (
               column1 INTEGER,
               column2 INTEGER,
               column3 VARCHAR(255))"""

        self.create["01test_tableb"] = \
          """CREATE TABLE test_tableb (
               column1 VARCHAR(255),
               column2 INTEGER,
               column3 VARCHAR(255))"""

        self.create["01test_tablec"] = \
          """CREATE TABLE test_tablec (
               column1 VARCHAR(255),
               column2 VARCHAR(255),
               column3 VARCHAR(255))"""

        self.create["01test_bigcol"] = \
          """CREATE TABLE test_bigcol (
               column1 DEC(35))"""

        return
Example #49
    def __init__(self, logger = None, dbi = None, params = None):
        myThread = threading.currentThread()
        DBCreator.__init__(self, myThread.logger, myThread.dbi)
        self.create = {}
        self.constraints = {}

        tablespaceTable = ""
        tablespaceIndex = ""

        if params:
            if "tablespace_table" in params:
                tablespaceTable = "TABLESPACE %s" % params["tablespace_table"]
            if "tablespace_index" in params:
                tablespaceIndex = "USING INDEX TABLESPACE %s" % params["tablespace_index"]

        self.create["tp_threadpool"] = \
          """CREATE TABLE tp_threadpool (
               id             NUMBER(11)    NOT NULL ENABLE,
               event          VARCHAR2(255) NOT NULL ENABLE,
               component      VARCHAR2(255) NOT NULL ENABLE,
               payload        CLOB          NOT NULL ENABLE,
               thread_pool_id VARCHAR2(255) NOT NULL ENABLE,
               state          VARCHAR(20)   DEFAULT 'queued'  NOT NULL ENABLE
               ) %s""" % tablespaceTable

        self.create["tp_threadpool_seq"] = \
          """CREATE SEQUENCE tp_threadpool_seq
               START WITH 1
               INCREMENT BY 1
               NOMAXVALUE"""

        self.indexes["tp_threadpool_pk"] = \
          """ALTER TABLE tp_threadpool ADD
               (CONSTRAINT tp_threadpool_pk PRIMARY KEY (id) %s)""" % tablespaceIndex

        self.constraints["tp_threadpool_fk"] = \
          """ALTER TABLE tp_threadpool ADD
               (CONSTRAINT tp_threadpool_state CHECK(state IN ('queued', 'process')))"""

        self.create["tp_threadpool_trg"] = \
          """CREATE TRIGGER tp_threadpool_trg
               BEFORE INSERT ON tp_threadpool
               FOR EACH ROW
                 DECLARE m_no INTEGER;
                 BEGIN
                   SELECT tp_threadpool_seq.nextval INTO :new.id FROM dual;
                 END;"""

        self.create["tp_threadpool_buffer_in"] = \
          """CREATE TABLE tp_threadpool_buffer_in (
               id             NUMBER(11)   NOT NULL ENABLE,
               event          VARCHAR(255) NOT NULL ENABLE,
               component      VARCHAR(255) NOT NULL ENABLE,
               payload        CLOB         NOT NULL ENABLE,
               thread_pool_id VARCHAR(255) NOT NULL ENABLE,
               state          VARCHAR2(20) DEFAULT 'queued' NOT NULL ENABLE
               ) %s""" % tablespaceTable

        self.create["tp_threadpool_buffer_in_seq"] = \
          """CREATE SEQUENCE tp_buffer_in_seq
               START WITH 1
               INCREMENT BY 1
               NOMAXVALUE"""

        self.create["tp_threadpool_buffer_in_trg"] = \
          """CREATE TRIGGER tp_buffer_in_trg
               BEFORE INSERT ON tp_threadpool_buffer_in
               FOR EACH ROW
                 BEGIN
                   SELECT tp_buffer_in_seq.nextval INTO :new.id FROM dual;
                 END;"""

        self.constraints["tp_threadpool_buffer_in_fk"] = \
          """ALTER TABLE tp_threadpool_buffer_in ADD
               (CONSTRAINT tp_threadpool_buffer_in_state CHECK(state IN ('queued', 'process')))"""

        self.indexes["tp_threadpool_buffer_in_pk"] = \
          """ALTER TABLE tp_threadpool_buffer_in ADD
               (CONSTRAINT tp_threadpool_buffer_in_pk PRIMARY KEY (id) %s)""" % tablespaceIndex

        self.create["tp_threadpool_buffer_out"] = \
          """CREATE TABLE tp_threadpool_buffer_out (
               id             NUMBER(11)   NOT NULL ENABLE,
               event          varchar(255) NOT NULL ENABLE,
               component      varchar(255) NOT NULL ENABLE,
               payload        clob         NOT NULL ENABLE,
               thread_pool_id varchar(255) NOT NULL ENABLE,
               state          varchar2(20) DEFAULT 'queued' NOT NULL ENABLE
               ) %s""" % tablespaceTable

        self.create["threadpool_buffer_out_seq"] = \
          """CREATE SEQUENCE tp_buffer_out_seq
               START WITH 1
               INCREMENT BY 1
               NOMAXVALUE"""

        self.create["tp_threadpool_buffer_out_trg"] = \
          """CREATE TRIGGER tp_buffer_out_trg
               BEFORE INSERT ON tp_threadpool_buffer_out
               FOR EACH ROW
                 BEGIN
                   SELECT tp_buffer_out_seq.nextval INTO :new.id FROM dual;
                 END;"""

        self.indexes["tp_threadpool_buffer_out_pk"] = \
          """ALTER TABLE tp_threadpool_buffer_out ADD
               (CONSTRAINT tp_threadpool_buffer_out_pk PRIMARY KEY (id) %s)""" % tablespaceIndex

        self.constraints["tp_threadpool_buffer_out_ck"] = \
          """ALTER TABLE tp_threadpool_buffer_out ADD
Example #50
    def __init__(self, logger = None, dbi = None, params = None):
        """
        _init_

        Call the base class's constructor and create all necessary tables,
        constraints and inserts.
        """
        myThread = threading.currentThread()
        DBCreator.__init__(self, myThread.logger, myThread.dbi)

        self.create["01dbsbuffer_dataset"] = \
              """CREATE TABLE dbsbuffer_dataset
                        (
                           id              BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
                           path            VARCHAR(500) COLLATE latin1_general_cs UNIQUE NOT NULL,
                           processing_ver  VARCHAR(255),
                           acquisition_era VARCHAR(255),
                           valid_status    VARCHAR(20),
                           global_tag      VARCHAR(255),
                           parent          VARCHAR(500),
                           custodial_site  VARCHAR(255),
                           subscribed int default 0,
                           primary key(id)
                        ) ENGINE=InnoDB"""

        self.create["02dbsbuffer_algo"] = \
              """CREATE TABLE dbsbuffer_algo
                        (
                           id     BIGINT UNSIGNED not null auto_increment,
                           app_name varchar(100),
                           app_ver  varchar(100),
                           app_fam  varchar(100),
                           pset_hash varchar(700),
                           config_content LONGTEXT,
                           in_dbs int,
                           primary key(ID),
                           unique (app_name, app_ver, app_fam, pset_hash)
                        ) ENGINE=InnoDB"""

        self.create["03dbsbuffer_algo_dataset_assoc"] = \
              """CREATE TABLE dbsbuffer_algo_dataset_assoc
                        (
                           id BIGINT UNSIGNED NOT NULL AUTO_INCREMENT,
                           algo_id BIGINT UNSIGNED,
                           dataset_id BIGINT UNSIGNED,
                           in_dbs INTEGER DEFAULT 0,
                           primary key(id),
                           FOREIGN KEY (algo_id) REFERENCES dbsbuffer_algo(id)
                             ON DELETE CASCADE,
                           FOREIGN KEY (dataset_id) REFERENCES dbsbuffer_dataset(id)
                             ON DELETE CASCADE
                        ) ENGINE = InnoDB"""

        self.create["03dbsbuffer_workflow"] = \
          """CREATE TABLE dbsbuffer_workflow (
               id           INTEGER PRIMARY KEY AUTO_INCREMENT,
               name         VARCHAR(255),
               task         VARCHAR(255),
               spec         VARCHAR(255),
               UNIQUE(name, task)) ENGINE = InnoDB"""

        self.create["04dbsbuffer_file"] = \
          """CREATE TABLE dbsbuffer_file (
             id           INTEGER      PRIMARY KEY AUTO_INCREMENT,
             lfn          VARCHAR(500) NOT NULL,
             filesize     BIGINT,
             events       INTEGER,
             dataset_algo BIGINT UNSIGNED   not null,
             block_id     BIGINT UNSIGNED,
             status       varchar(20),
             in_phedex    INTEGER DEFAULT 0,
             workflow     INTEGER,
             LastModificationDate  BIGINT,
             FOREIGN KEY (workflow) references dbsbuffer_workflow(id)
               ON DELETE CASCADE,
             UNIQUE(lfn)) ENGINE=InnoDB"""

        self.create["06dbsbuffer_file_parent"] = \
          """CREATE TABLE dbsbuffer_file_parent (
             child  INTEGER NOT NULL,
             parent INTEGER NOT NULL,
             FOREIGN KEY (child)  references dbsbuffer_file(id)
               ON DELETE CASCADE,
             FOREIGN KEY (parent) references dbsbuffer_file(id),
             UNIQUE(child, parent))ENGINE=InnoDB"""

        self.create["07dbsbuffer_file_runlumi_map"] = \
          """CREATE TABLE dbsbuffer_file_runlumi_map (
             filename    INTEGER NOT NULL,
             run         INTEGER NOT NULL,
             lumi        INTEGER NOT NULL,
             FOREIGN KEY (filename) references dbsbuffer_file(id)
               ON DELETE CASCADE)ENGINE=InnoDB"""

        self.create["08dbsbuffer_location"] = \
          """CREATE TABLE dbsbuffer_location (
             id      INTEGER      PRIMARY KEY AUTO_INCREMENT,
             se_name VARCHAR(255) NOT NULL,
             UNIQUE(se_name))ENGINE=InnoDB"""

        self.create["09dbsbuffer_file_location"] = \
          """CREATE TABLE dbsbuffer_file_location (
             filename INTEGER NOT NULL,
             location INTEGER NOT NULL,
             UNIQUE(filename, location)) ENGINE=InnoDB"""

        self.create["10dbsbuffer_block"] = \
          """CREATE TABLE dbsbuffer_block (
             id           INTEGER      PRIMARY KEY AUTO_INCREMENT,
             blockname    VARCHAR(250) NOT NULL,
             location     INTEGER      NOT NULL,
             create_time  INTEGER,
             status       VARCHAR(20),
             UNIQUE(blockname, location))ENGINE=InnoDB"""

        self.create["11dbsbuffer_checksum_type"] = \
          """CREATE TABLE dbsbuffer_checksum_type (
              id            INTEGER      PRIMARY KEY AUTO_INCREMENT,
              type          VARCHAR(255) ) ENGINE=InnoDB"""


        self.create["12dbsbuffer_file_checksums"] = \
          """CREATE TABLE dbsbuffer_file_checksums (
              fileid        INTEGER,
              typeid        INTEGER,
              cksum         VARCHAR(100),
              UNIQUE (fileid, typeid),
              FOREIGN KEY (typeid) REFERENCES dbsbuffer_checksum_type(id)
                ON DELETE CASCADE,
              FOREIGN KEY (fileid) REFERENCES dbsbuffer_file(id)
                ON DELETE CASCADE) ENGINE=InnoDB"""




        checksumTypes = ['cksum', 'adler32', 'md5']
        for i in checksumTypes:
            checksumTypeQuery = """INSERT INTO dbsbuffer_checksum_type (type) VALUES ('%s')
            """ % (i)
            self.inserts["wmbs_checksum_type_%s" % (i)] = checksumTypeQuery
Example #51
    def __init__(self, logger=None, dbi=None, params=None):
        """
        _init_

        Call the DBCreator constructor and create the list of required tables.
        """
        myThread = threading.currentThread()

        if logger is None:
            logger = myThread.logger
        if dbi is None:
            dbi = myThread.dbi

        tablespaceIndex = ""
        if params:
            if "tablespace_index" in params:
                tablespaceIndex = "USING INDEX TABLESPACE %s" % params[
                    "tablespace_index"]

        DBCreator.__init__(self, logger, dbi)

        self.requiredTables = ["01bl_status", "02bl_runjob"]

        self.create['01bl_status'] = \
            """CREATE TABLE bl_status
                (
                id            INT  auto_increment,
                name          VARCHAR(255),
                PRIMARY KEY (id),
                UNIQUE (name)
                )
                ENGINE = InnoDB DEFAULT CHARSET=latin1;
            """

        self.create['02bl_runjob'] = \
            """CREATE TABLE bl_runjob
               (
               id            INT   auto_increment,
               wmbs_id       INT,
               grid_id       VARCHAR(255),
               bulk_id       VARCHAR(255),
               status        CHAR(1)   DEFAULT '1',
               sched_status  INT NOT NULL,
               retry_count   INT,
               status_time   INT,
               location      INT,
               user_id       INT,
               PRIMARY KEY (id),
               FOREIGN KEY (wmbs_id) REFERENCES wmbs_job(id) ON DELETE CASCADE,
               FOREIGN KEY (sched_status) REFERENCES bl_status(id),
               FOREIGN KEY (user_id) REFERENCES wmbs_users(id) ON DELETE CASCADE,
               FOREIGN KEY (location) REFERENCES wmbs_location(id) ON DELETE CASCADE,
               UNIQUE (retry_count, wmbs_id)
               )
               ENGINE = InnoDB DEFAULT CHARSET=latin1;

            """

        self.constraints["01_idx_bl_runjob"] = \
            """CREATE INDEX idx_bl_runjob_wmbs ON bl_runjob(wmbs_id) %s""" % tablespaceIndex

        self.constraints["02_idx_bl_runjob"] = \
            """CREATE INDEX idx_bl_runjob_status ON bl_runjob(sched_status) %s""" % tablespaceIndex

        self.constraints["03_idx_bl_runjob"] = \
            """CREATE INDEX idx_bl_runjob_users ON bl_runjob(user_id) %s""" % tablespaceIndex

        self.constraints["04_idx_bl_runjob"] = \
            """CREATE INDEX idx_bl_runjob_location ON bl_runjob(location) %s""" % tablespaceIndex

        return
Example #52
    def __init__(self, logger=None, dbi=None, params=None):
        """
        _init_

        Call the DBCreator constructor and create the list of required tables.
        """

        myThread = threading.currentThread()

        if logger == None:
            logger = myThread.logger
        if dbi == None:
            dbi = myThread.dbi

        DBCreator.__init__(self, logger, dbi)

        tablespaceTable = ""
        tablespaceIndex = ""

        self.create = {}
        self.constraints = {}
        self.indexes = {}

        if params:
            if params.has_key("tablespace_table"):
                tablespaceTable = "TABLESPACE %s" % params["tablespace_table"]
            if params.has_key("tablespace_index"):
                tablespaceIndex = "USING INDEX TABLESPACE %s" % params[
                    "tablespace_index"]

        self.requiredTables = ["01bl_status", "02bl_runjob"]

        self.sequence_tables = []
        self.sequence_tables.append("bl_status")
        self.sequence_tables.append("bl_runjob")


        self.create['01bl_status'] = \
        """CREATE TABLE bl_status (
            id            INTEGER  NOT NULL,
            name          VARCHAR(255)
            ) %s  """ % (tablespaceTable)


        self.create['02bl_runjob'] = \
        """CREATE TABLE bl_runjob
           (
           id            INTEGER NOT NULL,
           wmbs_id       INTEGER,
           grid_id       VARCHAR(255),
           bulk_id       VARCHAR(255),
           status        CHAR(1)   DEFAULT '1',
           sched_status  INTEGER,
           retry_count   INTEGER,
           status_time   INTEGER,
           location      INTEGER,
           user_id       INTEGER
           ) %s  """ % (tablespaceTable)


        self.indexes["02_pk_bl_runjob"] = \
          """ALTER TABLE bl_runjob ADD
               (CONSTRAINT bl_runjob_pk PRIMARY KEY (id) %s)""" % tablespaceIndex


        self.indexes["01_pk_bl_status"] = \
          """ALTER TABLE bl_status ADD
               (CONSTRAINT bl_status_pk PRIMARY KEY (id) %s)""" % tablespaceIndex


        self.constraints["01_fk_bl_runjob"] = \
            """ALTER TABLE bl_runjob ADD
               (CONSTRAINT bl_runjob_fk1 FOREIGN KEY(wmbs_id)
               REFERENCES wmbs_job(id) ON DELETE CASCADE)"""


        self.constraints["02_fk_bl_runjob"] = \
            """ALTER TABLE bl_runjob ADD
               (CONSTRAINT bl_runjob_fk2 FOREIGN KEY(sched_status)
               REFERENCES bl_status(id) ON DELETE CASCADE)"""


        self.constraints["03_fk_bl_runjob"] = \
            """ALTER TABLE bl_runjob ADD
               (CONSTRAINT bl_runjob_fk3 FOREIGN KEY(user_id)
               REFERENCES wmbs_users(id) ON DELETE CASCADE)"""

        self.constraints["04_fk_bl_runjob"] = \
            """ALTER TABLE bl_runjob ADD
               (CONSTRAINT bl_runjob_fk4 FOREIGN KEY(location)
               REFERENCES wmbs_location(id) ON DELETE CASCADE)"""

        j = 50
        for i in self.sequence_tables:
            seqname = '%s_SEQ' % i
            self.create["%s%s" % (j, seqname)] = \
              "CREATE SEQUENCE %s start with 1 increment by 1 nomaxvalue cache 100" \
              % seqname

        return
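The loop at the end of this example builds the Oracle sequence statements for bl_status and bl_runjob. Because j is never incremented, both keys carry the same "50" prefix, yet they remain unique since the sequence name is part of the key. A standalone sketch of exactly what the loop registers:

# Standalone sketch of the sequence key-generation loop above.
sequence_tables = ["bl_status", "bl_runjob"]
create = {}

j = 50
for table in sequence_tables:
    seqname = "%s_SEQ" % table
    create["%s%s" % (j, seqname)] = (
        "CREATE SEQUENCE %s start with 1 increment by 1 nomaxvalue cache 100" % seqname
    )

print(sorted(create))
# ['50bl_runjob_SEQ', '50bl_status_SEQ']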
Example #53
    def __init__(self, logger=None, dbi=None, param=None):
        if dbi is None:
            myThread = threading.currentThread()
            dbi = myThread.dbi
            logger = myThread.logger
        DBCreator.__init__(self, logger, dbi)

        self.create = {}
        self.constraints = {}
        #  //
        # // Define create statements for each table
        #//
        #  //
        self.create['b_tasks'] = """
        CREATE TABLE tasks(
        tm_taskname VARCHAR(255) NOT NULL,
        tm_activity VARCHAR(255),
        panda_jobset_id NUMBER(11),
        tm_task_status VARCHAR(255) NOT NULL,
        tm_start_time TIMESTAMP,
        tm_start_injection TIMESTAMP,
        tm_end_injection TIMESTAMP,
        tm_task_failure CLOB,
        tm_job_sw VARCHAR(255) NOT NULL,
        tm_job_arch VARCHAR(255),
        tm_input_dataset VARCHAR(500),
        tm_use_parent NUMBER(1),
        tm_site_whitelist VARCHAR(4000),
        tm_site_blacklist VARCHAR(4000),
        tm_split_algo VARCHAR(255) NOT NULL,
        tm_split_args CLOB NOT NULL,
        tm_totalunits NUMBER(38),
        tm_user_sandbox VARCHAR(255) NOT NULL,
        tm_cache_url VARCHAR(255) NOT NULL,
        tm_username VARCHAR(255) NOT NULL,
        tm_user_dn VARCHAR(255) NOT NULL,
        tm_user_vo VARCHAR(255) NOT NULL,
        tm_user_role VARCHAR(255),
        tm_user_group VARCHAR(255),
        tm_publish_name VARCHAR(500),
        tm_asyncdest VARCHAR(255) NOT NULL,
        tm_dbs_url VARCHAR(255) NOT NULL,
        tm_publish_dbs_url VARCHAR(255),
        tm_publication VARCHAR(1) NOT NULL,
        tm_outfiles CLOB,
        tm_tfile_outfiles CLOB,
        tm_edm_outfiles CLOB,
        tm_transformation VARCHAR(255) NOT NULL,
        tm_job_type VARCHAR(255) NOT NULL,
        tm_generator VARCHAR(255),
        tm_events_per_lumi NUMBER(38),
        tm_arguments CLOB,
        panda_resubmitted_jobs CLOB,
        tm_save_logs VARCHAR(1) NOT NULL,
        tw_name VARCHAR(255),
        tm_user_infiles VARCHAR(4000),
        tm_maxjobruntime NUMBER(38),
        tm_numcores NUMBER(38),
        tm_maxmemory NUMBER(38),
        tm_priority NUMBER(38),
        tm_output_dataset CLOB,
        tm_task_warnings CLOB DEFAULT '[]',
        tm_user_webdir VARCHAR(1000),
        tm_scriptexe VARCHAR(255),
        tm_scriptargs VARCHAR(4000),
        tm_extrajdl VARCHAR(1000),
        tm_asourl VARCHAR(4000),
        CONSTRAINT taskname_pk PRIMARY KEY(tm_taskname),
        CONSTRAINT check_tm_publication CHECK (tm_publication IN ('T', 'F')),
        CONSTRAINT check_tm_save_logs CHECK (tm_save_logs IN ('T', 'F'))
        )
        """
        self.create['c_jobgroups'] = """
        CREATE TABLE jobgroups(
        tm_jobgroups_id NUMBER(38) NOT NULL,
        tm_taskname VARCHAR(255) NOT NULL,
        panda_jobdef_id NUMBER(11),
        panda_jobdef_status VARCHAR(255) NOT NULL,
        tm_data_blocks CLOB,
        panda_jobgroup_failure CLOB,
        tm_user_dn VARCHAR(255) NOT NULL,
        CONSTRAINT taskname_fk FOREIGN KEY(tm_taskname) references
            tasks(tm_taskname)
            ON DELETE CASCADE,
        CONSTRAINT jobgroup_id_pk PRIMARY KEY(tm_jobgroups_id)
        )
        """
        self.create['c_jobgroups_id_seq'] = """
        CREATE SEQUENCE jobgroups_id_seq
        START WITH 1
        INCREMENT BY 1
        NOMAXVALUE"""
        self.create['c_jobgroups_id_trg'] = """
        CREATE TRIGGER jobgroups_id_trg
        BEFORE INSERT ON jobgroups
        FOR EACH ROW
        BEGIN
        SELECT jobgroups_id_seq.nextval INTO :new.tm_jobgroups_id FROM dual;
        END;"""
Example #54
    def __init__(self, logger=None, dbi=None, param=None):
        if dbi is None:
            myThread = threading.currentThread()
            dbi = myThread.dbi
            logger = myThread.logger
        DBCreator.__init__(self, logger, dbi)

        self.create = {}
        self.constraints = {}
        #  //
        # // Define create statements for each table
        #//
        #  //
        # // State/Type tables: enumerated type lists
        #//  Type will be the type of workflow: MC, CmsGen, Reco, Skim etc
        self.create['a_reqmgr_request_type'] = """
        CREATE TABLE reqmgr_request_type(
        type_id NUMBER(11) NOT NULL,
        type_name VARCHAR(255) NOT NULL,
        PRIMARY KEY(type_id), UNIQUE(type_name)
        )
        """
        self.create['a_reqmgr_request_type_seq'] = """
        CREATE SEQUENCE reqmgr_request_type_seq
        START WITH 1
        INCREMENT BY 1
        NOMAXVALUE"""
        self.create['a_reqmgr_request_type_trg'] =  """
        CREATE TRIGGER reqmgr_request_type_trg
        BEFORE INSERT ON reqmgr_request_type
        FOR EACH ROW
        BEGIN
        SELECT reqmgr_request_type_seq.nextval INTO :new.type_id FROM dual;
        END;"""
        #  //
        # // states for a request to be in
        #//  new, untested, tested, approved, failed, aborted, junk etc
        self.create['b_reqmgr_request_status'] = """
        CREATE TABLE reqmgr_request_status(
        status_id NUMBER(11) NOT NULL,
        status_name VARCHAR(255) NOT NULL,
        PRIMARY KEY(status_id), UNIQUE(status_name)
        )
        """
        self.create['b_reqmgr_request_status_seq'] = """
        CREATE SEQUENCE reqmgr_request_status_seq
        START WITH 1
        INCREMENT BY 1
        NOMAXVALUE"""
        self.create['b_reqmgr_request_status_trg'] =  """
        CREATE TRIGGER reqmgr_request_status_trg
        BEFORE INSERT ON reqmgr_request_status
        FOR EACH ROW
        BEGIN
        SELECT reqmgr_request_status_seq.nextval INTO :new.status_id FROM dual;
        END;"""
        #  //
        # // Group definitions
        #//  Which group to bill, also allows a base priority modifier
        #  //which will allow certain groups to be raised/lowered
        # // with respect to each other.
        #//
        self.create['c_reqmgr_group'] = """
        CREATE TABLE reqmgr_group(

        group_id NUMBER(11) NOT NULL,
        group_name VARCHAR(255) NOT NULL,
        group_base_priority NUMBER(11) DEFAULT 0,

        UNIQUE(group_name),
        PRIMARY KEY(group_id)

        )
        """
        self.create['c_reqmgr_group_seq'] = """
        CREATE SEQUENCE reqmgr_group_seq
        START WITH 1
        INCREMENT BY 1
        NOMAXVALUE"""
        self.create['c_reqmgr_group_trg'] =  """
        CREATE TRIGGER reqmgr_group_trg
        BEFORE INSERT ON reqmgr_group
        FOR EACH ROW
        BEGIN
        SELECT reqmgr_group_seq.nextval INTO :new.group_id FROM dual;
        END;"""
        #  //
        # // Request Owner details
        #//  Actual person who makes a request.
        #  //
        # //
        #//
        #  //
        # //
        #//  Can also be used to prioritise users if need be. (We may want
        #  //to use this to track users who make a lot of bad requests
        # // and lower their priorities...)
        #//
        self.create['d_reqmgr_requestor'] = """

        CREATE TABLE reqmgr_requestor (

        requestor_id NUMBER(11) NOT NULL,
        requestor_hn_name VARCHAR(255) NOT NULL,
        contact_email VARCHAR(255) NOT NULL,

        requestor_dn_name VARCHAR(255),
        requestor_base_priority NUMBER(11) DEFAULT 0,

        PRIMARY KEY(requestor_id),
        UNIQUE (requestor_hn_name)


        )
        """
        self.create['d_reqmgr_requestor_seq'] = """
        CREATE SEQUENCE reqmgr_requestor_seq
        START WITH 1
        INCREMENT BY 1
        NOMAXVALUE"""
        self.create['d_reqmgr_requestor_trg'] =  """
        CREATE TRIGGER reqmgr_requestor_trg
        BEFORE INSERT ON reqmgr_requestor
        FOR EACH ROW
        BEGIN
        SELECT reqmgr_requestor_seq.nextval INTO :new.requestor_id FROM dual;
        END;"""

        #  //
        # //  requestor/group association, allows users to belong
        #//   to multiple groups
        #  //
        # //
        #//
        self.create['e_reqmgr_group_association'] = """
        CREATE TABLE reqmgr_group_association (

        association_id NUMBER(11) NOT NULL,
        requestor_id NUMBER(11) NOT NULL,
        group_id NUMBER(11) NOT NULL,

        PRIMARY KEY(association_id),
        UNIQUE(requestor_id, group_id),
        FOREIGN KEY(requestor_id) references reqmgr_requestor(requestor_id)
          ON DELETE CASCADE,
        FOREIGN KEY(group_id) references reqmgr_group(group_id)

        )
        """
        self.create['e_reqmgr_group_association_seq'] = """
        CREATE SEQUENCE reqmgr_group_association_seq
        START WITH 1
        INCREMENT BY 1
        NOMAXVALUE"""
        self.create['e_reqmgr_group_association_trg'] =  """
        CREATE TRIGGER reqmgr_group_association_trg
        BEFORE INSERT ON reqmgr_group_association
        FOR EACH ROW
        BEGIN
        SELECT reqmgr_group_association_seq.nextval INTO :new.association_id FROM dual;
        END;"""
        #  //
        # // Production/Processing teams
        #//  Simple team name for now, could be expanded to include
        #  //contact details, ProdAgent URLs etc
        # //
        #//
        self.create['f_reqmgr_teams'] = """
        CREATE TABLE reqmgr_teams (

        team_id NUMBER(11) NOT NULL,
        team_name VARCHAR(255),

        UNIQUE(team_name),
        PRIMARY KEY(team_id)

        )
        """
        self.create['f_reqmgr_teams_seq'] = """
        CREATE SEQUENCE reqmgr_teams_seq
        START WITH 1
        INCREMENT BY 1
        NOMAXVALUE"""
        self.create['f_reqmgr_teams_trg'] =  """
        CREATE TRIGGER reqmgr_teams_trg
        BEFORE INSERT ON reqmgr_teams
        FOR EACH ROW
        BEGIN
        SELECT reqmgr_teams_seq.nextval INTO :new.team_id FROM dual;
        END;"""
        #  //
        # // Main request table

        #//
        #  // just basics for now, anything else we want here?
        # //  Could generate GUIDs as request name.
        #//
        #  // workflow will be an LFN like name for the workflow spec file
        # //
        #//  basic estimates of request size in events or files can be added
        self.create['g_reqmgr_request'] = """
        CREATE TABLE  reqmgr_request(

        request_id NUMBER(11) NOT NULL,
        request_name VARCHAR(255) NOT NULL,
        request_type NUMBER(11) NOT NULL,
        request_status NUMBER(11) NOT NULL,
        request_priority NUMBER(11) NOT NULL,
        requestor_group_id NUMBER(11) NOT NULL,
        workflow VARCHAR(255) NOT NULL,

        request_size_events NUMBER(11) DEFAULT 0,
        request_size_files NUMBER(11)  DEFAULT 0,

        prep_id VARCHAR(255),

        UNIQUE(request_name),
        FOREIGN KEY(request_type) REFERENCES reqmgr_request_type(type_id),
        FOREIGN KEY(request_status) references
                    reqmgr_request_status(status_id),
        FOREIGN KEY(requestor_group_id) references
                    reqmgr_group_association(association_id),
        PRIMARY KEY(request_id)


        )
        """
        self.create['g_reqmgr_request_seq'] = """
        CREATE SEQUENCE reqmgr_request_seq
        START WITH 1
        INCREMENT BY 1
        NOMAXVALUE"""
        self.create['g_reqmgr_request_trg'] =  """
        CREATE TRIGGER reqmgr_request_trg
        BEFORE INSERT ON reqmgr_request
        FOR EACH ROW
        BEGIN
        SELECT reqmgr_request_seq.nextval INTO :new.request_id FROM dual;
        END;"""

        #  //
        # // Assignment of request to team(s)
        #//  Also includes priority modifier to
        #  //allow assignments to teams at different priorities
        # //
        #//
        self.create['h_reqmgr_assignment'] = """
        CREATE TABLE reqmgr_assignment (

        request_id NUMBER(11) NOT NULL,
        team_id NUMBER(11) NOT NULL,
        priority_modifier NUMBER(11) DEFAULT 0,

        UNIQUE( request_id, team_id),
        FOREIGN KEY (request_id) references
           reqmgr_request(request_id) ON DELETE CASCADE,
        FOREIGN KEY (team_id) references
           reqmgr_teams(team_id)

        )
        """
        #  //
        # // Request Attributes useful for scheduling and
        #//  prioritisation

        #  //
        # // Input datasets are needed for scheduling, need to track
        #//  which one is main input, any secondary input or pileup

        self.create['i_reqmgr_input_dataset'] = """
        CREATE TABLE reqmgr_input_dataset (

        dataset_id NUMBER(11) NOT NULL,
        request_id NUMBER(11) NOT NULL,
        dataset_name VARCHAR(255) NOT NULL,
        dataset_type VARCHAR(11),
        CONSTRAINT dataset_type_cons 
                CHECK (dataset_type IN ('source', 'secondary', 'pileup')),
        FOREIGN KEY(request_id) references
                reqmgr_request(request_id)
                  ON DELETE CASCADE,
        PRIMARY KEY(dataset_id)

        )
        """
        self.create['i_reqmgr_input_dataset_seq'] = """
        CREATE SEQUENCE reqmgr_input_dataset_seq
        START WITH 1
        INCREMENT BY 1
        NOMAXVALUE"""
        self.create['i_reqmgr_input_dataset_trg'] =  """
        CREATE TRIGGER reqmgr_input_dataset_trg
        BEFORE INSERT ON reqmgr_input_dataset
        FOR EACH ROW
        BEGIN
        SELECT reqmgr_input_dataset_seq.nextval INTO :new.dataset_id FROM dual;
        END;"""

        #  //
        # // Output datasets arent necessarily needed for scheduling
        #//  but we need some idea of what comes from each job
        #  // and tests will give us size per event estimates which will
        # //  be needed for storage planning
        #//
        self.create['j_reqmgr_output_dataset'] = """
        CREATE TABLE reqmgr_output_dataset (

        dataset_id NUMBER(11) NOT NULL,
        request_id NUMBER(11) NOT NULL,
        dataset_name VARCHAR(255) NOT NULL,
        size_per_event NUMBER(11),
        custodial_site VARCHAR(255),
        FOREIGN KEY(request_id) references
                reqmgr_request(request_id)
                  ON DELETE CASCADE,
        PRIMARY KEY(dataset_id)
        )
        """
        self.create['j_reqmgr_output_dataset_seq'] = """
        CREATE SEQUENCE reqmgr_output_dataset_seq
        START WITH 1
        INCREMENT BY 1
        NOMAXVALUE"""
        self.create['j_reqmgr_output_dataset_trg'] =  """
        CREATE TRIGGER reqmgr_output_dataset_trg
        BEFORE INSERT ON reqmgr_output_dataset
        FOR EACH ROW
        BEGIN
        SELECT reqmgr_output_dataset_seq.nextval INTO :new.dataset_id FROM dual;
        END;"""
        #  //
        # //  CMSSW version requirements are needed for planning
        #//   associate a set of SW versions with a request
        #  //
        # //  Since ReqMgr will have to create workflows, using
        #//   a release, this may be something we can use to show what
        #  // releases are available for production as well.
        # //
        #//
        self.create['k_reqmgr_software'] = """
        CREATE TABLE reqmgr_software (

        software_id NUMBER(11) NOT NULL,
        software_name VARCHAR(255) NOT NULL,

        UNIQUE(software_name),
        PRIMARY KEY(software_id)
        )
        """
        self.create['k_reqmgr_software_seq'] = """
        CREATE SEQUENCE reqmgr_software_seq
        START WITH 1
        INCREMENT BY 1
        NOMAXVALUE"""
        self.create['k_reqmgr_software_trg'] =  """
        CREATE TRIGGER reqmgr_software_trg
        BEFORE INSERT ON reqmgr_software
        FOR EACH ROW
        BEGIN
        SELECT reqmgr_software_seq.nextval INTO :new.software_id FROM dual;
        END;"""
        #  //
        # // Association between SW versions and a request
        #//  (chained processing could be multiple versions)
        self.create['l_reqmgr_software_dependency'] = """
        CREATE TABLE reqmgr_software_dependency (

           request_id NUMBER(11) NOT NULL,
           software_id NUMBER(11) NOT NULL,

           UNIQUE(request_id, software_id),
           FOREIGN KEY(software_id) references
                reqmgr_software(software_id),

           FOREIGN KEY(request_id) references
                reqmgr_request(request_id)
                  ON DELETE CASCADE
        )
        """

        #  //
        # // Request status and assignment tables used for tracking
        #//  and monitoring progress of a request
        #  // Should be coarse grained wide interval update (Eg per alloc
        # //  in PM) enough to give a big picture. Finer details can come
        #//   from drilling down to the PM/PA if needed
        self.create['m_reqmgr_progress_update'] = """

        CREATE TABLE reqmgr_progress_update (

        request_id NUMBER(11) NOT NULL,
        update_time TIMESTAMP,

        events_written NUMBER(11) DEFAULT 0,
        events_merged NUMBER(11) DEFAULT 0,
        files_written NUMBER(11) DEFAULT 0,
        files_merged NUMBER(11) DEFAULT 0,
        associated_dataset VARCHAR(255) DEFAULT NULL,
        time_per_event NUMBER(11) DEFAULT 0,
        size_per_event NUMBER(11) DEFAULT 0,
        percent_success FLOAT(11) DEFAULT 0,
        percent_complete FLOAT(11) DEFAULT 0,

        FOREIGN KEY(request_id) references
            reqmgr_request(request_id)
               ON DELETE CASCADE
        )
        """
        #  //
        # //  Attach messages to a request, eg:
        #//   "Request is now 50% done"
        #  // "Request was aborted due to failures"
        # //  "Which utter truncheon gave us a broken cfg?"
        #//   etc. Some kind of verbose summary
        self.create['n_reqmgr_message'] = """
        CREATE TABLE reqmgr_message(

        request_id NUMBER(11) NOT NULL,
        update_time TIMESTAMP,
        message VARCHAR2(1000) NOT NULL,

        FOREIGN KEY(request_id) references
            reqmgr_request(request_id)
               ON DELETE CASCADE
        )
        """
        #  //
        # // track the assigned ProdMgr
        #//
        self.create['o_reqmgr_assigned_prodmgr'] = """
        CREATE TABLE reqmgr_assigned_prodmgr (

        request_id NUMBER(11) NOT NULL,
        prodmgr_id VARCHAR(255) NOT NULL,

        UNIQUE(request_id, prodmgr_id),
        FOREIGN KEY (request_id) references
                reqmgr_request(request_id)
                  ON DELETE CASCADE
        )
        """
        #  //
        # // can also track assigned prodagents
        #//  expect this to flesh out more later, when we add
        #  //details of all PA instances and resources to enable
        # // better scheduling decisions, For now will probably just
        #// serve as a link to the HTTPFrontend to provide detailed
        #  //monitoring.
        # //
        #//
        self.create['p_reqmgr_assigned_prodagent'] = """
        CREATE TABLE reqmgr_assigned_prodagent (

        request_id NUMBER(11) NOT NULL,
        prodagent_id VARCHAR(255) NOT NULL,

        UNIQUE(request_id, prodagent_id),
        FOREIGN KEY (request_id) references
                reqmgr_request(request_id)
                  ON DELETE CASCADE

        )
        """

        self.create['q_reqmgr_campaign'] = """
        CREATE TABLE reqmgr_campaign (

        campaign_id NUMBER(11) NOT NULL,
        campaign_name VARCHAR(255),

        UNIQUE(campaign_name),
        PRIMARY KEY(campaign_id)

        )
        """
        self.create['q_reqmgr_campaign_seq'] = """
        CREATE SEQUENCE reqmgr_campaign_seq
        START WITH 1
        INCREMENT BY 1
        NOMAXVALUE"""
        self.create['q_reqmgr_campaign_trg'] =  """
        CREATE TRIGGER reqmgr_campaign_trg
        BEFORE INSERT ON reqmgr_campaign
        FOR EACH ROW
        BEGIN
        SELECT reqmgr_campaign_seq.nextval INTO :new.campaign_id FROM dual;
        END;"""

        self.create['r_reqmgr_campaign_assoc'] = """
        CREATE TABLE reqmgr_campaign_assoc (

        request_id NUMBER(11) NOT NULL,
        campaign_id NUMBER(11) NOT NULL,

        UNIQUE( request_id, campaign_id),
        FOREIGN KEY (request_id) references
           reqmgr_request(request_id) ON DELETE CASCADE,
        FOREIGN KEY (campaign_id) references
           reqmgr_campaign(campaign_id)

        ) 
        """

        for typeName in TypesList:
            sql = "INSERT INTO reqmgr_request_type (type_name) VALUES ('%s')" % typeName
            self.inserts["reqtype" + typeName] = sql

        i = 0
        for status in StatusList:
            sql = "INSERT INTO reqmgr_request_status (status_name) VALUES('%s')" % status
            self.inserts["%3d_reqstatus" % i] = sql
            i = i + 1
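Nearly every table in this ReqMgr schema repeats the same Oracle idiom: a NUMBER(11) surrogate key filled by a dedicated sequence through a BEFORE INSERT trigger. The helper below is a hypothetical sketch of that pattern, not part of the schema module; only the generated DDL mirrors the reqmgr_* statements above.

# Hypothetical helper sketching the sequence + trigger idiom used for the
# reqmgr_* tables above (Oracle emulation of an auto-increment id).
def oracle_autoincrement(table, pk_column):
    """Return the (sequence, trigger) DDL pair for a table's surrogate key."""
    seq = """
    CREATE SEQUENCE {table}_seq
    START WITH 1
    INCREMENT BY 1
    NOMAXVALUE""".format(table=table)

    trg = """
    CREATE TRIGGER {table}_trg
    BEFORE INSERT ON {table}
    FOR EACH ROW
    BEGIN
    SELECT {table}_seq.nextval INTO :new.{pk} FROM dual;
    END;""".format(table=table, pk=pk_column)

    return seq, trg

# For example, oracle_autoincrement("reqmgr_teams", "team_id") mirrors the
# f_reqmgr_teams_seq / f_reqmgr_teams_trg statements defined above.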
Example #55
    def __init__(self, logger=None, dbi=None, params=None):
        """
        _init_

        Call the base class's constructor and create all necessary tables,
        constraints and inserts.
        """
        myThread = threading.currentThread()

        DBCreator.__init__(self, myThread.logger, myThread.dbi)

        #
        # Tables, functions, procedures and sequences
        #
        self.create[len(self.create)] = \
            """CREATE TABLE dbsbuffer_dataset (
                 id              INTEGER      AUTO_INCREMENT,
                 path            VARCHAR(500) COLLATE latin1_general_cs NOT NULL,
                 processing_ver  VARCHAR(255),
                 acquisition_era VARCHAR(255),
                 valid_status    VARCHAR(20),
                 global_tag      VARCHAR(255),
                 parent          VARCHAR(500),
                 prep_id         VARCHAR(255),
                 PRIMARY KEY (id),
                 CONSTRAINT uq_dbs_dat UNIQUE (path)
               ) ENGINE=InnoDB"""

        self.create[len(self.create)] = \
            """CREATE TABLE dbsbuffer_dataset_subscription (
                 id                     INTEGER      AUTO_INCREMENT,
                 dataset_id             INTEGER      NOT NULL,
                 site                   VARCHAR(100) NOT NULL,
                 custodial              INTEGER      DEFAULT 0,
                 auto_approve           INTEGER      DEFAULT 0,
                 move                   INTEGER      DEFAULT 0,
                 priority               VARCHAR(10)  DEFAULT 'Low',
                 subscribed             INTEGER      DEFAULT 0,
                 phedex_group           VARCHAR(100),
                 delete_blocks          INTEGER,
                 PRIMARY KEY (id),
                 CONSTRAINT uq_dbs_dat_sub UNIQUE (dataset_id, site, custodial, auto_approve, move, priority)
               ) ENGINE=InnoDB"""

        self.create[len(self.create)] = \
            """CREATE TABLE dbsbuffer_algo (
                 id             INTEGER       AUTO_INCREMENT,
                 app_name       VARCHAR(100),
                 app_ver        VARCHAR(100),
                 app_fam        VARCHAR(100),
                 pset_hash      VARCHAR(700),
                 config_content LONGTEXT,
                 in_dbs         INTEGER,
                 PRIMARY KEY (id),
                 CONSTRAINT uq_dbs_alg UNIQUE (app_name, app_ver, app_fam, pset_hash)
               ) ENGINE=InnoDB"""

        self.create[len(self.create)] = \
              """CREATE TABLE dbsbuffer_algo_dataset_assoc (
                   id         INTEGER AUTO_INCREMENT,
                   algo_id    INTEGER NOT NULL,
                   dataset_id INTEGER NOT NULL,
                   in_dbs     INTEGER DEFAULT 0,
                   PRIMARY KEY (id)
                 ) ENGINE = InnoDB"""

        self.create[len(self.create)] = \
            """CREATE TABLE dbsbuffer_workflow (
                 id                           INTEGER       AUTO_INCREMENT,
                 name                         VARCHAR(700),
                 task                         VARCHAR(700),
                 block_close_max_wait_time    INTEGER,
                 block_close_max_files        INTEGER,
                 block_close_max_events       INTEGER,
                 block_close_max_size         BIGINT,
                 completed                    INTEGER       DEFAULT 0,
                 PRIMARY KEY (id),
                 CONSTRAINT uq_dbs_wor UNIQUE (name, task)
               ) ENGINE = InnoDB"""

        self.create[len(self.create)] = \
            """CREATE TABLE dbsbuffer_file (
                 id                    INTEGER      AUTO_INCREMENT,
                 lfn                   VARCHAR(500) NOT NULL,
                 filesize              BIGINT,
                 events                INTEGER,
                 dataset_algo          INTEGER      NOT NULL,
                 block_id              INTEGER,
                 status                VARCHAR(20),
                 in_phedex             INTEGER      DEFAULT 0,
                 workflow              INTEGER,
                 LastModificationDate  INTEGER,
                 PRIMARY KEY (id),
                 CONSTRAINT uq_dbs_fil UNIQUE (lfn)
               ) ENGINE=InnoDB"""

        self.create[len(self.create)] = \
            """CREATE TABLE dbsbuffer_file_parent (
                 child  INTEGER NOT NULL,
                 parent INTEGER NOT NULL,
                 CONSTRAINT pk_dbs_fil_par PRIMARY KEY (child, parent)
               ) ENGINE=InnoDB
               """

        self.create[len(self.create)] = \
            """CREATE TABLE dbsbuffer_file_runlumi_map (
                 filename    INTEGER NOT NULL,
                 run         INTEGER NOT NULL,
                 lumi        INTEGER NOT NULL
               ) ENGINE=InnoDB"""

        self.create[len(self.create)] = \
            """CREATE TABLE dbsbuffer_location (
                 id       INTEGER      AUTO_INCREMENT,
                 se_name  VARCHAR(255) NOT NULL,
                 PRIMARY KEY (id),
                 CONSTRAINT uq_dbs_loc UNIQUE (se_name)
               ) ENGINE=InnoDB"""

        self.create[len(self.create)] = \
            """CREATE TABLE dbsbuffer_file_location (
                 filename INTEGER NOT NULL,
                 location INTEGER NOT NULL,
                 CONSTRAINT pk_dbs_fil_loc PRIMARY KEY (filename, location)
               ) ENGINE=InnoDB"""

        self.create[len(self.create)] = \
            """CREATE TABLE dbsbuffer_block (
                 id           INTEGER      AUTO_INCREMENT,
                 dataset_id   INTEGER      NOT NULL,
                 blockname    VARCHAR(250) NOT NULL,
                 location     INTEGER      NOT NULL,
                 create_time  INTEGER,
                 status       VARCHAR(20),
                 deleted      INTEGER      DEFAULT 0,
                 PRIMARY KEY (id),
                 CONSTRAINT uq_dbs_blo UNIQUE (blockname, location)
               ) ENGINE=InnoDB"""

        self.create[len(self.create)] = \
            """CREATE TABLE dbsbuffer_checksum_type (
                 id   INTEGER      AUTO_INCREMENT,
                 type VARCHAR(255),
                 PRIMARY KEY (id)
               ) ENGINE=InnoDB"""

        self.create[len(self.create)] = \
            """CREATE TABLE dbsbuffer_file_checksums (
                 fileid  INTEGER,
                 typeid  INTEGER,
                 cksum   VARCHAR(100),
                 CONSTRAINT pk_dbs_fil_che PRIMARY KEY (fileid, typeid)
               ) ENGINE=InnoDB"""

        #
        # Indexes
        #
        self.indexes[len(self.indexes)] = \
            """CREATE INDEX idx_dbs_fil_che_1 ON dbsbuffer_file_checksums (typeid)"""

        self.indexes[len(self.indexes)] = \
            """CREATE INDEX idx_dbs_fil_che_2 ON dbsbuffer_file_checksums (fileid)"""

        self.indexes[len(self.indexes)] = \
            """CREATE INDEX idx_dbs_dat_sub_1 ON dbsbuffer_dataset_subscription (dataset_id)"""

        self.indexes[len(self.indexes)] = \
            """CREATE INDEX idx_dbs_fil_par_1 ON dbsbuffer_file_parent (child)"""

        self.indexes[len(self.indexes)] = \
            """CREATE INDEX idx_dbs_fil_par_2 ON dbsbuffer_file_parent (parent)"""

        self.indexes[len(self.indexes)] = \
            """CREATE INDEX idx_dbs_blo_1 ON dbsbuffer_block (location)"""

        self.indexes[len(self.indexes)] = \
            """CREATE INDEX idx_dbs_blo_2 ON dbsbuffer_block (dataset_id)"""

        self.indexes[len(self.indexes)] = \
            """CREATE INDEX idx_dbs_alg_ass_1 ON dbsbuffer_algo_dataset_assoc (dataset_id)"""

        self.indexes[len(self.indexes)] = \
            """CREATE INDEX idx_dbs_alg_ass_2 ON dbsbuffer_algo_dataset_assoc (algo_id)"""

        self.indexes[len(self.indexes)] = \
            """CREATE INDEX idx_dbs_fil_run_1 ON dbsbuffer_file_runlumi_map (filename)"""

        self.indexes[len(self.indexes)] = \
            """CREATE INDEX idx_dbs_fil_loc_1 ON dbsbuffer_file_location (location)"""

        self.indexes[len(self.indexes)] = \
            """CREATE INDEX idx_dbs_fil_loc_2 ON dbsbuffer_file_location (filename)"""

        self.indexes[len(self.indexes)] = \
            """CREATE INDEX idx_dbs_fil_1 ON dbsbuffer_file (workflow)"""

        #
        # Constraints
        #
        self.constraints[len(self.constraints)] = \
            """ALTER TABLE dbsbuffer_file_checksums
                 ADD CONSTRAINT fk_file_checksums_typeid
                 FOREIGN KEY (typeid)
                 REFERENCES dbsbuffer_checksum_type(id)
                 ON DELETE CASCADE"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE dbsbuffer_file_checksums
                 ADD CONSTRAINT fk_file_checksums_fileid
                 FOREIGN KEY (fileid)
                 REFERENCES dbsbuffer_file(id)
                 ON DELETE CASCADE"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE dbsbuffer_dataset_subscription
                 ADD CONSTRAINT fk_dsetsubscription_datasetid
                 FOREIGN KEY (dataset_id)
                 REFERENCES dbsbuffer_dataset(id)
                 ON DELETE CASCADE"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE dbsbuffer_file_parent
                 ADD CONSTRAINT fk_file_parent_child
                 FOREIGN KEY (child)
                 REFERENCES dbsbuffer_file(id)
                 ON DELETE CASCADE"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE dbsbuffer_file_parent
                 ADD CONSTRAINT fk_file_parent_parent
                 FOREIGN KEY (parent)
                 REFERENCES dbsbuffer_file(id)
                 ON DELETE CASCADE"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE dbsbuffer_block
                 ADD CONSTRAINT fk_block_location
                 FOREIGN KEY (location)
                 REFERENCES dbsbuffer_location(id)
                 ON DELETE CASCADE"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE dbsbuffer_block
                 ADD CONSTRAINT fk_block_dataset_id
                 FOREIGN KEY (dataset_id)
                 REFERENCES dbsbuffer_dataset(id)
                 ON DELETE CASCADE"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE dbsbuffer_algo_dataset_assoc
                 ADD CONSTRAINT fk_algodset_assoc_dataset_id
                 FOREIGN KEY (dataset_id)
                 REFERENCES dbsbuffer_dataset(id)
                 ON DELETE CASCADE"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE dbsbuffer_algo_dataset_assoc
                 ADD CONSTRAINT fk_algodset_assoc_algo_id
                 FOREIGN KEY (algo_id)
                 REFERENCES dbsbuffer_algo(id)
                 ON DELETE CASCADE"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE dbsbuffer_file_runlumi_map
                 ADD CONSTRAINT fk_file_runlumi_filename
                 FOREIGN KEY (filename)
                 REFERENCES dbsbuffer_file(id)
                 ON DELETE CASCADE"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE dbsbuffer_file_location
                 ADD CONSTRAINT fk_file_location_location
                 FOREIGN KEY (location)
                 REFERENCES dbsbuffer_location(id)
                 ON DELETE CASCADE"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE dbsbuffer_file_location
                 ADD CONSTRAINT fk_file_location_filename
                 FOREIGN KEY (filename)
                 REFERENCES dbsbuffer_file(id)
                 ON DELETE CASCADE"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE dbsbuffer_file
                 ADD CONSTRAINT fk_file_workflow
                 FOREIGN KEY (workflow)
                 REFERENCES dbsbuffer_workflow(id)
                 ON DELETE CASCADE"""

        checksumTypes = ['cksum', 'adler32', 'md5']
        for i in checksumTypes:
            checksumTypeQuery = """INSERT INTO dbsbuffer_checksum_type
                                   (type)
                                   VALUES ('%s')
                                   """ % (i)
            self.inserts["wmbs_checksum_type_%s" % (i)] = checksumTypeQuery
Example #56
    def __init__(self, logger=None, dbi=None, param=None):
        if dbi is None:
            myThread = threading.currentThread()
            dbi = myThread.dbi
            logger = myThread.logger
        DBCreator.__init__(self, logger, dbi)

        self.create = {}
        self.constraints = {}
        #  //
        # // Define create statements for each table
        #//
        #  //255 chars for tm_task_status is even too much
        self.create['b_tasks'] = """
        CREATE TABLE tasks(
        tm_taskname VARCHAR(255) NOT NULL,
        tm_activity VARCHAR(255),
        panda_jobset_id NUMBER(11),
        tm_task_status VARCHAR(255) NOT NULL,
        tm_task_command VARCHAR(20),
        tm_start_time TIMESTAMP,
        tm_start_injection TIMESTAMP,
        tm_end_injection TIMESTAMP,
        tm_task_failure CLOB,
        tm_job_sw VARCHAR(255) NOT NULL,
        tm_job_arch VARCHAR(255),
        tm_input_dataset VARCHAR(500),
        tm_nonvalid_input_dataset VARCHAR(1) DEFAULT 'T',
        tm_use_parent NUMBER(1),
        tm_site_whitelist VARCHAR(4000),
        tm_site_blacklist VARCHAR(4000),
        tm_split_algo VARCHAR(255) NOT NULL,
        tm_split_args CLOB NOT NULL,
        tm_totalunits NUMBER(38),
        tm_user_sandbox VARCHAR(255) NOT NULL,
        tm_cache_url VARCHAR(255) NOT NULL,
        tm_username VARCHAR(255) NOT NULL,
        tm_user_dn VARCHAR(255) NOT NULL,
        tm_user_vo VARCHAR(255) NOT NULL,
        tm_user_role VARCHAR(255),
        tm_user_group VARCHAR(255),
        tm_publish_name VARCHAR(500),
        tm_publish_groupname VARCHAR(1) DEFAULT 'F',
        tm_asyncdest VARCHAR(255) NOT NULL,
        tm_dbs_url VARCHAR(255) NOT NULL,
        tm_publish_dbs_url VARCHAR(255),
        tm_publication VARCHAR(1) NOT NULL,
        tm_outfiles CLOB,
        tm_tfile_outfiles CLOB,
        tm_edm_outfiles CLOB,
        tm_job_type VARCHAR(255) NOT NULL,
        tm_generator VARCHAR(255),
        tm_events_per_lumi NUMBER(38),
        tm_arguments CLOB,
        panda_resubmitted_jobs CLOB,
        tm_save_logs VARCHAR(1) NOT NULL,
        tw_name VARCHAR(255),
        tm_user_infiles VARCHAR(4000),
        tm_maxjobruntime NUMBER(38),
        tm_numcores NUMBER(38),
        tm_maxmemory NUMBER(38),
        tm_priority NUMBER(38),
        tm_output_dataset CLOB,
        tm_task_warnings CLOB DEFAULT '[]',
        tm_user_webdir VARCHAR(1000),
        tm_scriptexe VARCHAR(255),
        tm_scriptargs VARCHAR(4000),
        tm_extrajdl VARCHAR(1000),
        tm_asourl VARCHAR(4000),
        tm_collector VARCHAR(1000),
        tm_schedd VARCHAR(255),
        tm_dry_run VARCHAR(1),
        tm_user_files CLOB DEFAULT '[]',
        tm_transfer_outputs VARCHAR(1),
        tm_output_lfn VARCHAR(1000),
        tm_ignore_locality VARCHAR(1),
        tm_fail_limit NUMBER(38),
        tm_one_event_mode VARCHAR(1),
        tm_secondary_input_dataset VARCHAR(500),
        tm_primary_dataset VARCHAR(255),
        tm_last_publication TIMESTAMP,
        tm_debug_files VARCHAR(255),
        clusterid NUMBER(10),
        tm_asodb VARCHAR(20),
        tm_ignore_global_blacklist VARCHAR(1),
        tm_submitter_ip_addr VARCHAR(45),
        tm_DDM_reqid NUMBER(38),
        CONSTRAINT taskname_pk PRIMARY KEY(tm_taskname),
        CONSTRAINT check_tm_publication CHECK (tm_publication IN ('T', 'F')),
        CONSTRAINT check_tm_publish_groupname CHECK (tm_publish_groupname IN ('T', 'F')),
        CONSTRAINT check_tm_save_logs CHECK (tm_save_logs IN ('T', 'F')),
        CONSTRAINT check_tm_dry_run CHECK (tm_dry_run IN ('T', 'F')),
        CONSTRAINT check_tm_transfer_outputs CHECK (tm_transfer_outputs IN ('T', 'F')),
        CONSTRAINT check_tm_ignore_locality CHECK (tm_ignore_locality IN ('T', 'F')),
        CONSTRAINT check_tm_one_event_mode CHECK (tm_one_event_mode IN ('T', 'F')),
        CONSTRAINT ck_tm_nonvalid_input_dataset CHECK (tm_nonvalid_input_dataset IN ('T', 'F'))
        )
        """
        self.create['c_jobgroups'] = """
        CREATE TABLE jobgroups(
        tm_jobgroups_id NUMBER(38) NOT NULL,
        tm_taskname VARCHAR(255) NOT NULL,
        panda_jobdef_id NUMBER(11),
        panda_jobdef_status VARCHAR(255) NOT NULL,
        tm_data_blocks CLOB,
        panda_jobgroup_failure CLOB,
        tm_user_dn VARCHAR(255) NOT NULL,
        CONSTRAINT taskname_fk FOREIGN KEY(tm_taskname) references
            tasks(tm_taskname)
            ON DELETE CASCADE,
        CONSTRAINT jobgroup_id_pk PRIMARY KEY(tm_jobgroups_id)
        )
        """
        self.create['c_jobgroups_id_seq'] = """
        CREATE SEQUENCE jobgroups_id_seq
        START WITH 1
        INCREMENT BY 1
        NOMAXVALUE"""
        self.create['c_jobgroups_id_trg'] = """
        CREATE TRIGGER jobgroups_id_trg
        BEFORE INSERT ON jobgroups
        FOR EACH ROW
        BEGIN
        SELECT jobgroups_id_seq.nextval INTO :new.tm_jobgroups_id FROM dual;
        END;"""
Example #57
    def __init__(self, logger = None, dbi = None, params = None):
        """
        _init_

        Call the DBCreator constructor and create the list of required tables.
        """
        myThread = threading.currentThread()

        if logger is None:
            logger = myThread.logger
        if dbi is None:
            dbi = myThread.dbi

        tablespaceTable = ""
        tablespaceIndex = ""
        if params:
            if params.has_key("tablespace_table"):
                tablespaceTable = "TABLESPACE %s" % params["tablespace_table"]
            if params.has_key("tablespace_index"):
                tablespaceIndex = "USING INDEX TABLESPACE %s" % params["tablespace_index"]

        DBCreator.__init__(self, logger, dbi)

        self.requiredTables = ["01wmbs_fileset",
                               "02wmbs_file_details",
                               "03wmbs_fileset_files",
                               "04wmbs_file_parent",
                               "05wmbs_file_runlumi_map",
                               "05wmbs_location_state",
                               "06wmbs_location",
                               "07wmbs_file_location",
                               "07wmbs_users",
                               "07wmbs_workflow",
                               "08wmbs_sub_types",
                               "08wmbs_workflow_output",
                               "09wmbs_subscription",
                               "10wmbs_subscription_validation",
                               "10wmbs_sub_files_acquired",
                               "10wmbs_sub_files_available",
                               "11wmbs_sub_files_failed",
                               "12wmbs_sub_files_complete",
                               "13wmbs_jobgroup",
                               "14wmbs_job_state",
                               "15wmbs_job",
                               "16wmbs_job_assoc",
                               "17wmbs_job_mask",
                               "18wmbs_checksum_type",
                               "19wmbs_file_checksums",
                               "20wmbs_location_senames",]

        self.create["01wmbs_fileset"] = \
          """CREATE TABLE wmbs_fileset (
             id          INTEGER      PRIMARY KEY AUTO_INCREMENT,
             name        VARCHAR(500) NOT NULL,
             open        INT(1)       NOT NULL DEFAULT 0,
             last_update INTEGER      NOT NULL,
             UNIQUE (name))"""

        self.create["02wmbs_file_details"] = \
          """CREATE TABLE wmbs_file_details (
             id           INTEGER      PRIMARY KEY AUTO_INCREMENT,
             lfn          VARCHAR(500) NOT NULL,
             filesize     BIGINT,
             events       INTEGER,
             first_event  BIGINT       NOT NULL DEFAULT 0,
             merged       INT(1)       NOT NULL DEFAULT 0,
             UNIQUE (lfn))"""

        self.create["03wmbs_fileset_files"] = \
          """CREATE TABLE wmbs_fileset_files (
             fileid      INTEGER   NOT NULL,
             fileset     INTEGER   NOT NULL,
             insert_time INTEGER   NOT NULL,
             UNIQUE (fileid, fileset),
             FOREIGN KEY(fileset) references wmbs_fileset(id))"""

        self.create["04wmbs_file_parent"] = \
          """CREATE TABLE wmbs_file_parent (
             child  INTEGER NOT NULL,
             parent INTEGER NOT NULL,
             FOREIGN KEY (child)  references wmbs_file_details(id)
               ON DELETE CASCADE,
             FOREIGN KEY (parent) references wmbs_file_details(id),
             UNIQUE(child, parent))"""

        self.create["05wmbs_file_runlumi_map"] = \
          """CREATE TABLE wmbs_file_runlumi_map (
             fileid  INTEGER NOT NULL,
             run     INTEGER NOT NULL,
             lumi    INTEGER NOT NULL,
             FOREIGN KEY (fileid) references wmbs_file_details(id)
               ON DELETE CASCADE)"""

        self.create["05wmbs_location_state"] = \
            """CREATE TABLE wmbs_location_state (
               id   INTEGER PRIMARY KEY AUTO_INCREMENT,
               name VARCHAR(100) NOT NULL)"""

        self.create["06wmbs_location"] = \
          """CREATE TABLE wmbs_location (
             id          INTEGER      PRIMARY KEY AUTO_INCREMENT,
             site_name   VARCHAR(255) NOT NULL,
             cms_name    VARCHAR(255),
             ce_name     VARCHAR(255),
             running_slots   INTEGER,
             pending_slots   INTEGER,
             plugin      VARCHAR(255),
             state       INTEGER NOT NULL,
             UNIQUE(site_name),
             FOREIGN KEY (state) REFERENCES wmbs_location_state(id))"""

        self.create["07wmbs_users"] = \
          """CREATE TABLE wmbs_users (
             id        INTEGER      PRIMARY KEY AUTO_INCREMENT,
             cert_dn   VARCHAR(255) NOT NULL,
             name_hn   VARCHAR(255),
             owner     VARCHAR(255),
             grp       VARCHAR(255),
             group_name     VARCHAR(255),
             role_name      VARCHAR(255),
             UNIQUE(cert_dn, group_name, role_name))"""

        self.create["07wmbs_file_location"] = \
          """CREATE TABLE wmbs_file_location (
             fileid   INTEGER NOT NULL,
             location INTEGER NOT NULL,
             UNIQUE(fileid, location),
             FOREIGN KEY(fileid)   REFERENCES wmbs_file_details(id)
               ON DELETE CASCADE,
             FOREIGN KEY(location) REFERENCES wmbs_location(id)
               ON DELETE CASCADE)"""

        self.create["07wmbs_workflow"] = \
          """CREATE TABLE wmbs_workflow (
             id           INTEGER          PRIMARY KEY AUTO_INCREMENT,
             spec         VARCHAR(700)     NOT NULL,
             name         VARCHAR(700)     NOT NULL,
             task         VARCHAR(700)     NOT NULL,
             type         VARCHAR(255),
             owner        INTEGER          NOT NULL,
             alt_fs_close INT(1)           NOT NULL,
             injected     INT(1)           DEFAULT 0,
             priority     INTEGER UNSIGNED DEFAULT 0,
             FOREIGN KEY (owner)
             REFERENCES wmbs_users(id) ON DELETE CASCADE) """


        self.indexes["03_pk_wmbs_workflow"] = \
          """ALTER TABLE wmbs_workflow ADD
               (CONSTRAINT wmbs_workflow_unique UNIQUE (name, spec, task))"""


        self.create["08wmbs_workflow_output"] = \
          """CREATE TABLE wmbs_workflow_output (
             workflow_id           INTEGER NOT NULL,
             output_identifier     VARCHAR(255) NOT NULL,
             output_fileset        INTEGER NOT NULL,
             merged_output_fileset INTEGER,
             FOREIGN KEY(workflow_id)  REFERENCES wmbs_workflow(id)
               ON DELETE CASCADE,
             FOREIGN KEY(output_fileset)  REFERENCES wmbs_fileset(id)
               ON DELETE CASCADE,
             FOREIGN KEY(merged_output_fileset)  REFERENCES wmbs_fileset(id)
               ON DELETE CASCADE)
             """

        self.create["08wmbs_sub_types"] = \
          """CREATE TABLE wmbs_sub_types (
               id   INTEGER      PRIMARY KEY AUTO_INCREMENT,
               name VARCHAR(255) NOT NULL,
               priority INTEGER DEFAULT 0,
               UNIQUE(name))"""

        self.create["09wmbs_subscription"] = \
          """CREATE TABLE wmbs_subscription (
             id          INTEGER      PRIMARY KEY AUTO_INCREMENT,
             fileset     INTEGER      NOT NULL,
             workflow    INTEGER      NOT NULL,
             split_algo  VARCHAR(255) NOT NULL,
             subtype     INTEGER      NOT NULL,
             last_update INTEGER      NOT NULL,
             finished    INT(1)       DEFAULT 0,
             FOREIGN KEY(fileset)
             REFERENCES wmbs_fileset(id) ON DELETE CASCADE,
             FOREIGN KEY(workflow)
             REFERENCES wmbs_workflow(id) ON DELETE CASCADE,
             FOREIGN KEY(subtype)
             REFERENCES wmbs_sub_types(id) ON DELETE CASCADE)"""

        self.create["10wmbs_subscription_validation"] = \
          """CREATE TABLE wmbs_subscription_validation (
             subscription_id INTEGER NOT NULL,
             location_id     INTEGER NOT NULL,
             valid           INTEGER,
             UNIQUE (subscription_id, location_id),
             FOREIGN KEY(subscription_id) REFERENCES wmbs_subscription(id)
               ON DELETE CASCADE,
             FOREIGN KEY(location_id) REFERENCES wmbs_location(id)
               ON DELETE CASCADE)"""

        self.create["10wmbs_sub_files_acquired"] = \
          """CREATE TABLE wmbs_sub_files_acquired (
             subscription INTEGER NOT NULL,
             fileid       INTEGER NOT NULL,
             PRIMARY KEY (subscription, fileid),
             FOREIGN KEY (subscription) REFERENCES wmbs_subscription(id)
               ON DELETE CASCADE,
             FOREIGN KEY (fileid)       REFERENCES wmbs_file_details(id)
               ON DELETE CASCADE)
             """

        self.create["10wmbs_sub_files_available"] = \
          """CREATE TABLE wmbs_sub_files_available (
             subscription INTEGER NOT NULL,
             fileid       INTEGER NOT NULL,
             PRIMARY KEY (subscription, fileid),
             FOREIGN KEY (subscription) REFERENCES wmbs_subscription(id)
               ON DELETE CASCADE,
             FOREIGN KEY (fileid)       REFERENCES wmbs_file_details(id)
               ON DELETE CASCADE)
             """

        self.create["11wmbs_sub_files_failed"] = \
          """CREATE TABLE wmbs_sub_files_failed (
             subscription INTEGER NOT NULL,
             fileid       INTEGER NOT NULL,
             PRIMARY KEY (subscription, fileid),
             FOREIGN KEY (subscription) REFERENCES wmbs_subscription(id)
               ON DELETE CASCADE,
             FOREIGN KEY (fileid)       REFERENCES wmbs_file_details(id)
               ON DELETE CASCADE)"""

        self.create["12wmbs_sub_files_complete"] = \
          """CREATE TABLE wmbs_sub_files_complete (
             subscription INTEGER NOT NULL,
             fileid       INTEGER NOT NULL,
             PRIMARY KEY (subscription, fileid),
             FOREIGN KEY (subscription) REFERENCES wmbs_subscription(id)
               ON DELETE CASCADE,
             FOREIGN KEY (fileid)       REFERENCES wmbs_file_details(id)
               ON DELETE CASCADE)"""

        self.create["13wmbs_jobgroup"] = \
          """CREATE TABLE wmbs_jobgroup (
             id           INTEGER      PRIMARY KEY AUTO_INCREMENT,
             subscription INTEGER      NOT NULL,
             guid         VARCHAR(255),
             output       INTEGER,
             last_update  INTEGER      NOT NULL,
             location     INTEGER,
             UNIQUE(guid),
             FOREIGN KEY (subscription) REFERENCES wmbs_subscription(id)
               ON DELETE CASCADE,
             FOREIGN KEY (output) REFERENCES wmbs_fileset(id)
                    ON DELETE CASCADE)"""

        self.create["14wmbs_job_state"] = \
          """CREATE TABLE wmbs_job_state (
             id        INTEGER       PRIMARY KEY AUTO_INCREMENT,
             name      VARCHAR(100))"""

        self.create["15wmbs_job"] = \
          """CREATE TABLE wmbs_job (
             id           INTEGER       PRIMARY KEY AUTO_INCREMENT,
             jobgroup     INTEGER       NOT NULL,
             name         VARCHAR(255),
             state        INTEGER       NOT NULL,
             state_time   INTEGER       NOT NULL,
             retry_count  INTEGER       DEFAULT 0,
             couch_record VARCHAR(255),
             location     INTEGER,
             outcome      INTEGER       DEFAULT 0,
             cache_dir    VARCHAR(700)  DEFAULT 'None',
             fwjr_path    VARCHAR(700),
             FOREIGN KEY (jobgroup)
             REFERENCES wmbs_jobgroup(id) ON DELETE CASCADE,
             FOREIGN KEY (state) REFERENCES wmbs_job_state(id),
             FOREIGN KEY (location) REFERENCES wmbs_location(id))"""

        self.indexes["03_pk_wmbs_job"] = \
          """ALTER TABLE wmbs_job ADD
               (CONSTRAINT wmbs_job_unique UNIQUE (name, cache_dir, fwjr_path))"""


        self.create["16wmbs_job_assoc"] = \
          """CREATE TABLE wmbs_job_assoc (
             job    INTEGER NOT NULL,
             fileid INTEGER NOT NULL,
             FOREIGN KEY (job)  REFERENCES wmbs_job(id)
               ON DELETE CASCADE,
             FOREIGN KEY (fileid) REFERENCES wmbs_file_details(id)
               ON DELETE CASCADE)"""

        self.create["17wmbs_job_mask"] = \
          """CREATE TABLE wmbs_job_mask (
              job           INTEGER     NOT NULL,
              FirstEvent    BIGINT,
              LastEvent     BIGINT,
              FirstLumi     INTEGER,
              LastLumi      INTEGER,
              FirstRun      INTEGER,
              LastRun       INTEGER,
              inclusivemask BOOLEAN DEFAULT TRUE,
              FOREIGN KEY (job)       REFERENCES wmbs_job(id)
                ON DELETE CASCADE)"""

        self.create["18wmbs_checksum_type"] = \
          """CREATE TABLE wmbs_checksum_type (
              id            INTEGER      PRIMARY KEY AUTO_INCREMENT,
              type          VARCHAR(255) )
              """


        self.create["19wmbs_file_checksums"] = \
          """CREATE TABLE wmbs_file_checksums (
              fileid        INTEGER,
              typeid        INTEGER,
              cksum         VARCHAR(100),
              UNIQUE (fileid, typeid),
              FOREIGN KEY (typeid) REFERENCES wmbs_checksum_type(id)
                ON DELETE CASCADE,
              FOREIGN KEY (fileid) REFERENCES wmbs_file_details(id)
                ON DELETE CASCADE)"""

        self.create["20wmbs_location_senames"] = \
          """CREATE TABLE wmbs_location_senames (
               location   INTEGER,
               se_name    VARCHAR(255),
               UNIQUE(location, se_name),
               FOREIGN KEY (location) REFERENCES wmbs_location(id)
                 ON DELETE CASCADE)"""


        self.constraints["01_idx_wmbs_fileset_files"] = \
          """CREATE INDEX wmbs_fileset_files_idx_fileset ON wmbs_fileset_files(fileset) %s""" % tablespaceIndex

        self.constraints["02_idx_wmbs_fileset_files"] = \
          """CREATE INDEX wmbs_fileset_files_idx_fileid ON wmbs_fileset_files(fileid) %s""" % tablespaceIndex

        self.constraints["01_idx_wmbs_file_runlumi_map"] = \
          """CREATE INDEX wmbs_file_runlumi_map_fileid ON wmbs_file_runlumi_map(fileid) %s""" % tablespaceIndex

        self.constraints["01_idx_wmbs_file_location"] = \
          """CREATE INDEX wmbs_file_location_fileid ON wmbs_file_location(fileid) %s""" % tablespaceIndex

        self.constraints["02_idx_wmbs_file_location"] = \
          """CREATE INDEX wmbs_file_location_location ON wmbs_file_location(location) %s""" % tablespaceIndex

        self.constraints["01_idx_wmbs_file_parent"] = \
          """CREATE INDEX wmbs_file_parent_parent ON wmbs_file_parent(parent) %s""" % tablespaceIndex

        self.constraints["02_idx_wmbs_file_parent"] = \
          """CREATE INDEX wmbs_file_parent_child ON wmbs_file_parent(child) %s""" % tablespaceIndex

        self.constraints["01_idx_wmbs_workflow_output"] = \
          """CREATE INDEX idx_wmbs_workf_out_workflow ON wmbs_workflow_output(workflow_id) %s""" % tablespaceIndex

        self.constraints["02_idx_wmbs_workflow_output"] = \
          """CREATE INDEX idx_wmbs_workf_out_fileset ON wmbs_workflow_output(output_fileset) %s""" % tablespaceIndex

        self.constraints["03_idx_wmbs_workflow_output"] = \
          """CREATE INDEX idx_wmbs_workf_mout_fileset ON wmbs_workflow_output(merged_output_fileset) %s""" % tablespaceIndex

        self.constraints["01_idx_wmbs_subscription"] = \
          """CREATE INDEX idx_wmbs_subscription_fileset ON wmbs_subscription(fileset) %s""" % tablespaceIndex

        self.constraints["02_idx_wmbs_subscription"] = \
          """CREATE INDEX idx_wmbs_subscription_subtype ON wmbs_subscription(subtype) %s""" % tablespaceIndex

        self.constraints["03_idx_wmbs_subscription"] = \
          """CREATE INDEX idx_wmbs_subscription_workflow ON wmbs_subscription(workflow) %s""" % tablespaceIndex

        self.constraints["01_idx_wmbs_sub_files_acquired"] = \
          """CREATE INDEX idx_wmbs_sub_files_acq_sub ON wmbs_sub_files_acquired(subscription) %s""" % tablespaceIndex

        self.constraints["02_idx_wmbs_sub_files_acquired"] = \
          """CREATE INDEX idx_wmbs_sub_files_acq_file ON wmbs_sub_files_acquired(fileid) %s""" % tablespaceIndex

        self.constraints["01_idx_wmbs_sub_files_available"] = \
          """CREATE INDEX idx_wmbs_sub_files_ava_sub ON wmbs_sub_files_available(subscription) %s""" % tablespaceIndex

        self.constraints["02_idx_wmbs_sub_files_available"] = \
          """CREATE INDEX idx_wmbs_sub_files_ava_file ON wmbs_sub_files_available(fileid) %s""" % tablespaceIndex

        self.constraints["01_idx_wmbs_sub_files_failed"] = \
          """CREATE INDEX idx_wmbs_sub_files_fail_sub ON wmbs_sub_files_failed(subscription) %s""" % tablespaceIndex

        self.constraints["02_idx_wmbs_sub_files_failed"] = \
          """CREATE INDEX idx_wmbs_sub_files_fail_file ON wmbs_sub_files_failed(fileid) %s""" % tablespaceIndex

        self.constraints["01_idx_wmbs_sub_files_complete"] = \
          """CREATE INDEX idx_wmbs_sub_files_comp_sub ON wmbs_sub_files_complete(subscription) %s""" % tablespaceIndex

        self.constraints["02_idx_wmbs_sub_files_complete"] = \
          """CREATE INDEX idx_wmbs_sub_files_comp_file ON wmbs_sub_files_complete(fileid) %s""" % tablespaceIndex

        self.constraints["01_idx_wmbs_sub_jobgroup"] = \
          """CREATE INDEX idx_wmbs_jobgroup_sub ON wmbs_jobgroup(subscription) %s""" % tablespaceIndex

        self.constraints["02_idx_wmbs_sub_jobgroup"] = \
          """CREATE INDEX idx_wmbs_jobgroup_out ON wmbs_jobgroup(output) %s""" % tablespaceIndex

        self.constraints["01_idx_wmbs_job"] = \
          """CREATE INDEX idx_wmbs_job_jobgroup ON wmbs_job(jobgroup) %s""" % tablespaceIndex

        self.constraints["02_idx_wmbs_job"] = \
          """CREATE INDEX idx_wmbs_job_loc ON wmbs_job(location) %s""" % tablespaceIndex

        self.constraints["03_idx_wmbs_job"] = \
          """CREATE INDEX idx_wmbs_job_state ON wmbs_job(state) %s""" % tablespaceIndex

        self.constraints["01_idx_wmbs_job_assoc"] = \
          """CREATE INDEX idx_wmbs_job_assoc_job ON wmbs_job_assoc(job) %s""" % tablespaceIndex

        self.constraints["02_idx_wmbs_job_assoc"] = \
          """CREATE INDEX idx_wmbs_job_assoc_file ON wmbs_job_assoc(fileid) %s""" % tablespaceIndex

        self.constraints["01_idx_wmbs_job_mask"] = \
          """CREATE INDEX idx_wmbs_job_mask_job ON wmbs_job_mask(job) %s""" % tablespaceIndex

        self.constraints["01_idx_wmbs_file_checksums"] = \
          """CREATE INDEX idx_wmbs_file_checksums_type ON wmbs_file_checksums(typeid) %s""" % tablespaceIndex

        self.constraints["01_idx_wmbs_file_checksums"] = \
          """CREATE INDEX idx_wmbs_file_checksums_file ON wmbs_file_checksums(fileid) %s""" % tablespaceIndex

        # The Transitions class holds all states and allowed transitions; use
        # it to populate the wmbs_job_state table.
        for jobState in Transitions().states():
            jobStateQuery = "INSERT INTO wmbs_job_state (name) VALUES ('%s')" % \
                (jobState)
            self.inserts["job_state_%s" % jobState] = jobStateQuery

        self.subTypes = [("Processing", 0), ("Merge", 5), ("Harvesting", 3), ("Cleanup", 5),
                         ("LogCollect", 3), ("Skim", 3), ("Analysis", 0), ("Production", 0),
                         ("MultiProcessing", 0), ("MultiProduction", 0)]
        for pair in self.subTypes:
            subTypeQuery = """INSERT INTO wmbs_sub_types (name, priority)
                                VALUES ('%s', %d)""" % (pair[0], pair[1])
            self.inserts["wmbs_sub_types_%s" % pair[0]] = subTypeQuery

        locationStates = ["Normal", "Down", "Draining", "Aborted"]

        for i in locationStates:
            locationStateQuery = """INSERT INTO wmbs_location_state (name)
                                    VALUES ('%s')""" % i
            self.inserts["wmbs_location_state_%s" % i] = locationStateQuery

        checksumTypes = ['cksum', 'adler32', 'md5']
        for i in checksumTypes:
            checksumTypeQuery = """INSERT INTO wmbs_checksum_type (type) VALUES ('%s')
            """ % (i)
            self.inserts["wmbs_checksum_type_%s" % (i)] = checksumTypeQuery

        return
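The create, constraints and inserts dictionaries built above are not executed in the constructor; the base DBCreator runs them later, and the numeric key prefixes ("01wmbs_fileset", "01_idx_...") control the execution order. The loop below is a minimal sketch of that consumption step; the execute_schema helper and the processData call are illustrative assumptions, not the actual WMCore DBCreator API.

def execute_schema(dbi, create, constraints, inserts):
    """Run the schema dictionaries in sorted-key order (illustrative only)."""
    for section in (create, constraints, inserts):
        for key in sorted(section):
            # dbi.processData is assumed here to execute a single SQL statement.
            dbi.processData(section[key])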
Example #58
0
    def __init__(self, logger=None, dbi=None, params=None):
        """
        _init_

        Call the DBCreator constructor and create the list of required tables.
        """
        myThread = threading.currentThread()

        if logger is None:
            logger = myThread.logger
        if dbi is None:
            dbi = myThread.dbi

        tablespaceIndex = ""
        if params:
            if "tablespace_index" in params:
                tablespaceIndex = "USING INDEX TABLESPACE %s" % params["tablespace_index"]

        DBCreator.__init__(self, logger, dbi)

        self.requiredTables = [
            "01wmbs_fileset",
            "02wmbs_file_details",
            "03wmbs_fileset_files",
            "04wmbs_file_parent",
            "05wmbs_file_runlumi_map",
            "05wmbs_location_state",
            "06wmbs_location",
            "07wmbs_file_location",
            "07wmbs_users",
            "07wmbs_workflow",
            "08wmbs_sub_types",
            "08wmbs_workflow_output",
            "09wmbs_subscription",
            "10wmbs_subscription_validation",
            "10wmbs_sub_files_acquired",
            "10wmbs_sub_files_available",
            "11wmbs_sub_files_failed",
            "12wmbs_sub_files_complete",
            "13wmbs_jobgroup",
            "14wmbs_job_state",
            "15wmbs_job",
            "16wmbs_job_assoc",
            "17wmbs_job_mask",
            "18wmbs_checksum_type",
            "19wmbs_file_checksums",
            "20wmbs_location_pnns",
        ]

        self.create["01wmbs_fileset"] = \
          """CREATE TABLE wmbs_fileset (
             id          INTEGER      PRIMARY KEY AUTO_INCREMENT,
             name        VARCHAR(700) NOT NULL,
             open        INT(1)       NOT NULL DEFAULT 0,
             last_update INTEGER      NOT NULL,
             UNIQUE (name))"""

        self.create["02wmbs_file_details"] = \
          """CREATE TABLE wmbs_file_details (
             id           INTEGER      PRIMARY KEY AUTO_INCREMENT,
             lfn          VARCHAR(700) NOT NULL,
             filesize     BIGINT,
             events       INTEGER,
             first_event  BIGINT       NOT NULL DEFAULT 0,
             merged       INT(1)       NOT NULL DEFAULT 0,
             UNIQUE (lfn))"""

        self.create["03wmbs_fileset_files"] = \
          """CREATE TABLE wmbs_fileset_files (
             fileid      INTEGER   NOT NULL,
             fileset     INTEGER   NOT NULL,
             insert_time INTEGER   NOT NULL,
             UNIQUE (fileid, fileset),
             FOREIGN KEY(fileset) references wmbs_fileset(id))"""

        self.create["04wmbs_file_parent"] = \
          """CREATE TABLE wmbs_file_parent (
             child  INTEGER NOT NULL,
             parent INTEGER NOT NULL,
             FOREIGN KEY (child)  references wmbs_file_details(id)
               ON DELETE CASCADE,
             FOREIGN KEY (parent) references wmbs_file_details(id),
             UNIQUE(child, parent))"""

        self.create["05wmbs_file_runlumi_map"] = \
          """CREATE TABLE wmbs_file_runlumi_map (
             fileid  INTEGER NOT NULL,
             run     INTEGER NOT NULL,
             lumi    INTEGER NOT NULL,
             FOREIGN KEY (fileid) references wmbs_file_details(id)
               ON DELETE CASCADE)"""

        self.create["05wmbs_location_state"] = \
            """CREATE TABLE wmbs_location_state (
               id   INTEGER PRIMARY KEY AUTO_INCREMENT,
               name VARCHAR(100) NOT NULL)"""

        self.create["06wmbs_location"] = \
          """CREATE TABLE wmbs_location (
             id          INTEGER      PRIMARY KEY AUTO_INCREMENT,
             site_name   VARCHAR(255) NOT NULL,
             state       INTEGER NOT NULL,
             cms_name    VARCHAR(255),
             ce_name     VARCHAR(255),
             running_slots   INTEGER,
             pending_slots   INTEGER,
             plugin      VARCHAR(255),
             UNIQUE(site_name),
             FOREIGN KEY (state) REFERENCES wmbs_location_state(id))"""

        self.create["07wmbs_users"] = \
          """CREATE TABLE wmbs_users (
             id        INTEGER      PRIMARY KEY AUTO_INCREMENT,
             cert_dn   VARCHAR(255) NOT NULL,
             name_hn   VARCHAR(255),
             owner     VARCHAR(255),
             grp       VARCHAR(255),
             group_name     VARCHAR(255),
             role_name      VARCHAR(255),
             UNIQUE(cert_dn, group_name, role_name))"""

        self.create["07wmbs_file_location"] = \
          """CREATE TABLE wmbs_file_location (
             fileid   INTEGER NOT NULL,
             location INTEGER NOT NULL,
             UNIQUE(fileid, location),
             FOREIGN KEY(fileid)   REFERENCES wmbs_file_details(id)
               ON DELETE CASCADE,
             FOREIGN KEY(location) REFERENCES wmbs_location(id)
               ON DELETE CASCADE)"""

        self.create["07wmbs_workflow"] = \
          """CREATE TABLE wmbs_workflow (
             id           INTEGER          PRIMARY KEY AUTO_INCREMENT,
             spec         VARCHAR(700)     NOT NULL,
             name         VARCHAR(700)     NOT NULL,
             task         VARCHAR(700)     NOT NULL,
             type         VARCHAR(255),
             owner        INTEGER          NOT NULL,
             alt_fs_close INT(1)           NOT NULL,
             injected     INT(1)           DEFAULT 0,
             priority     INTEGER UNSIGNED DEFAULT 0,
             FOREIGN KEY (owner)
             REFERENCES wmbs_users(id) ON DELETE CASCADE) """


        self.indexes["03_pk_wmbs_workflow"] = \
          """ALTER TABLE wmbs_workflow ADD
               (CONSTRAINT wmbs_workflow_unique UNIQUE (name, spec, task))"""


        self.create["08wmbs_workflow_output"] = \
          """CREATE TABLE wmbs_workflow_output (
             workflow_id           INTEGER NOT NULL,
             output_identifier     VARCHAR(255) NOT NULL,
             output_fileset        INTEGER NOT NULL,
             merged_output_fileset INTEGER,
             FOREIGN KEY(workflow_id)  REFERENCES wmbs_workflow(id)
               ON DELETE CASCADE,
             FOREIGN KEY(output_fileset)  REFERENCES wmbs_fileset(id)
               ON DELETE CASCADE,
             FOREIGN KEY(merged_output_fileset)  REFERENCES wmbs_fileset(id)
               ON DELETE CASCADE)
             """

        self.create["08wmbs_sub_types"] = \
          """CREATE TABLE wmbs_sub_types (
               id   INTEGER      PRIMARY KEY AUTO_INCREMENT,
               name VARCHAR(255) NOT NULL,
               priority INTEGER DEFAULT 0,
               UNIQUE(name))"""

        self.create["09wmbs_subscription"] = \
          """CREATE TABLE wmbs_subscription (
             id          INTEGER      PRIMARY KEY AUTO_INCREMENT,
             fileset     INTEGER      NOT NULL,
             workflow    INTEGER      NOT NULL,
             split_algo  VARCHAR(255) NOT NULL,
             subtype     INTEGER      NOT NULL,
             last_update INTEGER      NOT NULL,
             finished    INT(1)       DEFAULT 0,
             FOREIGN KEY(fileset)
             REFERENCES wmbs_fileset(id) ON DELETE CASCADE,
             FOREIGN KEY(workflow)
             REFERENCES wmbs_workflow(id) ON DELETE CASCADE,
             FOREIGN KEY(subtype)
             REFERENCES wmbs_sub_types(id) ON DELETE CASCADE)"""

        self.create["10wmbs_subscription_validation"] = \
          """CREATE TABLE wmbs_subscription_validation (
             subscription_id INTEGER NOT NULL,
             location_id     INTEGER NOT NULL,
             valid           INTEGER,
             UNIQUE (subscription_id, location_id),
             FOREIGN KEY(subscription_id) REFERENCES wmbs_subscription(id)
               ON DELETE CASCADE,
             FOREIGN KEY(location_id) REFERENCES wmbs_location(id)
               ON DELETE CASCADE)"""

        self.create["10wmbs_sub_files_acquired"] = \
          """CREATE TABLE wmbs_sub_files_acquired (
             subscription INTEGER NOT NULL,
             fileid       INTEGER NOT NULL,
             PRIMARY KEY (subscription, fileid),
             FOREIGN KEY (subscription) REFERENCES wmbs_subscription(id)
               ON DELETE CASCADE,
             FOREIGN KEY (fileid)       REFERENCES wmbs_file_details(id)
               ON DELETE CASCADE)
             """

        self.create["10wmbs_sub_files_available"] = \
          """CREATE TABLE wmbs_sub_files_available (
             subscription INTEGER NOT NULL,
             fileid       INTEGER NOT NULL,
             PRIMARY KEY (subscription, fileid),
             FOREIGN KEY (subscription) REFERENCES wmbs_subscription(id)
               ON DELETE CASCADE,
             FOREIGN KEY (fileid)       REFERENCES wmbs_file_details(id)
               ON DELETE CASCADE)
             """

        self.create["11wmbs_sub_files_failed"] = \
          """CREATE TABLE wmbs_sub_files_failed (
             subscription INTEGER NOT NULL,
             fileid       INTEGER NOT NULL,
             PRIMARY KEY (subscription, fileid),
             FOREIGN KEY (subscription) REFERENCES wmbs_subscription(id)
               ON DELETE CASCADE,
             FOREIGN KEY (fileid)       REFERENCES wmbs_file_details(id)
               ON DELETE CASCADE)"""

        self.create["12wmbs_sub_files_complete"] = \
          """CREATE TABLE wmbs_sub_files_complete (
             subscription INTEGER NOT NULL,
             fileid       INTEGER NOT NULL,
             PRIMARY KEY (subscription, fileid),
             FOREIGN KEY (subscription) REFERENCES wmbs_subscription(id)
               ON DELETE CASCADE,
             FOREIGN KEY (fileid)       REFERENCES wmbs_file_details(id)
               ON DELETE CASCADE)"""

        self.create["13wmbs_jobgroup"] = \
          """CREATE TABLE wmbs_jobgroup (
             id           INTEGER      PRIMARY KEY AUTO_INCREMENT,
             subscription INTEGER      NOT NULL,
             guid         VARCHAR(255),
             output       INTEGER,
             last_update  INTEGER      NOT NULL,
             location     INTEGER,
             UNIQUE(guid),
             FOREIGN KEY (subscription) REFERENCES wmbs_subscription(id)
               ON DELETE CASCADE,
             FOREIGN KEY (output) REFERENCES wmbs_fileset(id)
                    ON DELETE CASCADE)"""

        self.create["14wmbs_job_state"] = \
          """CREATE TABLE wmbs_job_state (
             id        INTEGER       PRIMARY KEY AUTO_INCREMENT,
             name      VARCHAR(100))"""

        self.create["15wmbs_job"] = \
          """CREATE TABLE wmbs_job (
             id           INTEGER       PRIMARY KEY AUTO_INCREMENT,
             jobgroup     INTEGER       NOT NULL,
             name         VARCHAR(255),
             state        INTEGER       NOT NULL,
             state_time   INTEGER       NOT NULL,
             retry_count  INTEGER       DEFAULT 0,
             couch_record VARCHAR(255),
             location     INTEGER,
             outcome      INTEGER       DEFAULT 0,
             cache_dir    VARCHAR(767)  DEFAULT 'None',
             fwjr_path    VARCHAR(767),
             FOREIGN KEY (jobgroup)
             REFERENCES wmbs_jobgroup(id) ON DELETE CASCADE,
             FOREIGN KEY (state) REFERENCES wmbs_job_state(id),
             FOREIGN KEY (location) REFERENCES wmbs_location(id))"""

        self.indexes["03_pk_wmbs_job"] = \
          """ALTER TABLE wmbs_job ADD
               (CONSTRAINT wmbs_job_unique UNIQUE (name, cache_dir, fwjr_path))"""


        self.create["16wmbs_job_assoc"] = \
          """CREATE TABLE wmbs_job_assoc (
             job    INTEGER NOT NULL,
             fileid INTEGER NOT NULL,
             FOREIGN KEY (job)  REFERENCES wmbs_job(id)
               ON DELETE CASCADE,
             FOREIGN KEY (fileid) REFERENCES wmbs_file_details(id)
               ON DELETE CASCADE)"""

        self.create["17wmbs_job_mask"] = \
          """CREATE TABLE wmbs_job_mask (
              job           INTEGER     NOT NULL,
              FirstEvent    BIGINT,
              LastEvent     BIGINT,
              FirstLumi     INTEGER,
              LastLumi      INTEGER,
              FirstRun      INTEGER,
              LastRun       INTEGER,
              inclusivemask BOOLEAN DEFAULT TRUE,
              FOREIGN KEY (job)       REFERENCES wmbs_job(id)
                ON DELETE CASCADE)"""

        self.create["18wmbs_checksum_type"] = \
          """CREATE TABLE wmbs_checksum_type (
              id            INTEGER      PRIMARY KEY AUTO_INCREMENT,
              type          VARCHAR(255) )
              """


        self.create["19wmbs_file_checksums"] = \
          """CREATE TABLE wmbs_file_checksums (
              fileid        INTEGER,
              typeid        INTEGER,
              cksum         VARCHAR(100),
              UNIQUE (fileid, typeid),
              FOREIGN KEY (typeid) REFERENCES wmbs_checksum_type(id)
                ON DELETE CASCADE,
              FOREIGN KEY (fileid) REFERENCES wmbs_file_details(id)
                ON DELETE CASCADE)"""

        self.create["20wmbs_location_pnns"] = \
          """CREATE TABLE wmbs_location_pnns (
               location   INTEGER,
               pnn    VARCHAR(255),
               UNIQUE(location, pnn),
               FOREIGN KEY (location) REFERENCES wmbs_location(id)
                 ON DELETE CASCADE)"""


        self.constraints["01_idx_wmbs_fileset_files"] = \
          """CREATE INDEX wmbs_fileset_files_idx_fileset ON wmbs_fileset_files(fileset) %s""" % tablespaceIndex

        self.constraints["02_idx_wmbs_fileset_files"] = \
          """CREATE INDEX wmbs_fileset_files_idx_fileid ON wmbs_fileset_files(fileid) %s""" % tablespaceIndex

        self.constraints["01_idx_wmbs_file_runlumi_map"] = \
          """CREATE INDEX wmbs_file_runlumi_map_fileid ON wmbs_file_runlumi_map(fileid) %s""" % tablespaceIndex

        self.constraints["01_idx_wmbs_file_location"] = \
          """CREATE INDEX wmbs_file_location_fileid ON wmbs_file_location(fileid) %s""" % tablespaceIndex

        self.constraints["02_idx_wmbs_file_location"] = \
          """CREATE INDEX wmbs_file_location_location ON wmbs_file_location(location) %s""" % tablespaceIndex

        self.constraints["01_idx_wmbs_file_parent"] = \
          """CREATE INDEX wmbs_file_parent_parent ON wmbs_file_parent(parent) %s""" % tablespaceIndex

        self.constraints["02_idx_wmbs_file_parent"] = \
          """CREATE INDEX wmbs_file_parent_child ON wmbs_file_parent(child) %s""" % tablespaceIndex

        self.constraints["01_idx_wmbs_workflow_output"] = \
          """CREATE INDEX idx_wmbs_workf_out_workflow ON wmbs_workflow_output(workflow_id) %s""" % tablespaceIndex

        self.constraints["02_idx_wmbs_workflow_output"] = \
          """CREATE INDEX idx_wmbs_workf_out_fileset ON wmbs_workflow_output(output_fileset) %s""" % tablespaceIndex

        self.constraints["03_idx_wmbs_workflow_output"] = \
          """CREATE INDEX idx_wmbs_workf_mout_fileset ON wmbs_workflow_output(merged_output_fileset) %s""" % tablespaceIndex

        self.constraints["01_idx_wmbs_subscription"] = \
          """CREATE INDEX idx_wmbs_subscription_fileset ON wmbs_subscription(fileset) %s""" % tablespaceIndex

        self.constraints["02_idx_wmbs_subscription"] = \
          """CREATE INDEX idx_wmbs_subscription_subtype ON wmbs_subscription(subtype) %s""" % tablespaceIndex

        self.constraints["03_idx_wmbs_subscription"] = \
          """CREATE INDEX idx_wmbs_subscription_workflow ON wmbs_subscription(workflow) %s""" % tablespaceIndex

        self.constraints["01_idx_wmbs_sub_files_acquired"] = \
          """CREATE INDEX idx_wmbs_sub_files_acq_sub ON wmbs_sub_files_acquired(subscription) %s""" % tablespaceIndex

        self.constraints["02_idx_wmbs_sub_files_acquired"] = \
          """CREATE INDEX idx_wmbs_sub_files_acq_file ON wmbs_sub_files_acquired(fileid) %s""" % tablespaceIndex

        self.constraints["01_idx_wmbs_sub_files_available"] = \
          """CREATE INDEX idx_wmbs_sub_files_ava_sub ON wmbs_sub_files_available(subscription) %s""" % tablespaceIndex

        self.constraints["02_idx_wmbs_sub_files_available"] = \
          """CREATE INDEX idx_wmbs_sub_files_ava_file ON wmbs_sub_files_available(fileid) %s""" % tablespaceIndex

        self.constraints["01_idx_wmbs_sub_files_failed"] = \
          """CREATE INDEX idx_wmbs_sub_files_fail_sub ON wmbs_sub_files_failed(subscription) %s""" % tablespaceIndex

        self.constraints["02_idx_wmbs_sub_files_failed"] = \
          """CREATE INDEX idx_wmbs_sub_files_fail_file ON wmbs_sub_files_failed(fileid) %s""" % tablespaceIndex

        self.constraints["01_idx_wmbs_sub_files_complete"] = \
          """CREATE INDEX idx_wmbs_sub_files_comp_sub ON wmbs_sub_files_complete(subscription) %s""" % tablespaceIndex

        self.constraints["02_idx_wmbs_sub_files_complete"] = \
          """CREATE INDEX idx_wmbs_sub_files_comp_file ON wmbs_sub_files_complete(fileid) %s""" % tablespaceIndex

        self.constraints["01_idx_wmbs_sub_jobgroup"] = \
          """CREATE INDEX idx_wmbs_jobgroup_sub ON wmbs_jobgroup(subscription) %s""" % tablespaceIndex

        self.constraints["02_idx_wmbs_sub_jobgroup"] = \
          """CREATE INDEX idx_wmbs_jobgroup_out ON wmbs_jobgroup(output) %s""" % tablespaceIndex

        self.constraints["01_idx_wmbs_job"] = \
          """CREATE INDEX idx_wmbs_job_jobgroup ON wmbs_job(jobgroup) %s""" % tablespaceIndex

        self.constraints["02_idx_wmbs_job"] = \
          """CREATE INDEX idx_wmbs_job_loc ON wmbs_job(location) %s""" % tablespaceIndex

        self.constraints["03_idx_wmbs_job"] = \
          """CREATE INDEX idx_wmbs_job_state ON wmbs_job(state) %s""" % tablespaceIndex

        self.constraints["01_idx_wmbs_job_assoc"] = \
          """CREATE INDEX idx_wmbs_job_assoc_job ON wmbs_job_assoc(job) %s""" % tablespaceIndex

        self.constraints["02_idx_wmbs_job_assoc"] = \
          """CREATE INDEX idx_wmbs_job_assoc_file ON wmbs_job_assoc(fileid) %s""" % tablespaceIndex

        self.constraints["01_idx_wmbs_job_mask"] = \
          """CREATE INDEX idx_wmbs_job_mask_job ON wmbs_job_mask(job) %s""" % tablespaceIndex

        self.constraints["01_idx_wmbs_file_checksums"] = \
          """CREATE INDEX idx_wmbs_file_checksums_type ON wmbs_file_checksums(typeid) %s""" % tablespaceIndex

        self.constraints["01_idx_wmbs_file_checksums"] = \
          """CREATE INDEX idx_wmbs_file_checksums_file ON wmbs_file_checksums(fileid) %s""" % tablespaceIndex

        # The Transitions class holds all states and allowed transitions; use
        # it to populate the wmbs_job_state table.
        for jobState in Transitions().states():
            jobStateQuery = "INSERT INTO wmbs_job_state (name) VALUES ('%s')" % \
                (jobState)
            self.inserts["job_state_%s" % jobState] = jobStateQuery

        self.subTypes = [("Processing", 0), ("Merge", 5), ("Harvesting", 3),
                         ("Cleanup", 5), ("LogCollect", 3), ("Skim", 3),
                         ("Production", 0)]
        for pair in self.subTypes:
            subTypeQuery = """INSERT INTO wmbs_sub_types (name, priority)
                                VALUES ('%s', %d)""" % (pair[0], pair[1])
            self.inserts["wmbs_sub_types_%s" % pair[0]] = subTypeQuery

        locationStates = ["Normal", "Down", "Draining", "Aborted"]

        for i in locationStates:
            locationStateQuery = """INSERT INTO wmbs_location_state (name)
                                    VALUES ('%s')""" % i
            self.inserts["wmbs_location_state_%s" % i] = locationStateQuery

        checksumTypes = ['cksum', 'adler32', 'md5']
        for i in checksumTypes:
            checksumTypeQuery = """INSERT INTO wmbs_checksum_type (type) VALUES ('%s')
            """ % (i)
            self.inserts["wmbs_checksum_type_%s" % (i)] = checksumTypeQuery

        return
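A hedged usage sketch for the MySQL schema above: the class name Create and the myLogger/myDbi objects are assumptions, but the params handling matches the constructor. A tablespace_index entry is appended to every generated CREATE INDEX statement; without it, the %s placeholder is filled with an empty string.

creator = Create(logger=myLogger, dbi=myDbi,
                 params={"tablespace_index": "WMBS_IDX_TBS"})
print(creator.constraints["01_idx_wmbs_fileset_files"])
# -> CREATE INDEX wmbs_fileset_files_idx_fileset ON wmbs_fileset_files(fileset) USING INDEX TABLESPACE WMBS_IDX_TBS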
Example #59
0
    def __init__(self, logger=None, dbi=None, params=None):
        """
        _init_

        Call the DBCreator constructor and initialize the schema

        """
        myThread = threading.currentThread()
        if logger is None:
            logger = myThread.logger
        if dbi is None:
            dbi = myThread.dbi

        DBCreator.__init__(self, logger, dbi)

        #
        # Tables, functions, procedures and sequences
        #
        self.create[len(self.create)] = \
            """CREATE TABLE t0_config (
                 run_id   int           not null,
                 config   varchar2(255) not null,
                 primary key(run_id)
               ) ORGANIZATION INDEX"""

        self.create[len(self.create)] = \
            """CREATE TABLE run_status (
                 id     int          not null,
                 name   varchar2(25) not null,
                 primary key(id),
                 constraint run_sta_name_uq unique(name)
               ) ORGANIZATION INDEX"""

        self.create[len(self.create)] = \
            """CREATE TABLE processing_style (
                 id     int          not null,
                 name   varchar2(25) not null,
                 primary key(id),
                 constraint pro_sty_name_uq unique(name)
               ) ORGANIZATION INDEX"""

        self.create[len(self.create)] = \
            """CREATE TABLE event_scenario (
                 id     int          not null,
                 name   varchar2(25) not null,
                 primary key(id),
                 constraint eve_sce_name_uq unique(name)
               ) ORGANIZATION INDEX"""

        self.create[len(self.create)] = \
            """CREATE TABLE data_tier (
                 id     int          not null,
                 name   varchar2(25) not null,
                 primary key(id),
                 constraint dat_tie_name_uq unique(name)
               ) ORGANIZATION INDEX"""

        self.create[len(self.create)] = \
            """CREATE TABLE cmssw_version (
                 id     int           not null,
                 name   varchar2(255) not null,
                 primary key(id),
                 constraint cms_ver_name_uq unique(name)
               ) ORGANIZATION INDEX"""

        self.create[len(self.create)] = \
            """CREATE TABLE stream (
                 id     int           not null,
                 name   varchar2(255) not null,
                 primary key(id),
                 constraint str_name_uq unique(name)
               ) ORGANIZATION INDEX"""

        self.create[len(self.create)] = \
            """CREATE TABLE trigger_label (
                 id     int           not null,
                 name   varchar2(255) not null,
                 primary key(id),
                 constraint tri_lab_name_uq unique(name)
               ) ORGANIZATION INDEX"""

        self.create[len(self.create)] = \
            """CREATE TABLE primary_dataset (
                 id     int           not null,
                 name   varchar2(255) not null,
                 primary key(id),
                 constraint pri_dat_name_uq unique(name)
               ) ORGANIZATION INDEX"""

        self.create[len(self.create)] = \
            """CREATE TABLE storage_node (
                 id     int           not null,
                 name   varchar2(255) not null,
                 primary key(id),
                 constraint sto_nod_name_uq unique(name)
               ) ORGANIZATION INDEX"""

        self.create[len(self.create)] = \
            """CREATE TABLE run (
                 run_id             int           not null,
                 status             int           default 1 not null,
                 last_updated       int           not null,
                 express_released   int           default 0 not null,
                 hltkey             varchar2(255) not null,
                 start_time         int           not null,
                 end_time           int           default 0 not null,
                 close_time         int           default 0 not null,
                 lumicount          int           default 0 not null,
                 process            varchar2(255),
                 acq_era            varchar2(255),
                 lfn_prefix         varchar2(255),
                 bulk_data_type     varchar2(255),
                 bulk_data_loc      varchar2(255),
                 dqmuploadurl       varchar2(255),
                 ah_timeout         int,
                 ah_dir             varchar2(255),
                 cond_timeout       int,
                 db_host            varchar2(255),
                 valid_mode         int,
                 primary key(run_id)
               ) ORGANIZATION INDEX"""

        self.create[len(self.create)] = \
            """CREATE TABLE run_trig_primds_assoc (
                 run_id      int not null,
                 primds_id   int not null,
                 trig_id     int not null,
                 primary key(run_id, primds_id, trig_id)
               ) ORGANIZATION INDEX COMPRESS 2"""

        self.create[len(self.create)] = \
            """CREATE TABLE run_primds_stream_assoc (
                 run_id      int not null,
                 primds_id   int not null,
                 stream_id   int not null,
                 primary key(run_id, primds_id)
               )"""

        self.create[len(self.create)] = \
            """CREATE TABLE run_primds_scenario_assoc (
                 run_id        int not null,
                 primds_id     int not null,
                 scenario_id   int not null,
                 primary key(run_id, primds_id)
               ) ORGANIZATION INDEX"""

        self.create[len(self.create)] = \
            """CREATE TABLE run_stream_style_assoc (
                 run_id      int not null,
                 stream_id   int not null,
                 style_id    int not null,
                 primary key(run_id, stream_id)
               ) ORGANIZATION INDEX"""

        self.create[len(self.create)] = \
            """CREATE TABLE run_stream_cmssw_assoc (
                 run_id             int not null,
                 stream_id          int not null,
                 online_version     int not null,
                 override_version   int not null,
                 primary key(run_id, stream_id)
               ) ORGANIZATION INDEX"""

        self.create[len(self.create)] = \
            """CREATE TABLE run_stream_fileset_assoc (
                 run_id      int not null,
                 stream_id   int not null,
                 fileset     int not null,
                 primary key(run_id, stream_id),
                 constraint run_str_fil_ass_fil_uq unique(fileset)
                   using index
                     (create unique index idx_run_stream_fileset_assoc_1 on run_stream_fileset_assoc (fileset))
               )"""

        self.create[len(self.create)] = \
            """CREATE TABLE reco_release_config (
                 run_id         int not null,
                 primds_id      int not null,
                 fileset        int not null,
                 delay          int not null,
                 delay_offset   int not null,
                 released       int default 0 not null,
                 primary key(run_id, primds_id),
                 constraint rec_rel_con_fil_uq unique(fileset)
                   using index
                     (create unique index idx_reco_release_config_1 on reco_release_config (fileset))
               )"""

        self.create[len(self.create)] = \
            """CREATE TABLE stream_special_primds_assoc (
                 stream_id   int not null,
                 primds_id   int not null,
                 primary key(stream_id)
               ) ORGANIZATION INDEX"""

        self.create[len(self.create)] = \
            """CREATE TABLE primds_error_primds_assoc (
                 parent_id   int not null,
                 error_id    int not null,
                 primary key(parent_id)
               ) ORGANIZATION INDEX"""

        self.create[len(self.create)] = \
            """CREATE TABLE lumi_section (
                 run_id    int not null,
                 lumi_id   int not null,
                 primary key(run_id, lumi_id)
               ) ORGANIZATION INDEX"""

        self.create[len(self.create)] = \
            """CREATE TABLE lumi_section_closed (
                 run_id      int   not null,
                 stream_id   int   not null,
                 lumi_id     int   not null,
                 filecount   int   not null,
                 insert_time int   not null,
                 close_time  int   default 0 not null,
                 primary key(run_id, stream_id, lumi_id)
               ) ORGANIZATION INDEX"""

        self.create[len(self.create)] = \
            """CREATE TABLE lumi_section_split_active (
                 subscription   int not null,
                 run_id         int not null,
                 lumi_id        int not null,
                 primary key(subscription, run_id, lumi_id)
               ) ORGANIZATION INDEX"""

        self.create[len(self.create)] = \
            """CREATE TABLE streamer (
                 id            int not null,
                 run_id        int not null,
                 stream_id     int not null,
                 lumi_id       int not null,
                 insert_time   int not null,
                 used          int default 0 not null,
                 deleted       int default 0 not null,
                 primary key(id)
               )"""

        self.create[len(self.create)] = \
            """CREATE TABLE repack_config (
                 run_id               int not null,
                 stream_id            int not null,
                 proc_version         int not null,
                 max_size_single_lumi int not null,
                 max_size_multi_lumi  int not null,
                 min_size             int not null,
                 max_size             int not null,
                 max_edm_size         int not null,
                 max_over_size        int not null,
                 max_events           int not null,
                 max_files            int not null,
                 primary key (run_id, stream_id)
               ) ORGANIZATION INDEX"""

        self.create[len(self.create)] = \
            """CREATE TABLE express_config (
                 run_id         int             not null,
                 stream_id      int             not null,
                 proc_version   int             not null,
                 write_tiers    varchar2(255)   not null,
                 global_tag     varchar2(255)   not null,
                 max_events     int             not null,
                 max_size       int             not null,
                 max_files      int             not null,
                 max_latency    int             not null,
                 alca_skim      varchar2(1000),
                 dqm_seq        varchar2(1000),
                 primary key (run_id, stream_id)
               ) ORGANIZATION INDEX"""

        self.create[len(self.create)] = \
            """CREATE TABLE prompt_calib (
                 run_id        int not null,
                 stream_id     int not null,
                 finished      int default 0 not null,
                 subscription  int,
                 primary key (run_id, stream_id)
               ) ORGANIZATION INDEX"""

        self.create[len(self.create)] = \
            """CREATE TABLE prompt_calib_file (
                 fileid      int not null,
                 run_id      int not null,
                 stream_id   int not null,
                 primary key (fileid, run_id, stream_id)
               ) ORGANIZATION INDEX"""

        self.create[len(self.create)] = \
            """CREATE TABLE reco_config (
                 run_id         int            not null,
                 primds_id      int            not null,
                 do_reco        int            not null,
                 cmssw_id       int            not null,
                 reco_split     int            not null,
                 write_reco     int            not null,
                 write_dqm      int            not null,
                 write_aod      int            not null,
                 proc_version   int            not null,
                 alca_skim      varchar2(1000),
                 dqm_seq        varchar2(1000),
                 global_tag     varchar2(255),
                 primary key (run_id, primds_id)
               ) ORGANIZATION INDEX"""

        self.create[len(self.create)] = \
            """CREATE TABLE phedex_config (
                 run_id         int          not null,
                 primds_id      int          not null,
                 node_id        int          not null,
                 custodial      int          not null,
                 request_only   char(1)      not null,
                 priority       varchar2(10) not null,
                 primary key (run_id, primds_id, node_id)
               ) ORGANIZATION INDEX"""

        self.create[len(self.create)] = \
            """CREATE TABLE promptskim_config (
                 run_id          int not null,
                 primds_id       int not null,
                 tier_id         int not null,
                 skim_name       varchar2(255) not null,
                 node_id         int not null,
                 cmssw_id        int not null,
                 two_file_read   int not null,
                 proc_version    int not null,
                 global_tag      varchar2(255),
                 config_url      varchar2(255),
                 primary key (run_id, primds_id, tier_id, skim_name)
               ) ORGANIZATION INDEX"""

        self.create[len(self.create)] = \
            """CREATE TABLE workflow_monitoring (
                 workflow   int not null,
                 tracked    int default 0 not null,
                 closeout   int default 0 not null,  
                 primary key (workflow)
               ) ORGANIZATION INDEX"""

        self.create[len(self.create)] = \
            """CREATE FUNCTION checkForZeroState (value IN int)
               RETURN int DETERMINISTIC IS
               BEGIN
                 IF value = 0 THEN
                   RETURN 0;
                 ELSE
                   RETURN NULL;
                 END IF;
               END checkForZeroState;
               """

        self.create[len(self.create)] = \
            """CREATE FUNCTION checkForZeroOneState (value IN int)
               RETURN int DETERMINISTIC IS
               BEGIN
                 IF value = 0 THEN
                   RETURN 0;
                 ELSIF value = 1 THEN
                   RETURN 1;
                 ELSE
                   RETURN NULL;
                 END IF;
               END checkForZeroOneState;
               """

        self.create[len(self.create)] = \
            """CREATE SEQUENCE cmssw_version_SEQ
               START WITH 1
               INCREMENT BY 1
               NOMAXVALUE
               CACHE 10
               """

        self.create[len(self.create)] = \
            """CREATE SEQUENCE stream_SEQ
               START WITH 1
               INCREMENT BY 1
               NOMAXVALUE
               CACHE 10
               """

        self.create[len(self.create)] = \
            """CREATE SEQUENCE trigger_label_SEQ
               START WITH 1
               INCREMENT BY 1
               NOMAXVALUE
               CACHE 100
               """

        self.create[len(self.create)] = \
            """CREATE SEQUENCE primary_dataset_SEQ
               START WITH 1
               INCREMENT BY 1
               NOMAXVALUE
               CACHE 100
               """

        self.create[len(self.create)] = \
            """CREATE SEQUENCE storage_node_SEQ
               START WITH 1
               INCREMENT BY 1
               NOMAXVALUE
               CACHE 100
               """

        #
        # Indexes
        #
        #
        # The usual rule is to put an index on every FK. I don't follow it
        # strictly here because some tables are used only rarely and with
        # well-defined filter conditions. In those cases I just add an index
        # on the condition I need (see the query sketch after this example
        # for how the function-based indexes below are meant to be used).
        #
        self.indexes[len(self.indexes)] = \
            """CREATE INDEX idx_run_1 ON run (checkForZeroState(express_released))"""

        self.indexes[len(self.indexes)] = \
            """CREATE INDEX idx_run_2 ON run (status)"""

        self.indexes[len(self.indexes)] = \
            """CREATE INDEX idx_run_primds_stream_1 ON run_primds_stream_assoc (run_id, stream_id)"""

        self.indexes[len(self.indexes)] = \
            """CREATE INDEX idx_reco_release_config_2 ON reco_release_config (checkForZeroState(released))"""

        self.indexes[len(self.indexes)] = \
            """CREATE INDEX idx_lumi_section_closed_1 ON lumi_section_closed (checkForZeroState(close_time))"""

        self.indexes[len(self.indexes)] = \
            """CREATE INDEX idx_streamer_1 ON streamer (run_id, stream_id, lumi_id)"""

        self.indexes[len(self.indexes)] = \
            """CREATE INDEX idx_streamer_2 ON streamer (checkForZeroState(used))"""

        self.indexes[len(self.indexes)] = \
            """CREATE INDEX idx_streamer_3 ON streamer (checkForZeroOneState(deleted))"""

        self.indexes[len(self.indexes)] = \
            """CREATE INDEX idx_prompt_calib_1 ON prompt_calib (checkForZeroState(finished))"""

        self.indexes[len(self.indexes)] = \
            """CREATE INDEX idx_workflow_monitoring_0 ON workflow_monitoring (checkForZeroState(tracked))"""

        self.indexes[len(self.indexes)] = \
            """CREATE INDEX idx_workflow_monitoring_1 ON workflow_monitoring (checkForZeroState(closeout))"""
        #
        # Constraints
        #
        self.constraints[len(self.constraints)] = \
            """ALTER TABLE t0_config
                 ADD CONSTRAINT t0_conf_run_id_fk
                 FOREIGN KEY (run_id)
                 REFERENCES run(run_id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE run
                 ADD CONSTRAINT run_sta_fk
                 FOREIGN KEY (status)
                 REFERENCES run_status(id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE run_trig_primds_assoc
                 ADD CONSTRAINT run_tri_pri_run_id_fk
                 FOREIGN KEY (run_id)
                 REFERENCES run(run_id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE run_trig_primds_assoc
                 ADD CONSTRAINT run_tri_pri_pri_id_fk
                 FOREIGN KEY (primds_id)
                 REFERENCES primary_dataset(id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE run_trig_primds_assoc
                 ADD CONSTRAINT run_tri_pri_tri_id_fk
                 FOREIGN KEY (trig_id)
                 REFERENCES trigger_label(id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE run_primds_stream_assoc
                 ADD CONSTRAINT run_pri_tri_run_id_fk
                 FOREIGN KEY (run_id)
                 REFERENCES run(run_id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE run_primds_stream_assoc
                 ADD CONSTRAINT run_pri_tri_pri_id_fk
                 FOREIGN KEY (primds_id)
                 REFERENCES primary_dataset(id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE run_primds_stream_assoc
                 ADD CONSTRAINT run_pri_tri_str_id_fk
                 FOREIGN KEY (stream_id)
                 REFERENCES stream(id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE run_primds_scenario_assoc
                 ADD CONSTRAINT run_pri_sce_run_id_fk
                 FOREIGN KEY (run_id)
                 REFERENCES run(run_id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE run_primds_scenario_assoc
                 ADD CONSTRAINT run_pri_sce_pri_id_fk
                 FOREIGN KEY (primds_id)
                 REFERENCES primary_dataset(id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE run_primds_scenario_assoc
                 ADD CONSTRAINT run_pri_sce_sce_id_fk
                 FOREIGN KEY (scenario_id)
                 REFERENCES event_scenario(id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE run_stream_style_assoc
                 ADD CONSTRAINT run_str_sty_run_id_fk
                 FOREIGN KEY (run_id)
                 REFERENCES run(run_id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE run_stream_style_assoc
                 ADD CONSTRAINT run_str_sty_str_id_fk
                 FOREIGN KEY (stream_id)
                 REFERENCES stream(id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE run_stream_style_assoc
                 ADD CONSTRAINT run_str_sty_sty_id_fk
                 FOREIGN KEY (style_id)
                 REFERENCES processing_style(id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE run_stream_cmssw_assoc
                 ADD CONSTRAINT run_str_cms_run_id_fk
                 FOREIGN KEY (run_id)
                 REFERENCES run(run_id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE run_stream_cmssw_assoc
                 ADD CONSTRAINT run_str_cms_str_id_fk
                 FOREIGN KEY (stream_id)
                 REFERENCES stream(id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE run_stream_cmssw_assoc
                 ADD CONSTRAINT run_str_cms_onl_ver_fk
                 FOREIGN KEY (online_version)
                 REFERENCES cmssw_version(id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE run_stream_cmssw_assoc
                 ADD CONSTRAINT run_str_cms_ove_ver_fk
                 FOREIGN KEY (override_version)
                 REFERENCES cmssw_version(id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE run_stream_fileset_assoc
                 ADD CONSTRAINT run_str_fil_run_id_fk
                 FOREIGN KEY (run_id)
                 REFERENCES run(run_id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE run_stream_fileset_assoc
                 ADD CONSTRAINT run_str_fil_str_id_fk
                 FOREIGN KEY (stream_id)
                 REFERENCES stream(id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE run_stream_fileset_assoc
                 ADD CONSTRAINT run_str_fil_fil_id_fk
                 FOREIGN KEY (fileset)
                 REFERENCES wmbs_fileset(id)
                 ON DELETE CASCADE"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE reco_release_config
                 ADD CONSTRAINT rec_rel_con_run_id_fk
                 FOREIGN KEY (run_id)
                 REFERENCES run(run_id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE reco_release_config
                 ADD CONSTRAINT rec_rel_con_pri_id_fk
                 FOREIGN KEY (primds_id)
                 REFERENCES primary_dataset(id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE reco_release_config
                 ADD CONSTRAINT rec_rel_con_fil_id_fk
                 FOREIGN KEY (fileset)
                 REFERENCES wmbs_fileset(id)
                 ON DELETE CASCADE"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE stream_special_primds_assoc
                 ADD CONSTRAINT str_spe_pri_str_id_fk
                 FOREIGN KEY (stream_id)
                 REFERENCES stream(id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE stream_special_primds_assoc
                 ADD CONSTRAINT str_spe_pri_pri_id_fk
                 FOREIGN KEY (primds_id)
                 REFERENCES primary_dataset(id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE primds_error_primds_assoc
                 ADD CONSTRAINT pri_err_pri_par_id_fk
                 FOREIGN KEY (parent_id)
                 REFERENCES primary_dataset(id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE primds_error_primds_assoc
                 ADD CONSTRAINT pri_err_pri_err_id_fk
                 FOREIGN KEY (error_id)
                 REFERENCES primary_dataset(id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE lumi_section
                 ADD CONSTRAINT lum_sec_run_id_fk
                 FOREIGN KEY (run_id)
                 REFERENCES run(run_id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE lumi_section_closed
                 ADD CONSTRAINT lum_sec_clo_rl_id_fk
                 FOREIGN KEY (run_id, lumi_id)
                 REFERENCES lumi_section(run_id, lumi_id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE lumi_section_closed
                 ADD CONSTRAINT lum_sec_clo_stre_id_fk
                 FOREIGN KEY (stream_id)
                 REFERENCES stream(id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE lumi_section_split_active
                 ADD CONSTRAINT lum_sec_spli_act_rl_id_fk
                 FOREIGN KEY (run_id, lumi_id)
                 REFERENCES lumi_section(run_id, lumi_id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE lumi_section_split_active
                 ADD CONSTRAINT lum_sec_spli_act_stre_id_fk
                 FOREIGN KEY (subscription)
                 REFERENCES wmbs_subscription(id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE streamer
                 ADD CONSTRAINT str_run_id_fk
                 FOREIGN KEY (run_id)
                 REFERENCES run(run_id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE streamer
                 ADD CONSTRAINT str_rl_id_fk
                 FOREIGN KEY (run_id, lumi_id)
                 REFERENCES lumi_section(run_id, lumi_id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE streamer
                 ADD CONSTRAINT str_str_id_fk
                 FOREIGN KEY (stream_id)
                 REFERENCES stream(id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE repack_config
                 ADD CONSTRAINT rep_con_run_id_fk
                 FOREIGN KEY (run_id)
                 REFERENCES run(run_id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE repack_config
                 ADD CONSTRAINT rep_con_str_id_fk
                 FOREIGN KEY (stream_id)
                 REFERENCES stream(id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE express_config
                 ADD CONSTRAINT exp_con_run_id_fk
                 FOREIGN KEY (run_id)
                 REFERENCES run(run_id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE express_config
                 ADD CONSTRAINT exp_con_str_id_fk
                 FOREIGN KEY (stream_id)
                 REFERENCES stream(id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE prompt_calib
                 ADD CONSTRAINT pro_cal_run_id_fk
                 FOREIGN KEY (run_id)
                 REFERENCES run(run_id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE prompt_calib
                 ADD CONSTRAINT pro_cal_str_id_fk
                 FOREIGN KEY (stream_id)
                 REFERENCES stream(id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE prompt_calib
                 ADD CONSTRAINT pro_cal_sub_fk
                 FOREIGN KEY (subscription)
                 REFERENCES wmbs_subscription(id)
                 ON DELETE CASCADE"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE prompt_calib_file
                 ADD CONSTRAINT pro_cal_fil_fil_id_fk
                 FOREIGN KEY (fileid)
                 REFERENCES wmbs_file_details(id)
                 ON DELETE CASCADE"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE prompt_calib_file
                 ADD CONSTRAINT pro_cal_fil_run_id_fk
                 FOREIGN KEY (run_id)
                 REFERENCES run(run_id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE prompt_calib_file
                 ADD CONSTRAINT pro_cal_fil_str_id_fk
                 FOREIGN KEY (stream_id)
                 REFERENCES stream(id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE reco_config
                 ADD CONSTRAINT rec_con_run_id_fk
                 FOREIGN KEY (run_id)
                 REFERENCES run(run_id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE reco_config
                 ADD CONSTRAINT rec_con_primds_id_fk
                 FOREIGN KEY (primds_id)
                 REFERENCES primary_dataset(id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE reco_config
                 ADD CONSTRAINT rec_con_cms_id_fk
                 FOREIGN KEY (cmssw_id)
                 REFERENCES cmssw_version(id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE phedex_config
                 ADD CONSTRAINT phe_con_run_id_fk
                 FOREIGN KEY (run_id)
                 REFERENCES run(run_id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE phedex_config
                 ADD CONSTRAINT phe_con_primds_id_fk
                 FOREIGN KEY (primds_id)
                 REFERENCES primary_dataset(id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE phedex_config
                 ADD CONSTRAINT phe_con_nod_id_fk
                 FOREIGN KEY (node_id)
                 REFERENCES storage_node(id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE promptskim_config
                 ADD CONSTRAINT pro_con_run_id_fk
                 FOREIGN KEY (run_id)
                 REFERENCES run(run_id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE promptskim_config
                 ADD CONSTRAINT pro_con_primds_id_fk
                 FOREIGN KEY (primds_id)
                 REFERENCES primary_dataset(id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE promptskim_config
                 ADD CONSTRAINT pro_con_tie_id_fk
                 FOREIGN KEY (tier_id)
                 REFERENCES data_tier(id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE promptskim_config
                 ADD CONSTRAINT pro_con_nod_id_fk
                 FOREIGN KEY (node_id)
                 REFERENCES storage_node(id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE promptskim_config
                 ADD CONSTRAINT pro_con_cms_id_fk
                 FOREIGN KEY (cmssw_id)
                 REFERENCES cmssw_version(id)"""

        self.constraints[len(self.constraints)] = \
            """ALTER TABLE workflow_monitoring
                 ADD CONSTRAINT wor_mon_wor_fk
                 FOREIGN KEY (workflow)
                 REFERENCES wmbs_workflow(id)
                 ON DELETE CASCADE"""

        subTypes = ["Express", "Repack"]
        for name in subTypes:
            sql = """INSERT INTO wmbs_sub_types
                     (ID, NAME)
                     SELECT wmbs_sub_types_SEQ.nextval, '%s'
                     FROM DUAL
                     WHERE NOT EXISTS (
                       SELECT id FROM wmbs_sub_types WHERE name = '%s'
                     )
                     """ % (name, name)
            self.inserts[len(self.inserts)] = sql

        runStates = { 1 : "Active",
                      2 : "CloseOutRepack",
                      3 : "CloseOutRepackMerge",
                      4 : "CloseOutPromptReco",
                      5 : "CloseOutRecoMerge",
                      6 : "CloseOutAlcaSkim",
                      7 : "CloseOutAlcaSkimMerge",
                      8 : "CloseOutExport",
                      9 : "CloseOutT1Skimming",
                      10 : "Complete" }
        for id, name in runStates.items():
            sql = """INSERT INTO run_status
                     (ID, NAME)
                     VALUES (%d, '%s')
                     """ % (id, name)
            self.inserts[len(self.inserts)] = sql

        processingStyles = { 1 : "Bulk",
                             2 : "Express",
                             3 : "Register",
                             4 : "Convert",
                             5 : "RegisterAndConvert",
                             6 : "Ignore" }
        for id, name in processingStyles.items():
            sql = """INSERT INTO processing_style
                     (ID, NAME)
                     VALUES (%d, '%s')
                     """ % (id, name)
            self.inserts[len(self.inserts)] = sql

        eventScenarios = { 1 : "pp",
                           2 : "cosmics",
                           3 : "hcalnzs",
                           4 : "HeavyIons",
                           5 : "AlCaTestEnable",
                           6 : "AlCaP0",
                           7 : "AlCaPhiSymEcal",
                           8 : "AlCaLumiPixels",
                           9 : "DataScouting" }
        for id, name in eventScenarios.items():
            sql = """INSERT INTO event_scenario
                     (ID, NAME)
                     VALUES (%d, '%s')
                     """ % (id, name)
            self.inserts[len(self.inserts)] = sql

        dataTiers = { 1 : "RAW",
                      2 : "RECO",
                      3 : "FEVT",
                      4 : "FEVTHLTALL",
                      5 : "AOD",
                      6 : "ALCARECO",
                      7 : "DQM",
                      8 : "ALCAPROMPT" }
        for id, name in dataTiers.items():
            sql = """INSERT INTO data_tier
                     (ID, NAME)
                     VALUES (%d, '%s')
                     """ % (id, name)
            self.inserts[len(self.inserts)] = sql

        return
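A query sketch for the function-based indexes defined in this example: Oracle only uses such an index when the predicate repeats the indexed expression, and rows for which the function returns NULL are not stored in the index, so checkForZeroState and checkForZeroOneState effectively give partial indexes over the rows of interest. The query below is illustrative and not part of the schema code above.

unusedStreamers = """SELECT id
                     FROM streamer
                     WHERE checkForZeroState(used) = 0"""
# This predicate matches idx_streamer_2; a plain "WHERE used = 0" predicate
# would not be able to use that function-based index.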