示例#1
0
    def checkExtentDir(self):
        """Validate taxon extent subdirectory names against the database.

        Walks every subdirectory under the configured extent directory,
        pulls the taxon key out of each matching *.shp file name, looks up
        the taxon's scientific name in master.taxon, and prints a warning
        for every subdirectory whose name (underscores read as spaces)
        disagrees with the database. Does nothing when the configured path
        is not a directory.
        """
        extentRoot = self.extentDir.get()
        if not os.path.isdir(extentRoot):
            return

        dbConn = getDbConnection(
            optparse.Values(self.dbPane.getDbOptions()))

        try:
            for subdir, _dirs, _files in os.walk(extentRoot):
                # The root itself holds no taxon shapefiles
                if subdir == extentRoot:
                    continue
                dirName = path.basename(subdir)
                for shpFile in (f for f in os.listdir(subdir)
                                if f.endswith('.shp')):
                    match = self.shapeFilePattern.match(shpFile)
                    if not match:
                        continue
                    taxonKey = int(match.groups()[0])
                    rows = dbConn.execute(
                        "SELECT scientific_name AS taxon_name " +
                        "  FROM master.taxon WHERE taxon_key = %s"
                        % taxonKey)
                    for row in rows:
                        if row.taxon_name != dirName.replace("_", " "):
                            print(
                                "Taxon %s's name %s does not match with subdir %s"
                                % (taxonKey, row.taxon_name, dirName))
        finally:
            if dbConn:
                dbConn.close()
示例#2
0
    def processYearPartition(self, year):
        """Rebuild the cell catch partition for a single year.

        Repopulates allocation.allocation_data_partition_udi for *year*,
        runs the generated insert statements through the sql processor,
        then vacuums the resulting web_partition.cell_catch_p<year> table
        and rebuilds its indexes.

        Args:
            year: integer year selecting the partition to process.
        """
        opts = self.dbPane.getDbOptions()

        # Open the session before the try block: in the original code a
        # connection failure left `dbSession` unbound, and the `finally`
        # clause then raised NameError on dbSession.close().
        dbSession = getDbConnection(optparse.Values(opts)).getSession()
        try:
            dbSession.execute(
                "TRUNCATE TABLE allocation.allocation_data_partition_udi")
            dbSession.execute(
                "SELECT allocation.populate_allocation_data_partition_udi(%s)"
                % year)
            dbSession.execute(
                "VACUUM ANALYZE allocation.allocation_data_partition_udi")

            # Hand the generated insert statements to the parallel sql processor
            opts['sqlfile'] = None
            opts[
                'sqlcmd'] = "SELECT allocation.generate_insert_cell_catch_partition_statements(%s)" % year
            opts['threads'] = 16
            sp.process(optparse.Values(opts))

            # Post insertions operation to finalize the target cell catch partition for immediate use
            cellCatchPartition = "cell_catch_p%s" % year
            dbSession.execute("VACUUM ANALYZE web_partition.%s" %
                              cellCatchPartition)
            for indexSql in dbSession.execute(
                    "SELECT web_partition.maintain_cell_catch_indexes('%s') AS cmd"
                    % cellCatchPartition).fetchall():
                dbSession.execute(indexSql.cmd)
        finally:
            dbSession.close()
示例#3
0
    def processExtentDir(self):
        """Load every taxon extent shapefile found under the extent directory.

        Walks the subdirectories of the configured extent directory, feeds
        each matching *.shp file to processExtentShapeFile (committing after
        each one), then vacuums distribution.taxon_extent. Does nothing when
        the configured path is not a directory.
        """
        taxonExtentDir = self.extentDir.get()
        if os.path.isdir(taxonExtentDir):
            opts = self.dbPane.getDbOptions()
            # Raw DBAPI connection: processExtentShapeFile needs a cursor
            rawConn = getDbConnection(
                optparse.Values(opts)).getSession().connection().connection
            cursor = rawConn.cursor()

            try:
                for subdir in [x[0] for x in os.walk(taxonExtentDir)]:
                    if subdir != taxonExtentDir:
                        for shpFile in [
                                f for f in os.listdir(subdir)
                                if f.endswith('.shp')
                        ]:
                            match = self.shapeFilePattern.match(shpFile)
                            if match:
                                self.processExtentShapeFile(
                                    cursor, path.join(subdir, shpFile),
                                    match.groups())
                                # Commit per shapefile so a later failure
                                # does not roll back completed loads
                                rawConn.commit()

                print("Vacuuming distribution.taxon_extent afterward...")
                cursor.execute("vacuum analyze distribution.taxon_extent")
                rawConn.commit()
            finally:
                # Close the cursor on all paths; the original leaked it
                # whenever an exception escaped the loop above.
                cursor.close()
                if rawConn:
                    rawConn.close()
示例#4
0
    def postAggregationOperations(self, summaryTable):
        """Finalize allocation data after a summarization run.

        Fills in missing unit prices (sql file + global-average fallback),
        then vacuum/analyzes allocation.allocation_data and the target
        summary table(s).

        Args:
            summaryTable: summary table name to vacuum/analyze, or None to
                vacuum/analyze every table listed in SUMMARY_TABLES.
        """
        opts = self.dbPane.getDbOptions()
        dbConn = getDbConnection(optparse.Values(opts))
        try:
            print("Updating allocation data unit price...")
            if 'threads' not in opts or opts['threads'] == 0:
                opts['threads'] = 8
            opts['sqlfile'] = "sql/update_allocation_data_unit_price.sql"
            sp.process(optparse.Values(opts))
            # Rows still without a price fall back to the global average
            dbConn.execute(
                "UPDATE allocation.allocation_data SET unit_price = %s WHERE unit_price IS NULL"
                % SummarizeCommandPane.GLOBAL_AVERAGE_UNIT_PRICE)
            dbConn.execute("VACUUM ANALYZE allocation.allocation_data")

            print("Vacuum and analyze target summary table(s)...")
            if summaryTable:
                dbConn.execute("VACUUM ANALYZE allocation.%s" % summaryTable)
            else:
                # if input summaryTable = None, it's really the signal to vacuum analyze all summary tables
                for tab in SummarizeCommandPane.SUMMARY_TABLES:
                    if tab:
                        dbConn.execute("VACUUM ANALYZE allocation.%s" % tab)

            print("Summarization process completed...")
        finally:
            # Close on all paths; the original leaked the connection when
            # any of the statements above raised.
            dbConn.close()
    def setupCommandPane(self):
        """Build the command-pane buttons for the allocation data transfer.

        Aborts with a popup unless the main DB connection has been tested
        successfully and the source DB is a SQL Server instance. Otherwise
        rebuilds the button grid: one blue button per DataTransfer row that
        targets the 'allocation' schema, plus a row of red bulk-operation
        buttons.
        """
        if not self.mainDbPane.isConnectionTestedSuccessfully():
            popup_message("Connection not yet tested",
                          "The Main DB Connection has not been tested successfully.\n" +
                          "Once the Main DB Connection has been tested successfully, you can click that button again.")
            return

        if self.sourceDbPane.db_type.get() != 'sqlserver':
            popup_message("DB Connection not SQL Server",
                          "The Source DB Connection should be a SQL Server instance.\n" +
                          "Once the Source DB Connection has been re-configured, you can click that button again.")
            return

        # Clear out any buttons left over from a previous invocation
        for child in self.cmdFrame.winfo_children(): child.destroy()

        dbConn = getDbConnection(optparse.Values(self.mainDbPane.getDbOptions()))
        # Session and transfer list are kept on self so button callbacks can use them
        self.dbSession = dbConn.getSession()
        self.dataTransfer = self.dbSession.query(DataTransfer).filter_by(target_schema_name='allocation').order_by(DataTransfer.id).all()

        # Row 0: one blue button per table to transfer
        add_buttons(self.cmdFrame,
                    [[tab.target_table_name, partial(self.processTable, tab), "blue"] for tab in self.dataTransfer],
                    0, 0, "horizontal")

        # Row 1: red bulk operations
        add_buttons(self.cmdFrame,
                    [["Pull all allocation db tables", self.pullAllAllocationData, "red"],
                    ["Drop foreign keys", partial(drop_foreign_key, self.mainDbPane), "red"],
                    ["Restore foreign keys", partial(restore_foreign_key, self.mainDbPane), "red"]],
                    1, 0, "horizontal")

        grid_panel(self.cmdFrame)
示例#6
0
    def postAggregationOperations(self):
        """Post-process web.v_fact_data after aggregation.

        Merges the Unknown fishing entity (223 -> 213), vacuums
        v_fact_data, then refreshes and vacuums the dependent materialized
        views in the web schema via the parallel sql processor.
        """
        opts = self.dbPane.getDbOptions()
        dbConn = getDbConnection(optparse.Values(opts))
        if 'threads' not in opts or opts['threads'] == 0:
            opts['threads'] = 2

        try:
            print("Merging Unknown fishing entity in catch data...")
            dbConn.execute(
                "UPDATE web.v_fact_data SET fishing_entity_id = 213 WHERE fishing_entity_id = 223"
            )

            print("Vacuuming v_fact_data afterward...")
            dbConn.execute("vacuum analyze web.v_fact_data")

            # And now refresh all materialized views as most are dependent on data in the v_fact_data table
            opts[
                'sqlcmd'] = "SELECT 'refresh materialized view web.' || table_name FROM matview_v('web') WHERE table_name NOT LIKE 'TOTALS%'"
            sp.process(optparse.Values(opts))
            opts[
                'sqlcmd'] = "SELECT 'vacuum analyze web.' || table_name FROM matview_v('web') WHERE table_name NOT LIKE 'TOTALS%'"
            sp.process(optparse.Values(opts))

            print("Aggregation process completed...")
        finally:
            # Close on all paths; the original leaked the connection when
            # any step above raised.
            dbConn.close()
示例#7
0
    def postAggregationOperations(self, summaryTable):
        """Vacuum/analyze allocation data and the target summary table(s).

        Unit-price updating was intentionally removed from this step: the
        price is now added during the aggregate step to account for end
        use type.

        Args:
            summaryTable: summary table name to vacuum/analyze, or None to
                vacuum/analyze every table listed in SUMMARY_TABLES.
        """
        opts = self.dbPane.getDbOptions()
        dbConn = getDbConnection(optparse.Values(opts))
        try:
            # Runs with whatever options the pane supplied; no sqlfile is
            # set here since the unit-price update was retired.
            sp.process(optparse.Values(opts))
            dbConn.execute("VACUUM ANALYZE allocation.allocation_data")

            print("Vacuum and analyze target summary table(s)...")
            if summaryTable:
                dbConn.execute("VACUUM ANALYZE allocation.%s" % summaryTable)
            else:
                # if input summaryTable = None, it's really the signal to vacuum analyze all summary tables
                for tab in SummarizeCommandPane.SUMMARY_TABLES:
                    if tab:
                        dbConn.execute("VACUUM ANALYZE allocation.%s" % tab)

            print("Summarization process completed...")
        finally:
            # Close on all paths (previously leaked on exception)
            dbConn.close()
示例#8
0
    def setupCommandPane(self):
        """Build the command-pane buttons for the integration data transfer.

        Aborts with a popup unless the main DB connection has been tested
        successfully. Otherwise opens sessions to the source and main DBs,
        loads the DataTransfer rows whose source database matches the
        source pane's dbname, and lays out one blue button per table
        (wrapping every self.buttonsPerRow buttons). Unless running in
        silent mode, a final row of red bulk-operation buttons is added.
        """
        if not self.mainDbPane.isConnectionTestedSuccessfully():
            popup_message("Connection not yet tested",
                          "The Main DB Connection has not been tested successfully.\n" +
                          "Once the Main DB Connection has been tested successfully, you can click that button again.")
            return

        # Clear out any buttons left over from a previous invocation
        for child in self.cmdFrame.winfo_children(): child.destroy()

        sourceDbOpts = optparse.Values(self.sourceDbPane.getDbOptions())
        self.sourceDbSession = getDbConnection(sourceDbOpts).getSession()
        self.mainDbSession = getDbConnection(optparse.Values(self.mainDbPane.getDbOptions())).getSession()

        # Case-insensitive match of the transfer rows against the source db name
        self.dataTransfer = self.mainDbSession.query(DataTransfer).filter(func.lower(DataTransfer.source_database_name)==func.lower(sourceDbOpts.dbname)) \
            .order_by(DataTransfer.id).all()

        button_data = []
        column=0
        row=0

        # Accumulate blue per-table buttons, flushing a full row at a time
        for tab in self.dataTransfer:
             button_data.append([tab.target_table_name, partial(self.processTable, tab), "blue"])
             column += 1

             if column >= self.buttonsPerRow:
                 add_buttons(self.cmdFrame, button_data, row, 0, "horizontal")
                 button_data = []
                 column = 0
                 row += 1

        # Flush the final, partially-filled row (if any)
        if button_data != []:
            add_buttons(self.cmdFrame, button_data, row, 0, "horizontal")
            row += 1

        if not self.silentMode:
            row = add_buttons(self.cmdFrame,
                              [["Pull all integration db tables", self.pullAllIntegrationDbData, "red"],
                               ["Drop foreign keys", partial(drop_foreign_key, self.mainDbPane), "red"],
                               ["Restore foreign keys", partial(restore_foreign_key, self.mainDbPane), "red"]],
                              row, 0, "horizontal")

        grid_panel(self.cmdFrame)
示例#9
0
def getAirconModeId(stateValue):
  """Return the StateId from REFERENCE_AIRCON_MODE for the given value.

  Assumes a matching row exists; raises TypeError otherwise (fetchone()
  returns None in that case).
  """
  conn = db.getDbConnection()
  with conn:
    cursor = conn.cursor()
    # Look up the state id for the requested aircon-mode value
    cursor.execute(
      "SELECT StateId FROM REFERENCE_AIRCON_MODE WHERE Value = :value",
      { "value": stateValue })
    row = cursor.fetchone()
    return row[0]
示例#10
0
    def kickoffSqlProcessor(self, summaryTable, isPostOpsRequired=True):
        """Truncate allocation.<summaryTable> and repopulate it via the
        parallel sql processor running sql/summarize_<summaryTable>.sql.

        Args:
            summaryTable: summary table name; also selects the sql file.
            isPostOpsRequired: when True, run postAggregationOperations
                for the table afterwards.
        """
        opts = self.dbPane.getDbOptions()
        dbConn = getDbConnection(optparse.Values(opts))
        try:
            dbConn.execute("TRUNCATE allocation.%s" % summaryTable)
        finally:
            # The connection is only needed for the truncate; the original
            # never closed it (connection leak).
            dbConn.close()

        opts['sqlfile'] = "sql/summarize_%s.sql" % summaryTable
        if 'threads' not in opts or opts['threads'] == 0:
            opts['threads'] = 8
        sp.process(optparse.Values(opts))

        if isPostOpsRequired:
            self.postAggregationOperations(summaryTable)
示例#11
0
    def kickoffSqlProcessor(self, summaryTable, isPostOpsRequired=True):
        """Truncate allocation.<summaryTable> and repopulate it via the
        parallel sql processor running sql/summarize_<summaryTable>.sql.

        Args:
            summaryTable: summary table name; also selects the sql file.
            isPostOpsRequired: when True, run postAggregationOperations
                for the table afterwards.
        """
        opts = self.dbPane.getDbOptions()
        dbConn = getDbConnection(optparse.Values(opts))
        # NOTE(review): dbConn is never closed in this method — likely a
        # connection leak; consider try/finally around the truncate.
        dbConn.execute("TRUNCATE allocation.%s" % summaryTable)
        opts['sqlfile'] = "sql/summarize_%s.sql" % summaryTable
        if 'threads' not in opts or opts['threads'] == 0:
            opts['threads'] = 8
        sp.process(optparse.Values(opts))

        if isPostOpsRequired:
            self.postAggregationOperations(summaryTable)
示例#12
0
    def recreateIndexes(self):
        """Re-create the indexes captured by a prior dropIndexes() call.

        Aborts with a popup when no CREATE INDEX commands were recorded
        (i.e. the drop step has not run yet).
        """
        if not self.index_create_cmds:
            popup_message("Prior index drop",
                          "A prior execution of index drop has not been detected. This invocation is aborted.")
            return

        opts = self.dbPane.getDbOptions()
        dbConn = getDbConnection(optparse.Values(opts))

        # Replay every captured CREATE INDEX statement
        for createCmd in self.index_create_cmds:
            dbConn.execute(createCmd)

        dbConn.close()
示例#13
0
def getReferenceCodeAndValue(table, typeName, makerName):
  """Return the first (Code, CodeValue) row for a code type and maker.

  Args:
    table: reference table name. It is concatenated straight into the SQL,
      so it must come from trusted code, never user input (injection risk).
    typeName: value matched against the TypeName column.
    makerName: value matched against the MakerName column.

  Returns:
    The first matching row, or None when there is no match.
  """
  conn = db.getDbConnection()
  with conn:

    cur = conn.cursor()

    # Get code and human readable value for specified code type and maker
    sql = "\
      SELECT Code, CodeValue\
      FROM " + table + "\
      WHERE TypeName = :type\
      AND MakerName = :maker"
    cur.execute(sql, { "type": typeName, "maker": makerName })
    return cur.fetchone()
示例#14
0
    def recreateIndexes(self):
        """Re-create the indexes captured by a prior dropIndexes() call.

        Aborts with a popup when no CREATE INDEX commands were recorded
        (i.e. the drop step has not run yet).
        """
        if len(self.index_create_cmds) == 0:
            popup_message(
                "Prior index drop",
                "A prior execution of index drop has not been detected. This invocation is aborted."
            )
            return

        opts = self.dbPane.getDbOptions()
        dbConn = getDbConnection(optparse.Values(opts))

        # Replay every captured CREATE INDEX statement
        for index_create_cmd in self.index_create_cmds:
            dbConn.execute(index_create_cmd)

        dbConn.close()
示例#15
0
    def run(self):
        """Consume SQL commands from the queue until a poison pill arrives.

        Each command is executed on this worker's own connection. A None
        item on the queue signals shutdown. The original code placed
        dbconn.close() after `return`, so it never executed; the
        try/finally now guarantees the connection is closed.
        """
        dbconn = getDbConnection(self.options)
        proc_name = self.name

        try:
            while True:
                next_cmd = self.cmd_queue.get()
                if next_cmd is None:
                    # Poison pill means we should exit
                    print('%s: Exiting' % proc_name)
                    break
                print('%s: %s' % (proc_name, next_cmd))
                dbconn.execute(next_cmd)
        finally:
            dbconn.close()
示例#16
0
    def run(self):
        """Consume SQL commands from the queue until a poison pill arrives.

        Each command is executed on this worker's own connection. A None
        item on the queue signals shutdown. The original code placed
        dbconn.close() after `return`, so it never executed; the
        try/finally now guarantees the connection is closed.
        """
        dbconn = getDbConnection(self.options)
        proc_name = self.name

        try:
            while True:
                next_cmd = self.cmd_queue.get()
                if next_cmd is None:
                    # Poison pill means we should exit
                    print('%s: Exiting' % proc_name)
                    break
                print('%s: %s' % (proc_name, next_cmd))
                dbconn.execute(next_cmd)
        finally:
            dbconn.close()
示例#17
0
    def process(self, entity_layer_id):
        """Maintain the catch CSV cache partition for an entity layer.

        Verifies the DB connection has been tested, maintains the
        web_cache partition for *entity_layer_id*, then repopulates the
        CSV catch data and vacuums the cache tables via the parallel sql
        processor.
        """
        if not self.dbPane.isConnectionTestedSuccessfully():
            popup_message("Connection not yet tested",
                          "The DB Connection has not been tested successfully.\n" +
                          "Once the DB Connection has been tested successfully, you can click the Process button again.")
            return

        dbOpts = self.dbPane.getDbOptions()
        dbSession = getDbConnection(optparse.Values(dbOpts)).getSession()
        try:
            dbSession.execute("SELECT * FROM web_cache.maintain_catch_csv_partition(%s)" % entity_layer_id)
        finally:
            # Fixes a session leak: the original never closed this session
            dbSession.close()

        dbOpts['sqlfile'] = "sql/populate_catch_data_in_csv.sql"
        dbOpts['sqlcmd'] = "select format('vacuum analyze web_cache.%s', table_name) from schema_v('web_cache') where table_name not like 'TOTAL%'"
        dbOpts['threads'] = 4
        sp.process(optparse.Values(dbOpts))
示例#18
0
    def dropIndexes(self):
        """Drop every non-primary-key index on v_fact_data, remembering the
        CREATE INDEX statements so recreateIndexes() can restore them.

        Aborts with a popup when commands were already captured by an
        earlier invocation.
        """
        if self.index_create_cmds:
            popup_message("Index drop previously executed",
                          "A prior execution of index drop has been detected. This invocation is aborted.")
            return

        opts = self.dbPane.getDbOptions()
        dbConn = getDbConnection(optparse.Values(opts))
        # Capture each index's qualified name plus its create statement
        rows = dbConn.execute("SELECT (schemaname || '.' || indexname) index_name, indexdef index_create_cmd \
                                    FROM pg_indexes \
                                   WHERE tablename = 'v_fact_data' AND indexname NOT LIKE 'v_fact_data_pkey'")
        for row in rows:
            self.index_create_cmds.append(row.index_create_cmd)
            dbConn.execute("DROP INDEX %s" % row.index_name)

        dbConn.close()
示例#19
0
    def setupCommandPane(self):
        """Build the command-pane buttons for the allocation data transfer.

        Aborts with a popup unless the main DB connection has been tested
        successfully and the source DB is a SQL Server instance. Otherwise
        rebuilds the button grid: one blue button per DataTransfer row that
        targets the 'allocation' schema, plus a row of red bulk-operation
        buttons.
        """
        if not self.mainDbPane.isConnectionTestedSuccessfully():
            popup_message(
                "Connection not yet tested",
                "The Main DB Connection has not been tested successfully.\n" +
                "Once the Main DB Connection has been tested successfully, you can click that button again."
            )
            return

        if self.sourceDbPane.db_type.get() != 'sqlserver':
            popup_message(
                "DB Connection not SQL Server",
                "The Source DB Connection should be a SQL Server instance.\n" +
                "Once the Source DB Connection has been re-configured, you can click that button again."
            )
            return

        # Clear out any buttons left over from a previous invocation
        for child in self.cmdFrame.winfo_children():
            child.destroy()

        dbConn = getDbConnection(
            optparse.Values(self.mainDbPane.getDbOptions()))
        # Session and transfer list are kept on self for the button callbacks
        self.dbSession = dbConn.getSession()
        self.dataTransfer = self.dbSession.query(DataTransfer).filter_by(
            target_schema_name='allocation').order_by(DataTransfer.id).all()

        # Row 0: one blue button per table to transfer
        add_buttons(
            self.cmdFrame,
            [[tab.target_table_name,
              partial(self.processTable, tab), "blue"]
             for tab in self.dataTransfer], 0, 0, "horizontal")

        # Row 1: red bulk operations
        add_buttons(self.cmdFrame,
                    [[
                        "Pull all allocation db tables",
                        self.pullAllAllocationData, "red"
                    ],
                     [
                         "Drop foreign keys",
                         partial(drop_foreign_key, self.mainDbPane), "red"
                     ],
                     [
                         "Restore foreign keys",
                         partial(restore_foreign_key, self.mainDbPane), "red"
                     ]], 1, 0, "horizontal")

        grid_panel(self.cmdFrame)
示例#20
0
def drop_foreign_key(dbPane):
    """Capture and drop all foreign keys owned by the current database owner.

    Each foreign key's drop/add command pair is stored in
    admin.database_foreign_key so restore_foreign_key can re-create them
    later.

    Args:
        dbPane: pane supplying the DB connection options.
    """
    dbConn = getDbConnection(optparse.Values(dbPane.getDbOptions()))
    try:
        # Start from a clean slate of captured FK commands
        dbConn.execute("TRUNCATE TABLE admin.database_foreign_key")

        # Record the drop/add command pair for every FK owned by the db owner
        dbConn.execute(
            "WITH db(db_owner) AS (" + "SELECT u.usename " +
            "  FROM pg_database d" +
            "  JOIN pg_user u ON (u.usesysid = d.datdba)" +
            " WHERE d.datname = current_database()" + ")" +
            "INSERT INTO admin.database_foreign_key(drop_fk_cmd, add_fk_cmd) " +
            "SELECT f.* " +
            "  FROM get_foreign_key_cmd_by_db_owner((SELECT db_owner FROM db)) AS f"
            + " WHERE COALESCE(f.drop_fk_cmd, '') <> ''")

        # Execute the captured drop commands
        dbConn.execute("SELECT exec(drop_fk_cmd) FROM admin.database_foreign_key")

        print("Foreign keys successfully dropped.")
    finally:
        # The original never closed the connection (leak); always close it
        dbConn.close()
示例#21
0
def getReference(table):
  """Return all rows of *table* as a JSON list of one-entry objects.

  Args:
    table: reference table name. It is concatenated straight into the SQL,
      so it must come from trusted code, never user input (injection risk).

  Returns:
    JSON string encoding a list with one {first_col: second_col} object
    per row.
  """
  conn = db.getDbConnection()
  with conn:

    cur = conn.cursor()

    # Get NEC code for lights
    sql = "SELECT * FROM " + table
    cur.execute(sql)

    rows = cur.fetchall()

    # One {code: value} object per row. The accumulator was renamed from
    # `map`, which shadowed the builtin, and the manual append loop was
    # replaced with a comprehension.
    entries = [{ r[0]: r[1] } for r in rows]

    return json.dumps(entries)
示例#22
0
def drop_foreign_key(dbPane):
    """Capture and drop all foreign keys owned by the current database owner.

    Each foreign key's drop/add command pair is stored in
    admin.database_foreign_key so restore_foreign_key can re-create them
    later.

    NOTE(review): the connection is never closed here — consider wrapping
    the statements in try/finally with dbConn.close().

    Args:
        dbPane: pane supplying the DB connection options.
    """
    dbConn = getDbConnection(optparse.Values(dbPane.getDbOptions()))

    # Start from a clean slate of captured FK commands
    dbConn.execute("TRUNCATE TABLE admin.database_foreign_key")

    # Record the drop/add command pair for every FK owned by the db owner
    dbConn.execute("WITH db(db_owner) AS (" +
                   "SELECT u.usename " +
                   "  FROM pg_database d" +
                   "  JOIN pg_user u ON (u.usesysid = d.datdba)" +
                   " WHERE d.datname = current_database()" +
                   ")" +
                   "INSERT INTO admin.database_foreign_key(drop_fk_cmd, add_fk_cmd) " +
                   "SELECT f.* " +
                   "  FROM get_foreign_key_cmd_by_db_owner((SELECT db_owner FROM db)) AS f" +
                   " WHERE COALESCE(f.drop_fk_cmd, '') <> ''")

    # Execute the captured drop commands
    dbConn.execute("SELECT exec(drop_fk_cmd) FROM admin.database_foreign_key")

    print("Foreign keys successfully dropped.")
示例#23
0
def getReferenceCode(table, state, makerName, hasInputMask=False):
  """Return the reference Code for the given state and maker.

  Args:
    table: reference table name. It is concatenated straight into the SQL,
      so it must come from trusted code, never user input (injection risk).
    state: value matched against the StateId column.
    makerName: value matched against the MakerName column.
    hasInputMask: when True the stored code is returned as-is; otherwise
      it is hex-decoded first.

  Raises:
    TypeError: when no matching row exists (fetchone() returns None).

  NOTE(review): str.decode("hex") exists only on Python 2; under Python 3
  this branch would raise AttributeError — confirm the target runtime.
  """
  conn = db.getDbConnection()
  with conn:

    cur = conn.cursor()

    # Get NEC code for lights
    sql = "\
      SELECT Code\
      FROM " + table + "\
      WHERE StateId = :state\
      AND MakerName = :maker"
    cur.execute(sql, { "state": state, "maker": makerName })

    code = cur.fetchone()[0]
    if (hasInputMask):
      return code
    else:
      return code.decode("hex")
示例#24
0
    def simplifyExtent(self):
        """Simplify the extent geometry of the taxon chosen in the UI.

        Explodes the taxon's geometry into parts, simplifies and buffers
        each part, unions them back into a multi-geometry and writes the
        result to distribution.taxon_extent.
        """
        # int() both validates the UI input and makes the SQL interpolation safe
        taxonKey = int(self.taxonKeyToSimplify.get())

        print("Simplifying taxon: %s" % taxonKey)

        dbConn = getDbConnection(optparse.Values(self.dbPane.getDbOptions()))
        try:
            dbConn.execute((
                "WITH ext(taxon_key, geom) AS (" +
                "  SELECT e.taxon_key, (st_dump(geom)).geom" +
                "    FROM distribution.taxon_extent e" +
                "   WHERE e.taxon_key = %(tk)s" + ")" +
                "UPDATE distribution.taxon_extent e" +
                "   SET geom = (SELECT ST_MULTI(ST_Union(ST_Buffer(ST_SimplifyPreserveTopology(geom, 0.01), 0.25)))"
                + "                         FROM ext" +
                "                        GROUP BY ext.taxon_key)" +
                " WHERE e.taxon_key = %(tk)s") % {"tk": taxonKey})
        finally:
            # The original never closed the connection (leak); always close it
            dbConn.close()

        print("Taxon %s simplified. Please review resulting extent." %
              taxonKey)
示例#25
0
    def dropIndexes(self):
        """Drop every index on v_fact_data except the test primary key,
        remembering the CREATE INDEX statements so recreateIndexes() can
        restore them.

        Aborts with a popup when commands were already captured by an
        earlier invocation.
        """
        if len(self.index_create_cmds) > 0:
            popup_message(
                "Index drop previously executed",
                "A prior execution of index drop has been detected. This invocation is aborted."
            )
            return

        opts = self.dbPane.getDbOptions()
        dbConn = getDbConnection(optparse.Values(opts))
        # Capture each index's qualified name plus its create statement
        indexes = dbConn.execute(
            "SELECT (schemaname || '.' || indexname) index_name, indexdef index_create_cmd \
                                    FROM pg_indexes \
                                   WHERE tablename = 'v_fact_data' AND indexname NOT LIKE 'v_fact_data_test_pkey'"
        )
        for index in indexes:
            self.index_create_cmds.append(index.index_create_cmd)
            dbConn.execute("DROP INDEX %s" % index.index_name)

        dbConn.close()
示例#26
0
    def postAggregationOperations(self):
        """Post-process web.v_fact_data after aggregation.

        Merges the Unknown fishing entity (223 -> 213), vacuums
        v_fact_data, then refreshes and vacuums the dependent materialized
        views in the web schema via the parallel sql processor.

        NOTE(review): dbConn.close() is not in a finally block, so the
        connection leaks if any statement above it raises.
        """
        opts = self.dbPane.getDbOptions()
        dbConn = getDbConnection(optparse.Values(opts))
        if 'threads' not in opts or opts['threads'] == 0:
            opts['threads'] = 8

        print("Merging Unknown fishing entity in catch data...")
        dbConn.execute("UPDATE web.v_fact_data SET fishing_entity_id = 213 WHERE fishing_entity_id = 223")

        print("Vacuuming v_fact_data afterward...")
        dbConn.execute("vacuum analyze web.v_fact_data")

        # And now refresh all materialized views as most are dependent on data in the v_fact_data table
        opts['sqlcmd'] = "SELECT 'refresh materialized view web.' || table_name FROM matview_v('web') WHERE table_name NOT LIKE 'TOTALS%'"
        sp.process(optparse.Values(opts))
        opts['sqlcmd'] = "SELECT 'vacuum analyze web.' || table_name FROM matview_v('web') WHERE table_name NOT LIKE 'TOTALS%'"
        sp.process(optparse.Values(opts))

        print("Aggregation process completed...")

        dbConn.close()
示例#27
0
    def process(self, entity_layer_id):
        """Maintain the catch CSV cache partition for an entity layer.

        Verifies the DB connection has been tested, maintains the
        web_cache partition for *entity_layer_id*, then repopulates the
        CSV catch data and vacuums the cache tables via the parallel sql
        processor.

        NOTE(review): dbSession is never closed — likely a session leak.
        """
        if not self.dbPane.isConnectionTestedSuccessfully():
            popup_message(
                "Connection not yet tested",
                "The DB Connection has not been tested successfully.\n" +
                "Once the DB Connection has been tested successfully, you can click the Process button again."
            )
            return

        dbOpts = self.dbPane.getDbOptions()
        dbSession = getDbConnection(optparse.Values(dbOpts)).getSession()

        dbSession.execute(
            "SELECT * FROM web_cache.maintain_catch_csv_partition(%s)" %
            entity_layer_id)

        dbOpts['sqlfile'] = "sql/populate_catch_data_in_csv.sql"
        dbOpts[
            'sqlcmd'] = "select format('vacuum analyze web_cache.%s', table_name) from schema_v('web_cache') where table_name not like 'TOTAL%'"
        dbOpts['threads'] = 1
        sp.process(optparse.Values(dbOpts))
示例#28
0
    def processYearPartition(self, year):
        """Rebuild the cell catch partition for a single year.

        Repopulates allocation.allocation_data_partition_udi for *year*,
        runs the generated insert statements through the sql processor,
        then vacuums web_partition.cell_catch_p<year> and rebuilds its
        indexes.

        NOTE(review): dbSession is assigned inside the try block; if
        getDbConnection raises, the finally clause hits a NameError on
        dbSession.close(). Consider opening the session before the try.
        """
        opts = self.dbPane.getDbOptions()

        try:
            dbSession = getDbConnection(optparse.Values(opts)).getSession()
            dbSession.execute("TRUNCATE TABLE allocation.allocation_data_partition_udi")
            dbSession.execute("SELECT allocation.populate_allocation_data_partition_udi(%s)" % year)
            dbSession.execute("VACUUM ANALYZE allocation.allocation_data_partition_udi")

            # Hand the generated insert statements to the parallel sql processor
            opts['sqlfile'] = None
            opts['sqlcmd'] = "SELECT allocation.generate_insert_cell_catch_partition_statements(%s)" % year
            opts['threads'] = 16
            sp.process(optparse.Values(opts))

            # Post insertions operation to finalize the target cell catch partition for immediate use
            cellCatchPartition = "cell_catch_p%s" % year
            dbSession.execute("VACUUM ANALYZE web_partition.%s" % cellCatchPartition)
            for indexSql in dbSession.execute(
                "SELECT web_partition.maintain_cell_catch_indexes('%s') AS cmd" % cellCatchPartition).fetchall():
                dbSession.execute(indexSql.cmd)
        finally:
            dbSession.close()
示例#29
0
    def simplifyExtent(self):
        """Simplify the extent geometry of the taxon chosen in the UI.

        Explodes the taxon's geometry into parts, simplifies and buffers
        each part, unions them back into a multi-geometry and writes the
        result to distribution.taxon_extent.

        NOTE(review): dbConn is never closed — likely a connection leak.
        """
        # int() both validates the UI input and makes the SQL interpolation safe
        taxonKey = int(self.taxonKeyToSimplify.get())

        print("Simplifying taxon: %s" % taxonKey)

        dbConn = getDbConnection(optparse.Values(self.dbPane.getDbOptions()))

        dbConn.execute(
            ("WITH ext(taxon_key, geom) AS (" +
             "  SELECT e.taxon_key, (st_dump(geom)).geom" +
             "    FROM distribution.taxon_extent e" +
             "   WHERE e.taxon_key = %(tk)s" +
             ")" +
             "UPDATE distribution.taxon_extent e" +
             "   SET geom = (SELECT ST_MULTI(ST_Union(ST_Buffer(ST_SimplifyPreserveTopology(geom, 0.01), 0.25)))" +
             "                         FROM ext" +
             "                        GROUP BY ext.taxon_key)" +
             " WHERE e.taxon_key = %(tk)s")
            % {"tk": taxonKey}
        )

        print("Taxon %s simplified. Please review resulting extent." % taxonKey)
示例#30
0
    def checkExtentDir(self):
        """Validate taxon extent subdirectory names against the database.

        Walks every subdirectory under the configured extent directory,
        pulls the taxon key out of each matching *.shp file name, looks up
        the taxon's scientific name in master.taxon, and prints a warning
        for every subdirectory whose name (underscores read as spaces)
        disagrees with the database. Does nothing when the configured path
        is not a directory.
        """
        taxonExtentDir = self.extentDir.get()
        if os.path.isdir(taxonExtentDir):
            dbConn = getDbConnection(optparse.Values(self.dbPane.getDbOptions()))

            try:
                for subdir in [x[0] for x in os.walk(taxonExtentDir)]:
                    # The root itself holds no taxon shapefiles
                    if subdir != taxonExtentDir:
                        for shpFile in [f for f in os.listdir(subdir) if f.endswith('.shp')]:
                            match = self.shapeFilePattern.match(shpFile)
                            if match:
                                taxonKey = int(match.groups()[0])
                                taxons = dbConn.execute("SELECT scientific_name AS taxon_name " +
                                                         "  FROM master.taxon WHERE taxon_key = %s"
                                                         % taxonKey)
                                for taxon in taxons:
                                    foundInDirTaxonName = path.basename(subdir)
                                    if taxon.taxon_name != foundInDirTaxonName.replace("_", " "):
                                        print("Taxon %s's name %s does not match with subdir %s"
                                              % (taxonKey, taxon.taxon_name, foundInDirTaxonName))
            finally:
                if dbConn:
                    dbConn.close()
示例#31
0
    def processExtentDir(self):
        """Load every taxon extent shapefile found under the extent directory.

        Walks the subdirectories of the configured extent directory, feeds
        each matching *.shp file to processExtentShapeFile (committing after
        each one), then vacuums distribution.taxon_extent. Does nothing when
        the configured path is not a directory.

        NOTE(review): cursor.close() only runs on the success path — the
        cursor leaks if an exception escapes the loop. Consider closing it
        in the finally block.
        """
        taxonExtentDir = self.extentDir.get()
        if os.path.isdir(taxonExtentDir):
            opts = self.dbPane.getDbOptions()
            # Raw DBAPI connection: processExtentShapeFile needs a cursor
            rawConn = getDbConnection(optparse.Values(opts)).getSession().connection().connection
            cursor = rawConn.cursor()

            try:
                for subdir in [x[0] for x in os.walk(taxonExtentDir)]:
                    if subdir != taxonExtentDir:
                        for shpFile in [f for f in os.listdir(subdir) if f.endswith('.shp')]:
                            match = self.shapeFilePattern.match(shpFile)
                            if match:
                                self.processExtentShapeFile(cursor, path.join(subdir, shpFile), match.groups())
                                # Commit per shapefile so a later failure
                                # does not roll back completed loads
                                rawConn.commit()


                print("Vacuuming distribution.taxon_extent afterward...")
                cursor.execute("vacuum analyze distribution.taxon_extent")
                rawConn.commit()
                cursor.close()
            finally:
                if rawConn:
                    rawConn.close()
示例#32
0
    def postAggregationOperations(self, summaryTable):
        """Finalize allocation data after a summarization run.

        Fills in missing unit prices (sql file + global-average fallback),
        then vacuum/analyzes allocation.allocation_data and the target
        summary table(s).

        Args:
            summaryTable: summary table name to vacuum/analyze, or None to
                vacuum/analyze every table listed in SUMMARY_TABLES.

        NOTE(review): dbConn.close() is not in a finally block, so the
        connection leaks if any statement above it raises.
        """
        opts = self.dbPane.getDbOptions()
        dbConn = getDbConnection(optparse.Values(opts))

        print("Updating allocation data unit price...")
        if 'threads' not in opts or opts['threads'] == 0:
            opts['threads'] = 8
        opts['sqlfile'] = "sql/update_allocation_data_unit_price.sql"
        sp.process(optparse.Values(opts))
        # Rows still without a price fall back to the global average
        dbConn.execute("UPDATE allocation.allocation_data SET unit_price = %s WHERE unit_price IS NULL" % SummarizeCommandPane.GLOBAL_AVERAGE_UNIT_PRICE)
        dbConn.execute("VACUUM ANALYZE allocation.allocation_data")

        print("Vacuum and analyze target summary table(s)...")
        if summaryTable:
            dbConn.execute("VACUUM ANALYZE allocation.%s" % summaryTable)
        else:
            # if input summaryTable = None, it's really the signal to vacuum analyze all summary tables
            for tab in SummarizeCommandPane.SUMMARY_TABLES:
                if tab:
                    dbConn.execute("VACUUM ANALYZE allocation.%s" % tab)

        print("Summarization process completed...")

        dbConn.close()
示例#33
0
    def setupCommandPane(self):
        """Rebuild the allocation_data partitions and (re)populate the command
        pane with one button per available year partition.

        Pops a warning and returns early if the main DB connection has not
        been tested successfully yet.
        """
        if not self.dbPane.isConnectionTestedSuccessfully():
            popup_message("Connection not yet tested",
                          "The Main DB Connection has not been tested successfully.\n" + \
                          "Once the Main DB Connection has been tested successfully, you can click that button again.")
            return

        # Clear out any buttons left over from a previous invocation
        for child in self.cmdFrame.winfo_children():
            child.destroy()

        row = 0
        column = 0
        dbSession = None  # keeps the finally clause safe if the connection fails

        try:
            opts = self.dbPane.getDbOptions()
            dbSession = getDbConnection(optparse.Values(opts)).getSession()

            # Rebuild the allocation_data partitions to make sure we are using the freshest allocation data
            partitions = dbSession.execute(
                "SELECT ('allocation_data_partition.' || table_name) AS partition_name"
                + "  FROM schema_v('allocation_data_partition') " +
                " WHERE table_name NOT LIKE 'TOTALS%'" +
                " ORDER BY 1").fetchall()

            for partition in partitions:
                dbSession.execute("DROP TABLE %s" % partition.partition_name)

            dbSession.execute(
                "SELECT allocation.maintain_allocation_data_partition()")

            opts['sqlfile'] = "sql/insert_allocation_data_eez_hs.sql"
            if 'threads' not in opts or opts['threads'] == 0:
                opts['threads'] = 8
            sp.process(optparse.Values(opts))

            # Add one button per year partition to the command pane
            self.yearList = dbSession.execute(
                "SELECT replace(table_name, 'allocation_data_', '')::INT AS year "
                + "  FROM schema_v('allocation_data_partition') " +
                " WHERE table_name NOT LIKE 'TOTALS%'" +
                " ORDER BY 1").fetchall()

            row = 0
            button_data = []

            for par in self.yearList:
                button_data.append([
                    par.year,
                    partial(self.processYearPartition, par.year), "blue"
                ])
                column += 1

                # Flush a completed row of buttons
                if column >= self.buttonsPerRow:
                    add_buttons(self.cmdFrame, button_data, row, 0,
                                "horizontal")
                    button_data = []
                    column = 0
                    row += 1

            # Flush any remaining partial row
            if button_data != []:
                add_buttons(self.cmdFrame, button_data, row, 0, "horizontal")
                row += 1

            # Calling maintain cell catch just in case the cell catch partitions are not present for any reason
            dbSession.execute(
                "SELECT web_partition.maintain_cell_catch_partition()")
        finally:
            if dbSession:
                dbSession.close()

        for child in self.cmdFrame.winfo_children():
            child.grid_configure(padx=5, pady=5)

        if self.aggregateAllBt is None:
            self.aggregateAllBt = tk.Button(
                self.parent,
                text="Aggregate All Year Partitions",
                fg="red",
                command=self.aggregateAllPartition,
                height=1)
            self.parent.add(self.aggregateAllBt)

            # Adding a filler pane for look only
            self.fillerPane = ttk.Panedwindow(self.parent, orient=VERTICAL)
            self.parent.add(self.fillerPane)
示例#34
0
    def rollupExtent(self):
        """Roll up taxon extents for the taxon level selected in the UI.

        For each taxon at the requested level, builds its extent geometry
        from its children's extents, records the rollup bookkeeping, and
        refreshes the supporting materialized views after each taxon so a
        failure never leaves the views stale.
        """
        # Evaluate the tkinter variable once. The previous guard compared the
        # variable *object* to 0, which can never be true; the value must be
        # read via .get(). The error message documents the valid range 1-5.
        taxonLevel = int(self.taxonLevelToRollupFor.get()) if self.taxonLevelToRollupFor else 0
        if taxonLevel < 1 or taxonLevel > 5:
            print("Taxon level should be between 1 and 5. Please try again.")
            return

        print("Processing input taxon level: %s" % taxonLevel)

        dbConn = getDbConnection(optparse.Values(self.dbPane.getDbOptions()))

        try:
            # First off refresh the materialized views that we rely on to indicate which taxon has/has not an extent already
            self.refreshMaterializedViews(dbConn)

            # Get the list of target taxon keys to rollup for the input taxon level
            rollups = dbConn.execute("SELECT * FROM distribution.get_rollup_taxon_list(%s::int) ORDER BY children_distributions_found" \
                                     % taxonLevel)

            curTaxonKey = None
            for rollup in rollups:
                curTaxonKey = rollup.taxon_key
                childrenTaxa = rollup.children_taxon_keys
                print("Rollup for %s using lower-level taxons %s [%s]" %
                      (curTaxonKey, childrenTaxa,
                       datetime.now().strftime('%Y/%m/%d %H:%M:%S')))

                try:
                    if len(childrenTaxa) == 1:
                        # Single child: the parent's extent is a straight copy
                        dbConn.execute((
                            "INSERT INTO distribution.taxon_extent(taxon_key, is_extended, is_rolled_up, geom) "
                            +
                            "SELECT %s, is_extended, TRUE, geom FROM distribution.taxon_extent WHERE taxon_key = %s"
                        ) % (curTaxonKey, childrenTaxa[0]))
                    else:
                        # Dump out the children's polygons, then iteratively
                        # purge polygons contained in others until none remain
                        last_seq = dbConn.execute(
                            "SELECT f.seq FROM distribution.extent_rollup_dumpout_polygons(%s, ARRAY%s::int[]) AS f(seq)"
                            % (curTaxonKey, childrenTaxa)).fetchone()[0]
                        while last_seq and last_seq > 1:
                            last_seq = dbConn.execute(
                                "SELECT f.seq FROM distribution.extent_rollup_purge_contained_polygons(%s, %s) AS f(seq)"
                                % (curTaxonKey, last_seq)).fetchone()[0]

                        # Union the surviving polygons into one simplified multipolygon
                        dbConn.execute((
                            "INSERT INTO distribution.taxon_extent(taxon_key, is_rolled_up, geom) "
                            +
                            "SELECT taxon_key, TRUE, st_multi(st_collectionextract(st_union(st_buffer(st_simplifypreservetopology(geom, 0.01), 0.25)), 3)) "
                            +
                            "  FROM distribution.taxon_extent_rollup_polygon WHERE taxon_key = %s "
                            + " GROUP BY taxon_key") % curTaxonKey)

                        dbConn.execute(
                            "DELETE FROM distribution.taxon_extent_rollup_polygon WHERE taxon_key = %s"
                            % curTaxonKey)

                    # Record which children contributed to this rollup
                    dbConn.execute((
                        "INSERT INTO distribution.taxon_extent_rollup(taxon_key, children_distributions_found, children_taxon_keys) "
                        + "VALUES (%s, %s, ARRAY%s::int[])") %
                                   (curTaxonKey,
                                    rollup.children_distributions_found,
                                    childrenTaxa))
                except Exception:
                    print(
                        "Exception encountered during the processing of taxon: %s"
                        % curTaxonKey)
                    print(sys.exc_info())
                    # Undo any partial extent insert so a retry starts clean
                    if curTaxonKey:
                        dbConn.execute(
                            "DELETE FROM distribution.taxon_extent WHERE taxon_key = %s"
                            % curTaxonKey)
                finally:
                    self.refreshMaterializedViews(dbConn)
        finally:
            dbConn.close()

        print(
            "All taxon extent rollup operations for input taxon level completed."
        )
示例#35
0
def process(opts):
    """Fan SQL commands out to a pool of worker processes.

    *opts* must carry ``dbname`` and ``username`` (required), ``threads``,
    and either ``sqlfile`` (a file of semicolon-terminated SQL statements,
    each of which SELECTs the commands to enqueue) or ``sqlcmd`` (a single
    such statement). Exits the interpreter with status 1 on missing
    required parameters.
    """
    if not opts.dbname:
        print("dbname is a required input parameter")
        parser.print_help()
        exit(1)

    if not opts.username:
        print("username is a required input parameter")
        parser.print_help()
        exit(1)

    # Start timing (time.clock was removed in Python 3.8; perf_counter is
    # the documented replacement for wall-clock interval measurement)
    start = time.perf_counter()

    # Establish communication queues
    global cmd_queue
    cmd_queue = multiprocessing.Queue()

    # Start SQL processors
    print('Creating %d SQL processors' % opts.threads)
    processors = [Processor(opts, cmd_queue) for i in range(opts.threads)]

    for p in processors:
        p.start()

    # Create a db connection and fetch SQL commands from db server
    db_connection = getDbConnection(opts)

    try:
        # Enqueue SQL commands, one buffered statement at a time
        if 'sqlfile' in vars(opts) and opts.sqlfile:
            with open(opts.sqlfile) as fileObj:
                sql_cmd_buffer = ''
                for line in fileObj:
                    for ch in line:
                        if ch == ';':
                            # Statement complete: run it and queue the commands it returns
                            sql_cmds = db_connection.execute(sql_cmd_buffer + ch)
                            wait_till_cmd_queue_empty()
                            for cmd in sql_cmds:
                                cmd_queue.put(cmd[0])
                            sql_cmd_buffer = ''
                        else:
                            sql_cmd_buffer += ch

        wait_till_cmd_queue_empty()

        if 'sqlcmd' in vars(opts) and opts.sqlcmd:
            sql_cmds = db_connection.execute(opts.sqlcmd)
            for cmd in sql_cmds:
                cmd_queue.put(cmd[0])
    finally:
        # Close exactly once, even if an execute/open above raised
        db_connection.close()

    # Add a poison pill for each consumer
    for i in range(opts.threads):
        cmd_queue.put(None)

    for p in processors:
        p.join()

    # Stop timing and report duration
    duration = time.perf_counter() - start
    hours, remainder = divmod(duration, 3600)
    minutes, seconds = divmod(remainder, 60)
    print('Completed in %d:%d:%f' % (hours, minutes, seconds))
示例#36
0
def restore_foreign_key(dbPane):
    """Re-create every foreign key recorded in admin.database_foreign_key.

    The original left the connection dangling; close it even if the
    execute raises.
    """
    dbConn = getDbConnection(optparse.Values(dbPane.getDbOptions()))
    try:
        dbConn.execute("SELECT exec(add_fk_cmd) FROM admin.database_foreign_key")
    finally:
        dbConn.close()

    print("Foreign keys successfully added.")
示例#37
0
def process(opts):
    """Fan SQL commands out to a pool of worker processes.

    *opts* must carry ``dbname`` and ``username`` (required), ``threads``,
    and either ``sqlfile`` (a file of semicolon-terminated SQL statements,
    each of which SELECTs the commands to enqueue) or ``sqlcmd`` (a single
    such statement). Exits the interpreter with status 1 on missing
    required parameters.
    """
    if not opts.dbname:
        print("dbname is a required input parameter")
        parser.print_help()
        exit(1)

    if not opts.username:
        print("username is a required input parameter")
        parser.print_help()
        exit(1)

    # Start timing (time.clock was removed in Python 3.8; perf_counter is
    # the documented replacement for wall-clock interval measurement)
    start = time.perf_counter()

    # Establish communication queues
    global cmd_queue
    cmd_queue = multiprocessing.Queue()

    # Start SQL processors
    print('Creating %d SQL processors' % opts.threads)
    processors = [Processor(opts, cmd_queue)
                  for i in range(opts.threads)]

    for p in processors:
        p.start()

    # Create a db connection and fetch SQL commands from db server
    db_connection = getDbConnection(opts)

    try:
        # Enqueue SQL commands, one buffered statement at a time
        if 'sqlfile' in vars(opts) and opts.sqlfile:
            with open(opts.sqlfile) as fileObj:
                sql_cmd_buffer = ''
                for line in fileObj:
                    for ch in line:
                        if ch == ';':
                            # Statement complete: run it and queue the commands it returns
                            sql_cmds = db_connection.execute(sql_cmd_buffer + ch)
                            wait_till_cmd_queue_empty()
                            for cmd in sql_cmds:
                                cmd_queue.put(cmd[0])
                            sql_cmd_buffer = ''
                        else:
                            sql_cmd_buffer += ch

        wait_till_cmd_queue_empty()

        if 'sqlcmd' in vars(opts) and opts.sqlcmd:
            sql_cmds = db_connection.execute(opts.sqlcmd)
            for cmd in sql_cmds:
                cmd_queue.put(cmd[0])
    finally:
        # Close exactly once, even if an execute/open above raised
        db_connection.close()

    # Add a poison pill for each consumer
    for i in range(opts.threads):
        cmd_queue.put(None)

    for p in processors:
        p.join()

    # Stop timing and report duration
    duration = time.perf_counter() - start
    hours, remainder = divmod(duration, 3600)
    minutes, seconds = divmod(remainder, 60)
    print('Completed in %d:%d:%f' % (hours, minutes, seconds))
示例#38
0
def restore_foreign_key(dbPane):
    """Re-create every foreign key recorded in admin.database_foreign_key.

    The original left the connection dangling; close it even if the
    execute raises.
    """
    dbConn = getDbConnection(optparse.Values(dbPane.getDbOptions()))
    try:
        dbConn.execute(
            "SELECT exec(add_fk_cmd) FROM admin.database_foreign_key")
    finally:
        dbConn.close()

    print("Foreign keys successfully added.")
示例#39
0
    def setupCommandPane(self):
        """Rebuild the allocation_data partitions and (re)populate the command
        pane with one button per available year partition.

        Pops a warning and returns early if the main DB connection has not
        been tested successfully yet.
        """
        if not self.dbPane.isConnectionTestedSuccessfully():
            popup_message("Connection not yet tested",
                          "The Main DB Connection has not been tested successfully.\n" + \
                          "Once the Main DB Connection has been tested successfully, you can click that button again.")
            return

        # Clear out any buttons left over from a previous invocation
        for child in self.cmdFrame.winfo_children():
            child.destroy()

        row = 0
        column = 0
        dbSession = None  # keeps the finally clause safe if the connection fails

        try:
            opts = self.dbPane.getDbOptions()
            dbSession = getDbConnection(optparse.Values(opts)).getSession()

            # Rebuild the allocation_data partitions to make sure we are using the freshest allocation data
            partitions = dbSession.execute(
                "SELECT ('allocation_data_partition.' || table_name) AS partition_name" +
                "  FROM schema_v('allocation_data_partition') " +
                " WHERE table_name NOT LIKE 'TOTALS%'" +
                " ORDER BY 1").fetchall()

            for partition in partitions:
                dbSession.execute("DROP TABLE %s" % partition.partition_name)

            dbSession.execute("SELECT allocation.maintain_allocation_data_partition()")

            opts['sqlfile'] = "sql/insert_allocation_data_eez_hs.sql"
            if 'threads' not in opts or opts['threads'] == 0:
                opts['threads'] = 8
            sp.process(optparse.Values(opts))

            # Add one button per year partition to the command pane
            self.yearList = dbSession.execute(
                "SELECT replace(table_name, 'allocation_data_', '')::INT AS year " +
                "  FROM schema_v('allocation_data_partition') " +
                " WHERE table_name NOT LIKE 'TOTALS%'" +
                " ORDER BY 1").fetchall()

            row = 0
            button_data = []

            for par in self.yearList:
                button_data.append([par.year, partial(self.processYearPartition, par.year), "blue"])
                column += 1

                # Flush a completed row of buttons
                if column >= self.buttonsPerRow:
                    add_buttons(self.cmdFrame, button_data, row, 0, "horizontal")
                    button_data = []
                    column = 0
                    row += 1

            # Flush any remaining partial row
            if button_data != []:
                add_buttons(self.cmdFrame, button_data, row, 0, "horizontal")
                row += 1

            # Calling maintain cell catch just in case the cell catch partitions are not present for any reason
            dbSession.execute("SELECT web_partition.maintain_cell_catch_partition()")
        finally:
            if dbSession:
                dbSession.close()

        for child in self.cmdFrame.winfo_children():
            child.grid_configure(padx=5, pady=5)

        if self.aggregateAllBt is None:
            self.aggregateAllBt = tk.Button(self.parent, text="Aggregate All Year Partitions", fg="red", command=self.aggregateAllPartition, height=1)
            self.parent.add(self.aggregateAllBt)

            # Adding a filler pane for look only
            self.fillerPane = ttk.Panedwindow(self.parent, orient=VERTICAL)
            self.parent.add(self.fillerPane)
示例#40
0
    def rollupExtent(self):
        """Roll up taxon extents for the taxon level selected in the UI.

        For each taxon at the requested level, builds its extent geometry
        from its children's extents, records the rollup bookkeeping, and
        refreshes the supporting materialized views after each taxon so a
        failure never leaves the views stale.
        """
        # Evaluate the tkinter variable once. The previous guard compared the
        # variable *object* to 0, which can never be true; the value must be
        # read via .get(). The error message documents the valid range 1-5.
        taxonLevel = int(self.taxonLevelToRollupFor.get()) if self.taxonLevelToRollupFor else 0
        if taxonLevel < 1 or taxonLevel > 5:
            print("Taxon level should be between 1 and 5. Please try again.")
            return

        print("Processing input taxon level: %s" % taxonLevel)

        dbConn = getDbConnection(optparse.Values(self.dbPane.getDbOptions()))

        try:
            # First off refresh the materialized views that we rely on to indicate which taxon has/has not an extent already
            self.refreshMaterializedViews(dbConn)

            # Get the list of target taxon keys to rollup for the input taxon level
            rollups = dbConn.execute("SELECT * FROM distribution.get_rollup_taxon_list(%s::int) ORDER BY children_distributions_found" \
                                     % taxonLevel)

            curTaxonKey = None
            for rollup in rollups:
                curTaxonKey = rollup.taxon_key
                childrenTaxa = rollup.children_taxon_keys
                print("Rollup for %s using lower-level taxons %s [%s]" % (curTaxonKey, childrenTaxa, datetime.now().strftime('%Y/%m/%d %H:%M:%S')))

                try:
                    if len(childrenTaxa) == 1:
                        # Single child: the parent's extent is a straight copy
                        dbConn.execute(
                            ("INSERT INTO distribution.taxon_extent(taxon_key, is_extended, is_rolled_up, geom) " +
                            "SELECT %s, is_extended, TRUE, geom FROM distribution.taxon_extent WHERE taxon_key = %s")
                            % (curTaxonKey, childrenTaxa[0]))
                    else:
                        # Dump out the children's polygons, then iteratively
                        # purge polygons contained in others until none remain
                        last_seq = dbConn.execute("SELECT f.seq FROM distribution.extent_rollup_dumpout_polygons(%s, ARRAY%s::int[]) AS f(seq)"
                                                  % (curTaxonKey, childrenTaxa)).fetchone()[0]
                        while last_seq and last_seq > 1:
                            last_seq = dbConn.execute(
                                "SELECT f.seq FROM distribution.extent_rollup_purge_contained_polygons(%s, %s) AS f(seq)"
                                % (curTaxonKey, last_seq)).fetchone()[0]

                        # Union the surviving polygons into one simplified multipolygon
                        dbConn.execute(
                            ("INSERT INTO distribution.taxon_extent(taxon_key, is_rolled_up, geom) " +
                            "SELECT taxon_key, TRUE, st_multi(st_collectionextract(st_union(st_buffer(st_simplifypreservetopology(geom, 0.01), 0.25)), 3)) " +
                            "  FROM distribution.taxon_extent_rollup_polygon WHERE taxon_key = %s " +
                            " GROUP BY taxon_key")
                            % curTaxonKey)

                        dbConn.execute("DELETE FROM distribution.taxon_extent_rollup_polygon WHERE taxon_key = %s" % curTaxonKey)

                    # Record which children contributed to this rollup
                    dbConn.execute(
                        ("INSERT INTO distribution.taxon_extent_rollup(taxon_key, children_distributions_found, children_taxon_keys) " +
                        "VALUES (%s, %s, ARRAY%s::int[])")
                        % (curTaxonKey, rollup.children_distributions_found, childrenTaxa))
                except Exception:
                    print("Exception encountered during the processing of taxon: %s" % curTaxonKey)
                    print(sys.exc_info())
                    # Undo any partial extent insert so a retry starts clean
                    if curTaxonKey:
                        dbConn.execute("DELETE FROM distribution.taxon_extent WHERE taxon_key = %s" % curTaxonKey)
                finally:
                    self.refreshMaterializedViews(dbConn)
        finally:
            dbConn.close()

        print("All taxon extent rollup operations for input taxon level completed.")