Example #1
def move_expired_messages_to_archive_by_recipient(
        recipient: Recipient,
        message_retention_days: int,
        realm: Realm,
        chunk_size: int = MESSAGE_BATCH_SIZE) -> int:
    # This function will archive appropriate messages and their related objects.
    query = SQL("""
    INSERT INTO zerver_archivedmessage ({dst_fields}, archive_transaction_id)
        SELECT {src_fields}, {archive_transaction_id}
        FROM zerver_message
        WHERE zerver_message.recipient_id = {recipient_id}
            AND zerver_message.date_sent < {check_date}
        LIMIT {chunk_size}
    ON CONFLICT (id) DO UPDATE SET archive_transaction_id = {archive_transaction_id}
    RETURNING id
    """)
    check_date = timezone_now() - timedelta(days=message_retention_days)

    return run_archiving_in_chunks(
        query,
        type=ArchiveTransaction.RETENTION_POLICY_BASED,
        realm=realm,
        recipient_id=Literal(recipient.id),
        check_date=Literal(check_date.isoformat()),
        chunk_size=chunk_size,
    )
Example #2
def exportFilterIdsFile(config, conn, pg_path, site_id, out_file):
    lpis_table = "decl_{}_{}".format(config.site_short_name, config.year)

    srid = get_srid(conn, lpis_table)

    with conn.cursor() as cursor:
        query = SQL("""
            select
                lpis."NewID" as "SEQ_ID"
            from l4c_practices ap
            inner join {} lpis on lpis.ori_id = ap.orig_id
            where ap.site_id = {} and ap.year = {}
            and not lpis.is_deleted order by lpis."NewID" """)
        query = query.format(Identifier(lpis_table), Literal(site_id),
                             Literal(config.year))
        query_str = query.as_string(conn)
        print(query_str)

        name = os.path.basename(out_file)
        table_name = os.path.splitext(name)[0].lower()
        command = get_export_table_command(config.ogr2ogr_path, out_file,
                                           pg_path, "-nln", table_name, "-sql",
                                           query_str, "-a_srs",
                                           "EPSG:" + str(srid), "-gt", 100000)
        run_command(command)
Example #3
def getSiteConfigKey(conn, key, site_id):
    value = ''
    with conn.cursor() as cursor:
        query = SQL("""
            select value from config
            where key = {} and site_id = {}
            """)
        query = query.format(Literal(key), Literal(site_id))
        print(query.as_string(conn))

        cursor.execute(query)
        for row in cursor:
            value = row[0]
        conn.commit()

        # get the default value if not found
        if value == '':
            query = SQL(
                """ select value from config where key = {} and site_id is null """
            )
            query = query.format(Literal(key))
            print(query.as_string(conn))

            cursor.execute(query)
            for row in cursor:
                value = row[0]
            conn.commit()

    return value
Example #4
    def _business_logic(request_data: dict) -> tuple:
        # By this point, our award_id has been validated and cleaned up by
        # TinyShield.  We will either have an internal award id that is an
        # integer or a generated award id that is a string.
        award_id = request_data['award_id']
        hide_edge_cases = request_data.get('hide_edge_cases')
        hide_edges_awarded_amount = ''
        hide_edges_end_date = ''
        award_id_column = ('award_id' if isinstance(award_id, int)
                           else 'generated_unique_award_id')
        if hide_edge_cases:
            hide_edges_awarded_amount = "and ca.base_and_all_options_value > 0 and ca.total_obligation > 0"
            hide_edges_end_date = "where tf.period_of_perf_potential_e is not null"
            sql = COUNT_ACTIVITY_HIDDEN_SQL.format(
                award_id_column=Identifier(award_id_column),
                award_id=Literal(award_id),
                hide_edges_awarded_amount=SQL(hide_edges_awarded_amount),
                hide_edges_end_date=SQL(hide_edges_end_date))
        else:
            sql = COUNT_ACTIVITY_SQL.format(
                award_id_column=Identifier(award_id_column),
                award_id=Literal(award_id))
        overall_count_results = execute_sql_to_ordered_dictionary(sql)
        overall_count = (overall_count_results[0]['rollup_contract_count']
                         if overall_count_results else 0)
        sql = ACTIVITY_SQL.format(
            award_id_column=Identifier(award_id_column),
            award_id=Literal(award_id),
            limit=Literal(request_data['limit'] + 1),
            offset=Literal((request_data['page'] - 1) * request_data['limit']),
            hide_edges_awarded_amount=SQL(hide_edges_awarded_amount),
            hide_edges_end_date=SQL(hide_edges_end_date))

        return execute_sql_to_ordered_dictionary(sql), overall_count
Example #5
def countRaceSetting(logger):
    '''Queries the database and returns the counts of each main race
    (AA, NHPI, MR), sorted by treatment setting.

    Parameters
    ----------
    logger : logging.Logger
        The logger used for logging error information
    '''

    try:
        rd = {"AA": [], "NHPI": [], "MR": []}
        for race in table1_config["inputs"]["races"]:
            counts = [0] * len(table1_config["params"]["settings"]["all"])
            count = 0
            for setting in table1_config["params"]["settings"]["all"]:
                query = SQL('''
                SELECT count(*)
                FROM tejas.race_age_t1new t1
                INNER JOIN tejas.restofusers t2
                ON t1.siteid = t2.siteid
                AND t1.backgroundid = t2.backgroundid
                WHERE t1.visit_type = {} AND t1.race = {}
                ''').format(Literal(setting), Literal(race))
                data = [d[0] for d in pgIO.getAllData(query)]
                counts[count] += data[0]
                count += 1
            rd[race] = counts
    except Exception as e:
        logger.error('countRaceSetting failed because of {}'.format(e))
    return rd
Example #6
    def __get_where_clauses(self, properties=[], bbox=[]):
        """
        Generates WHERE conditions to be used in the query.
        Private method, mainly associated with the query method.
        :param properties: list of tuples (name, value)
        :param bbox: bounding box [minx,miny,maxx,maxy]

        :returns: psycopg2.sql.Composed or psycopg2.sql.SQL
        """

        where_conditions = []
        if properties:
            property_clauses = [
                SQL('{} = {}').format(Identifier(k), Literal(v))
                for k, v in properties
            ]
            where_conditions += property_clauses
        if bbox:
            bbox_clause = SQL('{} && ST_MakeEnvelope({})').format(
                Identifier(self.geom),
                SQL(', ').join([Literal(bbox_coord) for bbox_coord in bbox]))
            where_conditions.append(bbox_clause)

        if where_conditions:
            where_clause = SQL(' WHERE {}').format(
                SQL(' AND ').join(where_conditions))
        else:
            where_clause = SQL('')

        return where_clause
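As a standalone illustration of the composition pattern used above, the following minimal sketch renders the kind of WHERE clause this method produces; the DSN and the property values are placeholders, not part of the original module.

# Minimal sketch of the WHERE-clause composition above; the DSN and the
# property values are placeholders.
import psycopg2
from psycopg2.sql import SQL, Identifier, Literal

conn = psycopg2.connect("dbname=demo")  # placeholder DSN

properties = [("waterway", "river"), ("name", "Thames")]
clauses = [SQL("{} = {}").format(Identifier(k), Literal(v))
           for k, v in properties]
where = SQL(" WHERE {}").format(SQL(" AND ").join(clauses))
print(where.as_string(conn))
# -> WHERE "waterway" = 'river' AND "name" = 'Thames'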
Example #7
    def check_string_concatenation(self,
                                   label_col,
                                   other_columns,
                                   constraint={},
                                   sep='.',
                                   convert_to_base26={}):
        """
        Check that ``label_col`` is the concatenation of the other columns, joined with the given separator

        Input:

        - ``label_col`` -- the label_column
        - ``other_columns`` --  the other columns from which we can deduce the label
        - ``constraint`` -- a dictionary, as passed to the search method
        - ``sep`` -- the separator for the join
        - ``convert_to_base26`` -- a dictionary whose keys are columns that we need to convert to base26, and whose values are the shifts that we need to apply
        """
        oc_converted = [
            SQL('to_base26({0} + {1})').format(
                Identifier(col), Literal(int(convert_to_base26[col])))
            if col in convert_to_base26 else Identifier(col)
            for col in other_columns
        ]
        # intertwine the separator
        oc = [
            oc_converted[i // 2] if i % 2 == 0 else Literal(sep)
            for i in range(2 * len(oc_converted) - 1)
        ]

        return self._run_query(
            SQL(" != ").join([SQL(" || ").join(oc),
                              Identifier(label_col)]), constraint)
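To make the interleaving above concrete, here is a small sketch (again with a placeholder connection and made-up column names) that builds the same || concatenation with a literal separator:

# Sketch of the separator interleaving used to build oc above; the DSN
# and column names are placeholders.
import psycopg2
from psycopg2.sql import SQL, Identifier, Literal

conn = psycopg2.connect("dbname=demo")  # placeholder DSN

cols = [Identifier("genus"), Identifier("species")]
sep = Literal(".")
oc = [cols[i // 2] if i % 2 == 0 else sep for i in range(2 * len(cols) - 1)]
check = SQL(" != ").join([SQL(" || ").join(oc), Identifier("label")])
print(check.as_string(conn))
# -> "genus" || '.' || "species" != "label"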
Example #8
def _execute_indexing_task(
    model_name, table_name, target_index, start_id, end_id, notify_url
):
    elasticsearch = elasticsearch_connect()

    deleted, mature = get_existence_queries(model_name, table_name)
    query = SQL(
        "SELECT *, {deleted}, {mature} "
        "FROM {table_name} "
        "WHERE id BETWEEN {start_id} AND {end_id};"
    ).format(
        deleted=deleted,
        mature=mature,
        table_name=Identifier(table_name),
        start_id=Literal(start_id),
        end_id=Literal(end_id),
    )
    log.info(f"Querying {query}")
    indexer = TableIndexer(elasticsearch)
    p = Process(
        target=_launch_reindex,
        args=(model_name, table_name, target_index, query, indexer, notify_url),
    )
    p.start()
    log.info("Started indexing task")
Example #9
    def _build_where(**where_columns):
        """
        Accepts keyword arguments that are valid subaward column names and
        the values upon which they will be filtered.  Returns a SQL where
        clause along the lines of:

            where column1 > value1 or column2 > value2

        Uses psycopg Identifier, Literal, and SQL to ensure everything is
        properly formatted.
        """
        wheres = []
        for column, value in where_columns.items():
            if value is not None:
                wheres.append(
                    SQL("{} > {}").format(Identifier(column), Literal(value)))

        if wheres:
            # Because our where clause is embedded in a dblink, we need to
            # wrap it in a literal again to get everything escaped properly.
            where = SQL("where {}").format(SQL(" or ").join(wheres))
            where = Literal(convert_composable_query_to_string(where))
            where = convert_composable_query_to_string(where)
            return where[1:-1]  # Remove outer quotes

        return ""
Example #10
 def datetime_span2sql(cls, fieldname, datetime_span):
     dt_start, dt_end = datetime_span
     sql_list = [
         SQL("{} >= {}").format(Identifier(fieldname), Literal(dt_start)),
         SQL("{} <= {}").format(Identifier(fieldname), Literal(dt_end)),
     ]
     sql = PostgresTool.join(SQL(" AND "), sql_list)
     return sql
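A short usage sketch of the method above; the connection and field name are placeholders, and SQL(" AND ").join stands in for the project-specific PostgresTool.join:

# Render a datetime-span condition like datetime_span2sql above; the DSN
# and field name are placeholders, and SQL(" AND ").join stands in for
# PostgresTool.join.
import datetime
import psycopg2
from psycopg2.sql import SQL, Identifier, Literal

conn = psycopg2.connect("dbname=demo")  # placeholder DSN

start = datetime.datetime(2021, 1, 1)
end = datetime.datetime(2021, 12, 31)
parts = [
    SQL("{} >= {}").format(Identifier("created_at"), Literal(start)),
    SQL("{} <= {}").format(Identifier("created_at"), Literal(end)),
]
print(SQL(" AND ").join(parts).as_string(conn))
# -> "created_at" >= '2021-01-01T00:00:00'::timestamp AND "created_at" <= ...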
Example #11
 def params(self):
     return dict(
         table=Identifier(*self.table),
         xmin_start=Literal(self.xmin_start),
         xmin_end=Literal(self.xmin_end),
         limit=Literal(self.limit),
         pk=Identifier(self.pk),
     )
Example #12
def get_l2a_products_from_db(config, conn):
    with conn.cursor() as cursor:
        query = SQL("""
            with products as (
                select product.site_id,
                    product.name,
                    product.created_timestamp as date,
                    product.processor_id,
                    product.product_type_id,
                    product.full_path,
                    product.tiles
                from product
                where product.site_id = {}
                    and product.product_type_id = 1
            )
            select products.date,
                    products.tiles,
                    products.name,
                    products.full_path
            from products
            where date between {} and {}
            order by date;
            """)

        site_id_filter = Literal(config.site_id)
        start_date_filter = Literal(config.start_date)
        end_date_filter = Literal(config.end_date)
        query = query.format(site_id_filter, start_date_filter,
                             end_date_filter)
        # print(query.as_string(conn))
        cursor.execute(query)

        results = cursor.fetchall()
        conn.commit()

        products = []
        log_time = datetime.datetime.now().strftime("%Y-%m-%d %H-%M-%S")
        for (dt, tiles, prdName, full_path) in results:
            #print("Full path: {}".format(full_path))
            try:
                files = os.listdir(full_path)
            except OSError:
                print(
                    "Product {} found in DB but not on disk".format(full_path))
                continue
            l2aTilePaths = fnmatch.filter(files, "S2*_OPER_SSC_L2VALD_*.HDR")
            if len(l2aTilePaths) == 0:
                l2aTilePaths = fnmatch.filter(files,
                                              "L8_TEST_L8C_L2VALD_*.HDR")

            for file in l2aTilePaths:
                relHdrFilePath = os.path.join(prdName, file)
                fullHdrFilePath = os.path.join(full_path, file)
                #print("Full HDR file path: {}".format(fullHdrFilePath))
                #config.logging.info('[%s] L2A HDR PATH from PRODUCT:[ %s ]',log_time,fullHdrFilePath)
                products.append(L2AProduct(relHdrFilePath, fullHdrFilePath))

        return products
Example #13
 def query(self):
     return SQL("select *, xmin, now() as etl_ts "
                "from {table} where {column} > {state} "
                "order by {column} limit {limit};").format(
                    table=Identifier(*self.table),
                    column=Identifier(self.column),
                    state=Literal(self.state),
                    limit=Literal(self.limit),
                )
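Examples #11 and #13 together support keyset pagination: the query fetches rows past the last-seen key, and the caller advances the state. A hedged sketch of such a loop; the DSN, table, and column names are hypothetical:

# Illustrative keyset-pagination loop in the spirit of Examples #11/#13;
# the DSN, table, and column names are hypothetical.
import psycopg2
from psycopg2.sql import SQL, Identifier, Literal

conn = psycopg2.connect("dbname=demo")  # placeholder DSN

state, limit = 0, 1000
while True:
    query = SQL("select * from {table} where {column} > {state} "
                "order by {column} limit {limit};").format(
                    table=Identifier("public", "events"),
                    column=Identifier("id"),
                    state=Literal(state),
                    limit=Literal(limit))
    with conn.cursor() as cursor:
        cursor.execute(query)
        rows = cursor.fetchall()
    if not rows:
        break
    state = rows[-1][0]  # advance the cursor past the last row's id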
Example #14
def get_radar_products(config, conn, site_id):
    with conn.cursor() as cursor:
        query = SQL("""
            select *
            from (
                select
                    greatest(substr(split_part(product.name, '_', 4), 2), split_part(product.name, '_', 5)) :: date as date,
                    site_tiles.tile_id,
                    product.orbit_type_id,
                    split_part(product.name, '_', 6) as polarization,
                    product.product_type_id,
                    product.name,
                    product.full_path
                from sp_get_site_tiles({} :: smallint, 1 :: smallint) as site_tiles
                inner join shape_tiles_s2 on shape_tiles_s2.tile_id = site_tiles.tile_id
                inner join product on ST_Intersects(product.geog, shape_tiles_s2.geog)
                where product.satellite_id = 3
                    and product.site_id = {}
            ) products
            where date between {} and {}
            order by date;
            """)

        site_id_filter = Literal(site_id)
        start_date_filter = Literal(config.season_start)
        end_date_filter = Literal(config.season_end)
        query = query.format(site_id_filter, site_id_filter, start_date_filter,
                             end_date_filter)
        print(query.as_string(conn))
        cursor.execute(query)

        results = cursor.fetchall()
        conn.commit()

        products = []
        for (
                dt,
                tile_id,
                orbit_type_id,
                polarization,
                radar_product_type,
                name,
                full_path,
        ) in results:
            if config.products is None or name in config.products:
                products.append(
                    RadarProduct(
                        dt,
                        tile_id,
                        orbit_type_id,
                        polarization,
                        radar_product_type,
                        full_path,
                    ))

        return products
Example #15
def run_archiving_in_chunks(
    query: SQL,
    type: int,
    realm: Optional[Realm] = None,
    chunk_size: int = MESSAGE_BATCH_SIZE,
    **kwargs: Composable,
) -> int:
    # This function is carefully designed to achieve our
    # transactionality goals: a batch of messages is either fully
    # archived and deleted, or not at all.
    #
    # We implement this design by executing the queries that archive messages
    # and their related objects (such as UserMessage, Reaction, and Attachment)
    # inside the same transaction.atomic() block.
    assert type in (ArchiveTransaction.MANUAL,
                    ArchiveTransaction.RETENTION_POLICY_BASED)

    message_count = 0
    while True:
        start_time = time.time()
        with transaction.atomic():
            archive_transaction = ArchiveTransaction.objects.create(
                type=type, realm=realm)
            new_chunk = move_rows(
                Message,
                query,
                src_db_table=None,
                chunk_size=Literal(chunk_size),
                returning_id=True,
                archive_transaction_id=Literal(archive_transaction.id),
                **kwargs,
            )
            if new_chunk:
                move_related_objects_to_archive(new_chunk)
                delete_messages(new_chunk)
                message_count += len(new_chunk)
            else:
                archive_transaction.delete()  # Nothing was archived
        # This line needs to be outside of the atomic block, to capture the
        # actual moment archiving of the chunk is finished (since Django does
        # some significant additional work when leaving the block).
        total_time = time.time() - start_time

        if len(new_chunk) > 0:
            logger.info(
                "Archived %s messages in %.2fs in transaction %s.",
                len(new_chunk),
                total_time,
                archive_transaction.id,
            )

        # We run the loop until the query returns fewer results than
        # chunk_size, which means we are done:
        if len(new_chunk) < chunk_size:
            break

    return message_count
Example #16
def get_radar_products(config, conn, site_id):
    with conn.cursor() as cursor:
        query = SQL("""
            with products as (
                select product.site_id,
                    site_tiles.tile_id,
                    product.name,
                    case
                        when substr(product.name, 17, 8) > substr(product.name, 33, 8) then substr(product.name, 17, 8)
                        else substr(product.name, 33, 8)
                    end :: date as date,
                    coalesce(product.orbit_type_id, 1) as orbit_type_id,
                    substr(product.name, 49, 2) as polarization,
                    product.processor_id,
                    product.product_type_id,
                    substr(product.name, length(product.name) - strpos(reverse(product.name), '_') + 2) as radar_product_type,
                    product.orbit_id,
                    product.full_path
                from sp_get_site_tiles({} :: smallint, 1 :: smallint) as site_tiles
                inner join shape_tiles_s2 on shape_tiles_s2.tile_id = site_tiles.tile_id
                inner join product on ST_Intersects(product.geog, shape_tiles_s2.geog)
                where product.satellite_id = 3
                    and product.site_id = {}
            )
            select products.date,
                    products.tile_id,
                    products.orbit_type_id,
                    products.polarization,
                    products.radar_product_type,
                    products.full_path
            from products
            where date between {} and {}
            order by date;
            """)

        site_id_filter = Literal(site_id)
        start_date_filter = Literal(config.season_start)
        end_date_filter = Literal(config.season_end)
        query = query.format(site_id_filter, site_id_filter, start_date_filter,
                             end_date_filter)
        # print(query.as_string(conn))
        cursor.execute(query)

        results = cursor.fetchall()
        conn.commit()

        products = []
        for (dt, tile_id, orbit_type_id, polarization, radar_product_type,
             full_path) in results:
            if config.file_type == radar_product_type:
                if config.polarisation == "" or config.polarisation == polarization:
                    products.append(
                        RadarProduct(dt, tile_id, orbit_type_id, polarization,
                                     radar_product_type, full_path))

        return products
Example #17
 def check_array_len_eq_constant(self, column, limit, constraint={}, array_dim=1):
     """
     Length of array equal to constant
     """
     return self._run_query(SQL("array_length({0}, {1}) != {2}").format(
         Identifier(column),
         Literal(int(array_dim)),
         Literal(int(limit))
         ),
         constraint)
Example #18
def ageBinnedCategorisedSUD(logger):
    '''Finds the percentage of the age-binned sample that has
    SUD of a particular substance.

    Decorators:
        lD.log

    Arguments:
        logger {logging.Logger} -- logs error information
    '''
    countDict = {}
    resultsDict = {}
    try:

        for sudcat in table2_config["params"]["sudcats"].keys():
            list1 = []
            for race in table2_config["inputs"]["races"]:
                list2 = []
                for lower, upper in zip(['1', '12', '18', '35', '50'], ['11', '17', '34', '49', '100']):
                    query = SQL('''
                    SELECT 
                        count(*) 
                    FROM 
                        sarah.test2 t1
                    INNER JOIN 
                        sarah.test4 t2
                    ON
                        t1.patientid = t2.patientid
                    WHERE 
                        t1.race = {}
                    AND 
                        t1.age BETWEEN {} AND {}
                    AND 
                        t2.{} = true
                    ''').format(
                        Literal(race),
                        Literal(lower),
                        Literal(upper),
                        Identifier(sudcat)
                    )
                    data = [d[0] for d in pgIO.getAllData(query)]
                    list2.append(data[0])
                list1.append(list2)
            countDict[sudcat] = list1

        # Change counts to percentage of the race sample
        resultsDict = {}
        for row in countDict:
            resultsDict[row] = divByAgeBins(countDict[row])

    except Exception as e:
        logger.error('Failed to find categorised SUD counts because of {}'.format(e))

    return resultsDict
Example #19
def process_db_ndvi_products(config, conn, site_id):
    with conn.cursor() as cursor:
        query = SQL("""
            with products as (
                select product.site_id,
                    product.name,
                    product.created_timestamp as date,
                    product.processor_id,
                    product.product_type_id,
                    product.full_path,
                    product.tiles
                from product
                where product.site_id = {}
                    and product.product_type_id = 3
            )
            select products.date,
                    products.tiles,
                    products.full_path
            from products
            where date between {} and {}
            order by date;
            """)

        site_id_filter = Literal(site_id)
        start_date_filter = Literal(config.season_start)
        end_date_filter = Literal(config.season_end)
        query = query.format(site_id_filter, start_date_filter,
                             end_date_filter)
        #print(query.as_string(conn))
        # execute the query
        cursor.execute(query)
        results = cursor.fetchall()
        conn.commit()

        print("Extracted {} products to check".format(len(results)))

        for (dt, tiles, full_path) in results:
            tilesPath = os.path.join(full_path, "TILES")
            try:
                tilesDirs = os.listdir(tilesPath)
            except OSError:
                print(
                    "Product {} found in DB but not on disk".format(full_path))
                continue

            for tile in tiles:
                tilePaths = fnmatch.filter(tilesDirs,
                                           "S2AGRI_L3B_A*_T{}".format(tile))
                if len(tilePaths) == 1:
                    subPath = tilePaths[0]
                    m = re.match(
                        r"S2AGRI_L3B_A(\d{{8}}T\d{{6}})_T{}".format(tile),
                        subPath)
                    checkTileDir(config, tilesPath, subPath, tile, m.group(1))
Example #20
 def check_array_len_col(self, array_column, len_column, constraint={}, shift=0, array_dim=1):
     """
     Length of array_column matches len_column
     """
     return self._run_query(SQL("array_length({0}, {3}) != {1} + {2}").format(
         Identifier(array_column),
         Identifier(len_column),
         Literal(int(shift)),
         Literal(array_dim),
         ),
         constraint)
Example #21
def addmorethan2sudcolumn(logger):
    '''Populates the 'morethan2sud' column in tejas.sud_race_age

    This function counts the number of 'True' for each mental disorder
    for each user in tejas.sud_race_age. If they have more than 1 'True' value,
    their 'morethan2sud' column will be set to 'True'.

    Decorators:
        lD.log

    Arguments:
        logger {logging.Logger} -- logs error information
    '''
    try:
        query = '''
        SELECT
            siteid, backgroundid, alc, cannabis, amphe, halluc, nicotin,
            cocaine, opioids, sedate, others, polysub, inhalant
        FROM tejas.sud_race_age
        '''
        data = pgIO.getAllData(query)

        csvfile = '../data/raw_data/atleast2suduser_keys.csv'

        with open(csvfile, 'w+') as output:
            csv_output = csv.writer(output)
            for row in data:
                if sum(list(row[2:])) >= 2:
                    csv_output.writerow(row)
        readCSV = csv.reader(open(csvfile), delimiter=",")
        for user in tqdm(readCSV):
            updateQuery = SQL('''
            UPDATE tejas.sud_race_age
            SET morethan2sud = true
            WHERE siteid = {}
            AND backgroundid = {}
            ''').format(Literal(user[0]), Literal(str(user[1])))
            value = pgIO.commitData(updateQuery)
            # print(type(user[0]))

        #Update column's null values to false
        updateQuery2 = '''
        UPDATE tejas.sud_race_age
        SET morethan2sud = false
        WHERE morethan2sud is null
        '''
        print(pgIO.commitData(updateQuery2))


    except Exception as e:
        logger.error('adding morethan2sud column to the database failed because of {}'.format(e))

    return
Example #22
def table_exists(cnx: Union[str, psycopg2.extensions.connection],
                 table_name: str, schema_name: str) -> bool:
    """

    :param cnx: an open connection or database connection string
    :param table_name: the name of the table
    :param schema_name: the name of the schema in which the table resides
    :return: ``True`` if the table exists, otherwise ``False``
    """
    query = SQL(_PHRASEBOOK.gets('table_exists')).format(
        table=Literal(table_name), schema=Literal(schema_name))
    return execute_scalar(cnx=cnx, query=query)
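_PHRASEBOOK is project-specific and its table_exists phrase is not shown here; a plausible stand-in, checked against information_schema, might look like the sketch below (this is an assumption, not the project's actual SQL):

# Assumed stand-in for the phrasebook's 'table_exists' query; the real
# project SQL may differ. The DSN is a placeholder.
import psycopg2
from psycopg2.sql import SQL, Literal

conn = psycopg2.connect("dbname=demo")  # placeholder DSN

query = SQL(
    "SELECT EXISTS (SELECT 1 FROM information_schema.tables "
    "WHERE table_schema = {schema} AND table_name = {table})"
).format(table=Literal("my_table"), schema=Literal("public"))
with conn.cursor() as cursor:
    cursor.execute(query)
    print(cursor.fetchone()[0])  # True if the table exists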
Example #23
 def _create_place_classtype_table(self, sql_tablespace, phrase_class, phrase_type):
     """
          Create the place_classtype table for the given phrase_class/phrase_type if it doesn't exist.
     """
     table_name = 'place_classtype_{}_{}'.format(phrase_class, phrase_type)
     with self.db_connection.cursor() as db_cursor:
         db_cursor.execute(SQL("""
                 CREATE TABLE IF NOT EXISTS {{}} {} 
                 AS SELECT place_id AS place_id,st_centroid(geometry) AS centroid FROM placex 
                 WHERE class = {{}} AND type = {{}}""".format(sql_tablespace))
                           .format(Identifier(table_name), Literal(phrase_class),
                                   Literal(phrase_type)))
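The double braces above survive the first, plain str.format pass (which only fills in the tablespace clause) and become psycopg2 placeholders in the second pass. A minimal sketch of the two stages, with placeholder names and DSN:

# Sketch of the two-stage formatting above: str.format fills the
# tablespace clause and turns each {{}} into {}, then SQL(...).format
# fills those placeholders safely. Names and DSN are placeholders.
import psycopg2
from psycopg2.sql import SQL, Identifier, Literal

conn = psycopg2.connect("dbname=demo")  # placeholder DSN

template = ("CREATE TABLE IF NOT EXISTS {{}} {} "
            "AS SELECT place_id FROM placex WHERE class = {{}} AND type = {{}}")
stage1 = template.format("TABLESPACE pg_default")
query = SQL(stage1).format(Identifier("place_classtype_amenity_bar"),
                           Literal("amenity"), Literal("bar"))
print(query.as_string(conn))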
Example #24
def relabelVar(logger):
    '''Relabels column values
    
    This function relabels Race and Settings values to standardised values.
    
    Decorators:
        lD.log
    
    Arguments:
        logger {logging.Logger} -- logs error information
    '''
    try:
        # relabel_sex_success = []
        # for sex in table1_config["inputs"]["sexes"]:
        #     sex_query = SQL('''
        #     UPDATE sarah.test2
        #     SET sex = {}
        #     WHERE sex in {}
        #     ''').format(
        #         Literal(sex),
        #         Literal(tuple(table1_config["params"]["sexes"][sex]))
        #         )
        #     relabel_sex_success.append(pgIO.commitData(sex_query))
        # if False in relabel_sex_success:
        #     print("Relabelling sex not successful!")

        relabel_race_success = []
        for race in table1_config["inputs"]["races"]:
            race_query = SQL('''
            UPDATE sarah.test2
            SET race = {}
            WHERE race in {}
            ''').format(Literal(race),
                        Literal(tuple(table1_config["params"]["races"][race])))
            relabel_race_success.append(pgIO.commitData(race_query))
        if False in relabel_race_success:
            print("Relabelling race not successful!")

        relabel_setting_success = []
        for setting in table1_config["inputs"]["settings"]:
            setting_query = SQL('''
            UPDATE sarah.test2
            SET visit_type = {}
            WHERE visit_type in {}
            ''').format(
                Literal(setting),
                Literal(tuple(table1_config["params"]["settings"][setting])))
            relabel_setting_success.append(pgIO.commitData(setting_query))
        if False in relabel_setting_success:
            print("Relabelling setting not successful!")

    except Exception as e:
        logger.error('Failed to update table test2 because {}'.format(e))
Example #25
def exportPracticesFile(config, conn, pg_path, practice, site_id, out_file):
    lpis_table = "decl_{}_{}".format(config.site_short_name, config.year)
    lut_table = "lut_{}_{}".format(config.site_short_name, config.year)

    srid = get_srid(conn, lpis_table)

    with conn.cursor() as cursor:
        query = SQL("""
                select
                lpis."NewID" as "SEQ_ID",
                lpis.ori_id as "FIELD_ID",
                ap.country as "COUNTRY",
                ap.year as "YEAR",
                ap.main_crop as "MAIN_CROP",
                ap.veg_start as "VEG_START",
                ap.h_start as "H_START",
                ap.h_end as "H_END",
                ap.practice as "PRACTICE",
                ap.p_type as "P_TYPE",
                ap.p_start as "P_START",
                ap.p_end as "P_END",
                lpis."GeomValid",
                lpis."Duplic",
                lpis."Overlap",
                lpis."Area_meters" as "Area_meter",
                lpis."ShapeInd",
                lut.ctnum as "CTnum",
                lut.ct as "CT",
                lut.lc as "LC",                
                lpis."S1Pix",
                lpis."S2Pix"
            from l4c_practices ap
            inner join {} lpis on lpis.ori_id = ap.orig_id
            natural join {} lut
            where ap.site_id = {} and ap.year = {} and ap.practice_short_name = {}
            and not lpis.is_deleted order by lpis."NewID" """)
        query = query.format(Identifier(lpis_table), Identifier(lut_table),
                             Literal(site_id), Literal(config.year),
                             Literal(practice))
        query_str = query.as_string(conn)
        print(query_str)

        # TODO: change natural join with inner join lut_nld_2019_2019 lut using (ori_crop) or something like this

        name = os.path.basename(out_file)
        table_name = os.path.splitext(name)[0].lower()
        command = get_export_table_command(config.ogr2ogr_path, out_file,
                                           pg_path, "-nln", table_name, "-sql",
                                           query_str, "-a_srs",
                                           "EPSG:" + str(srid), "-gt", 100000)
        run_command(command)
Example #26
def get_ndvi_products_from_db(config, conn, site_id):
    with conn.cursor() as cursor:
        query = SQL("""
            with products as (
                select product.site_id,
                    product.name,
                    product.created_timestamp as date,
                    product.processor_id,
                    product.product_type_id,
                    product.full_path,
                    product.tiles
                from product
                where product.site_id = {}
                    and product.product_type_id = 3
            )
            select products.date,
                    products.tiles,
                    products.full_path
            from products
            where date between {} and {}
            order by date;
            """)

        site_id_filter = Literal(site_id)
        start_date_filter = Literal(config.season_start)
        end_date_filter = Literal(config.season_end)
        query = query.format(site_id_filter, start_date_filter,
                             end_date_filter)
        # print(query.as_string(conn))
        cursor.execute(query)

        results = cursor.fetchall()
        conn.commit()

        products = []
        for (dt, tiles, full_path) in results:
            for tile in tiles:
                ndviTilePath = os.path.join(full_path, "TILES")
                acq_date = dt.strftime("%Y%m%dT%H%M%S")
                ndviTilePath = os.path.join(
                    ndviTilePath, "S2AGRI_L3B_A{}_T{}".format(acq_date, tile))
                ndviTilePath = os.path.join(ndviTilePath, "IMG_DATA")
                ndviTilePath = os.path.join(
                    ndviTilePath,
                    "S2AGRI_L3B_SNDVI_A{}_T{}.TIF".format(acq_date, tile))
                products.append(NdviProduct(dt, tile, ndviTilePath))
                #if not os.path.exists(ndviTilePath) :
                #    print ("FILE DOES NOT EXISTS: ".format(ndviTilePath))

        return products
Example #27
def set_interface(connection, component: str, consumers: List[ConsumerRecord], producers: List[ProducerRecord]) -> None:
    _guarantee_consumer_uniqueness(consumers)
    _guarantee_producer_uniqueness(producers)

    consumers_for_db = [
        (c.sub_component, c.interface_host, c.interface_type, c.primary, c.secondary, c.tertiary, c.optional)
        for c
        in consumers
    ]
    producers_for_db = [
        (p.sub_component, p.interface_host, p.interface_type, p.primary, p.secondary, p.tertiary, p.deprecated)
        for p
        in producers
    ]

    try:
        sql_delete_consumers = SQL(SQL_DELETE_CONSUMERS).format(component=Literal(component))
        sql_delete_producers = SQL(SQL_DELETE_PRODUCERS).format(component=Literal(component))
        sql_insert_consumers = SQL(SQL_INSERT_CONSUMERS).format(component=Literal(component))
        sql_insert_producers = SQL(SQL_INSERT_PRODUCERS).format(component=Literal(component))

        with connection.cursor() as cursor:
            # delete consumers before deleting producers
            cursor.execute(SQL_LOCK_EXCLUSIVE_CONSUMERS)
            if consumers_for_db:
                execute_values(
                    cursor,
                    sql_delete_consumers + SQL(SQL_CONSUMERS_NOT_IN),
                    consumers_for_db,
                    page_size=len(consumers_for_db),  # sending chunkwise would delete all elements not in chunk
                )
            else:
                cursor.execute(sql_delete_consumers)
            if producers_for_db:
                execute_values(
                    cursor,
                    sql_delete_producers + SQL(SQL_PRODUCERS_NOT_IN),
                    producers_for_db,
                    page_size=len(producers_for_db),  # sending chunkwise would delete all elements not in chunk
                )
            else:
                cursor.execute(sql_delete_producers)
            # insert producers before inserting consumers
            execute_values(cursor, sql_insert_producers, producers_for_db)
            execute_values(cursor, sql_insert_consumers, consumers_for_db)
        connection.commit()
    except UniqueViolation as e:
        raise InterfaceEntryDuplication(f'The interface specification contains one value multiple times: {e}')
    except RaiseException as e:
        raise InterfaceEntryConflict(f'Error: {e.pgerror.splitlines()[0]}')
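For readers unfamiliar with psycopg2.extras.execute_values, a minimal hedged sketch of the batched-insert pattern used above; the table, columns, rows, and DSN are hypothetical:

# Minimal execute_values sketch; the table, columns, rows, and DSN are
# hypothetical. execute_values expands the single %s placeholder into a
# multi-row VALUES list in one round trip.
import psycopg2
from psycopg2.extras import execute_values

conn = psycopg2.connect("dbname=demo")  # placeholder DSN

rows = [("svc-a", "http"), ("svc-b", "grpc")]
with conn.cursor() as cursor:
    execute_values(
        cursor,
        "INSERT INTO consumers (sub_component, interface_type) VALUES %s",
        rows,
    )
conn.commit()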
Example #28
def allAgesGeneralSUD(logger):
    '''Finds the percentage of the total sample that has any SUD and
    more than 2 SUDs.

    Decorators:
        lD.log

    Arguments:
        logger {logging.Logger} -- logs error information
    '''
    try:

        countDict = {"any_sud": [], "morethan2_sud": []}

        # Find number of users in each race who have any SUD
        any_sud = []
        for race in table2_config["inputs"]["races"]:
            query = SQL('''
            WITH subQ AS (
            SELECT *
            FROM
                tejas.sud_race_age
            WHERE
                sud_race_age.race = {}
            )
            SELECT count(*) FROM subQ
            ''').format(Literal(race))
            data = [d[0] for d in pgIO.getAllData(query)]
            countDict["any_sud"].append(data[0])

        # Find number of users in each race who have >2 SUD
        count = {"AA": 0, "NHPI": 0, "MR": 0}

        for race in table2_config["inputs"]["races"]:
            query = SQL('''
            SELECT alc, cannabis, amphe, halluc, nicotin, cocaine,
            opioids, sedate, others, polysub, inhalant
            FROM tejas.sud_race_age
            WHERE sud_race_age.race = {}
            ''').format(Literal(race))
            data = pgIO.getAllData(query)
            for row in data:
                if sum(list(row)) >= 2:
                    count[race] += 1
        for race in count:
            countDict["morethan2_sud"].append(count[race])
    except Exception as e:
        logger.error('Cannot find general SUD counts because of {}'.format(e))
    return countDict
Example #29
def insert_account(cursor, account):
    q = SQL("INSERT INTO passwords.account (name) SELECT {0} WHERE NOT "
            "EXISTS (SELECT * FROM passwords.account WHERE name = {1})"
            "RETURNING acct_id").format(Literal(account), Literal(account))
    cursor.execute(q)
    r = cursor.fetchone()
    if r:
        return r[0]
    else:
        cursor.execute(SQL("SELECT acct_id FROM passwords.account "
                           "WHERE name={0}").format(Literal(account)))
        r = cursor.fetchone()
        if r:
            return r[0]
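On PostgreSQL 9.5+, the same get-or-create could be expressed with ON CONFLICT; a hedged sketch, assuming passwords.account(name) has a unique constraint (DO NOTHING returns no row on conflict, so the fallback SELECT is still needed):

# Hedged alternative to insert_account using ON CONFLICT; assumes a
# unique constraint on passwords.account(name). This is a sketch, not
# the original module's code.
from psycopg2.sql import SQL, Literal

def insert_account_upsert(cursor, account):
    cursor.execute(SQL(
        "INSERT INTO passwords.account (name) VALUES ({0}) "
        "ON CONFLICT (name) DO NOTHING RETURNING acct_id"
    ).format(Literal(account)))
    r = cursor.fetchone()
    if r:
        return r[0]
    cursor.execute(SQL(
        "SELECT acct_id FROM passwords.account WHERE name = {0}"
    ).format(Literal(account)))
    r = cursor.fetchone()
    return r[0] if r else None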
Example #30
    def _business_logic(request_data: dict) -> list:
        # By this point, our award_id has been validated and cleaned up by
        # TinyShield.  We will either have an internal award id that is an
        # integer or a generated award id that is a string.
        award_id = request_data["award_id"]
        award_id_column = "id" if type(award_id) is int else "generated_unique_award_id"

        sql = FUNDING_SQL.format(
            award_id_column=Identifier(award_id_column),
            award_id=Literal(award_id),
            order_by=build_composable_order_by(SORTABLE_COLUMNS[request_data["sort"]], request_data["order"]),
            limit=Literal(request_data["limit"] + 1),
            offset=Literal((request_data["page"] - 1) * request_data["limit"]),
        )
        return execute_sql_to_ordered_dictionary(sql)
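The limit + 1 in Examples #4 and #30 is a has-next-page probe: fetch one row more than the page size, and the presence of that extra row signals another page. A small illustrative sketch:

# Illustrative has-next-page handling for the limit + 1 pattern above.
def paginate(results, limit):
    has_next = len(results) > limit
    return results[:limit], has_next

rows, has_next = paginate(list(range(11)), limit=10)
assert len(rows) == 10 and has_next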