# Beispiel #1 ("Example #1" -- scrape-site marker; commented out so the file parses)
# 0
def ragged(cursor, **kwargs):
    """Output a result set that may be 'ragged', aka non-tabular, for example
       the results of a 'SELECT * FROM ..' query.

       Scans a preview window of rows to discover the column set, emits those
       fields as real CSV columns, and folds any field first seen after the
       preview window into the single trailing "_extra" column (previously
       such fields were silently dropped -- see the old UNDONE note).
    """
    if cursor.rowcount == 0:
        return

    batchsize = 100  # size of the preview window used to discover columns

    # Scan up to the first 'batchsize' count of rows and collect up all fields
    # that we see; these will be emitted as actual columns. Anything else that
    # we see for the remainder of the read will be collected into a single
    # "_extra" column.
    count = min(batchsize, cursor.rowcount)
    header = []  # All fields seen in preview window, in first-seen order
    rows = []
    for i in xrange(count):
        row = cursor.fetchone()
        fields = [item[0] for item in cursor.description]
        for field in fields:
            if field not in header:
                header.append(field)
        rows.append(dict(zip(fields, convert(row))))
    header.append("_extra")

    writer = csv.DictWriter(output, header)
    writer.writer.writerow(header)

    # Output the preview rows. DictWriter fills keys missing from a row
    # (including "_extra") with its default restval ('').
    for row in rows:
        writer.writerow(row)

    # Read and output the remaining rows, collecting new fields into _extra
    known = set(header)  # O(1) membership test in the per-row loop below
    while True:
        # UNDONE: Python CQL driver doesn't return None when there are no
        # more rows, instead it happily raises IndexError, so add the
        # following hack internal check here.
        if cursor.rs_idx >= len(cursor.result): break

        row = cursor.fetchone()
        if row is None: break
        row = convert(row)

        result = {}
        extra = []  # (field, value) pairs not seen in the preview window
        fields = [item[0] for item in cursor.description]
        for i in xrange(len(row)):
            field = fields[i]
            if field not in known:
                extra.append((field, row[i]))
                continue
            result[field] = row[i]
        if extra:
            # Fold the late-appearing fields into the reserved column as
            # 'name=value' pairs so no data is lost.
            result["_extra"] = ";".join("%s=%s" % pair for pair in extra)
        writer.writerow(result)
def ragged(cursor, **kwargs):
    """Output a result set that may be 'ragged', aka non-tabular, for example
       the results of a 'SELECT * FROM ..' query."""
    if cursor.rowcount == 0:
        return

    preview_size = min(100, cursor.rowcount)

    # Discover the column set by scanning a preview window of rows: every
    # field seen there becomes a real CSV column, and a trailing "_extra"
    # column is reserved for anything that only shows up later in the read.
    columns = []   # fields seen in the preview window, in first-seen order
    preview = []   # buffered preview rows, as field -> value dicts
    for _ in xrange(preview_size):
        record = cursor.fetchone()
        names = [desc[0] for desc in cursor.description]
        for name in names:
            if name not in columns:
                columns.append(name)
        preview.append(dict(zip(names, convert(record))))
    columns.append("_extra")

    writer = csv.DictWriter(output, columns)
    writer.writer.writerow(columns)

    # Flush the buffered preview rows first
    for record in preview:
        writer.writerow(record)

    # Stream out the remainder of the result set.
    # UNDONE: the Python CQL driver raises IndexError instead of returning
    # None at end-of-rows, hence the internal-state check in the loop
    # condition below.
    while cursor.rs_idx < len(cursor.result):
        record = cursor.fetchone()
        if record is None:
            break
        values = convert(record)

        names = [desc[0] for desc in cursor.description]
        out = {}
        for idx, value in enumerate(values):
            name = names[idx]
            if name not in columns:
                continue  # UNDONE: Need to append to _extra
            out[name] = value
        writer.writerow(out)
# Beispiel #3 ("Example #3" -- scrape-site marker; commented out so the file parses)
# 0
def normal(cursor, **kwargs):
    """Output the results of a 'regular' query, consisting of either a
       rectangular result set or a scalar result, or nothing.

       Writes CSV to the module-level `output` stream: a header row of field
       names followed by one row per result row (each row passed through
       convert() first). Returns None.
    """
    if cursor.description is None:
        return  # No result (for example, a DDL query)

    writer = csv.writer(output)

    # The following is a bizarre way to check for a scalar result, but it is
    # the same test used in `cqlsh` and I don't see another way to check in
    # the docs.
    if cursor.description is cql.cursor._COUNT_DESCRIPTION:
        writer.writerow(["Result"])
        writer.writerow(cursor.fetchone())
    else:
        cursor.arraysize = 100  # UNDONE: Parameterize the fetch batch size
        fields = [item[0] for item in cursor.description]
        writer.writerow(fields)
        while True:
            rows = cursor.fetchmany()
            if not rows:  # an empty batch signals end of the result set
                break
            for row in rows:
                writer.writerow(convert(row))
def normal(cursor, **kwargs):
    """Output the results of a 'regular' query, consisting of either a
       rectangular result set or scalar result, or nothing."""
    # A DDL statement yields no description at all -- nothing to emit.
    if cursor.description is None:
        return

    writer = csv.writer(output)

    # Scalar results are detected the same (admittedly odd) way `cqlsh`
    # does it: identity comparison against the driver's COUNT description.
    if cursor.description is cql.cursor._COUNT_DESCRIPTION:
        writer.writerow(["Result"])
        writer.writerow(cursor.fetchone())
        return

    cursor.arraysize = 100  # UNDONE: Paramaterize
    writer.writerow([column[0] for column in cursor.description])
    while True:
        batch = cursor.fetchmany()
        if len(batch) == 0:
            break
        for record in batch:
            writer.writerow(convert(record))