Example #1
File: drop_data.py  Project: alkadis/vcv
def main():
    parser = create_parser(description=__doc__, use_instance=False)
    parser.add_argument('-f',
                        dest='force',
                        default=False,
                        action='store_true',
                        help="force deletion without asking for confirmation")
    args = parser.parse_args()

    if not args.force:
        input = raw_input('Delete all data? No backup will be done! '
                          'If so type "yes": ')
        if input != 'yes':
            print 'Answer not "yes", but: "%s"\nAborting.' % input
            exit(1)

    config = config_from_args(args)
    engine = get_engine(config, echo=True)
    conn = engine.connect()

    # the transaction only applies if the DB supports
    # transactional DDL, i.e. Postgresql, MS SQL Server
    trans = conn.begin()

    inspector = reflection.Inspector.from_engine(engine)

    # gather all data first before dropping anything.
    # some DBs lock after things have been dropped in
    # a transaction.

    metadata = MetaData()

    tbs = []
    all_fks = []

    for table_name in inspector.get_table_names():
        fks = []
        for fk in inspector.get_foreign_keys(table_name):
            if not fk['name']:
                continue
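            # a named, column-less constraint is all SQLAlchemy needs later
            # to emit "ALTER TABLE ... DROP CONSTRAINT <name>"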
            fks.append(ForeignKeyConstraint((), (), name=fk['name']))
        t = Table(table_name, metadata, *fks)
        tbs.append(t)
        all_fks.extend(fks)

    for fkc in all_fks:
        conn.execute(DropConstraint(fkc))

    for table in tbs:
        conn.execute(DropTable(table))

    trans.commit()
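
This recipe exists mainly to cope with foreign keys, including cyclic ones, that a plain drop cannot order around. When the schema has no such cycles, reflecting the metadata and letting SQLAlchemy order the drops is usually enough; a minimal sketch assuming a plain SQLAlchemy engine (the URL stands in for the project-specific get_engine()):

from sqlalchemy import MetaData, create_engine

engine = create_engine('postgresql://user:secret@localhost/mydb')  # stand-in URL
metadata = MetaData()
metadata.reflect(bind=engine)   # load every table from the live database
metadata.drop_all(bind=engine)  # drop them in reverse dependency order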
Example #2
def dump_xlsx_to_sql():
    engine = get_engine('data_analysis_project', 'root', 'Bohemianrhapsody@14')
    df1 = read_excel_data("questions_data.xlsx")
    df2 = read_excel_data("answers_data.xlsx")

    # converting unix epoch time to normal date time format
    df1['creationDate'] = pd.to_datetime(df1['creationDate'], unit='ms')
    df2['creationDate'] = pd.to_datetime(df2['creationDate'], unit='ms')
    # print(df1['creationDate'])
    # print(df2['creationDate'])

    df1.to_sql(con=engine, name='question_data', if_exists='replace')
    df2.to_sql(con=engine, name='answer_data', if_exists='replace')
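
The get_engine() used here takes a database name plus credentials, so it is presumably a thin wrapper over SQLAlchemy's create_engine; a minimal stand-in, assuming a local MySQL server and the pymysql driver (names and defaults are illustrative, not the project's actual helper):

from sqlalchemy import create_engine

def get_engine(db_name, user, password, host='localhost'):
    # hypothetical helper: build an engine suitable for DataFrame.to_sql()
    url = 'mysql+pymysql://%s:%s@%s/%s' % (user, password, host, db_name)
    return create_engine(url)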
Example #3
def main():
    parser = create_parser(description=__doc__, use_instance=False)
    parser.add_argument('-f', dest='force', default=False, action='store_true',
                        help="force deletion without asking for confirmation")
    args = parser.parse_args()

    if not args.force:
        input = raw_input('Delete all data? No backup will be done! '
                          'If so type "yes": ')
        if input != 'yes':
            print 'Answer not "yes", but: "%s"\nAborting.' % input
            exit(1)

    config = config_from_args(args)
    engine = get_engine(config, echo=True)
    conn = engine.connect()

    # the transaction only applies if the DB supports
    # transactional DDL, i.e. Postgresql, MS SQL Server
    trans = conn.begin()

    inspector = reflection.Inspector.from_engine(engine)

    # gather all data first before dropping anything.
    # some DBs lock after things have been dropped in
    # a transaction.

    metadata = MetaData()

    tbs = []
    all_fks = []

    for table_name in inspector.get_table_names():
        fks = []
        for fk in inspector.get_foreign_keys(table_name):
            if not fk['name']:
                continue
            fks.append(
                ForeignKeyConstraint((), (), name=fk['name'])
                )
        t = Table(table_name, metadata, *fks)
        tbs.append(t)
        all_fks.extend(fks)

    for fkc in all_fks:
        conn.execute(DropConstraint(fkc))

    for table in tbs:
        conn.execute(DropTable(table))

    trans.commit()
Example #4
def update():
    engine = get_engine()
    q = engine.query(QUERY).next()
    if not len(q.keys()):
        return
    next = q.values().pop()
    year, num = next.split('-')
    years = range(int(year), datetime.now().year+1)
    offsets = [int(num)] + ([0] * len(years))
    print "CONTINUING", zip(years, offsets)
    for year, offset in zip(years, offsets):
        make_session()
        all_entries(year, offset=offset)
        for paths in traverse_local([year], offset):
            parse_tender(engine, paths)
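
The resume logic pairs the saved offset with its own year and restarts every later year at zero; a worked example with hypothetical values:

# next == '2014-120' and the current year is 2016:
#   years   -> [2014, 2015, 2016]
#   offsets -> [120, 0, 0, 0]    (zip() drops the surplus trailing 0)
#   zip(years, offsets) -> [(2014, 120), (2015, 0), (2016, 0)]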
Example #5
def main():
    parser = create_parser(description=__doc__, use_instance=False)
    parser.add_argument('--dump',
                        default=None,
                        required=True,
                        help="Path to the SQL dump file.")
    args = parser.parse_args()

    # check and cleanup dump file
    dump_path = os.path.join(os.getcwd(), args.dump)
    if not os.path.exists(dump_path):
        parser.error('SQL dump file "%s" does not exist.' % args.dump)

    # get an engine to get the driver type and connection details.
    engine = get_engine(config_from_args(args))
    drivername = engine.url.drivername

    error = False
    if drivername == 'postgresql':
        # use the psql command line script for imports.
        # pg_dump by default emits COPY ... FROM STDIN statements
        # which the psycopg2 driver can't handle.
        # pg_dump can emit inserts (--inserts), but that's
        # dead slow to import.
        vars = engine.url.__dict__.copy()
        vars['dump_path'] = dump_path
        command = ('psql -U {username} -h {host} -p {port} -'
                   'd {database} -f {dump_path}').format(**vars)
        print 'Executing command: %s' % command
        if engine.url.password is not None:
            print 'Prefixing it with PGPASSWORD="******"'
            command = 'PGPASSWORD="%s" %s' % (engine.url.password, command)

        error = subprocess.call(command, shell=True)
    else:
        print(
            'Action for driver "%s" is not defined.\n'
            "Note: sqlite3 has a non-standard executescript() method."
            % drivername)
        exit(1)

    if error:
        print 'Process exited with Error: %s' % error
        exit(error)
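
Interpolating the password into the shell string works, but handing it to psql via the environment keeps it out of the process list; a small variation on the call above, reusing command and engine from the example:

import os
import subprocess

env = os.environ.copy()
if engine.url.password is not None:
    env['PGPASSWORD'] = engine.url.password  # psql picks up PGPASSWORD itself
error = subprocess.call(command, shell=True, env=env)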
Example #6
def main():
    parser = create_parser(description=__doc__, use_instance=False)
    parser.add_argument('--dump', default=None, required=True,
                        help="Path to the SQL dump file.")
    args = parser.parse_args()

    # check and cleanup dump file
    dump_path = os.path.join(os.getcwd(), args.dump)
    if not os.path.exists(dump_path):
        parser.error('SQL dump file "%s" does not exist.' % args.dump)

    # get an engine to get the driver type and connection details.
    engine = get_engine(config_from_args(args))
    drivername = engine.url.drivername

    error = False
    if drivername == 'postgresql':
        # use the psql command line script for imports.
        # pg_dump by default emits COPY ... FROM STDIN statements
        # which the psycopg2 driver can't handle.
        # pg_dump can emit inserts (--inserts), but that's
        # dead slow to import.
        vars = engine.url.__dict__.copy()
        vars['dump_path'] = dump_path
        command = ('psql -U {username} -h {host} -p {port} -'
                   'd {database} -f {dump_path}').format(**vars)
        print 'Executing command: %s' % command
        if engine.url.password is not None:
            print 'Prefixing it with PGPASSWORD="******"'
            command = 'PGPASSWORD="%s" %s' % (engine.url.password, command)

        error = subprocess.call(command, shell=True)
    else:
        print ('Action for driver "%s" is not defined.\n'
               "Note: sqlite3 has a non-standard executescript() method."
               % drivername)
        exit(1)

    if error:
        print 'Process exited with Error: %s' % error
        exit(error)
Example #7
                        '--columns',
                        nargs='+',
                        dest='columns',
                        default=get_default('columns', False),
                        help='List of columns to display')
    parser.add_argument('--db_table',
                        default=test_table.name,
                        help='Table used to read/write data')
    args = parser.parse_args()

    engine = get_engine(db_host=args.host,
                        db_port=args.port,
                        db_user=args.db_user,
                        db_password=args.db_password,
                        db_name=args.db_name,
                        is_mysql=args.mysql,
                        is_postgresql=args.postgresql,
                        tls_ca=args.tls_root_cert,
                        tls_key=args.tls_key,
                        tls_crt=args.tls_cert,
                        sslmode=args.ssl_mode,
                        verbose=args.verbose)
    connection = engine.connect()
    metadata.create_all(engine)

    if args.print:
        print_data(connection, args.columns, table_map[args.db_table])
    elif args.data:
        write_data(args.data, connection, table_map[args.db_table])
    else:
        print('Use --print or --data options')
        exit(1)
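
get_engine() here accepts TLS material and an sslmode. With plain SQLAlchemy and the psycopg2 driver, the equivalent settings usually travel through connect_args using libpq parameter names; a hedged sketch with illustrative paths (the project's own helper may differ):

from sqlalchemy import create_engine

engine = create_engine(
    'postgresql://user@db-host:5432/mydb',
    connect_args={
        'sslmode': 'verify-full',         # cf. args.ssl_mode
        'sslrootcert': '/path/ca.pem',    # cf. args.tls_root_cert
        'sslcert': '/path/client.crt',    # cf. args.tls_cert
        'sslkey': '/path/client.key',     # cf. args.tls_key
    })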
Example #8
from common import get_engine, get_output_dir, list_countries
from dataset import freeze
import os
import sqlalchemy.sql.expression as sql

engine = get_engine()
document = engine['document'].table.alias('document')
cpv = engine['document_cpv'].table.alias('cpv')
awards = engine['awards'].table.alias('awards')


def documents_query():
    fo = document.join(cpv, document.c.uri == cpv.c.document_uri)
    year = sql.func.substr(document.c.publication_date, 7).label('year')
    cpvs = sql.func.array_to_string(sql.func.array_agg(cpv.c.code),
                                    ';').label('document_cpvs')
    q = sql.select([document, year, cpvs], from_obj=fo, use_labels=True)
    q = q.group_by(*list(document.columns))
    return q


def awards_query():
    fo = awards.join(document, document.c.uri == awards.c.uri)
    fo = fo.join(cpv, document.c.uri == cpv.c.document_uri)
    year = sql.func.substr(document.c.publication_date, 7).label('year')
    cpvs = sql.func.array_to_string(sql.func.array_agg(cpv.c.code),
                                    ';').label('document_cpvs')
    q = sql.select([awards, year, document, cpvs],
                   from_obj=fo,
                   use_labels=True)
    q = q.group_by(*list(awards.columns) + list(document.columns))
    return q
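
Neither query is executed in this module; a plausible usage, mirroring Example #9 and relying on the freeze, get_output_dir and os imports at the top (it assumes the dataset-style engine's query() accepts a compiled SELECT):

if __name__ == '__main__':
    out_dir = get_output_dir()
    rows = engine.query(documents_query())  # assumed to proxy to SQLAlchemy execute()
    freeze(rows, format='csv',
           filename=os.path.join(out_dir, 'documents.csv'))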
Example #9
import dataset
from common import get_engine

engine = get_engine()
table = engine['articles']

if __name__ == '__main__':
    q = engine.query("SELECT url, news_org, summary, content, title FROM articles")
    dataset.freeze(q, filename='articles.csv')
Example #10
                           2, 0, 1).reshape(1, 3, 160, 320)
    input = np.array(image, dtype=np.float32, order='C') / 255
    output = np.ones((51200))
    flat = (output > 0.4).astype(np.uint8)
    im3 = flat.reshape(160, 320)
    print('np sum', np.sum(flat))
t1 = time.time()
baselap = t1 - t0
i = i - 2
fps = i / baselap
print("Base: ", i, baselap, fps)

# Do inference with TensorRT
trt_outputs = []
with common.get_engine(
        onnx_file_path, engine_file_path
) as engine, engine.create_execution_context() as context:
    inputs, outputs, bindings, stream = common.allocate_buffers(engine)
    # Do inference
    print('Running inference')

    i = 0
    for filename in os.listdir(in_folder):
        i = i + 1
        if i == 2:
            t0 = time.time()
        # Set host input to the image. The common.do_inference function will copy the input to the GPU before executing.
        image = cv2.imread(os.path.join(in_folder, filename))
        image = cv2.resize(image,
                           None,
                           fx=0.5,