Example #1
File: db.py Project: FMCorz/mdk
    def __init__(self, engine, options):

        self.engine = engine
        self.options = options

        if engine in ('mysqli', 'mariadb'):

            if 'fuckfred' in options['passwd']:
                raise Exception('Could not establish connexion with MySQL, bad language used!')

            self.conn = mysql.connect(
                host=options['host'],
                port=int(options['port']),
                user=options['user'],
                passwd=options['passwd'],
                db=''
            )
            self.cur = self.conn.cursor()

        elif engine == 'pgsql':
            # psycopg2.
            self.conn = pgsql.connect(
                host=str(options['host']),
                port=int(options['port']),
                user=str(options['user']),
                password=str(options['passwd'])
            )
            try:
                self.cur = self.conn.cursor()
            except:
                raise Exception('Connexion failed! Make sure the database \'%s\' exists.' % str(options['user']))

        elif engine == 'sqlsrv':
            # pyodbc.
            host = str(options['host'])
            port = int(options['port'])
            user = str(options['user'])
            password = str(options['passwd'])

            # Look for installed ODBC Driver for SQL Server.
            drivers = pyodbc.drivers()
            sqlsrvdriver = next((driver for driver in drivers if "for SQL Server" in driver), None)
            if sqlsrvdriver is None:
                installurl = 'https://sqlchoice.azurewebsites.net/en-us/sql-server/developer-get-started/python'
                raise Exception("You need to install an ODBC Driver for SQL Server. Check out %s for more info." % installurl)

            logging.debug('Using %s' % sqlsrvdriver)

            connectionstr = "DRIVER=%s;SERVER=%s;PORT=%d;UID=%s;PWD=%s" \
                            % (sqlsrvdriver, host, port, user, password)
            self.conn = pyodbc.connect(connectionstr)
            self.conn.autocommit = True
            self.cur = self.conn.cursor()

        else:
            raise Exception('DB engine %s not supported' % engine)
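
A minimal usage sketch for this constructor, assuming the enclosing class is named DB and using placeholder credentials; the option keys mirror the ones read above:

options = {
    'host': 'localhost',
    'port': 1433,
    'user': 'sa',
    'passwd': 'secret',  # placeholder credentials
}
db = DB('sqlsrv', options)  # picks the first installed "... for SQL Server" ODBC driver
db.cur.execute('SELECT 1')
print(db.cur.fetchone())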
Example #2
 def _get_driver(self, ctype):
     # Given a list of possible drivers for this ctype, look to find
     # a match in the pyodbc.drivers() list.  If a match is found,
     # return it.  Otherwise (arbitrarily) return the first one.  If
     # the ctype is not known, return None.
     drivers = self._drivers.get(ctype,[])
     for driver in drivers:
         if driver in pyodbc.drivers():
             return driver
     if drivers:
         return drivers[0]
     else:
         return None
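
A standalone sketch of the same idea, with a hypothetical preference map: return an installed driver from the candidate list when one matches, otherwise fall back to the first candidate, as the method above does.

import pyodbc

# Hypothetical candidates, most preferred first.
CANDIDATES = {
    'mssql': ['ODBC Driver 18 for SQL Server', 'ODBC Driver 17 for SQL Server', 'SQL Server'],
}

def pick_driver(ctype):
    candidates = CANDIDATES.get(ctype, [])
    installed = set(pyodbc.drivers())
    for name in candidates:
        if name in installed:
            return name
    return candidates[0] if candidates else None

print(pick_driver('mssql'))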
Example #3
    else:
        where_to_date = ''

    where_stmt.extend([where_from_date, where_to_date])
    where_lst = [i for i in where_stmt if len(i) > 0]
    if len(where_lst) == 0:
        where_lst = None
    return where_lst, temp_where




##########################################
### tests

print(pyodbc.drivers())

sp1 = mssql.rd_sql(pl_server, pl_db, spatial_table, ['ManagementGroupID', 'SpatialUnitID', 'SpatialUnitName', 'GeoWKT', 'SurfaceWater', 'Groundwater'], username='******', password='******')

print(sp1)
#print('success')

#driver1 = '?driver=ODBC Driver 17 for SQL Server'
#eng_str = 'mssql+pyodbc://' + up + server + '/' + database + driver1
#eng_str1 = "mssql+pyodbc://reader1:reader1@edwdev01/PlanLimits?driver=ODBC Driver 17 for SQL Server"
#engine = sqlalchemy.create_engine(eng_str1)
#engine.connect()

print('success')
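
The commented-out lines above hint at the SQLAlchemy route; a hedged sketch of that approach (assuming SQLAlchemy 1.4+, an installed SQL Server ODBC driver, and placeholder credentials/host) could look like this:

import pyodbc
import sqlalchemy

# Pick any installed SQL Server ODBC driver; spaces become '+' in the URL query string.
driver = next(d for d in pyodbc.drivers() if 'for SQL Server' in d)
eng_str = 'mssql+pyodbc://user:password@server/PlanLimits?driver=' + driver.replace(' ', '+')
engine = sqlalchemy.create_engine(eng_str)
with engine.connect() as conn:
    print(conn.execute(sqlalchemy.text('SELECT 1')).scalar())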

Example #4
 def _get_driver(self):
     return pyodbc.drivers()[-1].replace(" ", "+")
Example #5
    def _default_driver(self):
        drivers = pyodbc.drivers()
        if drivers:
            return drivers[0]

        return False
Example #6
def get_handler(
    imstype,
    datasource,
    url=None,
    host=None,
    port=None,
    options={},
    verifySSL=None,
    auth=None,
):
    accepted_imstypes = ["pi", "aspen", "ip21", "piwebapi", "aspenone"]

    if not imstype or imstype.lower() not in accepted_imstypes:
        raise ValueError(f"`imstype` must be one of {accepted_imstypes}")

    if imstype.lower() == "pi":
        if "PI ODBC Driver" not in pyodbc.drivers():
            raise RuntimeError(
                "No PI ODBC driver detected. "
                "Either switch to Web API ('piweb') or install appropriate driver."
            )
        if host is None:
            hostport = get_server_address_pi(datasource)
            if not hostport:
                raise ValueError(
                    f"Unable to locate data source '{datasource}'."
                    "Do you have correct permissions?"
                )
            host, port = hostport
        if port is None:
            port = 5450
        return PIHandlerODBC(host=host, port=port, options=options)

    if imstype.lower() in ["aspen", "ip21"]:
        if "AspenTech SQLplus" not in pyodbc.drivers():
            raise RuntimeError(
                "No Aspen SQLplus ODBC driver detected. Either switch to Web API "
                "('aspenweb') or install appropriate driver."
            )
        if host is None:
            hostport = get_server_address_aspen(datasource)
            if not hostport:
                raise ValueError(
                    f"Unable to locate data source '{datasource}'."
                    "Do you have correct permissions?"
                )
            host, port = hostport
        if port is None:
            port = 10014
        return AspenHandlerODBC(host=host, port=port, options=options)

    if imstype.lower() == "piwebapi":
        return PIHandlerWeb(
            url=url,
            datasource=datasource,
            options=options,
            verifySSL=verifySSL,
            auth=auth,
        )

    if imstype.lower() in ["aspenone"]:
        return AspenHandlerWeb(
            datasource=datasource,
            url=url,
            options=options,
            verifySSL=verifySSL,
            auth=auth,
        )
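
A hedged usage sketch; the data source and host names are hypothetical placeholders, and the handlers are the ones returned above:

# ODBC route: requires "PI ODBC Driver" to be present in pyodbc.drivers().
handler = get_handler("pi", "MY_PI_SOURCE", host="pi-server.example.com")  # port defaults to 5450

# Web API route: no ODBC driver needed.
web_handler = get_handler("piwebapi", "MY_PI_SOURCE", url="https://pi-server.example.com/piwebapi")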
Example #7
"""
Django settings for website project.

Generated by 'django-admin startproject' using Django 2.1.7.

For more information on this file, see
https://docs.djangoproject.com/en/2.1/topics/settings/

For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.1/ref/settings/
"""

import os

import pyodbc
pyodbc.drivers()

#import pymysql
#pymysql.install_as_MySQLdb()

# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..')

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'k3^^!cto+o17@vqlz%jmil*awakpwpi&m_-1aqb(lm_7mqpkbg'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
Example #8
            parts = line.replace('\\', '').replace('\'', '"').split(':')
            parsed += '{} "{}":{}'.format(comma, parts[0].lstrip(), parts[1])
            comma = ','

    string = "[{}]".format(parsed[0:-1])
    j = json.loads(string)
    label_map = {}
    for entry in j:
        if 'display_name' in entry:
            label_map[entry['id']] = entry['display_name']
        else:
            label_map[entry['id']] = entry['name']
    return label_map


if 'Microsoft Access Driver (*.mdb, *.accdb)' not in pyodbc.drivers():
    print('No Microsoft Access Driver found.')
    sys.exit(0)

print(
    '----------------------------------------------------------------------------'
)
print(
    'DO NOT RUN THIS SCRIPT ON YOUR MAIN DATABASE WITHOUT CREATING A BACKUP FIRST!'
)
print(
    '----------------------------------------------------------------------------'
)
print()
print()
print('Example database path: c:\\database\\import-test.accdb')
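
Once the driver check above passes, a connection can be opened with a DRIVER/DBQ connection string; a minimal sketch using the example path from the prompt above:

import pyodbc

access_conn = pyodbc.connect(
    r'DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};'
    r'DBQ=c:\database\import-test.accdb;'
)
access_cur = access_conn.cursor()
# List the user tables in the Access file.
for table in access_cur.tables(tableType='TABLE'):
    print(table.table_name)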
Example #9
 def createConn(self):
     try:
         odbc_driver = ['ODBC', 'SQL Server Native', 'SQL Native', 'SQL Server']
         driver = next(filter(lambda source: any(filter(lambda prefix: source.startswith(prefix),odbc_driver)), pyodbc.drivers()))
         cnxn = pyodbc.connect("Driver={"+ driver +"};"
                               "Server="+ self.server + ";"
                               "Database="+ self.database + ";"
                               "UID=" + self.uid+ ";"
                               "PWD=" + self.password+ ";")
         return cnxn
     except (pyodbc.OperationalError) as e:
         minor.Configure.logger.info("Error {0}".format(str(e)))
         return None
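
The next(filter(...)) chain above is dense; an equivalent sketch of the same driver selection written as a plain loop (raising RuntimeError instead of the bare StopIteration that next() would raise when nothing matches):

import pyodbc

def pick_sql_server_driver():
    # Same preference test as the lambda chain above.
    prefixes = ['ODBC', 'SQL Server Native', 'SQL Native', 'SQL Server']
    for source in pyodbc.drivers():
        if any(source.startswith(prefix) for prefix in prefixes):
            return source
    raise RuntimeError('no matching SQL Server ODBC driver installed')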
Example #10
def connect():
    import pyodbc
    print("Supported Drivers: {}".format(pyodbc.drivers()))
    connection=pyodbc.connect("Driver={SQL Server};Server=localhost;Database=master;UID=test;PWD=test")
    cursor=connection.cursor()
    return cursor
Example #11
import pyodbc as pdb
drivers = [item for item in pdb.drivers()]
driver = drivers[-1]
print("driver:{}".format(driver))
server = 'localhost'
database = 'testdb'
uid = 'sa'
pwd = '3141592654Pi'
con_string = f'DRIVER={driver}; SERVER={server}; DATABASE={database}; UID={uid}; PWD={pwd}'
print(con_string)
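
The snippet stops at printing the connection string; a hedged continuation that opens the connection and runs a sanity query, assuming the target is SQL Server and the credentials above are valid:

cnxn = pyodbc.connect(con_string)
cursor = cnxn.cursor()
cursor.execute('SELECT @@VERSION')
print(cursor.fetchone()[0])
cnxn.close()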
Example #12
import pyodbc
import pandas as pd
print(pyodbc.drivers())
conn = pyodbc.connect('DRIVER={MySQL ODBC 8.0 Unicode Driver};SERVER=localhost;PORT=3306;DATABASE=uportal;UID=root;PWD=lazio_2000')
cursor = conn.cursor()


def get_bad_info(tcp_id):
    # SQL must use the dbName.table prefix
    cursor.execute("select * from uportal.t_center_travelapply where id='{}'".format(tcp_id))
    rows = cursor.fetchall()
    for row in rows:
        print(row)
    sql = "select * from uportal.T_CENTER_OAATTACHMENT where refId='{}'".format(tcp_id)
    cursor.execute(sql)
    rows = cursor.fetchall()
    for row in rows:
        print(row)
        path = row[3]
        print(path)
        full_path ='D:/oa_attachment/tcp'+path
        print(full_path)
        import xlrd
        book = xlrd.open_workbook(full_path)
        print("The number of worksheets is {0}".format(book.nsheets))
        print("Worksheet name(s): {0}".format(book.sheet_names()))
        sh = book.sheet_by_index(0)
        print("{0} {1} {2}".format(sh.name, sh.nrows, sh.ncols))
        for rx in range(sh.nrows):
            print(sh.row(rx))
        df = pd.read_excel(full_path)
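
The queries above interpolate tcp_id straight into the SQL text; a hedged, parameterized sketch of the same lookups using pyodbc's ? placeholders (behaviour otherwise unchanged):

def get_bad_info_parameterized(tcp_id):
    # pyodbc binds each ? as a parameter instead of relying on string formatting.
    cursor.execute("select * from uportal.t_center_travelapply where id = ?", (tcp_id,))
    for row in cursor.fetchall():
        print(row)
    cursor.execute("select * from uportal.T_CENTER_OAATTACHMENT where refId = ?", (tcp_id,))
    for row in cursor.fetchall():
        print(row)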
Example #13
        :param rowid: (id) ID of the row to be removed.
        """
        query = 'DELETE FROM table_name WHERE user_id = ?;'
        try:
            self.cur.execute(query, (rowid, ))
        except Exception as e:
            self.con.rollback()
            print('\n[x] Failed to remove the record [x]')
            print(f'[x] Rolling back the operation (rollback) [x]: {e}\n')
        else:
            self.con.commit()
            print('\n[!] Record removed successfully [!]')


if __name__ == '__main__':
    print('Driver(s) found:', drivers())
    # Creating the database connection.
    database = ConnectDB()

    user = ('Felipe', 35, 'Masculino')
    # Inserting a record into the table.
    database.insert_row(data=user)

    users_list = [
        ('Fernanda', 20, 'Feminino'),
        ('João', 50, 'Masculino'),
    ]
    # Inserting multiple records into the table.
    database.insert_rows(data=users_list)

    users_tuple = (
Example #14
 def getDrivers(self):
     return pyodbc.drivers()
Example #15
    def __init__(self, engine, options):

        self.engine = engine
        self.options = options

        if engine in ('mysqli', 'mariadb'):
            import MySQLdb as mysql

            if 'fuckfred' in options['passwd']:
                raise Exception(
                    'Could not establish connexion with MySQL, bad language used!'
                )

            self.conn = mysql.connect(host=options['host'],
                                      port=int(options['port']),
                                      user=options['user'],
                                      passwd=options['passwd'],
                                      db='')
            self.cur = self.conn.cursor()

        elif engine == 'pgsql':
            import psycopg2 as pgsql

            self.conn = pgsql.connect(host=str(options['host']),
                                      port=int(options['port']),
                                      user=str(options['user']),
                                      password=str(options['passwd']))
            try:
                self.cur = self.conn.cursor()
            except:
                raise Exception(
                    'Connexion failed! Make sure the database \'%s\' exists.' %
                    str(options['user']))

        elif engine == 'sqlsrv':
            import pyodbc

            host = str(options['host'])
            port = int(options['port'])
            user = str(options['user'])
            password = str(options['passwd'])

            # Look for installed ODBC Driver for SQL Server.
            drivers = pyodbc.drivers()
            sqlsrvdriver = next(
                (driver for driver in drivers if "for SQL Server" in driver),
                None)
            if sqlsrvdriver is None:
                installurl = 'https://sqlchoice.azurewebsites.net/en-us/sql-server/developer-get-started/python'
                raise Exception(
                    "You need to install an ODBC Driver for SQL Server. Check out %s for more info."
                    % installurl)

            logging.debug('Using %s' % sqlsrvdriver)

            connectionstr = "DRIVER=%s;SERVER=%s;PORT=%d;UID=%s;PWD=%s" \
                            % (sqlsrvdriver, host, port, user, password)
            self.conn = pyodbc.connect(connectionstr)
            self.conn.autocommit = True
            self.cur = self.conn.cursor()

        else:
            raise Exception('DB engine %s not supported' % engine)
Example #16
def main(working_dir_, source_date_):
    """
    The parametrized main function for CLI in the cloud
    """
    #
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-statements
    # pylint: disable=too-many-locals
    # pylint: disable=too-many-arguments
    #    cwd = os.path.realpath(os.path.dirname(__file__)) #os.getcwd() # ./
    working_dir = working_dir_
    date_ = source_date_
    database = 'qai'
    server = 'cd5m7wkqacpdeus2mia12301.public.dabc3424290b.database.windows.net,3342'
    username = '******'
    password = '******'
    #Authentication: SQL Server Authentication
    # NOTE: The following works on a Mac with the MSSQL 13 driver installed - it is here as the
    # default because Art's Anaconda environment doesn't show a non-empty list of drivers from
    # pyodbc
    driver = '/usr/local/lib/libmsodbcsql.13.dylib'  # '{ODBC Driver 13 for SQL Server}'
    drivers = [item for item in pyodbc.drivers()]
    if drivers:
        driver = drivers[0]
    #print('driver:{}'.format(driver))
    #
    cnxn = pyodbc.connect('DRIVER=' + driver + ';SERVER=' + server +
                          ';PORT=1433;DATABASE=' + database + ';UID=' +
                          username + ';PWD=' + password)
    cursor_ = cnxn.cursor()

    print('\n\ndownloading corporate events data ... ', datetime.now())
    print('\n\nprocessing ...', datetime.now())
    query = '''SELECT DISTINCT
        A.SecCode, A.NAME, 
      --  C.ActionTypeCode,
        P.Desc_,
        C.NumNewShares,
        C.NumOldShares,
        FORMAT (C.AnnouncedDate, 'd', 'en-us') as AnnouncedDate,
        FORMAT (C.RecordDate, 'd', 'en-us') as RecordDate,
        FORMAT (C.EffectiveDate, 'd', 'en-us') as EffectiveDate,        
        FORMAT (C.ExpiryDate, 'd', 'en-us') as ExpiryDate,  
        C.OfferCmpyName, 
        C.CashAmt,
        D.RIC,
        K.TICKER,
        C.EffectiveDate  

        FROM SecMSTRX      A
        
        JOIN SecMapX       B
            ON A.SECCODE = B.SECCODE
            AND A.TYPE_ = 1
            AND B.VENTYPE = 33
        
        JOIN DS2CapEvent    C
            ON B.Vencode = C.InfoCode
            
        JOIN DS2xRef P on c.ActionTypeCode = P.code
                
        JOIN    RDCSecMapX  M
            ON  A.SecCode = M.SecCode
            
        JOIN    RDCQuoteInfo    K
            ON      M.VenCode = K.QuoteID
            
        JOIN    RDCRICData      D
            ON      K.QuoteID = D.QuoteID
                   
        WHERE   
            EffectiveDate <= datediff(d, 0, getdate())
            AND  
            EffectiveDate >=\'''' + date_ + ''''
            
        ORDER BY C.EffectiveDate DESC
'''
    with open(os.path.join(working_dir, 'query_corporate_events.txt'),
              'w') as query_file:
        query_file.write(query)
        keep_trying_to_query = True
        result = None
        # the query might fail because the computer got moved to a different location,
        # which resulted in IP change; in this case, try to re-open the connection, then re-do the query
        while keep_trying_to_query:
            try:
                print(
                    '\n\ntrying to execute cursor_.execute(COMPOSED_query)...',
                    datetime.now())
                cursor_.execute(query)
                try:
                    print(
                        '\n\ntrying to execute result = cursor_.fetchall()...',
                        datetime.now())
                    result = cursor_.fetchall()
                    keep_trying_to_query = False
                except Exception as err:
                    try:
                        print(
                            '\n\nexception #5 for cursor_.execute(COMPOSED_query)',
                            err, datetime.now())
                        print(
                            '\n\nexception #6 for result = cursor_.fetchall()',
                            err, datetime.now())
                        cursor_.close()
                        cnxn.close()
                        print("\n\nre-opening server connection...",
                              datetime.now())
                        cnxn = pyodbc.connect('DRIVER=' + driver + ';SERVER=' +
                                              server + ';PORT=1433;DATABASE=' +
                                              database + ';UID=' + username +
                                              ';PWD=' + password)
                        cursor_ = cnxn.cursor()
                    except Exception as err:
                        print('\n\nexception #7 for reconnect', err,
                              datetime.now())
            except Exception as err:
                try:
                    print(
                        '\n\nexception #8 for cursor_.execute(COMPOSED_query)',
                        err, datetime.now())
                    print('\n\nexception #9 for result = cursor_.fetchall()',
                          err, datetime.now())
                    cursor_.close()
                    cnxn.close()
                    print("\n\nre-opening server connection...",
                          datetime.now())
                    cnxn = pyodbc.connect('DRIVER=' + driver + ';SERVER=' +
                                          server + ';PORT=1433;DATABASE=' +
                                          database + ';UID=' + username +
                                          ';PWD=' + password)
                    cursor_ = cnxn.cursor()
                except Exception as err:
                    print('\n\nexception #10 for reconnect', err,
                          datetime.now())
#
        if result is not None:
            table1 = []
            table1.append(
                create_titles([
                    'EffectiveDate', 'ID', 'NAME', 'ActionTypeCode',
                    'NumNewShares', 'NumOldShares', 'AnnouncedDate',
                    'RecordDate', 'ExpiryDate', 'OfferCmpyName', 'CashAmt',
                    'RIC', 'Ticker'
                ]))
            table = []
            print("\n\nquery produced %d rows" % len(result), datetime.now())
            for row in result:
                row3 = []
                #
                #                    A.ID,  -- 0
                #           A.NAME,         -- 1
                #        C.ActionTypeCode,  -- 2
                #        C.NumNewShares,    -- 3
                #        C.NumOldShares,    -- 4
                #        C.AnnouncedDate,   -- 5
                #        C.RecordDate,      -- 6
                #        C.EffectiveDate,   -- 7
                #        C.ExpiryDate,      -- 8
                #        C.OfferCmpyName,   -- 9
                #        C.CashAmt          -- 10
                #
                row3.append(row[7])
                row3.append(int(row[0]))  # SecCode
                row3.append(row[1])  # co.name
                if row[2] is not None:
                    row3.append(row[2])  # ActionTypeCode
                else:
                    row3.append('')
                if row[3] is not None:
                    row3.append(row[3])  # Number of new shares
                else:
                    row3.append('')
                if row[4] is not None:
                    row3.append(row[4])
                else:
                    row3.append('')
                if row[5] is not None:
                    row3.append(row[5])
                else:
                    row3.append('')
                if row[6] is not None:
                    row3.append(row[6])
                else:
                    row3.append('')
                if row[8] is not None:
                    row3.append(row[8])
                else:
                    row3.append('')
                if row3 not in table:
                    table.append(row3)
                if row[9] is not None:
                    row3.append(row[9])
                else:
                    row3.append('')
                if row[10] is not None:
                    row3.append(row[10])
                else:
                    row3.append('')
                if row[11] is not None:
                    row3.append(row[11])
                else:
                    row3.append('')
                if row[12] is not None:
                    row3.append(row[12])
                else:
                    row3.append('')
                if row[13] is not None:
                    row3.append(row[13])
                else:
                    row3.append('1975-01-01')
                if row3 not in table:
                    table.append(row3)
#
            table = sorted(table, key=operator.itemgetter(13), reverse=True)
            table2 = []
            for row in table:
                table2.append(row[:-1])
            table1 += table2
            now = datetime.now()
            ofp = os.path.join(
                working_dir,
                'corporate_events_data_' + now.strftime("%Y_%m_%d") + '.csv')
            with open(ofp, 'w') as result_file:
                w_r = csv.writer(result_file, dialect='excel')
                w_r.writerows(table1)


#
    print('\n\nexiting ... ', datetime.now())
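
The execute/fetch/reconnect pattern above is written out twice (and again in Example #25); a hedged helper that wraps it once, taking the same 'DRIVER=...;SERVER=...' connection string built earlier as conn_str:

def fetch_with_reconnect(cnxn, cursor_, query, conn_str):
    """Run query and fetch all rows, re-opening the connection after a failure
    (e.g. after the machine's IP changed), like the inline loops above."""
    while True:
        try:
            cursor_.execute(query)
            return cnxn, cursor_, cursor_.fetchall()
        except Exception as err:
            print('\n\nquery failed, re-opening server connection...', err, datetime.now())
            try:
                cursor_.close()
                cnxn.close()
            except Exception:
                pass
            cnxn = pyodbc.connect(conn_str)
            cursor_ = cnxn.cursor()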
Example #17
import pyodbc

DRIVER = [drvr for drvr in pyodbc.drivers()
          if drvr.startswith('ODBC Driver')][0]

SERVER = 'MSERVER56'

USER = '******'
PASSWD = 'aaa'

config = dict(server=SERVER,
              port=1433,
              database='xf',
              username=USER,
              password=PASSWD)
conn_str = (
    'SERVER={server},{port};DATABASE={database};UID={username};PWD={password}')
conn = pyodbc.connect(r'DRIVER={%s};' % DRIVER + conn_str.format(**config))
cursor = conn.cursor()


def dbquery(sql_str):
    cursor.execute(sql_str)
    return cursor.fetchall()


print "*** S&P XF DB: Connected to ", dbquery("select @@servername")
Example #18
import pyodbc

for x in pyodbc.drivers():
    print(x)
print("")

config = dict(server='localhost',
              port=54320,
              database='test',
              username='******',
              password='******')

connection_str = (r"Driver=PostgreSQL Unicode(x64);"
                  r"Server={server};"
                  r"Port={port};"
                  r"Database={database};"
                  r"UID={username};"
                  r"PWD={password};")

cnxn = pyodbc.connect(connection_str.format(**config))
cursor = cnxn.cursor()
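
A short hedged continuation showing the cursor in use; the query only checks connectivity and assumes the PostgreSQL credentials above are valid:

cursor.execute("SELECT version();")
print(cursor.fetchone()[0])
cnxn.close()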
Example #19
import csv
import pyodbc
import random
import numpy as np

[x for x in pyodbc.drivers() if x.startswith('GaussMPP')]
cnxn = pyodbc.connect(
    'DRIVER={GaussMPP};SERVER=localhost;PORT=26000;DATABASE=postgres;UID=omm;PWD=gauss@333'
)
crsr = cnxn.cursor()

file = "cols_insert.csv"
with open(file, "w", newline='') as csvFile:
    writer = csv.writer(csvFile, delimiter=',')
    writer.writerow([
        'keyFieldSize', 'nonKeyFieldSize', 'numOfFixedLengthField',
        'numOfVarLengthField', 'numOfRows', 'timePerRow'
    ])
csvFile.close()

file = "cols_select.csv"
with open(file, "w", newline='') as csvFile:
    writer = csv.writer(csvFile, delimiter=',')
    writer.writerow([
        'keyFieldSize', 'nonKeyFieldSize', 'numOfFixedLengthField',
        'numOfVarLengthField', 'numOfRows', 'timePerRow'
    ])
csvFile.close()

for MAX in range(0, 1000):
    print(MAX)
Example #20
    if not mod_odbc_sessions:
        mod_odbc_sessions = {SESSION_DEFAULT: {}}
except NameError:
    mod_odbc_sessions = {SESSION_DEFAULT: {}}

# Get data from robot
module = GetParams("module")  # Get command executed
session = GetParams("session")  # Get Session name
if not session:
    session = SESSION_DEFAULT

try:
    if module == "get_drivers":
        var_name = GetParams("var_name")
        print(var_name)
        SetVar(var_name, {"drivers": pyodbc.drivers()})

    if module == "listDrivers":
        result = GetParams('result')
        filter_ = GetParams('filter')

        drivers = []
        for driver in pyodbc.drivers():
            if bool(filter_):
                if filter_ in driver:
                    drivers.append(driver)
            else:
                drivers.append(driver)

        SetVar(result, drivers)
Example #21
response = client.get_secret_value(
    SecretId=
    'arn:aws:secretsmanager:us-west-2:399991052688:secret:rmltestsqlserver2016-46PuZx'
)
data = json.loads(response['SecretString'])

server = 'sqlserver2016.cbsebh4l1881.us-west-2.rds.amazonaws.com'
database = 'rmltest'
username = data['username']
password = data['password']

print('user name : ' + data['username'])
print('password : ' + data['password'])

driver = sorted(pyodbc.drivers()).pop()

cnxn = pyodbc.connect('DRIVER={ODBC Driver 17 for SQL Server};SERVER=' +
                      server + ';DATABASE=' + database + ';UID=' + username +
                      ';PWD=' + password)
print(cnxn)
cursor = cnxn.cursor()

sql_select_Query = "SELECT * FROM Persons"
cursor.execute(sql_select_Query)
records = cursor.fetchall()

print(records)

#close the connection
cnxn.close()
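
Note that the driver selected via sorted(pyodbc.drivers()).pop() above is never used; a hedged variant that interpolates it instead of hard-coding 'ODBC Driver 17 for SQL Server':

cnxn = pyodbc.connect('DRIVER={' + driver + '};SERVER=' + server +
                      ';DATABASE=' + database + ';UID=' + username +
                      ';PWD=' + password)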
Example #22
import sqlalchemy
import psycopg2
import pyodbc
import cx_Oracle

assert 'FreeTDS' in pyodbc.drivers()
assert 'ODBC Driver 17 for SQL Server' in pyodbc.drivers()

try:
    # make sure oracle manages to load tns client libraries.
    # Will fail but we want to be sure we don't fail on loading the driver
    cx_Oracle.connect()
except Exception as ex:
    assert 'ORA-12162' in str(ex)


# freetds test
engine = sqlalchemy.create_engine('mssql+pyodbc:///testuser:[email protected]:1433/TEST?driver=FreeTDS')
try:
    engine.execute('select 1 as [Result]')
except Exception as ex:
    assert "Can't open lib" not in str(ex), "Failed because of missing lib: " + str(ex)

print("All is good. All imported successfully")
Example #23
import sys
import asyncio

import numpy as np
import pyodbc
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QIcon, QStandardItemModel, QStandardItem
from PyQt5.QtSql import QSqlDatabase, QSqlQueryModel, QSqlQuery
from PyQt5.QtWidgets import QApplication, QDialog, QGridLayout, QLabel, QPushButton, QTableView, QTableWidget, QTableWidgetItem

msa_drivers = [x for x in pyodbc.drivers() if 'ACCESS' in x.upper()]
print(f'MS-ACCESS Drivers : {msa_drivers}')


def createConnection():
    con_String = (
        r'DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};'
        r'DBQ=E:\project\year\code\exe project\project v1.0\v1\filter.accdb;')

    global db
    db = QSqlDatabase.addDatabase('QODBC')

    db.setDatabaseName(con_String)

    if db.open():
        print('connect to DataBase Server successfully')
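
The snippet above only reports success; a hedged sketch of the failure branch, using QSqlDatabase's lastError() to explain why the QODBC connection could not be opened:

    if db.open():
        print('connect to DataBase Server successfully')
    else:
        print('connection failed:', db.lastError().text())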
Example #24
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 11 21:58:10 2020

@author: eric
Miraceti56
"""

import pyodbc

price_data = [[2.00, 3.00, 1.00, 2.40, 100.00, '01/02/2020'],
              [3.00, 3.00, 5.00, 9.40, 300.00, '02/02/2020'],
              [4.00, 2.00, 1.00, 2.40, 200.00, '03/03/2020']]

print('installed SQL version')
print()
print()

for driver in pyodbc.drivers():
    print(driver)
Example #25
def main(start_date_,
         working_dir_,
         nblocks_,
         email_notification_,
         top_,
         archive=False):
    """
    The parametrized main function for CLI in the cloud
    """
    # use the following command:
    # rm -r temp/*; python test.py --top 1000 -s 2018-01-01
    #-dir ./temp/ -nblocks 100 --email-notification
    # on Mac terminal from the dir where you have test.py
    # command line arguments; use comments below as an example
    #TOP = 10000000
    # reduce TOP value to 10 for debugging; put it to inf for a full run
    #DATE = '2017-01-01'
    # 'from' parameter for historical pricing data
    #WORKING_DIR = './refinitiv_qa_direct_qai_master_and_pricing_tables/'\
    #    +str(time.strftime("%Y-%m-%d"))+'/'
    # dir where all outputs go; it can be dated as above
    #NBLOCKS = 100
    # pricing data are very long queries; they need to be partitioned in blocks
    # as a separate project, optimize queries
    #
    #
    # pylint: disable=too-many-branches
    # pylint: disable=too-many-statements
    # pylint: disable=too-many-locals
    # pylint: disable=too-many-arguments
    top = top_
    date_from = start_date_
    nblocks = nblocks_
    cwd = os.path.realpath(os.path.dirname(__file__))  #os.getcwd() # ./
    working_dir = working_dir_
    # empty the whole working dir
    for root, dirs, files in os.walk(working_dir):
        for f_f in files:
            os.unlink(os.path.join(root, f_f))
        for d_d in dirs:
            shutil.rmtree(os.path.join(root, d_d))
    shutil.copy(os.path.join(cwd, 'master_file_joe.csv'), working_dir)
    #
    database = 'qai'
    server = 'cd5m7wkqacpdeus2mia12301.public.dabc3424290b.database.windows.net,3342'
    username = '******'
    password = '******'
    #Authentication: SQL Server Authentication
    # NOTE: The following works on a Mac with the MSSQL 13 driver installed - it is here as the
    # default because Art's Anaconda environment doesn't show a non-empty list of drivers from
    # pyodbc
    driver = '/usr/local/lib/libmsodbcsql.13.dylib'  # '{ODBC Driver 13 for SQL Server}'
    drivers = [item for item in pyodbc.drivers()]
    if drivers:
        driver = drivers[0]
    #print('driver:{}'.format(driver))
    #
    cnxn = pyodbc.connect('DRIVER=' + driver + ';SERVER=' + server +
                          ';PORT=1433;DATABASE=' + database + ';UID=' +
                          username + ';PWD=' + password)
    cursor_ = cnxn.cursor()
    refinitiv_data_n_columns = 8
    s_s = ""
    if top is not None:
        s_s = ''' TOP ''' + str(top)
    query = '''SELECT''' + s_s + '''
                        A.SecCode                           -- SecCode -- 0
              --  ,       MR1.ID
                ,       MR1.NAME            AS CURRNAME     -- current name -- 1
                ,       G1.ISSUER           AS PITISSUER    -- point-in-time name -- 2
               -- ,       G1.EXCHANGE
                ,       MR1.Country                         -- country -- 3
                ,       G1.StartDate                        -- from -- 4
                ,       G1.EndDate                          -- to -- 5
                ,       K1.TICKER                           -- ticker -- 6
             --   ,       G1.EXCHANGE
              --  ,       I.ISSUER            AS CURRENTISSUE
                --,       I.STATUS
                ,       I.SECTYPE           AS CURRSECTYPE  -- type --7

                FROM            SecMstrX                        A

                JOIN            SECMAPX             M
                                ON                  M.SECCODE = A.SECCODE
                                AND                 M.VenType = 1       -- IDC
                                AND                 TYPE_ = 1           -- NorthAmer Equity
                                AND                 M.EXCHANGE <> 2

                                -- AND     M.RANK = 1   -- VIEW ALL (commented out) OR CURRENT ONLY
                                -- AND     A.COUNTRY = 'USA' -- comment this out for ADR's

                JOIN            Prc.PrcTKChg                    K
                                ON                  M.VENCODE = K.Code

                JOIN            PRC.PRcsCCHG        G
                                ON                  G.CODE =    K.CODE
                                AND                 ISNULL(G.ENDDATE,'1/1/2059')
                                BETWEEN             K.STARTDATE AND ISNULL(K.ENDDATE,'1/1/2059')

                --JOIN PRCCODE2 Y
                --ON Y.TYPE_ = 2 AND ASCII(G.EXCHANGE) = Y.CODE

                JOIN            PRC.PRCINFO         I
                                ON                  I.CODE =    G.CODE
                                AND                 I.SECTYPE   NOT IN ('X','P','E','I','S','U','W','0','7','T','Q','R','V')

                JOIN            SECMAPX             MP1
                                ON                  MP1.VENCODE =   I.CODE
                                AND                 MP1.RANK =      M.RANK
                                AND                 MP1.VENTYPE =   1
                                AND                 MP1.EXCHANGE =  M.EXCHANGE

                JOIN            SECMSTRX            MR1
                                ON                  MR1.SECCODE =   MP1.SECCODE
                                AND                 MR1.TYPE_ =     1

                JOIN            SECMAPX             MP2
                                ON                  MP2.SECCODE =   MR1.SECCODE
                                AND                 MP2.VENTYPE = 1
                                AND                 MP2.RANK =      M.RANK
                JOIN            PRC.PRCTKCHG        K1
                                ON                  K1.CODE =       MP2.VENCODE
                                --AND ISNULL(K1.ENDDATE,'1/1/2059') BETWEEN K.STARTDATE AND ISNULL(K.ENDDATE,'1/1/2059')

                JOIN            PRC.PRCSCCHG        G1
                                ON                  G1.CODE =       K1.CODE
                                AND                 ISNULL(G1.ENDDATE,'1/1/2059')
                                BETWEEN             K1.STARTDATE    AND     ISNULL(K1.ENDDATE,'1/1/2059')

                 GROUP BY       A.SecCode
                 ,              MR1.ID
                 ,              MR1.NAME
                 ,              G1.ISSUER
                 ,              G1.EXCHANGE
                 ,              MR1.Country
                 ,              G1.StartDate
                 ,              G1.EndDate
                 ,              K1.TICKER
                 ,              G1.EXCHANGE
                 ,              I.ISSUER
                 ,              I.STATUS
                 ,              I.SECTYPE

                 ORDER BY       MR1.ID
                 ,              G1.STARTDATE
                 '''
    # output the query string to a file
    with open(os.path.join(working_dir, 'query_master_table.txt'),
              "w") as query_file:
        query_file.write(query)
    print('\n\nexecuting the query ... ', datetime.now())
    try:
        print('\n\ntrying to execute cursor_.execute(query) ...',
              datetime.now())
        cursor_.execute(query)
    except Exception as err:
        print('\n\nexception #1 for cursor_.execute(query)', err,
              datetime.now())
    print('\n\nfetching query result ... ', datetime.now())
    try:
        print('\n\ntrying to execute result = cursor_.fetchall()...',
              datetime.now())
        result = cursor_.fetchall()
    except Exception as err:
        print('\n\nexception #2 for result = cursor_.fetchall()', err,
              datetime.now())

    tickers = []
    print('\n\nwriting .csv file (master table) ... ', datetime.now())
    with tqdm(total=len(result), file=sys.stdout) as pbar:
        table_master = []
        table_merged = []
        for row in result:
            pbar.set_description('progress at %s' % datetime.now())
            pbar.update(1)
            row1 = []
            row3 = []
            #                   A.SecCode                           -- SecCode -- 0
            #              --  ,       MR1.ID
            #                ,       MR1.NAME            AS CURRNAME     -- current name -- 1
            #                ,       G1.ISSUER           AS PITISSUER    -- point-in-time name -- 2
            #               -- ,       G1.EXCHANGE
            #                ,       MR1.Country                         -- country -- 3
            #                ,       G1.StartDate                        -- from -- 4
            #                ,       G1.EndDate                          -- to -- 5
            #                ,       K1.TICKER                           -- ticker -- 6
            #             --   ,       G1.EXCHANGE
            #              --  ,       I.ISSUER            AS CURRENTISSUE
            #                --,       I.STATUS
            #                ,       I.SECTYPE           AS CURRSECTYPE  -- type --7
            date_to = datetime.date(datetime.now())
            if row[5] is not None:  # to
                date_to = datetime.date(row[5])
            else:
                date_to = datetime.date(datetime.now())
            if date_to > datetime.date(datetime.now()):
                date_to = datetime.date(datetime.now())
    #
            row1.append(str(row[6]))  # ticker
            tickers.append(row[6])
            row1.append(str(row[2]))  # point-in-time name
            row1.append(str(date_to))  # to
            #
            row1.append(str(row[0]))  # SecCode
            row3.append(int(row[0]))  # int for sorting
            row1.append(datetime.date(row[4]))  # from
            row3.append(datetime.date(row[4]))
            row1.append(date_to)  # to
            row3.append(date_to)
            row1.append(str(row[2]))  # point-in-time name
            row3.append(str(row[2]))
            row1.append(str(row[6]))  # ticker
            row3.append(str(row[6]))
            row1.append(str(row[3]))  # country
            row3.append(str(row[3]))
            row1.append(str(row[1]))  # current name
            row3.append(str(row[1]))
            row1.append(str(row[7]))  # type
            row3.append(str(row[7]))
            if row1 not in table_merged:
                table_merged.append(row1)
            if row3 not in table_merged:
                table_master.append(row3)

        with open(os.path.join(working_dir, 'master_table.csv'),
                  'w') as result_file:
            table_master1 = []
            table_master1.append(
                create_titles([
                    'SecCode', 'From', 'To', 'Point-in-time name', 'Ticker',
                    'Country', 'Current name', 'Type'
                ]))
            table_master = sorted(table_master, key=lambda item: item[0])
            #         sorted(table_master, key=operator.itemgetter(0))
            table_master1 += table_master
            w_r = csv.writer(result_file, dialect='excel')
            w_r.writerows(table_master1)

        print('\n\npost-processing 1 ... ', datetime.now())

        with open(os.path.join(working_dir, 'master_file_joe.csv'),
                  'r') as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=',')
            nrow = 0
            for row in csv_reader:
                row1 = []  # change True to False to use the list
                if (str(row[3]) in ('C', 'BAC', 'AAPL')
                        or True) and nrow != 0:  # skip titles
                    row1.append(str(row[3]))
                    row1.append(str(row[4]))
                    row1.append(str(row[2]))
                    for _ in range(refinitiv_data_n_columns):
                        row1.append('')  # fill in with blanks for merged .csv
                    for r_r in row:
                        row1.append(r_r)
                    table_merged.append(row1)
                nrow += 1

        print('\n\npost-processing 2 ... ', datetime.now())

        with open(
                os.path.join(working_dir,
                             'master_table_merged_art_vs_joe.csv'),
                'w') as result_file:
            w_r = csv.writer(result_file, dialect='excel')
            table_merged1 = sorted(table_merged,
                                   key=operator.itemgetter(0, 1, 2))
            table_merged2 = []
            table_merged2.append(
                create_titles([
                    '', '', '', 'SecCode', 'From', 'To', 'Point-in-time name',
                    'Ticker', 'Country', 'Current name', 'Type', 'ID', 'FROM',
                    'TO', 'TICKER', 'NAME', 'TYPE'
                ]))
            table_merged2 += table_merged1
            w_r.writerows(table_merged2)

        print('\n\npost-processing 3 ... ', datetime.now())

        tickers_joe = []  # this should be an array of unique tickers
        i = 0
        with open(os.path.join(working_dir, 'master_file_joe.csv'),
                  'r') as csv_file:
            csv_reader = csv.reader(csv_file, delimiter=',')
            for row in csv_reader:
                if i != 0:  # skip titles at i = 0
                    if row[3] not in tickers_joe:  # unique tickers
                        tickers_joe.append(row[3])
                i += 1

        tikers_art = []  # this should be an array of unique tickers
        for t_t in tickers:
            if t_t not in tikers_art:
                tikers_art.append(t_t)

        print('\n\nnumber of unique tickers in the master: ', len(tikers_art),
              datetime.now())

        if top is None:
            print('\n\npost-processing 4 ... ', datetime.now())

            missing_tikers = []
            for t_j in tickers_joe:
                if t_j not in tikers_art:  # unique tickers
                    missing_tikers.append(t_j)

            missing_tikers1 = []
            for m_t in missing_tikers:
                if m_t not in missing_tikers1:  # unique tickers
                    missing_tikers1.append(m_t)

            print('\n\nnumber of missing tickers: ', len(missing_tikers1),
                  datetime.now())

            tickers_without_suffix = []
            for m_t in missing_tikers1:
                if m_t.find('.') != -1:
                    m_t = m_t.split('.')[0]
                else:
                    m_t = m_t[:-1]  # try to remove the fused suffix for missing tickers
                if m_t not in tickers_without_suffix:
                    tickers_without_suffix.append(m_t)
            print('\n\nnumber of missing tickers without suffix: ',
                  len(tickers_without_suffix), datetime.now())

            query = '''SELECT * FROM PRC.PRCSCCHG WHERE TICKER IN (\''''

            for tws in tickers_without_suffix:
                query += str(tws) + '''\', \''''
            query = query[:-3]
            query += ''')'''

            try:
                print('\n\ntrying to execute cursor_.execute(query)...',
                      datetime.now())
                cursor_.execute(query)
            except Exception as err:
                print('\n\nexception #3 for cursor_.execute(query)', err,
                      datetime.now())

            print('\n\nfetching second query result ... ', datetime.now())
            try:
                print('\n\ntrying to execute result = cursor_.fetchall()...',
                      datetime.now())
                result = cursor_.fetchall()
            except Exception as err:
                print('\n\nexception #4 for result = cursor_.fetchall()', err,
                      datetime.now())

            with open(os.path.join(working_dir, 'addendum_master_table.csv'),
                      'w') as result_file:
                table_addendum = result
                table_addendum = sorted(table_addendum,
                                        key=operator.itemgetter(4))
                table_addendum1 = []
                table_addendum1.append(
                    create_titles([
                        'SecCode', 'From', 'To', 'CUSIP', 'Ticker', 'SEDOL',
                        'Issuer', 'Full ticker', 'Base ticker', 'Group',
                        'Series', 'Exchange'
                    ]))
                table_addendum1 += table_addendum
                w_r = csv.writer(result_file, dialect='excel')
                w_r.writerows(table_addendum1)

            found_tickers = []
            for row in result:
                if str(row[4]) not in found_tickers:
                    found_tickers.append(str(row[4]))

            print('\n\nnumber of found tickers: ', len(found_tickers),
                  datetime.now())

            missing_tikers2 = []
            for m_t in missing_tikers1:
                wosuffix = m_t
                if wosuffix.find('.') != -1:
                    wosuffix = wosuffix.split('.')[0]
                else:
                    wosuffix = wosuffix[:-1]  # try to remove the fused suffix
                if wosuffix not in found_tickers and m_t not in found_tickers:
                    # tickers w/o and with suffix
                    missing_tikers2.append(m_t)

            print('\n\nfinal number of missing tickers: ',
                  len(missing_tikers2), datetime.now())
            print('\n\nwriting missing tickers ... ', datetime.now())

            with open(os.path.join(working_dir, 'missing_tickers.csv'),
                      'w') as result_file:
                w_r = csv.writer(result_file, dialect='excel')
                missing_tikers2.sort()
                missing_tikers3 = []
                for row in missing_tikers2:
                    with open(os.path.join(working_dir, 'master_file_joe.csv'),
                              'r') as csv_file:
                        csv_reader = csv.reader(csv_file, delimiter=',')
                        i = 0
                        for row2 in csv_reader:
                            if row2[3] == row and i != 0:  # skip titles at i = 0
                                row5 = []
                                row5.append(str(row2[3]))
                                row5.append(str(row2[4]))
                                if row5 not in missing_tikers3:  # unique entries
                                    missing_tikers3.append(row5)
                            i += 1
                missing_tikers4 = []
                missing_tikers4.append(create_titles(['Tickers', 'Co. names']))
                missing_tikers4 += missing_tikers3
                w_r.writerows(missing_tikers4)

        # build objects for missing ticker qqq
        #i = 0
        #for t in missing_tikers3:
        #    print(t)
        #    T = TickerNeighborhood(ticker=t[0])
        #    T.current_name = t[1]
        #    print(T)
        #    print(T.ticker)
        #    print(T.name)
        #    list_of_suggested_tickers_for_addendum=[]
        #    list_of_suggested_tickers_for_addendum
        #=T.analyze_the_neighborhood_of_T_while_keeping_in_mind_joes_master_table
        #('master_table_joe.csv')

    print('\n\ndownloading pricing data ... ', datetime.now())

    seccodes = []
    with open(os.path.join(working_dir, 'master_table.csv')) as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        l_l = 0
        for row in csv_reader:
            if row[0] not in seccodes and l_l > 0:  # skip titles, unique seccodes
                seccodes.append(row[0])
            l_l += 1

    print('\n\ndistinct seccodes = ', len(seccodes), datetime.now())
    print('\n\nprocessing ...', datetime.now())

    query = '''
    --This query returns the fully adjusted Open, High, Low, and Close Pricing data in Local Currency using the Ds2Primqtprc table for North American Equities*/

                    SELECT DISTINCT 

                        A.SecCode                     -- seccode  new col=0
              --  ,       MR1.ID
             --   ,       MR1.NAME AS CURRNAME
             --   ,       G1.ISSUER AS PITISSUER
              --  ,       G1.EXCHANGE
              --  ,       MR1.Country
              --  ,       G1.StartDate
              --  ,       G1.EndDate
                ,       K1.TICKER                     -- ticker new col=1
            --    ,       G1.EXCHANGE
             --   ,       I.ISSUER AS CURRENTISSUE
              --  ,       I.STATUS
              --  ,       I.SECTYPE AS CURRSECTYPE
              --  ,       C1.TotRet
             --   ,       C1.placeholder
             
                ,       C1.Date_               --  market date col=15; new col=2
             , C1.Open_                            --  col=16 open; new col=3
            , C1.High                             --  col=17 high; new col=4
            , C1.Low                              --  col=18 low; new col=5
                , C1.Close_                            --  col=19 close; new col=6
                ,  C1.Volume                          --  col=20 volume; new col=7
                ,  C1.TotRet                          --  col=21 totret; new col=8

                FROM            SecMstrX                        A

                JOIN            SECMAPX             M

                                ON                  M.SECCODE = A.SECCODE
                                AND                 M.VenType = 1       -- IDC
                                AND                 TYPE_ = 1           -- NorthAmer Equity
                                AND                 M.EXCHANGE <> 2

                                -- AND M.EXCHANGE = 1 AND A.TYPE_ = 1
                                -- AND     M.RANK = 1   -- VIEW ALL OR CURRENT ONLY
                                -- AND     A.COUNTRY = 'USA' -- comment this out for ADR's

                JOIN            Prc.PrcTKChg                    K
                                ON                  M.VENCODE = K.Code

                JOIN            PRC.PRcsCCHG        G
                                ON                  G.CODE =    K.CODE
                                AND                 ISNULL(G.ENDDATE,'1/1/2059')
                                BETWEEN             K.STARTDATE AND ISNULL(K.ENDDATE,'1/1/2059')

                -- JOIN            PRC.PRCINFO         I
                --                 ON                  I.CODE =    G.CODE
                --                 AND                 I.SECTYPE   NOT IN ('X','P','E','I','S','U','W','0','7','T','Q','R','V')

                -- JOIN            SECMAPX             MP1
                --                 ON                  MP1.VENCODE =   I.CODE
                --                 AND                 MP1.RANK =      M.RANK
                --                 AND                 MP1.VENTYPE =   1
                --                 AND                 MP1.EXCHANGE =  M.EXCHANGE

                -- JOIN            SECMSTRX            MR1
                --                 ON                  MR1.SECCODE =   MP1.SECCODE
                --                 AND                 MR1.TYPE_ =     1

                -- JOIN            SECMAPX             MP2
                --                 ON                  MP2.SECCODE =   MR1.SECCODE
                --                 AND                 MP2.VENTYPE = 1
                --                 AND                 MP2.RANK =      M.RANK

                JOIN            PRC.PRCTKCHG        K1
                                ON                  K1.CODE =       K.CODE
                                --AND ISNULL(K1.ENDDATE,'1/1/2059') BETWEEN K.STARTDATE AND ISNULL(K.ENDDATE,'1/1/2059')

                -- JOIN            PRC.PRCSCCHG        G1
                --                 ON                  G1.CODE =       K1.CODE
                --                 AND                 ISNULL(G1.ENDDATE,'1/1/2059')
                --                 BETWEEN             K1.STARTDATE    AND     ISNULL(K1.ENDDATE,'1/1/2059')

                JOIN            PRC.PRCDLY          C1
                                ON                  C1.CODE =       K1.CODE



-- only need tables A, K1, C1
                WHERE

                                 A.SECCODE          IN ('''
    #
    block_size = int(len(seccodes) / nblocks) + 1
    with tqdm(total=nblocks, file=sys.stdout) as pbar:
        #     table = []
        list_ = [[] for n in range(20750101)]
        for seccodeblock in list(iterutils.chunked_iter(seccodes, block_size)):
            pbar.set_description('progress at %s' % time.strftime("%c"))
            pbar.update(1)
            query_seccodes = ''
            print('\n\nseccodeblock = ', len(seccodeblock), datetime.now())
            for s_c in seccodeblock:
                query_seccodes += str(s_c) + ''','''
            query_seccodes = query_seccodes[:-1]
            query_date = '''CAST(C1.Date_ AS DATETIME)>= \'''' + date_from + '''\''''
            composed_query = query +\
                            query_seccodes + ''')\n\nAND\n\n''' +\
                            query_date + '''\n\nORDER BY C1.Date_'''
            with open(os.path.join(working_dir, 'query_pricing_data.txt'),
                      'w') as query_file:
                query_file.write(composed_query)
            keep_trying_to_query = True
            result = None
            # the query might fail because the computer got moved to a different location,
            # which resulted in IP change; in this case, try to re-open the connection, then re-do the query
            while keep_trying_to_query:
                try:
                    print(
                        '\n\ntrying to execute cursor_.execute(COMPOSED_query)...',
                        datetime.now())
                    cursor_.execute(composed_query)
                    try:
                        print(
                            '\n\ntrying to execute result = cursor_.fetchall()...',
                            datetime.now())
                        result = cursor_.fetchall()
                        keep_trying_to_query = False
                    except Exception as err:
                        try:
                            print(
                                '\n\nexception #5 for cursor_.execute(COMPOSED_query)',
                                err, datetime.now())
                            print(
                                '\n\nexception #6 for result = cursor_.fetchall()',
                                err, datetime.now())
                            cursor_.close()
                            cnxn.close()
                            print("\n\nre-opening server connection...",
                                  datetime.now())
                            cnxn = pyodbc.connect('DRIVER=' + driver +
                                                  ';SERVER=' + server +
                                                  ';PORT=1433;DATABASE=' +
                                                  database + ';UID=' +
                                                  username + ';PWD=' +
                                                  password)
                            cursor_ = cnxn.cursor()
                        except Exception as err:
                            print('\n\nexception #7 for reconnect', err,
                                  datetime.now())
                except Exception as err:
                    try:
                        print(
                            '\n\nexception #8 for cursor_.execute(COMPOSED_query)',
                            err, datetime.now())
                        print(
                            '\n\nexception #9 for result = cursor_.fetchall()',
                            err, datetime.now())
                        cursor_.close()
                        cnxn.close()
                        print("\n\nre-opening server connection...",
                              datetime.now())
                        cnxn = pyodbc.connect('DRIVER=' + driver + ';SERVER=' +
                                              server + ';PORT=1433;DATABASE=' +
                                              database + ';UID=' + username +
                                              ';PWD=' + password)
                        cursor_ = cnxn.cursor()
                    except Exception as err:
                        print('\n\nexception #10 for reconnect', err,
                              datetime.now())
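            # A more compact way to express the reconnect-and-retry logic above
            # could look like this (a sketch only; it assumes the same driver,
            # server, database, username and password variables and a bounded
            # number of attempts):
            #
            #     for attempt in range(3):
            #         try:
            #             cursor_.execute(composed_query)
            #             result = cursor_.fetchall()
            #             break
            #         except pyodbc.Error as err:
            #             print('query failed, reconnecting...', err, datetime.now())
            #             cursor_.close()
            #             cnxn.close()
            #             cnxn = pyodbc.connect('DRIVER=' + driver + ';SERVER=' + server +
            #                                   ';PORT=1433;DATABASE=' + database +
            #                                   ';UID=' + username + ';PWD=' + password)
            #             cursor_ = cnxn.cursor()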
#
            if result is not None:
                print("\n\nquery produced %d rows" % len(result),
                      datetime.now())
                for row in result:
                    row3 = []
                    #                                     A.SecCode                     -- seccode  new col=0
                    #              --  ,       MR1.ID
                    #             --   ,       MR1.NAME AS CURRNAME
                    #             --   ,       G1.ISSUER AS PITISSUER
                    #              --  ,       G1.EXCHANGE
                    #              --  ,       MR1.Country
                    #              --  ,       G1.StartDate
                    #              --  ,       G1.EndDate
                    #                ,       K1.TICKER                     -- ticker new col=1
                    #            --    ,       G1.EXCHANGE
                    #             --   ,       I.ISSUER AS CURRENTISSUE
                    #              --  ,       I.STATUS
                    #              --  ,       I.SECTYPE AS CURRSECTYPE
                    #              --  ,       C1.TotRet
                    #             --   ,       C1.placeholder
                    #                ,       C1.MarketDate            --  market date col=15; new col=2
                    #                , C1.Open                        --  col=16 open; new col=3
                    #                , C1.High                        --  col=17 high; new col=4
                    #                , C1.Low                         --  col=18 low; new col=5
                    #                , C1.Close                       --  col=19 close; new col=6
                    #                ,  C1.Volume                     --  col=20 volume; new col=7
                    #                ,  C1.TotRet                     --  col=21 totret; new col=8

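                    # assemble the output row; missing values are encoded as '-1.0'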
                    row3.append(int(row[0]))  # SecCode
                    row3.append(row[1])  # ticker
                    if row[2] is not None:
                        date1 = str(row[2])[:-9]  # market date
                        row3.append(date1)
                    else:
                        row3.append('-1.0')
                    if row[3] is not None:
                        row3.append(row[3])  # open
                    else:
                        row3.append('-1.0')
                    if row[4] is not None:
                        row3.append(row[4])  # high
                    else:
                        row3.append('-1.0')
                    if row[5] is not None:
                        row3.append(row[5])  # low
                    else:
                        row3.append('-1.0')
                    if row[6] is not None:
                        row3.append(row[6])  # unadjusted close
                    else:
                        row3.append('-1.0')
                    if row[7] is not None:
                        row3.append(row[7])  # volume
                    else:
                        row3.append('-1.0')
                    if row[8] is not None:
                        row3.append(row[8])  # TotRet
                    else:
                        row3.append('-1.0')
                    if row[2] is not None:
                        idx = int(row[2].strftime('%Y%m%d'))
                        if row3 not in list_[idx]:
                            #    table.append(row3)
                            list_[idx].append(row3)
#
    for i, i_t in enumerate(list_):
        if i_t:
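            # the list index encodes the date as YYYYMMDD; split it back
            # into year, month and day to rebuild an ISO date string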
            s_s = str(i)
            year = s_s[:4]
            month = s_s[4:6]
            day = s_s[6:]
            date2 = year + '-' + month + '-' + day
            table1 = []
            table2 = []
            table2.append(
                create_titles([
                    'SecCode', 'Ticker', 'Date', 'Open', 'High', 'Low',
                    'Close, unadjusted', 'Volume', 'Total return'
                ]))
            for _, item in enumerate(i_t):
                if item not in table1:
                    table1.append(item)
            table1 = sorted(table1, key=operator.itemgetter(0, 1))
            table2 += table1
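            # append the day's rows to one CSV file per date; dir_from_date()
            # with 'y' appears to select a year-based subdirectory of working_dir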
            ofp = os.path.join(dir_from_date(date2, 'y', working_dir),
                               'pricing_data_for_' + date2 + '.csv')
            with open(ofp, 'a') as result_file:
                w_r = csv.writer(result_file, dialect='excel')
                w_r.writerows(table2)


#
#
    if archive:
        now = str(date.today())
        print('\n\ncompressing output and timestamping ... ', datetime.now())
        file_name = 'refinitiv_qa_direct_qai_master_and_pricing_tables_' + now
        print(file_name, datetime.now())
        shutil.make_archive(file_name, 'zip', working_dir)

        print('\n\nmoving the data to the timestamped repository ... ',
              datetime.now())
        src = cwd
        data_repo = os.path.join(src, 'RefinitivDataRepository')
        if not os.path.exists(data_repo):
            os.mkdir(data_repo)
        if not os.path.isdir(data_repo):
            raise Exception(f'Data repository is not a directory: {data_repo}')

        output_file_staging_path = os.path.join(src, file_name + '.zip')
        output_file_path = Path(os.path.join(data_repo, file_name + '.zip'))
        print('OUTPUT_FILE_STAGING_PATH = ', output_file_staging_path,
              'OUTPUT_FILE_PATH', output_file_path)
        if os.path.isfile(output_file_staging_path):
            if os.path.isfile(output_file_path):
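                # replace the already archived zip only if the new one is larger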
                new_file_size = os.stat(output_file_staging_path).st_size
                old_file_size = os.stat(output_file_path).st_size
                print('\n\nnew zip size = ', new_file_size,
                      '\told_file_size = ', old_file_size)
                if new_file_size > old_file_size:
                    os.remove(output_file_path)
                    shutil.move(output_file_staging_path, output_file_path)
            else:
                shutil.move(output_file_staging_path, output_file_path)

    if email_notification_:
        print(
            '\n\nemailing the confirmation and the link to the compressed data to the author ... ',
            datetime.now())
        alert = '''This is to notify that a new compressed data set was
        uploaded to the FORA Google Drive ...'''
        email = 'Alert time: ' + time.strftime("%c") + '\n' + alert
        client_email = [
            '*****@*****.**', '*****@*****.**'
        ]
        #    #{'*****@*****.**', '*****@*****.**', '*****@*****.**'}
        #    MESSAGE = create_message('*****@*****.**',\
        #                            CLIENT_EMAIL, 'Completion alert', EMAIL)
        yagmail.SMTP('*****@*****.**').send(
            client_email, 'Completion alert', email)
        print('\n\nemailed to the user:\n' + alert, datetime.now())

    print('\n\nexiting ... ', datetime.now())
Example #26
0
def test_postgres_driver_installed():
    """
    Check that the PostgreSQL ODBC driver is installed
    """
    assert 'PostgreSQL Unicode' in pyodbc.drivers()
Example #27
0
import sqlalchemy
import pymysql
import pyodbc
import psycopg2
import cx_Oracle

assert 'FreeTDS' in pyodbc.drivers()

try:
    # make sure cx_Oracle manages to load the TNS client libraries.
    # The connect() call will fail, but we only want to verify that the driver itself loads.
    cx_Oracle.connect()
except Exception as ex:
    assert 'TNS:net service name is incorrectly specified' in str(ex)

# freetds test
engine = sqlalchemy.create_engine(
    'mssql+pyodbc:///testuser:[email protected]:1433/TEST?driver=FreeTDS')
try:
    engine.execute('select 1 as [Result]')
except Exception as ex:
    assert "Can't open lib" not in str(
        ex), "Failed because of missing lib: " + str(ex)

print("All is good. All imported successfully")
Example #28
0
import pyodbc
import pandasql as ps
from flask import Flask, render_template, jsonify, request, make_response
import pandas as pd

app = Flask(__name__)

# Get a driver from those listed in /etc/odbcinst.ini (cat /etc/odbcinst.ini to see them)
drivers = [item for item in pyodbc.drivers()]
driver = drivers[-1]
print("driver:{}".format(driver))
server = 'mssql'
database = 'Northwind'
uid = 'sa'
pwd = 'SuperP4ssw0rd!'
conn = f'DRIVER={driver};SERVER={server};DATABASE={database};UID={uid};PWD={pwd}'

# Copy a DataFrame to the clipboard so it can be pasted into an Excel sheet
def copia(argumento):
    df = pd.DataFrame(argumento)
    df.to_clipboard(index=False, header=True)


@app.route('/')
def index():
    cnxn = pyodbc.connect(conn)
    cursor = cnxn.cursor()
    print('Using the following SQL Server version:')
    #tsql = "SELECT @@version;"
    tsql = "SELECT * FROM dbo.Customers"
    
Example #29
0
 def drivers():
     return pyodbc.drivers()
Example #30
0
# python pyodbc https://blog.csdn.net/huwei2003/article/details/107387919
# https://blog.csdn.net/naichager/article/details/111378060
# pyinstaller -F index.py
import pyodbc
print("支持的数据库:{}".format(pyodbc.drivers()))
# ['SQL Server', 'SQL Server Native Client 11.0', 'ODBC Driver 17 for SQL Server', 'SQL Server Native Client RDA 11.0']
connection = pyodbc.connect(
    "Driver={SQL Server Native Client 11.0};Server=localhost;Database=master;UID=test;PwD=test"
)
cursor = connection.cursor()
cursor.execute("select * from [master].[dbo].[spt_fallback_db]")
row = cursor.fetchone()
print("第一行数据:{}".format(row))
import os
os.system("pause")
Example #31
0
import win32com.client as client
import pyodbc
import pandas as pd
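# list any installed Microsoft Access ODBC drivers (in a script the result of
# this bare expression is discarded; assign it to a variable to inspect it)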
[x for x in pyodbc.drivers() if x.startswith('Microsoft Access Driver')]

# Access section: Retrieve variable values

# define components of our connection string
conn_str = (r'DRIVER={Microsoft Access Driver (*.mdb, *.accdb)};'
            r'DBQ=C:\Users\coron\OneDrive\Atom\Python\Webinars.accdb;')

# create a connection to the database
cnxn = pyodbc.connect(conn_str)
crsr = cnxn.cursor()

# define the components of a query
table_name = 'Automation'

# define query
query = 'SELECT * FROM {}'.format(table_name)

# define dataframe
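# note: recent pandas versions may emit a UserWarning when given a raw pyodbc
# connection instead of a SQLAlchemy engine; the query still runs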
data = pd.read_sql(query, cnxn, index_col="FreeItemCode")

# define selected FreeItemCode

webinar_identifier = "WBRLCCOVID20"

# Check data
print(data.columns)
# print(data.head())
Example #32
0
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 28 15:58:05 2020

@author: Ravelo
"""

import tkinter as tk
import pyodbc as bd

for driver in bd.drivers():
    print(driver)

while True:
    try:
        conn = bd.connect("Driver={SQL Server Native Client 11.0};"
                          "Server={CONSULTORIA\SQLDEV2017};"
                          "Database={LIBERTYcarsANUALDB};"
                          "uid=ADMIN;"
                          " pwd=SOPORTE;")

        #        "Trusted_Connection=yes;")
        print("Conexion establacida satisfactoriamente")
        break
    except:
        print("Parametros errados, conexion NO ESTABLECIDA")
        break

try:
    a = int(input("introduzca a:"))
    b = int(input("introduzca b:"))
Example #33
0
        tolerance = 10 ** (-places - 3)
        if abs(a - b) <= 0.5 * 10 ** (-places) + tolerance:
            return
        super(TestWdat5, self).assertAlmostEqual(a, b, places=places, msg=msg)


skip_test_odbc = True
skip_test_odbc_message = ''
if sys.platform != 'win32':
    skip_test_odbc_message = 'Windows only'
elif not os.getenv('PTHELMA_TEST_ENHYDRIS_API'):
    skip_test_odbc_message = 'set PTHELMA_TEST_ENHYDRIS_API'
else:
    try:
        import pyodbc
        if 'Microsoft Access Driver (*.mdb)' not in pyodbc.drivers():
            skip_test_odbc_message = \
                'Install ODBC "Microsoft Access Driver (*.mdb)"'
        else:
            skip_test_odbc = False
    except ImportError:
        skip_test_odbc_message = 'Install pyodbc'


@skipIf(skip_test_odbc, skip_test_odbc_message)
class TestOdbc(TestCase):
    class_being_tested = Datafile_odbc
    ref_ts1 = Timeseries(0)
    ref_ts1.read(StringIO(textwrap.dedent('''\
                                          2014-03-30 10:55,12.8,
                                          2014-03-30 10:56,12.8,
Example #34
0
def mssql_odbc_driver_is_loaded(odbc_driver):
    """
    Check if pyodbc can see the specified ODBC driver.
    :return: boolean
    """
    return odbc_driver in pyodbc.drivers()