def run_test():
    # Import the test business-logic service class
    from services import pony_orm_test_service as pots
    from db import db
    print('Running test business logic')
    # Initialize the Pony ORM database engine
    # (is_show_sql=True, create MySQL tables=True, create Oracle tables=False)
    db.init_db(True, True, False)
    pots.add_data_artist_for_mysql()
    pots.add_data_artist_for_oracle()
def run():
    try:
        print('run, current time =>', get_now_time())
        # Declared again here to make explicit that these are the
        # module-level globals, not locals
        global org_id
        global that_week_min
        global that_week_max
        print(org_id, that_week_min, that_week_max)
        # Start the actual lmt logic
        # Import the lmt business-logic service class
        from services import pony_orm_lmt_service as pols
        from db import db
        print('...org id:', org_id,
              'running lmt (罗美特) business logic - data cleaning')
        # Pull the following three settings out of the common config
        is_show_sql, is_mysql_create_tables, is_oracle_create_tables = (
            ccfa['is_show_sql'], ccfa['is_mysql_create_tables'],
            ccfa['is_oracle_create_tables'])
        # Initialize the Pony ORM database engine
        db.init_db(is_show_sql, is_mysql_create_tables,
                   is_oracle_create_tables)
        # First, query the SCADA_REPORT_XN_MID (ScadaReportXNMid) table by
        # the Tuesday date that_week_min
        srxm_list_that_week_min, srxm_qr = \
            pols.deal_with_data_for_oracle_srxm_select_where_with_org_id(
                org_id, that_week_min)
        # MySQL data processing and MySQL-to-Oracle loading
        if len(srxm_list_that_week_min) >= 1:
            # Rows found: run the data-cleaning code
            print('...org id:', org_id,
                  '...Oracle...srxm_list...len...total ScadaReportXNMid rows: '
                  + str(len(srxm_list_that_week_min)),
                  ' time: ', that_week_min, 'running data cleaning')
            # Print a separator line of 95 '-' characters
            util.print_a_line(95)
            pols.datas_from_mid_to_week_wash_data_oracle(
                org_id, srxm_list_that_week_min, srxm_qr,
                that_week_min, that_week_max)
        else:
            # No rows found: just print a notice and bail out
            print('...org id:', org_id,
                  '...Oracle...srxm_list...len...total ScadaReportXNMid rows: '
                  + str(len(srxm_list_that_week_min)),
                  ' time: ', that_week_min, 'no data cleaning needed, exiting')
            return True
    except Exception as e:
        print("run Exception Error:%s" % e)
        return False
    # Print a separator line of 95 '-' characters
    util.print_a_line(95)
    # Reaching this point means run has completed
    return True
def populate():
    engine = connect_db(config.dev.DB_URI)
    init_db(engine)
    Session = sessionmaker(bind=engine, autoflush=True)
    session = Session()
    # test_team_orm(session)
    # test_team_season_orm(session)
    # test_player_orm(session)
    # test_player_season_orm(session)
    # test_salary_orm(session)
    # spotrac_salaries(session)
    session.close()
def get_db_conn():
    try:
        db_conn = init_db(get_db_settings())
    except psycopg2.OperationalError:
        return None, format_error(
            "Incorrect connection parameters or database unavailable")
    return db_conn, None
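# A minimal usage sketch for the (connection, error) convention above,
# assuming init_db() returns a psycopg2 connection; the query is
# illustrative only.
db_conn, error = get_db_conn()
if error is not None:
    print(error)
else:
    with db_conn.cursor() as cur:
        cur.execute("SELECT 1")
        print(cur.fetchone())
    db_conn.close()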
def create_app(test_config=None):
    """Creates the app."""
    app = Flask(__name__, instance_relative_config=True)
    app.config.from_mapping(
        # Make sure to change DEBUG to False in a production env
        DEBUG=config('DEBUG', default=False),
        SECRET_KEY=config('SECRET_KEY', default='dev'),  # CHANGE THIS!!!!
        LOGFILE=config('LOGFILE',
                       os.path.join(app.instance_path, 'logs/debug.log')),
        CACHE_TYPE=config('CACHE_TYPE', 'simple'),  # Configure caching
        # Long cache times are probably OK for an ML API
        CACHE_DEFAULT_TIMEOUT=config('CACHE_DEFAULT_TIMEOUT', 300),
        TESTING=config('TESTING', default='TRUE'),
    )

    # Enable CORS header support
    CORS(app)

    ##############
    ### Routes ###
    ##############

    @app.route('/', methods=['GET'])
    def home():
        return "This is an example app"

    @app.route('/image/<sid>', methods=['GET'])
    def get_image(sid: str = None, num_img: int = 1):
        if sid is None:
            raise HTTPException(400, 'Bad Request. Must submit valid SID')
        return send_file(fetch_images(sid, num_img), mimetype='image/png')

    ###############
    ### Logging ###
    ###############

    # Change logging.INFO to logging.DEBUG to get full logs. Will be a
    # crapload of information. May significantly impair performance if
    # writing the logfile to disk (or a network drive).
    # To enable different services, see README.md
    gunicorn_logger = logging.getLogger('gunicorn.error')
    app.logger.handlers.extend(gunicorn_logger.handlers)
    app.logger.setLevel(gunicorn_logger.level)
    app.logger.info('Application logging set')

    # File logging. Remove in PROD
    if app.config['TESTING'] == 'TRUE':
        app.logger.info('Using TESTING log config.')
        logging.basicConfig(
            filename=app.config['LOGFILE'],
            level=logging.INFO,
            format='[%(asctime)s] [%(name)s] [%(levelname)s] %(message)s',
            datefmt='%Y-%m-%d %H:%M:%S %z')
        logging.getLogger('flask_cors').level = logging.INFO

    # Register database functions. Will allow db.close() to run on teardown
    from db import db
    db.init_db()
    app.logger.info(
        'Database functionality initialized. Click commands available.')

    return app
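# A minimal sketch of running the factory above during development; the host
# and port are assumptions. The gunicorn logger the factory hooks into
# suggests production would use something like: gunicorn "app:create_app()".
if __name__ == '__main__':
    create_app().run(host='127.0.0.1', port=5000)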
def init_app():
    db.init_db()
key = get_key()
if key == 'q':
    sys.stdout.write(restoreCursor)
    sys.stdout.flush()
    sys.exit(0)
elif key == 'r':
    reset = True
elif key == 'p' or key == ' ':
    pause = True
elif key == 's':
    if reset:
        reset = not reset
    if pause:
        pause = not pause


if __name__ == '__main__':
    db.init_db()
    reset = False
    pause = True  # chron starts paused
    user, sound, task = (menu.get_user(),
                         menu.get_sound(),
                         menu.get_task())
    case = task['pomotime'] * 60
    case_break = task['breaktime'] * 60
    welcome_msg(user[1])
    instance = vlc.Instance()
    player = instance.media_player_new()
    BASE_DIR = path.dirname(path.realpath(__file__))
    media = instance.media_new(path.join(BASE_DIR, 'sounds/' + sound))
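    # A hedged continuation sketch for the player set up above: attaching the
    # media and playing it uses the python-vlc API; exactly where this fires
    # in the pomodoro loop is an assumption.
    player.set_media(media)
    player.play()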
from flask import Flask, jsonify, request

from db.db import init_db
from db.db import db_session
from models.dataModel import *
from models.outputModel import *

app = Flask(__name__)
init_db()

from methods import methods
from Routes import router


@app.route('/<id>/<date_initial>/<date_end>', methods=["GET"])
def display(id, date_initial, date_end):
    return router.view(id, date_initial, date_end)


if __name__ == "__main__":
    app.run(port=3000)
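# A hedged client-side sketch for the route above, assuming the app is
# running locally on port 3000; the id and date values are illustrative only.
import requests

resp = requests.get('http://localhost:3000/42/2021-01-01/2021-01-31')
print(resp.status_code, resp.text)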
def maintain_db(args: argparse.Namespace) -> None:
    """
    Initialize the database with Messages and Tasks tables.

    Just calls init_db() from the db module.
    """
    db.init_db()
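# A hedged sketch of wiring maintain_db into an argparse CLI; the "maintain"
# subcommand name is an assumption for illustration.
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='command', required=True)
maintain = subparsers.add_parser('maintain', help='initialize the database')
maintain.set_defaults(func=maintain_db)

args = parser.parse_args()
args.func(args)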
from peewee import *

from db.db import init_db

db = init_db()
print(db)


class BaseModel(Model):
    """A base model that will use our PostgreSQL database"""

    class Meta:
        database = db


class HealthCheckModel(BaseModel):
    # memid = AutoField()
    url = CharField()
    duration = FloatField()
    response_code = IntegerField()
    date = DateTimeField()

    class Meta:
        table_name = 'health_check'


try:
    HealthCheckModel.create_table()
except Exception as e:
    print(e)
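# A minimal usage sketch for HealthCheckModel above; the row values are
# illustrative assumptions, not data from the original project.
from datetime import datetime

HealthCheckModel.create(
    url='https://example.com/health',  # hypothetical endpoint
    duration=0.123,
    response_code=200,
    date=datetime.now())
for row in HealthCheckModel.select().where(
        HealthCheckModel.response_code == 200):
    print(row.url, row.duration)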
def run():
    try:
        print('run, current time =>', get_now_time())
        # Declared again here to make explicit that these are the
        # module-level globals, not locals
        global org_id
        global that_day_min
        global that_day_max
        print(org_id, that_day_min, that_day_max)
        # Start the actual lmt logic
        # Import the lmt business-logic service class
        from services import pony_orm_lmt_service as pols
        from db import db
        print('...org id:', org_id, 'running lmt (罗美特) business logic')
        # Pull the following three settings out of the common config
        is_show_sql, is_mysql_create_tables, is_oracle_create_tables = (
            cc['is_show_sql'], cc['is_mysql_create_tables'],
            cc['is_oracle_create_tables'])
        # Initialize the Pony ORM database engine
        db.init_db(is_show_sql, is_mysql_create_tables,
                   is_oracle_create_tables)

        # Approach 1:
        # # First check whether the current date has already been written to
        # # the lmt Oracle database
        # srxm_list, srxm_qr = pols.deal_with_data_for_oracle_srxm_select_where(that_day_min)
        # # If it has, delete it first
        # if len(srxm_list) >= 1:
        #     pols.deal_with_data_for_oracle_srxm_del_all(srxm_qr)

        # Approach 2: query and delete in one step. In principle this
        # performs better, since everything is committed in one transaction.
        # Derive all the days covered by that_day_min..that_day_max; the
        # range never crosses a year or month boundary, so that case is
        # ignored.
        this_year, this_month, days_list = \
            util.get_days_list_from_day_min_to_day_max(
                that_day_min, that_day_max)
        # pols.deal_with_data_for_oracle_srxm_del_all_with_where(org_id, that_day_min)
        # Revised version: loop over the dates and delete day by day
        print('...org id:', org_id, '...looping deletes...days to delete:',
              str(len(days_list)), 'date range:', that_day_min, that_day_max)
        for this_day in days_list:
            pols.deal_with_data_for_oracle_srxm_del_all_with_where2(
                org_id, this_year, this_month, this_day)

        # After deleting, query the current date's data back out of MySQL,
        # reprocess it, and write it into the lmt Oracle database.
        # Query-all variant (unused):
        # pols.deal_with_data_for_mysql_mrm_select_all()
        # Conditional query (used):
        mrm_list, mrm_qr = pols.deal_with_data_for_mysql_mrm_select_where(
            that_day_min, that_day_max)
        # MySQL data processing and MySQL-to-Oracle loading
        if len(mrm_list) >= 1:
            # Rows found: run the data-transfer code
            print('...org id:', org_id,
                  '...MySQL...mrm_list...len...total MeterReportMonth rows: '
                  + str(len(mrm_list)), ' time range: ', that_day_min,
                  ' to ', that_day_max, 'running data transfer')
            pols.datas_from_mysql_to_oracle(org_id, mrm_list, mrm_qr)
        else:
            # No rows found: just print a notice
            print('...org id:', org_id,
                  '...MySQL...mrm_list...len...total MeterReportMonth rows: '
                  + str(len(mrm_list)), ' time range: ', that_day_min,
                  ' to ', that_day_max, 'no data transfer needed')
            return True
    except Exception as e:
        print("run Exception Error:%s" % e)
        return False
    # Print a separator line of 95 '-' characters
    util.print_a_line(95)
    # Reaching this point means run has completed
    return True