def db_connect():
    '''Create a database connection before each request.'''
    logging.debug("Connecting to database.")
    db.connect()
    logging.debug("Connected to database.")

def watch():
    ch = logging.StreamHandler(sys.stdout)
    ch.setLevel(logging.DEBUG)
    formatter = logging.Formatter(
        '%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    ch.setFormatter(formatter)
    logger.setLevel(logging.DEBUG)
    logger.addHandler(ch)
    chimptools.logger.setLevel(logging.DEBUG)
    chimptools.logger.addHandler(ch)

    db.connect()
    while True:
        tasks = Task.select().where(Task.state == 'todo').limit(1)
        if len(tasks) == 0:
            logger.info("nothing to do")
            time.sleep(1)
        else:
            Tasker(tasks[0]).process()
    db.close()

def process_data(f, i, file_num):
    print(f, "{}/{}".format(i, file_num))
    try:
        data = get_course_data(DATA_DIR + f)
    except IndexError as e:
        print("IndexError", e)
        return
    db.connect(True)
    for d in data:
        d['total'] = sum(d['grades'].values())
        d['GPA'] = sum(
            map(lambda x: gp_mapping[x[0]] * x[1],
                d['grades'].items())) / d['total']
        try:
            GradeData.create(semester=DATA_DIR[-5:-1],
                             course_no=d['course_no'],
                             course_name=d['course_name'],
                             grades=json.dumps(d['grades']),
                             total=d['total'],
                             GPA=d['GPA'],
                             type=d['type'])
        except peewee.IntegrityError as e:
            pass  # print(e)
    db.close()

def extract_nn_stats(results, duo_results, nns, frac, submit_to_nndb=False):
    db.connect()
    for network_name, res in results.unstack().iterrows():
        network_class, network_number = network_name.split('_')
        nn = nns[network_name]
        if network_class == 'Network':
            res_dict = {'network': network_number}
        elif network_class == 'ComboNetwork':
            res_dict = {'combo_network': network_number}
        elif network_class == 'MultiNetwork':
            res_dict = {'multi_network': network_number}
        if all(name not in res_dict
               for name in ['network', 'combo_network', 'multi_network']):
            raise Exception(
                ''.join(['Error! No network found for ', network_name]))
        res_dict['frac'] = frac
        for stat, val in res.unstack(level=0).iteritems():
            res_dict[stat] = val.loc[nn._target_names].values
        try:
            duo_res = duo_results.loc[network_name]
            res_dict.update(duo_res)
        except KeyError:
            pass
        postprocess_slice = PostprocessSlice(**res_dict)
        if submit_to_nndb is True:
            postprocess_slice.save()
    db.close()

def main():
    """Main function for populating the database"""
    donors = [('Toni Morrison', random.sample(range(100, 25000), 3)),
              ('Mike McHargue', random.sample(range(100, 25000), 3)),
              ("Flannery O'Connor", random.sample(range(100, 25000), 3)),
              ('Angelina Davis', random.sample(range(100, 25000), 3)),
              ('Bell Hooks', random.sample(range(100, 25000), 3))]
    db.connect()
    db.drop_tables([Donor, Donation])
    db.create_tables([Donor, Donation])
    bcrypt = Bcrypt()
    for donor, donations in donors:
        try:
            Donor.create(name=donor,
                         email='.'.join(donor.lower().split()) + '@gmail.com',
                         password=bcrypt.generate_password_hash('password'),
                         total=sum(donations),
                         average=sum(donations) / len(donations))
        except Exception:
            db.rollback()
    for donor, donations in donors:
        for donation in donations:
            try:
                Donation.create(donor=donor, amount=donation)
            except Exception:
                db.rollback()

def create_task():
    if 'username' not in session:
        return redirect(url_for('login'))
    db.connect()
    if request.method == 'POST':
        Task(name=request.form['todo']).save()
        return redirect(url_for('all_tasks'))
    return render_template('create.jinja2')

def before_request():
    # I have to connect to the database here, and not in the main thread:
    # if the connection were made in model.py (for example), then when the
    # model subclasses are used to access the database in the views in
    # gradebook.py, the db connection would be accessed from a thread other
    # than the main one. I think there's probably a more elegant solution.
    # TODO?
    db.connect()

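# A possibly more elegant alternative (a minimal sketch, not taken from this
# project): pair the per-request connect with a Flask teardown hook so the
# connection is opened and closed around each request. Assumes a Flask
# application object named `app` and the same peewee-style `db` as above.
@app.before_request
def _open_db_connection():
    # Open a connection for this request if one isn't already open.
    if db.is_closed():
        db.connect()

@app.teardown_request
def _close_db_connection(exc):
    # Runs after every request, even when the view raised an exception.
    if not db.is_closed():
        db.close()
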
def create_category(db, catname, meta):
    try:
        db.close()
        db.connect()
        cat = Categories.create(name=json.dumps(catname), metadata=meta)
        cat.save()
        db.close()
    except:
        db.rollback()
        raise

def create():
    if request.method == "POST":
        db.connect()
        name = request.form['name-input']
        donation_amt = request.form['donation-input']
        Donation.insert(
            value=donation_amt,
            donor=Donor.select().where(Donor.name == name)).execute()
        return redirect(url_for('home'))
    else:
        return render_template('create.jinja2')

def main():
    logging.basicConfig(
        format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        level=logging.INFO,
        filename=configure.get('logging', None))
    db.connect()
    if not Message.table_exists():
        Message.create_table()
    updater = Updater(configure['token'])
    updater.dispatcher.add_handler(MessageHandler(Filters.all, new_message))
    updater.start_polling()

def get_order():
    if db.is_closed():
        db.connect()
    order = Order.get_or_none(Order.is_processing == False)
    if not order:
        return
    order.is_processing = True
    order.save()
    if not db.is_closed():
        db.close()
    return order

def initdb():
    """Initialize the database."""
    click.echo('Init the db')
    print("remove database")
    if os.path.exists(DATABASE):
        os.remove(DATABASE)
    print("connect database")
    db.connect()
    print("create tables")
    db.create_tables([User, Task, Record])
    print("close database")
    db.close()

def post(self):
    parser = reqparse.RequestParser()
    parser.add_argument('username', help='This field cannot be blank', required=True)
    parser.add_argument('password', help='This field cannot be blank', required=True)
    parser.add_argument('repassword', help='This field cannot be blank', required=True)
    data = parser.parse_args()
    username = data.username
    password = data.password
    repassword = data.repassword
    with db.connect() as conn:
        try:
            if password == repassword:
                insert_statement = UserModel.insert().values(
                    username=username, password=password)
                conn.execute(insert_statement)
                return jsonify({
                    'succeed': 'Successfully created user {}'.format(username)
                })
            else:
                return jsonify({
                    'succeed': False,
                    'msg': 'The entered passwords do not match'
                })
        except sqlalchemy.exc.DatabaseError as e:
            print(e)

def __init__(self, path="./config.json", time=86400, handle=False):
    db.connect()
    db.create_tables([App, Version, Config])
    if self._first_run():
        logging.debug("[+] - First run")
        logging.debug("[+] - Loading config")
        self.config = self._read_config(path)
        logging.debug("[+] - Saving config")
        self._save_config()
    else:
        logging.debug("[+] - Restoring config from database")
        self.config = self._get_config().config
    self.urls = self.config["apps"]
    self.whitelist = self.config["whitelist"]
    self.admin = self.config["admin"]
    self.updater = Updater(self.config["token"])
    if time:
        self.time = time
        self._automatic_update()
    if handle:
        dp = self.updater.dispatcher
        dp.add_handler(CommandHandler('getid', self.get_chatid))
        dp.add_handler(CommandHandler('get_whitelist', self.get_whitelist))
        dp.add_handler(
            CommandHandler('add_whitelist', self.add_whitelist, pass_args=True))
        dp.add_handler(
            CommandHandler('del_whitelist', self.del_whitelist, pass_args=True))
        dp.add_handler(CommandHandler('get_apps', self.get_apps))
        dp.add_handler(
            CommandHandler('add_app', self.add_apps, pass_args=True))
        dp.add_handler(
            CommandHandler('del_app', self.del_apps, pass_args=True))
        dp.add_handler(CommandHandler('help', self.show_help))
        dp.add_handler(CommandHandler('update', self.manual_update))
        self.updater.start_polling()
        self.updater.idle()
    else:
        self.doit()

def connection():
    conn = db.connect()
    with conn.begin() as transaction:
        patched_all_accounts = partial(payments.get_all_accounts, conn)
        patched_account_transactions = partial(
            payments.get_account_transactions, connection=conn)
        with patch.object(payments, 'get_all_accounts', patched_all_accounts), \
                patch.object(payments, 'get_account_transactions',
                             patched_account_transactions):
            yield conn
        transaction.rollback()

def encode_background_task():
    db.connect()
    while True:
        video_queue = get_from_queue()
        if video_queue is None:
            sleep(5)
            continue
        video = video_queue.video
        filename, qualities = video.filename, video.video_qualities()
        foldername = os.path.join(BASE_PATH, filename.split('.')[0])
        create_dir(foldername)
        command = prepare_command(filename, qualities)
        succeed = execute(command)
        if not succeed:
            continue
        manifest = create_master_manifest(qualities, foldername)
        processed = ProcessedVideo(video=video, manifest=manifest)
        video_queue.delete_instance()
        processed.save()

def nns_from_NNDB(max=20, only_dim=None):
    db.connect()
    non_sliced = get_similar_not_in_table(PostprocessSlice, max=max,
                                          only_sep=True, no_particle=False,
                                          no_divsum=True, only_dim=only_dim)
    network = non_sliced.get()
    style = 'mono'
    if len(network.target_names) == 2:
        match_0 = re.compile('^(.f)(.)(ITG|ETG|TEM)_GB').findall(network.target_names[0])
        match_1 = re.compile('^(.f)(.)(ITG|ETG|TEM)_GB').findall(network.target_names[1])
        if len(match_0) == 1 and len(match_1) == 1:
            group_0 = match_0[0]
            group_1 = match_1[0]
            if ((group_0[1] == 'e' and group_1[1] == 'i') or
                    (group_0[1] == 'i' and group_1[1] == 'e')):
                style = 'duo'
            else:
                raise Exception('non-matching target_names. Not sure what to do.. {!s}'
                                .format(network.target_names))
    matches = []
    for target_name in network.target_names:
        matches.extend(re.compile('^.f.(ITG|ETG|TEM)_GB').findall(target_name))
    if matches[1:] == matches[:-1]:
        if matches[0] == 'ITG':
            slicedim = 'Ati'
        elif matches[0] == 'TEM' or matches[0] == 'ETG':
            slicedim = 'Ate'
    else:
        raise Exception('Unequal stability regime. Cannot determine slicedim')
    nn_list = {network.id: str(network.id) for network in non_sliced}
    print('Found {:d} {!s} with target {!s}'.format(non_sliced.count(),
                                                    network.__class__,
                                                    network.target_names))
    nns = OrderedDict()
    for dbnn in non_sliced:
        nn = dbnn.to_QuaLiKizNN()
        nn.label = '_'.join([str(el) for el in [dbnn.__class__.__name__, dbnn.id]])
        nns[nn.label] = nn
    db.close()
    return slicedim, style, nns

def run():
    db.connect()
    mbox = connect()
    message_uids = get_message_uids(mbox)
    all_msg_gen = fetch_all_messages(mbox, message_uids)
    # Accept that the first message might be a duplicate.
    # This is because IMAP fetch will always get the latest message from the
    # mailbox, even if the UID we specify is higher than the latest one.
    try:
        first_msg = next(all_msg_gen)
        process_message(*first_msg)
    except pw.IntegrityError as err:
        if 'UNIQUE constraint failed: rawmsg.checksum' in str(err):
            log.info('Duplicate first message, carry on.')
        else:
            raise err
    except StopIteration:
        log.info('No new messages.')
    for msg in all_msg_gen:
        process_message(*msg)
    mbox.logout()

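# For context, a minimal sketch (assumed, not taken from this project) of a
# peewee model whose unique checksum column would raise the
# "UNIQUE constraint failed: rawmsg.checksum" IntegrityError handled above.
import peewee as pw

class RawMsg(pw.Model):
    # A duplicate insert on this column is reported by SQLite as
    # "UNIQUE constraint failed: rawmsg.checksum".
    checksum = pw.CharField(unique=True)
    raw = pw.BlobField()

    class Meta:
        database = db
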
def do_before_request():
    """ initialize database and session """
    # See http://docs.peewee-orm.com/en/latest/peewee/database.html .
    # This gives "connection already open" error if in console.
    # The current (kludgy) fix is to test for the console explicitly.
    if not in_console():
        db.connect()
        #print " db={}".format(db)
        #print " request={}".format(request)

    # See PERMANENT_SESSION_LIFETIME in env/settings.py
    session.permanent = True

    # site settings
    g.debug = umber_debug
    g.contact_url = contact_url
    g.about_url = about_url
    g.help_url = help_url
    g.umber_url = umber_url
    g.markup_url = markup_url
    g.site_url = site_url
    g.now = Time()

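# A less kludgy alternative (a minimal sketch, assuming peewee 3.x is in use):
# let connect() tolerate an already-open connection instead of testing for
# the console explicitly.
def do_before_request_sketch():
    # reuse_if_open=True returns False instead of raising when a connection
    # is already open, so console and request paths behave the same.
    db.connect(reuse_if_open=True)
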
def connect_db():
    '''Connect to db before request made'''
    if db.is_closed():
        db.connect()

def before():
    if db.is_closed():
        db.connect()

def before_request():
    db.connect()

def fetch_members():
    ASR = {}
    page = requests.get(ASR_PLAYERS_URL)
    tree = html.fromstring(page.content)
    for p in tree.xpath("//div[@id='room_Alpha']//a/text()"):
        ASR[p] = 'ALPHA'
    for p in tree.xpath("//div[@id='room_Beta']//a/text()"):
        ASR[p] = 'BETA'
    for p in tree.xpath("//div[@id='room_Gamma']//a/text()"):
        ASR[p] = 'GAMMA'
    for p in tree.xpath("//div[@id='room_Delta']//a/text()"):
        ASR[p] = 'DELTA'
    for p in tree.xpath("//div[@id='room_Placement_League']//a/text()"):
        ASR[p] = 'PLACEMENT'
    return ASR


db.connect()
cookies = post_kgs({
    "type": "LOGIN",
    "name": "ASRbot",
    "password": PASSWD,
    "locale": "en_US"
})
ASR = fetch_members()
done = False
present = {'ALPHA': [], 'BETA': [], 'DELTA': [], 'GAMMA': [], 'PLACEMENT': []}
available = {'ALPHA': 0, 'BETA': 0, 'DELTA': 0, 'GAMMA': 0, 'PLACEMENT': 0}
while done == False:
    time.sleep(5)
    r = requests.get(KGS_URL, cookies=cookies)

def open_db():
    if db.is_closed():
        db.connect()

import os
import random
from passlib.hash import pbkdf2_sha256
from model import db, Donor, Donation, User

db.connect(os.environ.get('DATABASE_URL', 'sqlite:///my_database.db'))

# This line will allow you to "upgrade" an existing database by
# dropping all existing tables from it.
db.drop_tables([Donor, Donation, User])
db.create_tables([Donor, Donation, User])

if os.environ.get('IN_HEROKU') != "true":
    # Don't create test data in Heroku
    User(username="******", password=pbkdf2_sha256.hash('test')).save()

    alice = Donor(name="Alice")
    alice.save()
    bob = Donor(name="Bob")
    bob.save()
    charlie = Donor(name="Charlie")
    charlie.save()

    donors = [alice, bob, charlie]

def connect_db():
    """Connects to the specific database."""
    db.connect()
    return db

def connect_db():
    print('Processing started')
    print('Main PID: %s' % (getpid()))
    db.connect()

def bootstrap():
    db.connect()
    db.create_tables(
        [RawMsg, MsgMeta, Attachment, Attachment.rawmsg.get_through_model()])

def connect_db():
    """Connect to db before request made"""
    if db.is_closed():
        db.connect()

import random
from model import db, Donor, Donation, User
from passlib.hash import pbkdf2_sha256

db.connect()

# This line will allow you to "upgrade" an existing database by
# dropping all existing tables from it.
db.drop_tables([User, Donor, Donation])
db.create_tables([User, Donor, Donation])

alice = Donor(name="Alice")
alice.save()
bob = Donor(name="Bob")
bob.save()
charlie = Donor(name="Charlie")
charlie.save()

donors = [alice, bob, charlie]

for x in range(30):
    Donation(donor=random.choice(donors),
             value=random.randint(100, 10000)).save()

User(name="admin", password=pbkdf2_sha256.hash("password")).save()
User(name="bob", password=pbkdf2_sha256.hash("bobbob")).save()

def connect_db():
    if db.is_closed():
        db.connect()