def post(self):
    """Create one transaction from the JSON payload.

    Expects ``{"products": [{"id": ..., "amount": ...}, ...]}``. Computes the
    total from current product prices, stores one TransactionProducts row per
    product, and decrements each product's stock.

    Returns ``({}, 201)`` on success or the serialized database error with
    400 on failure.

    Fixes: the original queried every existing transaction into an unused
    ``list_transactions`` and ran the creation inside a loop over those rows,
    which would insert one duplicate transaction per pre-existing row.
    """
    data = request.get_json()
    try:
        # NOTE(review): `now` is a free variable from the enclosing module —
        # if it is bound at import time the timestamp is stale; confirm and
        # prefer datetime.now() here.
        new_transaction = TransactionsTbl(status_id=1, date_created=now)
        session.add(new_transaction)
        session.flush()
        session.refresh(new_transaction)  # make new_transaction.id available

        products = []
        total = 0
        for product in data['products']:
            qry_products = session.query(ProductsTbl).filter(
                ProductsTbl.id == product['id']).first()
            total += product['amount'] * qry_products.price
            products.append(TransactionProducts(
                transaction_id=new_transaction.id,
                product_id=product['id'],
                amount=product['amount']))
            # Decrement stock by the purchased amount.
            new_stock = qry_products.stock - product['amount']
            session.query(ProductsTbl).filter(
                ProductsTbl.id == product['id']).update({'stock': new_stock})

        session.bulk_save_objects(products)
        session.query(TransactionsTbl).filter(
            TransactionsTbl.id == new_transaction.id).update(
                {'total': float(total)})
        session.commit()
    except (sqlalchemy.exc.SQLAlchemyError, sqlalchemy.exc.DBAPIError) as e:
        return dumps(e), 400
    return dumps({}), 201
def try_session_commit(self):
    """Flush pending changes; on an integrity violation roll back and abort
    the request with HTTP 409, otherwise commit the transaction."""
    try:
        session.flush()
    except IntegrityError as ex:
        # Surface the underlying DBAPI error to the client as a conflict.
        session.rollback()
        abort(409, message=f"Integrity Error: {ex.orig}")
    session.commit()
def main(args):
    """Backfill Spotify ids for named songs, then fetch and store audio
    features for every song that has a Spotify id.

    Fixes: ``Query.values()`` yields 1-tuples; the original passed the raw
    tuples to ``get_song_features`` so the ids could never match the keys of
    the returned features dict. Unpack element 0, as the sibling script does
    for ``Songs.artist_id``.
    """
    songs = session.query(Songs).filter(Songs.name.isnot(None)).filter(
        Songs.spotify_id.is_(None))
    artists = session.query(Artists)
    i = 1
    for song in songs.all():
        song.spotify_id = search_song(
            song.name, artists.filter_by(id=song.artist_id).first().name)
        i = i + 1
        if i % 10 == 0:
            print(i)
            # NOTE(review): stops after the first ~10 lookups — presumably a
            # debugging / rate-limit guard; confirm before removing.
            break
    session.flush()

    songs = session.query(Songs).filter(Songs.spotify_id.isnot(None))
    # Unpack the 1-tuples produced by Query.values() into plain ids.
    song_ids = [v[0] for v in songs.values(Songs.spotify_id)]
    features_by_id = get_song_features(song_ids)
    print("DAMN : ", features_by_id)
    for song in songs:
        if song.spotify_id not in features_by_id:
            continue
        f = features_by_id[song.spotify_id]
        song.mode = f["mode"]
        song.acousticness = f["acousticness"]
        song.danceability = f["danceability"]
        song.energy = f["energy"]
        song.instrumentalness = f["instrumentalness"]
        song.liveness = f["liveness"]
        song.loudness = f["loudness"]
        song.speechiness = f["speechiness"]
        song.valence = f["valence"]
        song.tempo = f["tempo"]
        i = i + 1
        if i % 10 == 0:
            print(i)
    session.flush()
def scan(self, root_folder): print "scanning", root_folder.path valid = [x.lower() for x in config.get('base', 'filetypes').split(',')] valid = tuple(valid) print "valid filetypes: ", valid for root, subfolders, files in os.walk(root_folder.path, topdown=False): if root in self.__folders: folder = self.__folders[root] mod_time = datetime.datetime.fromtimestamp( os.path.getmtime(folder.path)) app.logger.debug('mtime: %s , last_scan: %s', mod_time, folder.last_scan) if mod_time < folder.last_scan: app.logger.debug('Folder not modified, skipping files') continue folder.last_scan = datetime.datetime.now() else: app.logger.debug('Adding folder: ' + root) folder = Folder(path=root, parent=root_folder) folder.created = datetime.datetime.fromtimestamp( os.path.getctime(root)) self.__folders[root] = folder #TODO: only scan files if folder mtime changed, but is it windows compat? # need to see how this works on ntfs-3g for f in files: if f.lower().endswith(valid): try: path = os.path.join(root, f) scanned_file = self.__scan_file(path) if (scanned_file): scanned_file.folder = folder session.add(scanned_file) session.flush() except: app.logger.error('Problem adding file: ' + os.path.join(root, f)) app.logger.error(traceback.print_exc()) pass root_folder.last_scan = datetime.datetime.now() session.flush() session.commit()
def decorator(*args, **kwargs):
    """Authenticate the request from the ``x-access-tokens`` header, attach
    the resolved user to ``auth``, then invoke the wrapped view."""
    # Missing header yields None, exactly like the original's in-check.
    token = request.headers.get("x-access-tokens")
    data = _check_token(token)
    current_user = session.query(User).filter(
        User.id == data["id"]).first()
    if current_user is None:
        abort(401, message="no user")
    auth.user = current_user
    session.flush()
    return f(*args, **kwargs)
def busroutesPUT():
    """Update an existing BusRoute from the request's JSON body and return
    it rendered, or a NotFound response when the id is absent/unknown."""
    route_id = request.json.get('id')
    matches = []
    if route_id:
        # Coerce the id to str exactly as the original did.
        matches = BusRoute.query.filter(BusRoute.id == '%s' % route_id).all()
    if not matches:
        return NotFound().render()
    route = matches[0]
    route.fromDict(request.json)
    session.add(route)
    session.flush()
    return Success(route).render()
def main(args):
    """Import locations and per-location play counts from the literal-eval'd
    dump file named by ``args[0]``."""
    with open(args[0], 'r') as dump_file:
        tt_data = literal_eval(dump_file.read())
    for loc_key, song_dict in tt_data.items():
        current_loc = Locations(loc_key[0], loc_key[1])
        session.add(current_loc)
        session.flush()
        for song_id, info in song_dict.items():
            count = info[0]
            artist_id = info[2]
            create_if_not_exists(Artists, id=artist_id)
            create_if_not_exists(Songs, tunes_id=song_id, artist_id=artist_id)
            # Re-read the location row so its generated id is usable below.
            session.refresh(current_loc)
            session.flush()
            session.add(Plays(current_loc.id, song_id, count))
            print(current_loc.id)
    session.commit()
def main(args):
    """For the 50 most-played songs of each location, backfill song names and
    artist names from their external ids.

    Fixes: the debug print indexed ``plays_by_loc[3]`` directly, raising
    KeyError whenever location 3 has no plays; use ``.get(3, [])`` (same
    output when the key exists).
    """
    plays = session.query(Plays).all()
    plays_by_loc = {}
    for play in plays:
        plays_by_loc.setdefault(play.location_id, []).append(
            (play.count, play.song_id))
    # Keep only the top 50 songs per location, by play count.
    plays_by_loc = {
        k: sorted(v, key=lambda x: x[0], reverse=True)[:50]
        for k, v in plays_by_loc.items()
    }
    print(len(plays_by_loc), len(plays_by_loc.get(3, [])))

    song_ids = set()
    for top in plays_by_loc.values():
        song_ids.update(entry[1] for entry in top)

    songs = session.query(Songs).filter(Songs.tunes_id.in_(song_ids))
    i = 1
    for song in songs.all():
        i = i + 1
        song.name = get_song_name(song.tunes_id)
        if i % 10 == 0:
            print(i)  # progress marker
    session.flush()

    # Query.values() yields 1-tuples; unpack the artist ids.
    artist_ids = [v[0] for v in songs.values(Songs.artist_id)]
    artists = session.query(Artists).filter(Artists.id.in_(artist_ids))
    for artist in artists.all():
        i = i + 1
        artist.name = get_artist_name(artist.id) or ''
        if i % 10 == 0:
            print(i)
    session.commit()
def calc_all_metrics(pid: int, idcode: str) -> None:
    """Compute and store structural metrics (HSE CA/CB/CN, MSMS, DSSP) for
    every site of protein ``pid``/``idcode``, skipping proteins that already
    have DSSP data.

    Fixes: the DSSP-exists check filtered ``Residue.pid == 1`` instead of the
    ``pid`` argument, so every protein was compared against protein 1.
    """
    has_dssp = (session.query(Residue_props.resid).filter(
        Residue_props.resid == Residue.resid).filter(
            Residue.pid == pid).filter(
                Residue_props.sec_struct.isnot(None)).first())
    if has_dssp:
        print(f"{idcode} already had dssp! Skipping...")
        return

    _, _, pdb_name = get_pdb(pid, idcode, prefix="tmp_solv_")
    chain_sites = get_sites(pid)
    for chain in chain_sites:
        for resnumb in chain_sites[chain]:
            _, resid = chain_sites[chain][resnumb]
            # Reuse an existing Residue_props row or create one.
            new_res = session.query(Residue_props).filter_by(
                resid=resid).first()
            if not new_res:
                new_res = Residue_props(resid=resid)
                session.add(new_res)
            # Attach the row to the site entry for the calc_* helpers below.
            chain_sites[chain][resnumb].append(new_res)
    session.flush()

    # biopython parser
    parser = PDBParser()
    structure = parser.get_structure("test", pdb_name)
    model = structure[0]

    calc_hseCA(model, chain_sites)
    calc_hseCB(model, chain_sites)
    calc_hseCN(model, chain_sites)
    calc_msms(model, chain_sites)
    calc_dssp(model, chain_sites, pdb_name)

    # Remove the temp PDB without spawning a shell (was: os.system("rm -f ...")).
    if os.path.exists(pdb_name):
        os.remove(pdb_name)
from db import Events, session, Fighters
from network import get_content
from parser import parse

# Fetch and parse the event page, then upsert the event and its fighters.
content = get_content()
data = parse(content)

event = session.query(Events).filter(Events.name == data['name']).one_or_none()
if event is None:
    event = Events(name=data['name'], date=data['date'])
    session.add(event)
    session.flush()  # populate event.id for the fighter rows below

for fighter_name in data['fighters_names']:
    session.add(Fighters(name=fighter_name, event_id=event.id))

session.commit()
def save(self):
    """Stage this instance on the shared session and flush the pending SQL.

    Does not commit — the caller controls transaction boundaries.
    """
    session.add(self)
    session.flush()
def add_skill_to_team(team_name, tech_name):
    """Associate ``tech_name`` with ``team_name`` via a TeamTechs row and
    commit.

    Fixes: the original called ``session.flush()`` after ``session.commit()``;
    commit already flushes, so the trailing flush was a no-op and is removed.
    """
    session.add(TeamTechs(team_name=team_name, tech_name=tech_name))
    session.commit()
def save_and_flush(self):
    """Add this instance to the shared session and push the INSERT to the
    database without committing."""
    session.add(self)
    session.flush()
def save_settings(tit: pypka.Titration) -> None:
    """Persist the titration's simulation settings, reusing an existing
    Sim_settings row when every stored parameter matches, and record the
    resulting settid on NEW_PK_SIM.

    Fixes: the filter-building loop was written out three times (pypka,
    delphi, mc); it is extracted into one private helper.
    """
    # Save settings
    pypka_params, delphi_params, mc_params = tit.getParametersDict()
    to_keep = [
        "CpHMD_mode",
        "ffID",
        "ff_family",
        "ffinput",
        "clean_pdb",
        "LIPIDS",
        "keep_ions",
        "ser_thr_titration",
        "cutoff",
        "slice",
    ]
    pypka_params = {
        key: value
        for key, value in pypka_params.items() if key in to_keep
    }
    pypka_params["version"] = pypka_version
    mc_params["pH_values"] = list(mc_params["pH_values"])

    bool_params = [
        "CpHMD_mode",
        "ser_thr_titration",
        "clean_pdb",
        "keep_ions",
        "pbx",
        "pby",
    ]

    def _match_params(query, column, params):
        # Narrow `query` to rows whose JSON `column` stores exactly `params`.
        # Booleans are compared lowercase to match JSON's true/false spelling.
        for param, value in params.items():
            to_compare = str(value)
            if param in bool_params:
                to_compare = to_compare.lower()
            query = query.filter(column[param].as_string() == to_compare)
        return query

    query = session.query(Sim_settings.settid)
    query = _match_params(query, Sim_settings.pypka_params, pypka_params)
    query = _match_params(query, Sim_settings.delphi_params, delphi_params)
    query = _match_params(query, Sim_settings.mc_params, mc_params)

    settid = query.first()
    if not settid:
        new_sim_settings = Sim_settings(pypka_params=pypka_params,
                                        delphi_params=delphi_params,
                                        mc_params=mc_params)
        session.add(new_sim_settings)
        session.flush()  # populate the generated settid
        settid = new_sim_settings.settid
    else:
        settid = settid[0]

    NEW_PK_SIM.settid = settid
    session.commit()
def parse_comments(self, bs, content, params):
    """Scrape every comment page of `content` and persist new comments.

    Walks all comment pages, anonymizes writers via shake_128, creates User
    rows on first sight, threads replies ("depth" items) under the previous
    top-level comment, and commits at the end.

    Fixes: dead alias ``param = params`` (the shared dict was mutated either
    way), unused bound exception variable, and ``len(x) > 0`` truthiness.
    """
    comments = session.query(models.Comment).\
        filter(models.Comment.cid == content.id).\
        all()
    if comments:
        print('alread updated')
        return

    comment_top = bs.find('div', id='comment_top')
    last_page = self.get_comment_last_page(comment_top)
    before_comment = None
    for page in range(last_page + 1):
        # NOTE(review): mutates the caller's params dict, as the original did.
        params['cpage'] = page
        res = BeautifulSoup(requests.get(self.base_url, params).text,
                            'html.parser')
        comment_list = res.find('div', id='commentbox').find(
            'div', attrs={'class': 'comment-list'})
        for comment_box in comment_list.findAll(
                lambda x: x.name == 'div' and 'class' in x.attrs
                and 'comment-item' in x.attrs['class']):
            box = comment_box.select('> div')[0].select('> div')[0]
            try:
                text = box.find('div', attrs={'class': 'xe_content'}).text
            except Exception:
                continue
            if not text:
                before_comment = None
                print('continued')
                break

            # Relative Korean timestamps ("N days/hours/minutes ago") become
            # deltas from now (KST = UTC+9); otherwise parse the dotted date.
            date = box.find('div').findAll('div')[-1].find('span').text
            delta = None
            if '일 전' in date:
                delta = timedelta(days=int(date[0]))
            if '시간 전' in date:
                delta = timedelta(hours=int(date[0]))
            if '분 전' in date:
                delta = timedelta(minutes=int(date[0]))
            if delta is not None:
                date = datetime.utcnow() + timedelta(hours=9) - delta
            else:
                date = datetime.strptime(date, '%Y.%m.%d')

            selected = box.select('div.comment-bar > div')
            if len(selected) == 0:
                selected = box.select('div.comment-bar-author > div')
            # Anonymize the writer's nickname.
            writer = selected[0].text.strip()
            writer = hashlib.shake_128(writer.encode()).hexdigest(length=4)
            user = session.query(models.User).\
                filter(models.User.nickname == writer).\
                first()
            if user is None:
                user = models.User(nickname=writer)
                session.add(user)
                session.flush()

            comment = models.Comment(data=text, cid=content.id,
                                     created_at=date, uid=user.id)
            if 'depth' in comment_box.attrs['class'] and before_comment:
                # Reply: prefix the anonymized mention target, thread under
                # the previous top-level comment.
                target = box.select('span.ed.label-primary')[0].text.strip()[1:]
                target = hashlib.shake_128(target.encode()).hexdigest(length=4)
                comment.data = f'@{target} {comment.data}'
                comment.parent_id = before_comment.id
            else:
                before_comment = comment

            # NOTE(review): this dedup check is per-user, not per-comment —
            # only the first comment by each user is ever saved. Looks
            # suspicious; confirm intent before changing.
            exist = session.query(models.Comment).\
                filter(models.Comment.uid == user.id).\
                first()
            if not exist:
                session.add(comment)
                session.flush()
            print(text)
    session.commit()
    return content
def parse_content(self, bs):
    """Parse one article page into a models.Content row.

    Deduplicates by a blake2b hash of the title (existing rows just get their
    timestamp/origin refreshed), parses the posting date, anonymizes the
    writer, rehosts every image/video on S3, then persists the row. Returns
    the new or existing row, or None on failure or when a 'transparent'
    media source is hit.
    """
    print('parse content')
    try:
        new = bs
        print(bs)
        title = new.select('h4')[0].text
        hasher = hashlib.blake2b(digest_size=12)
        hasher.update(title.encode())
        hashed = hasher.hexdigest()
        date = None

        exist = session.query(models.Content).filter(
            models.Content.permanent_id == hashed).first()
        if exist:
            # Already stored: refresh timestamp and origin only.
            if date is None:
                exist.created_at = datetime.utcnow() + timedelta(hours=9)
            else:
                exist.created_at = date
            exist.origin = enums.DataOriginEnum.DOGDRIP
            session.commit()
            print('passed')
            return exist

        # Relative Korean timestamps become deltas from now (KST = UTC+9);
        # otherwise try the dotted absolute date. First parsable span wins.
        date = None
        for date_obj in new.select(
                'div.ed.flex.flex-wrap.flex-left.flex-middle.title-toolbar '
                'span.ed.text-xsmall.text-muted'):
            text = date_obj.text
            delta = None
            if '일 전' in text:
                delta = timedelta(days=int(text[0]))
            if '시간 전' in text:
                delta = timedelta(hours=int(text[0]))
            if '분 전' in text:
                delta = timedelta(minutes=int(text[0]))
            if delta is not None:
                date = datetime.utcnow() + timedelta(hours=9) - delta
            else:
                try:
                    date = datetime.strptime(text, '%Y.%m.%d')
                except:
                    print('continued')
                    continue
            break

        # Anonymize the writer's nickname.
        # TODO: fetch the writer's real id and hash that instead.
        writer = new.select('div.title-toolbar span')[0].text.strip()
        writer = hashlib.shake_128(writer.encode()).hexdigest(length=4)
        user = models.User(nickname=writer)
        session.add(user)
        session.flush()

        content = new.select(
            'div.ed.article-wrapper.inner-container > div.ed > div')[1]
        content = content.select('div')[0]

        # Rehost every image on S3 and rewrite its src.
        for img in content.select('img'):
            if 'img.sscroll.net' in img['src']:
                print('continued')
                continue
            if './' in img['src']:
                img['src'] = img['src'].replace('./', 'http://www.dogdrip.net/')
            elif img['src'].startswith('/'):
                img['src'] = 'https://www.dogdrip.net' + img['src']
            elif not img['src'].startswith('http'):
                img['src'] = 'https://www.dogdrip.net/' + img['src']
            if 'transparent' in img['src']:
                return
            digest = hashlib.sha256(img['src'].encode())
            extension = img['src'].split('.')[-1]
            rename = digest.hexdigest() + '.' + extension
            urllib.request.urlretrieve(img['src'], rename)
            s3.upload_file(rename, bucket, 'upload/' + rename,
                           ExtraArgs={'ACL': 'public-read',
                                      'CacheControl': 'max-age=2592000'})
            os.remove(rename)
            img['src'] = 'http://img.sscroll.net/upload/' + rename

        # Same rehosting for video sources.
        for video in content.select('source'):
            if 'img.sscroll.net' in video['src']:
                print('continued')
                continue
            if './' in video['src']:
                video['src'] = video['src'].replace('./',
                                                    'http://www.dogdrip.net/')
            elif video['src'].startswith('/'):
                video['src'] = 'https://www.dogdrip.net' + video['src']
            elif not video['src'].startswith('http'):
                video['src'] = 'https://www.dogdrip.net/' + video['src']
            if 'transparent' in video['src']:
                return
            digest = hashlib.sha256(video['src'].encode())
            extension = video['src'].split('.')[-1]
            rename = digest.hexdigest() + '.' + extension
            urllib.request.urlretrieve(video['src'], rename)
            s3.upload_file(rename, bucket, 'upload/' + rename,
                           ExtraArgs={'ACL': 'public-read',
                                      'CacheControl': 'max-age=2592000'})
            os.remove(rename)
            video['src'] = 'http://img.sscroll.net/upload/' + rename

        content = content.decode()
    except Exception as e:
        print('exit')
        traceback.print_tb(e.__traceback__)
        return

    item = models.Content(title=title, data=content, permanent_id=hashed,
                          created_at=date,
                          origin=enums.DataOriginEnum.DOGDRIP, uid=user.id)
    if item.created_at is None:
        item.created_at = datetime.utcnow() + timedelta(hours=9)

    # Up/down vote counts are embedded in the page's inline script.
    data = new.select('script[type="text/javascript"]')[0].text
    try:
        up, down = filter(lambda x: x != '', re.compile('[0-9]*').findall(data))
        item.up = up
        item.down = down
    except:
        pass
    session.add(item)
    session.commit()
    print('added!')
    return item
def save(self):
    """Persist this instance immediately: add it to the shared session,
    flush the pending SQL, then commit."""
    session.add(self)
    session.flush()
    session.commit()