def __process_paper_line(line, exist_old_db, name_old_db, file_name):
    """Store a Scopus paper record in the new database and process its authors.
    Returns 1 if the paper was newly stored, 0 if it was already present."""
    dc = DoiClient()
    db_authors_new = DBManager('bioinfo_authors', db_name=get_db_name())
    db_papers_new = DBManager('bioinfo_papers', db_name=get_db_name())
    db_papers_old = None
    if exist_old_db:
        db_papers_old = DBManager('bioinfo_papers', db_name=name_old_db)
    paper_new_db = db_papers_new.find_record({'DOI': line['DOI']})
    if not paper_new_db:
        paper_old_db = None
        if db_papers_old:
            paper_old_db = db_papers_old.find_record({'DOI': line['DOI']})
        pubmed_id = None
        if paper_old_db:
            # reuse data already collected in the old database
            paper_categories = paper_old_db['edamCategory']
            link = paper_old_db['link']
            authors = paper_old_db.get('authors')
            authors_gender = paper_old_db.get('authors_gender')
            pubmed_id = paper_old_db.get('pubmed_id')
        else:
            paper_categories = ''
            logging.info(f"Obtaining the link of the paper {line['DOI']}")
            link = dc.get_paper_link_from_doi(line['DOI'])
            authors = []
            authors_gender = []
        abstract, _pubmed_id, paper_full = obtain_paper_abstract_and_pubmedid(
            file_name, line['EID'])
        if not pubmed_id:
            pubmed_id = _pubmed_id
        record_to_save = {
            'title': line['Title'],
            'year': line['Year'],
            'DOI': line['DOI'],
            'source': line['Source title'].title(),
            'volume': line['Volume'],
            'issue': line['Issue'],
            'scopus_id': line['Art. No.'],
            'link': link,
            'e_id': line['EID'],
            'citations': line['Cited by'],
            'edamCategory': paper_categories,
            'pubmed_id': pubmed_id,
            'abstract': abstract
        }
        db_papers_new.store_record(record_to_save)
        if paper_full:
            __process_paper_authors(line, paper_full, db_authors_new,
                                    authors, authors_gender)
        else:
            logging.error(
                f"Could not find the full details of the paper {line['DOI']}")
        return 1
    else:
        logging.info(f"Paper {line['DOI']} already in the database!")
        return 0
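# A hedged sketch of how __process_paper_line() above might be driven: iterate a
# Scopus summary CSV and count how many new papers were stored, using the 1/0
# return value. The helper name, the summary path, the comma delimiter, and the
# exist_old_db/name_old_db defaults are illustrative assumptions, not part of the
# original code; module-level csv/pathlib imports are assumed as elsewhere here.
def load_papers_from_summary_file(file_name, exist_old_db=False, name_old_db=None):
    new_papers = 0
    summary_path = pathlib.Path('data', 'raw', 'summary', file_name)
    with open(str(summary_path), 'r') as f:
        for line in csv.DictReader(f, delimiter=','):
            new_papers += __process_paper_line(line, exist_old_db,
                                               name_old_db, file_name)
    return new_papers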
def __init__(self, data_dir, coord, symbol_list, year_range, symbol_first,
             data_win_len, receptive_field, queue_size=500):
    # system initialize
    self.db_manager = DBManager(data_dir)
    self.preprocessor = Preprocessor()
    self.coord = coord
    self.threads = []
    # processing params
    self.data_dir = data_dir
    self.symbol_list = symbol_list
    self.year_range = year_range
    self.symbol_first = symbol_first
    self.data_win_len = data_win_len
    self.receptive_field = receptive_field
    # queue setup
    self.trans_placeholder = tf.placeholder(dtype=tf.float32, shape=None)
    self.trans_queue = tf.PaddingFIFOQueue(queue_size, ['float32'],
                                           shapes=[(None, 1)])
    self.trans = self.trans_queue.enqueue([self.trans_placeholder])
    # for multithreading:
    self.yield_list = (itertools.product(self.symbol_list, self.year_range)
                       if self.symbol_first
                       else itertools.product(self.year_range, self.symbol_list))
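# A hedged sketch of the enqueue side that usually accompanies a TF1-style
# tf.PaddingFIFOQueue reader like the one above: a worker loop feeds
# (length, 1)-shaped float32 windows through trans_placeholder until the
# coordinator requests a stop. The method names (thread_main, start_threads) and
# the db_manager.load / preprocessor.process calls are illustrative assumptions,
# not the original API; numpy-as-np and threading imports are assumed.
def thread_main(self, sess):
    for first, second in self.yield_list:
        if self.coord.should_stop():
            break
        symbol, year = (first, second) if self.symbol_first else (second, first)
        raw = self.db_manager.load(symbol, year)      # assumed loader call
        series = self.preprocessor.process(raw)       # assumed preprocessing call
        window = np.asarray(series, dtype=np.float32).reshape(-1, 1)
        sess.run(self.trans, feed_dict={self.trans_placeholder: window})

def start_threads(self, sess, n_threads=1):
    for _ in range(n_threads):
        thread = threading.Thread(target=self.thread_main, args=(sess,))
        thread.daemon = True
        thread.start()
        self.threads.append(thread)
    return self.threads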
def _on_key_release(self, widget, event):
    if event.keyval == 65535:  # delete key
        sel = widget.get_selection().get_selected()
        item_id = self.liststore.get(sel[1], 3)[0]
        dbm = DBManager(db_loc)
        dbm.delete(item_id)
        self.liststore.remove(sel[1])
def __init__(self, is_coinbase, encryption_pass=None):
    if not encryption_pass:
        encryption_pass = getpass.getpass("Encryption password: ")
    # NOTE: the secrets-loading and notification-setup code is masked ("******")
    # in the source; the condition and client class below are assumptions
    # reconstructed from the surviving arguments, not the original code.
    if default_config.notification_receiver:
        self.email_notification = EmailNotification(
            self.secrets["gmail_password"],
            default_config.notification_receiver,
        )
    else:
        self.email_notification = None
    if is_coinbase and default_config.withdraw_btc_threshold:
        self.address_selector = AddressSelector(
            self.secrets["master_public_key"],
            default_config.withdraw_beginning_address,
        )
    self.db_manager = DBManager()
    self.next_robinhood_buy_datetime = self.calcRobinhoodFirstBuyTime()
    if is_coinbase:
        Logger.info("\n\n\n")
        Logger.info("----------------------")
        Logger.info("----------------------")
        Logger.info("Coinbase DCA started")
        Logger.info("")
        self.coinbase_pro = self.newCoinbaseProClient()
        self.next_buy_datetime = self.calcFirstBuyTime()
def player_check(userId, token):
    db = DBManager().get_db_cursor(Database)
    db.execute("select * from %s where UserId=%s" % (Table, userId))
    player = db.fetchone()
    if player and player[3] == token:
        return True
    return False
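# A hedged sketch of the same lookup as a parameterized query, so the user id is
# never interpolated into the SQL string. The helper name is illustrative, and it
# assumes the cursor returned by DBManager().get_db_cursor(Database) is a DB-API
# cursor that accepts "%s" placeholders (e.g. pymysql); the table name still has
# to come from a trusted constant because placeholders cannot bind identifiers.
def player_check_parameterized(user_id, token):
    db = DBManager().get_db_cursor(Database)
    db.execute("SELECT * FROM %s WHERE UserId = %%s" % Table, (user_id,))
    player = db.fetchone()
    return bool(player and player[3] == token)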
def enroll_student():
    print("request.data = ", request.data)
    data = request.json
    print("data = ", data)
    course_id = data['courseid']
    p_email = data['email']
    p_fname = data['firstname']
    p_lname = data['lastname']
    dbm = DBManager()
    qry = 'select Name, Image_File from Courses where Id = %s and Status = "1"' % course_id
    course_details = dbm.execute_query(qry)
    course_name = course_details[0][0]
    course_img_file = course_details[0][1]
    course_url = {
        '1': 'http://tinyurl.com/yc5omxjl',
        '2': 'http://tinyurl.com/ya8st6uo',
        '3': 'http://tinyurl.com/y92rul68',
        '4': 'http://tinyurl.com/ya6cvm3j',
    }
    send_email_with_link(p_fname, p_lname, p_email, course_id, course_name,
                         course_url[course_id], course_img_file)
    response = app.response_class(response=json.dumps('Success'), status=200,
                                  mimetype='application/json')
    return response
def upload_file():
    if request.method == 'POST':
        # check if the post request has the file part
        if 'file' not in request.files:
            flash('No file part')
            return redirect(request.url)
        file = request.files['file']
        # if the user does not select a file, the browser may also
        # submit an empty part without a filename
        if file.filename == '':
            flash('No selected file')
            return redirect(request.url)
        if file and allowed_file(file.filename):
            filename = secure_filename(file.filename)
            filename_w_prefix = ''.join([str(time.time()), '_', filename.lower()])
            file.save(os.path.join(application.config['UPLOAD_FOLDER'],
                                   filename_w_prefix))
            return redirect(url_for('uploaded_file', filename=filename_w_prefix))
    sql_lite_file = os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                 'db.sqlite')
    dbm = DBManager(sql_lite_file)
    happy = dbm.load_results(1)
    unhappy = dbm.load_results(0)
    return render_template('home.html', title='Happiness Recognizer',
                           happy=happy, unhappy=unhappy)
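# The allowed_file() helper referenced above is not shown in this snippet; a
# minimal sketch in the style of the standard Flask upload example, with an
# assumed ALLOWED_EXTENSIONS whitelist, could look like this:
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}

def allowed_file(filename):
    # accept only filenames whose extension is in the whitelist
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS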
def main():
    # db = DBManager("sqlite:///sqlalchemy_example.db")
    db = DBManager("postgresql://*****:*****@localhost:5432/bourse")
    # db.dropTables()
    db.createAll()
    # import_csv(db, "data/ALL.txt")
    # getISINData(["FR0000130007", "LU0323134006"])
    # getISINData(all_tickers)
    # db.buildActionHourTable()
    # db.fillHoles()
    # ha = IndicatorHeikinAshi()
    # db.runIndicator(ha)
    # db.convertResolution()
    # ha = IndicatorHeikinAshi(ActionHour, HeikinAshiHour)
    # db.runIndicator(ha)
    # ha = IndicatorHeikinAshi(Action30Minutes, HeikinAshi30Minutes)
    # db.runIndicator(ha)
    # db.plot(HeikinAshiHour)
    # db.plot(ActionHour)
    # db.runIndicator(ha)
    # db.convertResolution()
    # ha = IndicatorHeikinAshi(ActionDay, HeikinAshiDay)
    # db.runIndicator(ha)
    # db.plot(ActionDay)
    # ha = IndicatorHeikinAshi(ActionDay, HeikinAshiDay)
    ha = IndicatorHeikinAshi(ActionHour, HeikinAshiHour)
    # db.plot(HeikinAshiDay)
    db.testIndicatorPerformance(ha)
    print("[+] DONE")
def main():
    base_url = 'https://www.ecva.net/'
    db_manager = DBManager()
    for conf_id in CONFERENCES:
        list_url = base_url + 'papers.php'
        fetch_papers(db_manager, base_url, list_url, conf_id, 'Main', conf_id)
    db_manager.write_db()
def main() -> None:
    db_manager = DBManager()
    base_url = 'https://www.aaai.org/Library/'
    for conf_link, conf_name in zip(CONF_LINKS, CONF_NAMES):
        list_url = base_url + conf_link
        fetch_papers(db_manager, base_url, list_url, conf_name, 'Main', conf_name)
    db_manager.write_db()
def load_data_from_file_into_db(filename):
    db = DBManager('bioinfo_papers')
    current_dir = pathlib.Path(__file__).parents[0]
    bio_file_name = current_dir.joinpath('data', filename)
    with open(str(bio_file_name), 'r', encoding='ISO-8859-1') as f:
        file = csv.DictReader(f, delimiter='\t')
        for line in file:
            line['source'] = line['source'].lower()
            db.store_record(line)
def _on_refresh_clicked(self, arg):
    dbm = DBManager(db_loc)
    self.liststore.clear()
    for i in dbm.retrieve_all():
        self.liststore.append([i[1], i[2], repr(i[4]), i[0]])
    self.show_all()
def load_author_data_from_scopus_files():
    db_name = get_db_name()
    db_authors = DBManager('bioinfo_authors', db_name=db_name)
    db_papers = DBManager('bioinfo_papers', db_name=db_name)
    dir_summary = pathlib.Path('data', 'raw', 'summary')
    file_names = sorted(os.listdir(dir_summary))
    for file_name in file_names:
        logging.info(f"\nProcessing: {file_name}")
        journal_file_name = dir_summary.joinpath(file_name)
        with open(str(journal_file_name), 'r') as f:
            file = csv.DictReader(f, delimiter=',')
            for line in file:
                paper_db = db_papers.find_record({'DOI': line['DOI']})
                if paper_db:
                    logging.info(
                        f"Processing the authors of the paper: {line['DOI']}")
                    abstract, _pubmed_id, paper_full = obtain_paper_abstract_and_pubmedid(
                        file_name, line['EID'])
                    __process_paper_authors(line, paper_full, db_authors, [], [])