def delete(self, book_id):
    """Delete a book, refusing when it is currently lent out.

    Returns ('Cannot delete lent book', 403) when a status-2 loan exists
    for the book, otherwise deletes the row and returns ('', 204).
    """
    # A loan row with status 2 means the book is in a recipient's hands.
    active_loan = query_db(
        "select recipient from loan where book = ? and status = 2",
        (book_id,), one=True)
    if active_loan:
        return ('Cannot delete lent book', 403)
    query_db("delete from book where id = ?", (book_id,))
    return ('', 204)
def get(self, loan_id=None):
    """Return one loan by id, or all pending/active loans on the user's books.

    With loan_id: JSON of that loan's raw columns, or ('', 404) when absent.
    Without: JSON list of loans with status 0 or 2 on books owned by g.user,
    each with the referenced book row expanded into a dict.
    """
    if loan_id:
        loan = query_db("select id, book, recipient, start, span, status from loan where id = ?", (loan_id,), one=True)
        if loan:
            return jsonify({
                "id": loan[0],
                "book": loan[1],
                "recipient": loan[2],
                "start": loan[3],
                "span": loan[4],
                "status": loan[5]
            })
        else:
            return ('', 404)
    else:
        # NOTE(review): statuses 0 and 2 look like "requested" and "lent out"
        # — confirm against the loan state machine used elsewhere.
        loans = query_db(
            "select id, book, recipient, start, span, status from loan "
            "where book in (select id from book where owner = ?) and status in (0, 2)",
            (g.user,))
        return jsonify([{
            "id": loan[0],
            # Expand the book id into the full book row keyed by column name.
            "book": {
                field: value for (field, value) in zip(
                    ["id", "owner", "title", "author", "year"],
                    query_db("select * from book where id = ?", (loan[1],), one=True))},
            "recipient": loan[2],
            "start": loan[3],
            "span": loan[4],
            "status": loan[5]
        } for loan in loans])
def put(self, book_id):
    """Update a book's editable fields from the submitted form.

    Only title/author/year may change; empty form values are ignored.
    Returns the result of self.get(book_id), i.e. the updated resource.
    """
    updatable_fields = ["title", "author", "year"]
    # Collect the (field, value) pairs once so the SET clause and the bound
    # parameters are guaranteed to line up (the original built them with two
    # separate comprehensions over request.form).
    updates = [(field, value) for (field, value) in request.form.items()
               if field in updatable_fields and value]
    if updates:
        # BUG FIX: with no updatable fields the original issued the invalid
        # statement "update book set  where id = ?".
        query = ("update book set "
                 + ", ".join("%s = ?" % (field,) for (field, _) in updates)
                 + " where id = ?")
        query_db(query, [value for (_, value) in updates] + [book_id])
    return self.get(book_id)
def test_query_db_exception(self):
    """query_db must surface cursor failures as a descriptive Exception."""
    fake_conn = Mock(name="dbconnection")
    fake_cursor = Mock(name="mycursor")
    fake_cursor.fetchall.side_effect = Exception
    fake_conn.cursor.return_value = fake_cursor
    with self.assertRaises(Exception) as cm:
        utils.query_db(fake_conn, 12345)
    self.assertEqual(
        str(cm.exception),
        'Error while executing query : 12345. Please check the logs for details')
def submitmessage():
    """Validate and store a user-submitted message.

    Rejects messages over 140 chars or containing a forbidden word;
    returns "OK" on success or an error string otherwise.
    """
    message = request.form.get("message", '')
    if len(message) > 140:
        return "message too long"
    if badword_in_str(message):
        return "forbidden word in message"
    # insert new message in DB
    # SECURITY FIX: the original interpolated the raw message into the SQL
    # text ("... values ('%s')" % message), allowing SQL injection via a
    # quote in the message. Bind it as a parameter instead.
    try:
        query_db("insert into messages values (?)", (message,))
    except sqlite3.Error as e:
        return str(e)
    return "OK"
def handler():
    """Test-harness endpoint: validate an incoming question against the
    expected state/timing and record the scripted answer.

    Reads the JSON payload from ?q=..., checks state and inter-message
    timing windows, inserts the scripted response into ema_data, and
    advances the tester. Always returns '0'.
    """
    global last_msg_name, last_timestamp
    q = json.loads(request.args.get('q'))
    if q['suid'] == '995':
        return '0'
    if tester.finished:
        cprint('wrong, should not receive question', 'red')
    msg_name = get_message_name(q)
    print(msg_name)
    now = datetime.datetime.now()
    enjoyable = 'enjoyable' in last_msg_name
    other_actions = ('timeout' in last_msg_name or
                     'breathing' in last_msg_name or
                     'bodyscan' in last_msg_name)
    if not tester.at_correct_state(q):
        cprint('state wrong', 'red')
    elif not tester.at_expected_time(now):
        cprint('time wrong', 'red')
        print(tester.expected_time)
        print(now)
    elif enjoyable and \
            not (datetime.timedelta(seconds=35) < now - last_timestamp < datetime.timedelta(seconds=37)):
        # 'enjoyable' prompts are expected ~36s after the previous message.
        cprint('enjoyable time wrong', 'red')
    elif other_actions and \
            not (datetime.timedelta(seconds=17) < now - last_timestamp < datetime.timedelta(seconds=19)):
        cprint('breathing/bodyscan/timeout time wrong', 'red')
    else:
        cprint('right', 'green')
    ans = tester.cur_state_response
    if ans is not None:  # PEP 8: identity test for None (was "ans != None")
        primkey = f'{q["id"]}:{q["empathid"]}'
        retrieval_code = get_message_info(q)["retrieval_object"]
        # SECURITY FIX: the original built this INSERT with an f-string over
        # request data, which permits SQL injection. Bind parameters instead.
        query_db(
            'INSERT INTO ema_data(suid, primkey, variablename, answer, language, mode, version, completed) '
            'VALUES (?, ?, ?, ?, 1, 3, 1, 1)',
            (q["suid"], primkey, retrieval_code, ans))
    # NOTE(review): the collapsed source is ambiguous about whether the next
    # three statements sat inside the `if ans` block; they are kept at
    # function level because the globals are read unconditionally above.
    tester.increment()
    last_msg_name = msg_name
    last_timestamp = now
    return '0'
def debts():
    """List the caller's active borrowings (loans with status 2).

    Returns a JSON list where each entry expands the borrowed book's row
    and names the lender (the book's owner).
    """
    rows = query_db(
        "select id, book, start, span, status from loan where recipient = ? and status = 2",
        (g.user,))
    result = []
    for debt in rows:
        # PERF: one query per debt instead of two — the fetched book row
        # already carries the owner column (column order id, owner, title,
        # author, year, matching the explicit selects elsewhere in this file),
        # so reuse it for the "lender" field.
        book_row = query_db("select * from book where id = ?", (debt[1],), one=True)
        result.append({
            "id": debt[0],
            "book": dict(zip(["id", "owner", "title", "author", "year"], book_row)),
            "lender": book_row[1],
            "start": debt[2],
            "span": debt[3],
            "status": debt[4],
        })
    return jsonify(result)
def get_token():
    """Authenticate with username/password and return an API token.

    Returns {"token": ...} on success, or ({"error": ...}, 401) when the
    credentials do not match.
    """
    username = request.form['username']
    password = request.form['password']
    hashed_password = query_db("select password from user where username = ?",
                               (username,), one=True)
    if (
        hashed_password is not None and
        bcrypt.checkpw(password.encode("utf-8"), hashed_password[0].encode("utf-8"))
    ):
        row = query_db("select token from user_token where username = ?",
                       (username,), one=True)
        if row is None:
            # SECURITY FIX: use secrets (CSPRNG), not random, for tokens.
            import secrets
            token = ''.join(secrets.choice(string.ascii_letters + string.digits)
                            for _ in range(40))
            query_db("insert into user_token (username, token) values (?, ?)",
                     (username, token))
        else:
            # BUG FIX: the original returned the raw DB row here (it left
            # `token` bound to the row object), serializing as a list
            # instead of the token string.
            token = row[0]
        return jsonify({"token": token})
    else:
        return jsonify({"error": "Bad credentials"}), 401
def test(args):
    """Ad-hoc test driver for the Whitelist class using an in-memory DB.

    Returns 0 (callers treat it as an exit code).
    """
    import sqlite3
    # Create the test db in RAM
    db = sqlite3.connect(":memory:")
    #db.text_factory = str
    # Create the whitelist to be tested
    whitelist = Whitelist(db)
    # Add a record
    call_record = {
        "NAME": "Bruce",
        "NMBR": "1234567890",
        "DATE": "1012",
        "TIME": "0600"
    }
    whitelist.add_caller(call_record)
    # List the records
    query = 'SELECT * from Whitelist'
    results = utils.query_db(db, query)
    # CONSISTENCY FIX: print() calls instead of Python-2 print statements —
    # the sibling test helpers in this codebase already use the function form,
    # and this parses under both Python 2 and 3.
    print("Query results:")
    print(results)
    number = "1234567890"
    print("Check number: " + number)
    print(whitelist.check_number(number))
    print("Check wrong number:")
    print(whitelist.check_number("1111111111"))
    print("Get number:")
    print(whitelist.get_number(number))
    return 0
def log_caller(self, callerid):
    """Persist one caller-ID record to the CallLog table.

    callerid carries NAME, NMBR, DATE (%m%d) and TIME (%H%M) fields.
    Returns the new entry's call number (last_insert_rowid).
    """
    insert_query = """INSERT INTO CallLog(
        Name, Number, Date, Time, SystemDateTime)
        VALUES(?,?,?,?,?)"""
    values = [
        callerid['NAME'],
        callerid['NMBR'],
        # Normalize the raw caller-ID date/time into display formats.
        datetime.strptime(callerid['DATE'], '%m%d').strftime('%d-%b'),
        datetime.strptime(callerid['TIME'], '%H%M').strftime('%I:%M %p'),
        datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')[:-3],  # trim to ms
    ]
    self.db.execute(insert_query, values)
    self.db.commit()
    row = utils.query_db(self.db, "select last_insert_rowid()", (), True)
    call_no = row[0]
    if self.config["DEBUG"]:
        print("> New call log entry #{}".format(call_no))
        pprint(values)
    return call_no
def keywords_cases(keywords):
    """Return cases whose keyword list contains every supplied keyword.

    ``keywords`` is a comma-separated string, e.g. "fraud,appeal".
    """
    keywords = keywords.split(',')
    query_str = query_str_start() + 'WHERE '
    for keyword in keywords:
        # SECURITY NOTE(review): each keyword is concatenated straight into
        # the SQL text, so a crafted keyword can inject SQL. query_db is
        # called with only a raw string throughout this module — confirm
        # whether it supports bound parameters and switch to them if so.
        query_str += '"' + keyword + '" IN case.keywords AND '
    # The trailing "1 = 1" absorbs the final dangling "AND ".
    query_str += '1 = 1' + query_str_end()
    return form_json(query_db(query_str))
def appeal(reference_p1, reference_p2, reference_p3):
    """Look up a single appeal by its three-part reference "p1/p2/p3"."""
    reference_p1 = str(reference_p1)
    reference_p2 = str(reference_p2)
    reference_p3 = str(reference_p3)
    # SECURITY NOTE(review): the reference parts are concatenated into the
    # SQL string, so crafted input could inject SQL. Prefer bound parameters
    # if query_db supports them.
    query_str = query_str_start(True) + 'WHERE appeal.reference = "' + reference_p1 + '/' + reference_p2 + '/' + reference_p3 + '"' + query_str_end(True) + ' LIMIT 1'
    result = query_db(query_str)
    return form_json(result)
def test(args):
    """Ad-hoc test driver for CallLogger using an in-memory DB.

    Returns 0 (callers treat it as an exit code).
    """
    import sqlite3
    import utils
    # Create the test db in RAM
    db = sqlite3.connect(":memory:")
    # Create the logger to be tested
    logger = CallLogger(db)
    # Add a record
    callerid = {
        "NAME": "Bruce",
        "NMBR": "1234567890",
        "DATE": "1012",
        "TIME": "0600"
    }
    logger.log_caller(callerid)
    # List the records
    query = "SELECT * FROM CallLog"
    results = utils.query_db(db, query)
    # CONSISTENCY FIX: print() calls instead of Python-2 print statements;
    # parses under both Python 2 and 3.
    print("Query results:")
    print(results)
    return 0
def case(reference_p1, reference_p2):
    """Look up a single case by its two-part reference "p1/p2"."""
    reference_p1 = str(reference_p1)
    reference_p2 = str(reference_p2)
    # SECURITY NOTE(review): the reference parts are concatenated into the
    # SQL string, so crafted input could inject SQL. Prefer bound parameters
    # if query_db supports them.
    query_str = query_str_start(
    ) + 'WHERE case.reference = "' + reference_p1 + '/' + reference_p2 + '"' + query_str_end(
    ) + ' LIMIT 1'
    result = query_db(query_str)
    return form_json(result)
def get(self):
    """Return the authenticated user's profile as JSON, or 404."""
    profile = query_db(
        "select username, first_name, last_name from user where username = ?",
        (g.user,), one=True)
    if profile is None:
        # BUG FIX: the original fell off the end and returned None when the
        # user row was missing, which Flask rejects at response time.
        return ('', 404)
    username, first_name, last_name = profile
    return jsonify({
        "username": username,
        "first_name": first_name,
        "last_name": last_name
    })
def appeal(reference_p1, reference_p2, reference_p3):
    """Look up a single appeal by its three-part reference "p1/p2/p3"."""
    reference_p1 = str(reference_p1)
    reference_p2 = str(reference_p2)
    reference_p3 = str(reference_p3)
    # SECURITY NOTE(review): the reference parts are concatenated into the
    # SQL string, so crafted input could inject SQL. Prefer bound parameters
    # if query_db supports them.
    query_str = query_str_start(
        True
    ) + 'WHERE appeal.reference = "' + reference_p1 + '/' + reference_p2 + '/' + reference_p3 + '"' + query_str_end(
        True) + ' LIMIT 1'
    result = query_db(query_str)
    return form_json(result)
def test_get_client_object(self):
    """query_db should execute the query once and return fetchall()'s value."""
    conn = Mock(name="dbconnection")
    cursor = Mock(name="mycursor")
    cursor.fetchall.return_value = "testing_return_value"
    conn.cursor.return_value = cursor
    result = utils.query_db(conn, 12345)
    self.assertEqual("testing_return_value", result)
    # The helper must open exactly one cursor and run the query exactly once.
    conn.cursor.assert_called_once()
    cursor.execute.assert_called_once_with(12345)
    cursor.fetchall.assert_called_once()
def book_search():
    """Paginated book search over title/author, with current-bearer info.

    Query params: q (required substring), page (default 1), size (default 10).
    The response carries a "page-count" header.
    """
    query = request.args["q"]
    page = int(request.args.get("page", 1))
    size = int(request.args.get("size", 10))
    pattern = "%" + query + "%"  # build the LIKE pattern once
    book_count = int(query_db(
        "select count(*) from book where title like ? or author like ?",
        (pattern, pattern), one=True)[0])
    books = query_db(
        "select id, owner, title, author, year from book where title like ? or author like ? limit ? offset ?",
        (pattern, pattern, size, (page - 1) * size))

    def _current_bearer(bid):
        # PERF FIX: one lookup per book — the original issued this exact
        # query twice per row (once for the truth test, once for the value).
        row = query_db("select recipient from loan where book = ? and status = 2",
                       (bid,), one=True)
        return row[0] if row else None

    response = jsonify([{
        "id": book_id,
        "owner": owner,
        "title": title,
        "author": author,
        "year": year,
        "bearer": _current_bearer(book_id),
    } for (book_id, owner, title, author, year) in books])
    response.headers["page-count"] = math.ceil(book_count / size)
    return response
def get(self, book_id=None):
    """Return a single book (by id) or a page of the caller's books.

    Without book_id: query params page (default 1) and size (default 10)
    control pagination; the response carries a "page-count" header and each
    entry includes the current bearer, if any. With book_id: the book's
    fields as JSON, or ('', 404) when the id is unknown.
    """
    if book_id:
        row = query_db(
            "select id, owner, title, author, year from book where id = ?",
            (book_id,), one=True)
        if row is None:
            # The original detected a missing book by catching the TypeError
            # from unpacking None; an explicit check is clearer.
            return ('', 404)
        id_, owner, title, author, year = row
        return jsonify({
            "id": id_,
            "owner": owner,
            "title": title,
            "author": author,
            "year": year
        })
    page = int(request.args.get("page", 1)) - 1
    size = int(request.args.get("size", 10))
    book_count = int(query_db("select count(*) from book where owner = ?",
                              (g.user,), one=True)[0])
    books = query_db(
        "select id, title, author, year "
        "from book where owner = ? "
        "order by title collate nocase "
        "limit ? offset ?",
        (g.user, size, page * size))
    items = []
    for (id_, title, author, year) in books:
        # PERF FIX: one bearer lookup per book — the original ran the same
        # query twice per row (truth test + value).
        bearer_row = query_db(
            "select recipient from loan where book = ? and status = 2",
            (id_,), one=True)
        items.append({
            "id": id_,
            "title": title,
            "owner": g.user,
            "author": author,
            "year": year,
            "bearer": bearer_row[0] if bearer_row else None,
        })
    response = jsonify(items)
    response.headers["page-count"] = math.ceil(book_count / size)
    return response
def processFailedJobs(jobs, conn):
    """Move FAILED jobs to ERROR and mark their owning request as ERROR.

    The conditional UPDATE (status='FAILED' in the WHERE clause) plus the
    rowcount check makes the claim atomic against concurrent workers.
    """
    finishedJobs = query_db(conn, "SELECT * FROM jobs WHERE status='FAILED'")
    for finishedJob in finishedJobs:
        c = conn.cursor()
        c.execute(
            "UPDATE jobs SET status='ERROR', lastchange=? WHERE id=? and status='FAILED'",
            (time.time(), finishedJob['id']))
        conn.commit()
        if c.rowcount:
            # We won the race to claim this job; fail its request too.
            # (Removed the unused `nextJobID` local the original computed here.)
            c = conn.cursor()
            c.execute(
                "UPDATE requests SET status='ERROR', lastchange=? WHERE uuid=?",
                (time.time(), finishedJob['uuid']))
            conn.commit()
def test(db, config):
    """ Unit Tests """
    # Exercises Blacklist add/check/get against the provided db and config.
    # Returns 0 on success, 1 on any assertion failure (used as exit code).
    print("*** Running Blacklist Unit Tests ***")
    # Create the blacklist to be tested
    blacklist = Blacklist(db, config)
    # Add a record
    callerid = {
        "NAME": "Bruce",
        "NMBR": "1234567890",
        "DATE": "1012",
        "TIME": "0600",
    }
    blacklist.add_caller(callerid, "Test")
    # List the records
    query = 'select * from Blacklist'
    results = utils.query_db(db, query)
    print(query + " results:")
    pprint(results)
    try:
        number = "1234567890"
        print("Assert is blacklisted: " + number)
        assert blacklist.check_number(
            number), number + " should be blacklisted"
        number = "1111111111"
        print("Assert not blacklisted: " + number)
        assert not blacklist.check_number(
            number), number + " should not be blacklisted"
        number = "1234567890"
        print("Get number: " + number)
        caller = blacklist.get_number(number)
        pprint(caller)
        # get_number returns rows; row 0, column 0 is the phone number.
        assert caller[0][
            0] == number, number + " should match get_number " + caller[0][0]
    except AssertionError as e:
        print("*** Unit Test FAILED ***")
        pprint(e)
        return 1
    print("*** Unit Tests PASSED ***")
    return 0
def generate_recs_from_model(meta_path, tfidf_path, model_path):
    """Score the 200 newest feed items with the saved tf-idf and NLP models
    and return the ids of the top unread recommendations.

    meta_path / tfidf_path / model_path: pickle files written by the
    training step. Returns a list of rss_item ids, best first, at most
    max_recommendations long.
    """
    print("Generating Recommendations...")
    sqldb = connect_db(db_path)
    records = query_db(
        sqldb,
        '''select feedurl, author, id, title, content, flags from rss_item order by pubDate DESC LIMIT 200;'''
    )
    content_list = []
    outcome_list = []
    id_list = []
    title_list = []
    for record in records:
        # We should not judge the book by it's cover
        content_list.append('||' + record['feedurl'] + '|| \n ||' +
                            record['author'] + '|| \n ||' + record['title'] +
                            '|| \n' + record['content'])
        # Label is 1 only for items flagged 's' and not 'r'.
        outcome_list.append(
            (record['flags'] is not None and 'r' not in record['flags'] and
             's' in record['flags']) * 1)
        id_list.append(record['id'])
        # Yes, we are judging the book by it's cover but we are using the cool NLP model to judge
        title_list.append(record['title'])
    print("Total %d feed items found" % (len(content_list)))
    print(content_list[0])
    #Loading the pickle files
    # SECURITY NOTE(review): pickle.load executes arbitrary code from the
    # file — only ever load pickles this application itself wrote.
    meta = pickle.load(open(meta_path, 'rb'))
    out = pickle.load(open(tfidf_path, 'rb'))
    model = pickle.load(open(model_path, 'rb'))
    v = out['v']
    print("Projecting them to a mathematical space..")
    X_tfidf = v.transform(content_list)
    X_smart = cool_nlp_model.encode(title_list)
    clf = model['clf']
    beclf = model['beclf']
    y = out['y']
    X_tfidf = X_tfidf.todense().astype(np.float32)
    y = np.array(y).astype(np.float32)
    print("Recommending...")
    s_tfidf = clf.decision_function(X_tfidf)
    s_smart = beclf.decision_function(X_smart)
    # Blend: 65% semantic title score, 35% tf-idf content score.
    s = s_smart * 0.65 + s_tfidf * 0.35
    sortix = np.argsort(-s)
    # Keep only items whose stored label is 0 (not already flagged).
    recs = sortix[y[sortix] == 0]
    recs = recs[:max_recommendations]
    print(recs)
    print([id_list[x] for x in recs])
    return [id_list[x] for x in recs]
def processNewRequests(jobs, conn):
    """Claim INSERTED requests and enqueue the first job of the chain.

    The conditional UPDATE plus rowcount check makes the claim atomic
    against concurrent workers.
    """
    pendingRequests = query_db(
        conn, "SELECT * FROM requests WHERE status='INSERTED'")
    for pendingRequest in pendingRequests:
        c = conn.cursor()
        c.execute(
            "UPDATE requests SET status='PROCESSING', lastchange=? WHERE uuid=? and status='INSERTED'",
            (time.time(), pendingRequest['uuid']))
        conn.commit()
        if c.rowcount:
            # PORTABILITY FIX: jobs.keys()[0] raises TypeError on Python 3
            # (dict views are not subscriptable); next(iter(...)) works on
            # both 2 and 3. NOTE(review): relies on `jobs` preserving
            # insertion order (true for dict on 3.7+/OrderedDict) — confirm.
            jobID = next(iter(jobs))
            insertJOB(conn,
                      jobID=jobID,
                      next_job=jobs[jobID][1],
                      jobCommand=jobs[jobID][0],
                      requestUUID=pendingRequest['uuid'])
def user_login():
    """Log a user in by checking the submitted credentials against userinfo.

    Expects form fields 'username' and 'pwd'; sets session['username'] on
    success and returns a JSON {code, msg} payload either way.
    """
    user_name = request.form.get('username')
    passwd = request.form.get('pwd')
    row = query_db("SELECT * FROM userinfo WHERE username=?",
                   args=(user_name, ), one=True)
    # NOTE(review): passwords appear to be stored and compared in plain
    # text here — consider hashing at write time and comparing hashes.
    if row is not None and row['pwd'] == passwd:
        session['username'] = user_name
        logger.info('{} log in success!'.format(user_name))
        response = {'code': 0, 'msg': 'success!'}
    else:
        logger.warning(
            '{} attempt to log in the system with the wrong password!'.format(
                user_name))
        response = {'code': 1, 'msg': 'user does not exist!'}
    return jsonify(response)
def generate_tfidf_pickles():
    """Gets all the read articles and considers those articles flagged as 's' as 1 and
       rest as 0 and produces the embeddings
    """
    sqldb = connect_db(db_path)
    records = query_db(sqldb, '''select feedurl, author, id, title, content, flags from rss_item where unread=0 order by pubDate DESC;''')
    content_list = []
    outcome_list = []
    id_list = []
    title_list = []
    for record in records:
        # We should not judge the book by it's cover
        content_list.append('||'+ record['feedurl'] + '|| \n ||' + record['author'] + '|| \n ||' + record['title'] + '|| \n' + record['content'])
        # Label is 1 only for items flagged 's' and not 'r'.
        outcome_list.append((record['flags'] is not None and 'r' not in record['flags'] and 's' in record['flags']) * 1)
        id_list.append(record['id'])
        # Yes, we are judging the book by it's cover but we are using the cool NLP model to judge
        title_list.append(record['title'])
    print("Total %d feed items found" %(len(content_list)))
    print(content_list[0])
    # compute tfidf vectors with scikits
    v = TfidfVectorizer(input='content',
                        encoding='utf-8', decode_error='replace', strip_accents='unicode',
                        lowercase=True, analyzer='word', stop_words='english',
                        token_pattern=r'(?u)\b[a-zA-Z_][a-zA-Z0-9_]+\b',
                        ngram_range=(1, 2), max_features = max_features,
                        norm='l2', use_idf=True, smooth_idf=True, sublinear_tf=True,
                        max_df=1.0, min_df=1)
    v.fit(content_list)
    print("Projecting them to a mathematical space..")
    X_tfidf = v.transform(content_list)
    # Sentence-level embedding of titles via the external NLP model.
    X_smart = cool_nlp_model.encode(title_list)
    # Bundle the matrices, labels and fitted vectorizer for the model step.
    out = {}
    out['X_tfidf'] = X_tfidf
    out['X_smart'] = X_smart
    out['y'] = outcome_list
    out['v'] = v
    #print("writing", tfidf_path)
    safe_pickle_dump(out, tfidf_path)
    # Separate, smaller metadata pickle: vocabulary, idf weights and the
    # id -> row-index mapping.
    out = {}
    out['vocab'] = v.vocabulary_
    out['idf'] = v._tfidf.idf_
    out['ids'] = id_list
    out['idtoi'] = {x:i for i,x in enumerate(id_list)}
    #print("Writing Meta Data")
    safe_pickle_dump(out, meta_path)
def get_data():
    """Query K2 image observations from CAOM and return their combined MOC.

    Returns None when the query yields no rows, otherwise a single MOC
    covering the union of every observation's footprint polygon.
    """
    query = "SELECT top 1000 * FROM obsPointing WHERE obs_collection='K2' AND dataproduct_type='image'"
    df = query_db(CAOM, query)
    if len(df) == 0:
        return None
    df['coords'] = df.apply(lambda x: parse_s_region(x['s_region']), axis=1)
    # Generate MOC
    results = [get_polygon_moc(row) for _, row in df.iterrows()]
    # Union of MOCs
    if len(results) > 1:
        moc = MOC.union(*results)
    else:
        # BUG FIX: the original assigned the bare one-element list here
        # (moc = results), returning a list instead of a MOC.
        moc = results[0]
    return moc
def create_resource(ontology_):
    """Render the "create new resource" form for an ontology, or save a
    POSTed resource and redirect to its page."""
    ont = query_db('select * from ontologies where prefix = ?', [ontology_],
                   one=True)
    # POST: delegate to the "autosave" handler, then jump to the new URI.
    if request.method == 'POST':
        uri = request.form.get('uri')
        autosave()
        return redirect(url_for('ontology', ontology_=ontology_, resource_=uri))
    # GET: seed the form with empty rdf:type / rdfs:label rows, both marked
    # as additions so the template styles them accordingly.
    properties = {
        RDF.type: {'value': [{'type': "URI", 'value': "", 'class': 'added'}],
                   'qname': 'rdf:type', 'label': 'type'},
        RDFS.label: {'value': [{'type': "Literal", 'value': "", 'class': 'added'}],
                     'qname': 'rdfs:label', 'label': 'label'},
    }
    return render_template('resource.html', ontology_=ont,
                           properties_=properties, uri=ont['context'],
                           name='Create New Resource', is_new=True,
                           auto_save=False)
def test(db, config):
    ''' Unit Tests '''
    # Exercises CallLogger.log_caller against the provided db and config.
    # Returns 0 on success, 1 on any assertion failure (used as exit code).
    print("*** Running Whitelist Unit Tests ***")
    import utils
    # Create the logger to be tested
    logger = CallLogger(db, config)
    # Caller to be added
    callerid = {
        "NAME": "Bruce",
        "NMBR": "1234567890",
        "DATE": "1012",
        "TIME": "0600"
    }
    print("Adding caller:")
    pprint(callerid)
    try:
        # log_caller returns the sequential call number for the new row.
        print("Assert log_caller returns #1")
        assert logger.log_caller(callerid) == 1, "call # should be 1"
        print("Assert log_caller returns #2")
        assert logger.log_caller(callerid) == 2, "call # should be 2"
        # List the records
        query = "SELECT * FROM CallLog"
        results = utils.query_db(db, query)
        print(query + " results:")
        pprint(results)
    except AssertionError as e:
        print("*** Unit Test FAILED ***")
        pprint(e)
        return 1
    print("*** Unit Tests PASSED ***")
    return 0
def processFinishedJobs(jobs, conn):
    """Promote FINISHED jobs to DONE, then either enqueue the next job in
    the chain or, when the chain is complete, mark the request DONE.

    The conditional UPDATE plus rowcount check makes the claim atomic
    against concurrent workers.
    """
    for job in query_db(conn, "SELECT * FROM jobs WHERE status='FINISHED'"):
        print('HAVE')
        cursor = conn.cursor()
        cursor.execute(
            "UPDATE jobs SET status='DONE', lastchange=? WHERE id=? and status='FINISHED'",
            (time.time(), job['id']))
        conn.commit()
        if not cursor.rowcount:
            continue  # another worker already claimed this job
        nextJobID = job['next_job']
        if nextJobID != 'DONE':
            # Chain continues: schedule the follow-up job for this request.
            insertJOB(conn,
                      jobID=nextJobID,
                      next_job=jobs[nextJobID][1],
                      jobCommand=jobs[nextJobID][0],
                      requestUUID=job['uuid'])
        else:
            # End of chain: close out the owning request.
            cursor = conn.cursor()
            cursor.execute(
                "UPDATE requests SET status='DONE', lastchange=? WHERE uuid=?",
                (time.time(), job['uuid']))
            conn.commit()
def main(): print "START" sys.stdout.flush() while True: job = query_db( conn, "SELECT * FROM jobs WHERE status='PENDING' ORDER BY lastchange ASC", one=True) if job: c = conn.cursor() c.execute( "UPDATE jobs SET status='RUNNING', lastchange=? WHERE uuid=? and status='PENDING'", (time.time(), job['uuid'])) conn.commit() if c.rowcount: result = runScript(job['command']) status = 'FINISHED' if result else 'FAILED' c.execute("UPDATE jobs SET status=?, lastchange=? WHERE id=?", (status, time.time(), job['id'])) conn.commit() time.sleep(0.2)
def get_number(self, number):
    """Return all Whitelist rows whose PhoneNo matches the given number."""
    return utils.query_db(
        self.db,
        "SELECT * FROM Whitelist WHERE PhoneNo = ?",
        (number, ),
        False)
def ontology(ontology_):
    """Ontology browser view.

    Without ?resource_=: renders the ontology page listing every typed
    subject in the graph. With ?resource_=<uri>: renders that resource's
    properties (merging uncommitted proposal additions/removals for a
    logged-in user), falling back to a "create new resource" form when the
    URI has no triples yet.
    """
    ont = query_db('select * from ontologies where prefix = ?', [ontology_], one=True)
    # get the graph for the relevant ontology
    graph = get_graph(ontology_)
    if graph is None:
        # TODO: 404
        return redirect(url_for('index'))
    resource_ = request.args.get('resource_', None)
    errors = []  # for recreating a page if there are errors
    changes = {'addition': [], 'removal': []}
    # if ?resource_=xxx is present and xxx is not None then go into "resource viewing mode"
    if resource_ is not None:
        properties = {}
        # get the label of the resource
        res_name = get_label(resource_)
        # force the resource to a URIRef
        uri2res = lambda uri: uri if isinstance(uri, rdflib.URIRef) else rdflib.URIRef(uri)
        r = uri2res(resource_)
        # build list of (type,predicate,object)s, using an empty string for the type of original triples
        tpos = [('', p, o) for p, o in graph.predicate_objects(subject=r)]
        # include additions/removals from uncommited proposal
        if session.get('logged_in', False):
            tpos.extend([(s['type'], uri2res(s['predicate']), parse_string_rdfobject(s['object']))
                         for s in get_uncommited_quads(g.userid, resource_)])
        # TODO: these 2 lines may be redundant now as most form validation is done in the UI,
        # remove them completely when this is confirmed
        # include additions from changes (only present if errors in form submission)
        tpos.extend([('addition', uri2res(stmt['pred']), parse_string_rdfobject(stmt['val']))
                     for stmt in changes['addition']])
        # include removals from changes (only present if errors in form submission)
        tpos.extend([('removal', uri2res(stmt['pred']), parse_string_rdfobject(stmt['val']))
                     for stmt in changes['removal']])
        # TODO: add "modified" type (maybe)
        for t, p, o in tpos:
            # get existing values for this predicate
            item = properties.get(p, {'value': [], 'qname': get_qname(p), 'label': get_label(p)})
            # convert rdf object to a dict
            v = rdfobject2dict(o)
            # add 'deleted' or 'added' 'class' value (used by templates)
            if t == 'removal':
                try:
                    # if it's a removal, it should already exist in the values list
                    # find it and add the class to the existing entry
                    idx = item['value'].index(v)
                    v['class'] = 'deleted'
                    item['value'][idx] = v
                except ValueError:
                    pass  # caused when .index fails
            else:
                if t == 'addition':
                    v['class'] = 'added'
                item['value'].append(v)
            # update the changes
            properties[p] = item
            # TODO: this may be redundant with the get_label call above
            # simply sets the resource name variable to the value of the RDFS.label predicate
            # BUG FIX: was `res_name is ''` — an identity test that is
            # unreliable for strings (and a SyntaxWarning on Python 3.8+).
            if res_name == '' and p == RDFS.label:
                res_name = v['value']
        # if there were no predicates, consider this a "new resource" and present
        # the "create resource" view with the URI already filled in
        # TODO: a lot of this is duplicate code from the create_resource function
        is_new = False
        if len(properties) == 0:
            # create new resource
            properties = {}
            properties[RDF.type] = {'value': [{'type': "URI", 'value': "", 'class': 'added'}],
                                    'qname': 'rdf:type', 'label': 'type'}
            properties[RDFS.label] = {'value': [{'type': "Literal", 'value': "", 'class': 'added'}],
                                      'qname': 'rdfs:label', 'label': 'label'}
            res_name = 'Create New Resource'
            is_new = True
        # TODO: proposal/history stuff
        proposals = []
        history = []
        return render_template('resource.html', ontology_=ont, uri=resource_, name=res_name,
                               properties_=properties, proposals=proposals, history=history,
                               is_new=is_new, auto_save=False)
    # if no resource is requested, go to the ontology view, retrieving a list of
    # all the subjects in the ontology
    resources = [{'uri': s[0], 'qname': get_qname(s[0]), 'label': get_label(s[0])}
                 for s in graph.triples((None, RDF.type, None))]
    proposals = None  # [s for s,_ in groupby(pgraph.subjects()) if isinstance(s, rdflib.URIRef)]
    # TODO and not s.startswith(changeset_base_uri)
    return render_template('ontology.html', ontology_=ont, resources=resources, proposals=proposals)
def check_number(self, number):
    """Return True when the given phone number appears in the Whitelist."""
    row = utils.query_db(
        self.db,
        "SELECT COUNT(*) FROM Whitelist WHERE PhoneNo=:number",
        {"number": number},
        True)
    # row[0] is the COUNT(*) value; any match whitelists the number.
    return row[0] > 0
def case(reference_p1, reference_p2):
    """Look up a single case by its two-part reference "p1/p2"."""
    reference_p1 = str(reference_p1)
    reference_p2 = str(reference_p2)
    # SECURITY NOTE(review): the reference parts are concatenated into the
    # SQL string, so crafted input could inject SQL. Prefer bound parameters
    # if query_db supports them.
    query_str = query_str_start() + 'WHERE case.reference = "' + reference_p1 + '/' + reference_p2 + '"' + query_str_end() + ' LIMIT 1'
    result = query_db(query_str)
    return form_json(result)
def date_cases(date):
    """Return all cases filed on the given date string."""
    # SECURITY NOTE(review): `date` is concatenated into the SQL string, so
    # crafted input could inject SQL. Prefer bound parameters if query_db
    # supports them.
    query_str = query_str_start() + 'WHERE case.date = "' + date + '"' + query_str_end()
    result = query_db(query_str)
    return form_json(result)
def appeals():
    """Return every appeal as JSON, with no filtering applied."""
    rows = query_db(query_str_start(True) + query_str_end(True))
    return form_json(rows)
def appeals_by_case_reference(reference_p1, reference_p2):
    """Look up the appeal attached to the case referenced "p1/p2"."""
    # SECURITY NOTE(review): the reference parts are concatenated into the
    # SQL string, so crafted input could inject SQL. Prefer bound parameters
    # if query_db supports them.
    query_str = query_str_start(True) + 'WHERE case.reference = "' + str(reference_p1) + '/' + str(reference_p2) + '"' + query_str_end(True) + ' LIMIT 1'
    result = query_db(query_str)
    return form_json(result)